blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3-616) | content_id (string, length 40) | detected_licenses (sequence, length 0-112) | license_type (string, 2 classes) | repo_name (string, length 5-115) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, length 3 to 10.2M) | authors (sequence, length 1) | author_id (string, length 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8d58a7ad79d94af1b223cd99ce31c8c1f9c05b5c | ba694353a3cb1cfd02a6773b40f693386d0dba39 | /sdk/python/pulumi_google_native/firebaseappdistribution/v1/group.py | fb267bd983e42bb1c41b002ef390b34637ede4ca | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-google-native | cc57af8bd3d1d6b76f1f48333ed1f1b31d56f92b | 124d255e5b7f5440d1ef63c9a71e4cc1d661cd10 | refs/heads/master | 2023-08-25T00:18:00.300230 | 2023-07-20T04:25:48 | 2023-07-20T04:25:48 | 323,680,373 | 69 | 16 | Apache-2.0 | 2023-09-13T00:28:04 | 2020-12-22T16:39:01 | Python | UTF-8 | Python | false | false | 10,105 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['GroupArgs', 'Group']
@pulumi.input_type
class GroupArgs:
def __init__(__self__, *,
display_name: pulumi.Input[str],
group_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Group resource.
:param pulumi.Input[str] display_name: The display name of the group.
:param pulumi.Input[str] group_id: Optional. The "alias" to use for the group, which will become the final component of the group's resource name. This value must be unique per project. The field is named `groupId` to comply with AIP guidance for user-specified IDs. This value should be 4-63 characters, and valid characters are `/a-z-/`. If not set, it will be generated based on the display name.
:param pulumi.Input[str] name: The name of the group resource. Format: `projects/{project_number}/groups/{group_alias}`
"""
pulumi.set(__self__, "display_name", display_name)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
The display name of the group.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
Optional. The "alias" to use for the group, which will become the final component of the group's resource name. This value must be unique per project. The field is named `groupId` to comply with AIP guidance for user-specified IDs. This value should be 4-63 characters, and valid characters are `/a-z-/`. If not set, it will be generated based on the display name.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the group resource. Format: `projects/{project_number}/groups/{group_alias}`
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
class Group(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
display_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a group.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] display_name: The display name of the group.
:param pulumi.Input[str] group_id: Optional. The "alias" to use for the group, which will become the final component of the group's resource name. This value must be unique per project. The field is named `groupId` to comply with AIP guidance for user-specified IDs. This value should be 4-63 characters, and valid characters are `/a-z-/`. If not set, it will be generated based on the display name.
:param pulumi.Input[str] name: The name of the group resource. Format: `projects/{project_number}/groups/{group_alias}`
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: GroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a group.
:param str resource_name: The name of the resource.
:param GroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
display_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = GroupArgs.__new__(GroupArgs)
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__.__dict__["display_name"] = display_name
__props__.__dict__["group_id"] = group_id
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["invite_link_count"] = None
__props__.__dict__["release_count"] = None
__props__.__dict__["tester_count"] = None
replace_on_changes = pulumi.ResourceOptions(replace_on_changes=["project"])
opts = pulumi.ResourceOptions.merge(opts, replace_on_changes)
super(Group, __self__).__init__(
'google-native:firebaseappdistribution/v1:Group',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Group':
"""
Get an existing Group resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = GroupArgs.__new__(GroupArgs)
__props__.__dict__["display_name"] = None
__props__.__dict__["group_id"] = None
__props__.__dict__["invite_link_count"] = None
__props__.__dict__["name"] = None
__props__.__dict__["project"] = None
__props__.__dict__["release_count"] = None
__props__.__dict__["tester_count"] = None
return Group(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
The display name of the group.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[Optional[str]]:
"""
Optional. The "alias" to use for the group, which will become the final component of the group's resource name. This value must be unique per project. The field is named `groupId` to comply with AIP guidance for user-specified IDs. This value should be 4-63 characters, and valid characters are `/a-z-/`. If not set, it will be generated based on the display name.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter(name="inviteLinkCount")
def invite_link_count(self) -> pulumi.Output[int]:
"""
The number of invite links for this group.
"""
return pulumi.get(self, "invite_link_count")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the group resource. Format: `projects/{project_number}/groups/{group_alias}`
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
return pulumi.get(self, "project")
@property
@pulumi.getter(name="releaseCount")
def release_count(self) -> pulumi.Output[int]:
"""
The number of releases this group is permitted to access.
"""
return pulumi.get(self, "release_count")
@property
@pulumi.getter(name="testerCount")
def tester_count(self) -> pulumi.Output[int]:
"""
The number of testers who are members of this group.
"""
return pulumi.get(self, "tester_count")
| [
"[email protected]"
] | |
6b00216e57dd1ea5b6a02c4851dc0365d2342a92 | af8dfddd4ba4f03560f2f6930f88c4d8a0a8e8cf | /conf.py | 6fe28580ca8c11405a26bca8d52a206bd6a6746f | [] | no_license | Parham-Baghbanbashi/team-manual | 155e243d8152c1c63214db8a698881d08b4765b3 | 9dbd11a2e340f6ce4ffe17dfe0bb8b9619ece914 | refs/heads/master | 2023-01-19T20:23:40.094323 | 2020-12-02T14:14:09 | 2020-12-02T14:14:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | import sphinx_rtd_theme
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Rams Robotics Manual'
copyright = '2020, FTC Team 16488'
author = 'FTC Team 16488'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_rtd_theme"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | [
"[email protected]"
] | |
8869077a7e227809b58f62593c3a639db3ee87d1 | 8cd04470706414df9b8b937d2fb205f6219db6ed | /envs/wpt_env_dir/original.py | 6ce971239c04e238ad933e97b68326402c45f1b4 | [] | no_license | xiaogaogaoxiao/WirelessPowerTransfer | 1ee903c9eb3c7e59bdbc9836eb16448f81016261 | ed982ca144576e61415522ebc7073d81613d5d84 | refs/heads/master | 2022-10-13T03:36:08.862101 | 2020-06-14T15:14:19 | 2020-06-14T15:14:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,362 | py | import gym
import numpy as np
from numpy import linalg as LA  # LA.norm() is called in _compute_rf() when beamforming is disabled
import math
import sys
import matplotlib.pyplot as plt
from gym import spaces, logger
from gym.utils import seeding
from gym.envs.toy_text import discrete
class WPTEnv(discrete.DiscreteEnv):
'''
Observation:
Type: Box(6 or 8)
Num Observation Min Max
0 User1 server X -r r
1 User1 server Y -r r
2 User2 server X isd-r isd+r
3 User2 server Y -r r
4 Serving BS Power 5 40W
5 Neighbor BS power 5 40W
6 BF codebook index for Serving 0 M-1
7 BF codebook index for Neighbor 0 M-1
'''
metadata={'render.modes':['human']}
def __init__(self):
print('A Radio Environment Has been Initialized')
self.M_ULA = 16
self.cell_radius = 150 # in meters.
self.inter_site_distance = 3 * self.cell_radius / 2.
self.num_users = 30 # number of users.
self.gamma_0 = 5 # beamforming constant SINR.
self.min_sinr = -3 # in dB
self.sinr_target = self.gamma_0 + 10*np.log10(self.M_ULA) # in dB.
self.max_tx_power = 40 # in Watts
self.max_tx_power_interference = 40 # in Watts
self.f_c = 28e9 # Hz
self.G_ant_no_beamforming = 11 # dBi
self.prob_LOS = 0.2 # Probability of LOS transmission
self.nA = 16
self.nS=8
self.step_count = 0 # which step
# Where are the base stations?
self.x_bs_1, self.y_bs_1 = 0, 0
self.x_bs_2, self.y_bs_2 = self.inter_site_distance, 0
# for Beamforming
self.use_beamforming = True
self.k_oversample = 1 # oversampling factor
self.Np = 4 # from 3 to 5 for mmWave
self.F = np.zeros([self.M_ULA, self.k_oversample*self.M_ULA], dtype=complex)
self.theta_n = math.pi * np.arange(start=0., stop=1., step=1./(self.k_oversample*self.M_ULA))
# Beamforming codebook F
for n in np.arange(self.k_oversample*self.M_ULA):
f_n = self._compute_bf_vector(self.theta_n[n])
self.F[:,n] = f_n
self.f_n_bs1 = None # The index in the codebook for serving BS
self.f_n_bs2 = None # The index in the codebook for interfering BS
# for Reinforcement Learning
self.reward_min = -20
self.reward_max = 100
bounds_lower = np.array([
-self.cell_radius,
-self.cell_radius,
self.inter_site_distance-self.cell_radius,
-self.cell_radius,
1,
1,
0,
0])
bounds_upper = np.array([
self.cell_radius,
self.cell_radius,
self.inter_site_distance+self.cell_radius,
self.cell_radius,
self.max_tx_power,
self.max_tx_power_interference,
self.k_oversample*self.M_ULA - 1,
self.k_oversample*self.M_ULA - 1])
self.action_space = spaces.Discrete(self.nA) # action size is here
self.observation_space = spaces.Box(bounds_lower, bounds_upper, dtype=np.float32) # spaces.Discrete(2) # state size is here
seed=0
self.seed(seed=seed)
self.state = None
# self.steps_beyond_done = None
self.received_sinr_dB = None
self.serving_transmit_power_dB = None
self.interfering_transmit_power_dB = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
# Initialize f_n of both cells
self.f_n_bs1 = self.np_random.randint(self.M_ULA)
self.f_n_bs2 = self.np_random.randint(self.M_ULA)
self.state = [self.np_random.uniform(low=-self.cell_radius, high=self.cell_radius),
self.np_random.uniform(low=-self.cell_radius, high=self.cell_radius),
self.np_random.uniform(low=self.inter_site_distance-self.cell_radius, high=self.inter_site_distance+self.cell_radius),
self.np_random.uniform(low=-self.cell_radius, high=self.cell_radius),
self.np_random.uniform(low=1, high=self.max_tx_power/2),
self.np_random.uniform(low=1, high=self.max_tx_power_interference/2),
self.f_n_bs1,
self.f_n_bs2
]
self.step_count = 0
return np.array(self.state)
def step(self, action):
# assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action))
state = self.state
reward = 0
x_ue_1, y_ue_1, x_ue_2, y_ue_2, pt_serving, pt_interferer, f_n_bs1, f_n_bs2 = state
# based on the action make your call
# only once a period, perform BF
# The action is derived from a decimal interpretation
################################################################
# log_2 M (serving) # log_2 M (interferer) # S # O #
################################################################
if (action != -1): # optimal
# int('0b0100101',2)
power_command_l = action & 0b0001 # 1 power up, 0 power down
power_command_b = (action & 0b0010) >> 1 # 1 power up, 0 power down
bf_selection_l = (action & 0b0100) >> 2 # 1 step up, 0 step down
bf_selection_b = (action & 0b1000) >> 3
self.step_count += 1
if (power_command_l == 0):
pt_serving *= 10**(-1/10.)
else:
pt_serving *= 10**(1/10.)
if (power_command_b == 0):
pt_interferer *= 10**(-1/10.)
else:
pt_interferer *= 10**(1/10.)
if (bf_selection_l == 1):
f_n_bs1 = (f_n_bs1 + 1) % self.k_oversample*self.M_ULA
else:
f_n_bs1 = (f_n_bs1 - 1) % self.k_oversample*self.M_ULA
if (bf_selection_b == 1):
f_n_bs2 = (f_n_bs2 + 1) % self.k_oversample*self.M_ULA
else:
f_n_bs2 = (f_n_bs2 - 1) % self.k_oversample*self.M_ULA
        elif (action > self.nA - 1):  # self.nA is the action-space size set in __init__
print('WARNING: Invalid action played!')
reward = 0
return [], 0, False, True
# move the UEs at a speed of v, in a random direction
v = 2 # km/h.
v *= 5./18 # in m/sec
theta_1, theta_2 = self.np_random.uniform(low=-math.pi, high=math.pi, size=2)
dx_1 = v * math.cos(theta_1)
dy_1 = v * math.sin(theta_1)
dx_2 = v * math.cos(theta_2)
dy_2 = v * math.sin(theta_2)
# Move UE 1
x_ue_1 += dx_1
y_ue_1 += dy_1
# Move UE 2
x_ue_2 += dx_2
y_ue_2 += dy_2
# Update the beamforming codebook index
self.f_n_bs1 = f_n_bs1
self.f_n_bs2 = f_n_bs2
received_power, interference_power, received_sinr = self._compute_rf(x_ue_1, y_ue_1, pt_serving, pt_interferer, is_ue_2=False)
received_power_ue2, interference_power_ue2, received_ue2_sinr = self._compute_rf(x_ue_2, y_ue_2, pt_serving, pt_interferer, is_ue_2=True)
# keep track of quantities...
self.received_sinr_dB = received_sinr
self.received_ue2_sinr_dB = received_ue2_sinr
self.serving_transmit_power_dBm = 10*np.log10(pt_serving*1e3)
self.interfering_transmit_power_dBm = 10*np.log10(pt_interferer*1e3)
# Did we find a FEASIBLE NON-DEGENERATE solution?
done = (pt_serving <= self.max_tx_power) and (pt_serving >= 0) and (pt_interferer <= self.max_tx_power_interference) and (pt_interferer >= 0) and \
(received_sinr >= self.min_sinr) and (received_ue2_sinr >= self.min_sinr) and (received_sinr >= self.sinr_target) and (received_ue2_sinr >= self.sinr_target)
abort = (pt_serving > self.max_tx_power) or (pt_interferer > self.max_tx_power_interference) or (received_sinr < self.min_sinr) or (received_ue2_sinr < self.min_sinr) \
or (received_sinr > 70) or (received_ue2_sinr > 70) #or (received_sinr < 10) or (received_ue2_sinr < 10)
print('{:.2f} dB | {:.2f} dB | {:.2f} W | {:.2f} W '.format(received_sinr, received_ue2_sinr, pt_serving, pt_interferer), end='')
print('Done: {}'.format(done))
print('UE moved to ({0:0.3f},{1:0.3f}) and their received SINR became {2:0.3f} dB.'.format(x_ue_1,y_ue_1,received_sinr))
# the reward
reward = received_sinr + received_ue2_sinr
# Update the state.
self.state = (x_ue_1, y_ue_1, x_ue_2, y_ue_2, pt_serving, pt_interferer, f_n_bs1, f_n_bs2)
if abort == True:
done = False
reward = self.reward_min
elif done:
reward += self.reward_max
# print(done, (received_sinr >= self.sinr_target) , (pt_serving <= self.max_tx_power) , (pt_serving >= 0) , \
# (pt_interferer <= self.max_tx_power_interference) , (pt_interferer >= 0) , (received_ue2_sinr >= self.sinr_target))
if action == -1: # for optimal
return np.array(self.state), reward, False, False
return np.array(self.state), reward, done, abort
def _compute_bf_vector(self, theta):
c = 3e8 # speed of light
wavelength = c / self.f_c
d = wavelength / 2. # antenna spacing
k = 2. * math.pi / wavelength
exponent = 1j * k * d * math.cos(theta) * np.arange(self.M_ULA)
f = 1. / math.sqrt(self.M_ULA) * np.exp(exponent)
# Test the norm square... is it equal to unity? YES.
#norm_f_sq = LA.norm(f, ord=2) ** 2
#print(norm_f_sq)
return f
def _compute_channel(self, x_ue, y_ue, x_bs, y_bs):
# Np is the number of paths p
PLE_L = 2
PLE_N = 4
G_ant = 3 # dBi for beamforming mmWave antennas
# Override the antenna gain if no beamforming
if self.use_beamforming == False:
G_ant = self.G_ant_no_beamforming
# theta is the steering angle. Sampled iid from unif(0,pi).
theta = np.random.uniform(low=0, high=math.pi, size=self.Np)
is_mmWave = (self.f_c > 25e9)
if is_mmWave:
path_loss_LOS = 10 ** (self._path_loss_mmWave(x_ue, y_ue, PLE_L, x_bs, y_bs) / 10.)
path_loss_NLOS = 10 ** (self._path_loss_mmWave(x_ue, y_ue, PLE_N, x_bs, y_bs) / 10.)
else:
path_loss_LOS = 10 ** (self._path_loss_sub6(x_ue, y_ue, x_bs, y_bs) / 10.)
path_loss_NLOS = 10 ** (self._path_loss_sub6(x_ue, y_ue, x_bs, y_bs) / 10.)
# Bernoulli for p
alpha = np.zeros(self.Np, dtype=complex)
p = np.random.binomial(1, self.prob_LOS)
if (p == 1):
self.Np = 1
alpha[0] = 1. / math.sqrt(path_loss_LOS)
else:
## just changed alpha to be complex in the case of NLOS
alpha = (np.random.normal(size=self.Np) + 1j * np.random.normal(size=self.Np)) / math.sqrt(path_loss_NLOS)
rho = 1. * 10 ** (G_ant / 10.)
# initialize the channel as a complex variable.
h = np.zeros(self.M_ULA, dtype=complex)
for p in np.arange(self.Np):
a_theta = self._compute_bf_vector(theta[p])
h += alpha[p] / rho * a_theta.T # scalar multiplication into a vector
h *= math.sqrt(self.M_ULA)
# print ('Warning: channel gain is {} dB.'.format(10*np.log10(LA.norm(h, ord=2))))
return h
def _compute_rf(self, x_ue, y_ue, pt_bs1, pt_bs2, is_ue_2=False):
T = 290 # Kelvins
B = 15000 # Hz
k_Boltzmann = 1.38e-23
noise_power = k_Boltzmann*T*B # this is in Watts
if is_ue_2 == False:
# Without loss of generality, the base station is at the origin
# The interfering base station is x = cell_radius, y = 0
x_bs_1, y_bs_1 = self.x_bs_1, self.y_bs_1
x_bs_2, y_bs_2 = self.x_bs_2, self.y_bs_2
# Now the channel h, which is a vector in beamforming.
# This computes the channel for user in serving BS from the serving BS.
h_1 = self._compute_channel(x_ue, y_ue, x_bs=x_bs_1, y_bs=y_bs_1)
# This computes the channel for user in serving BS from the interfering BS.
h_2 = self._compute_channel(x_ue, y_ue, x_bs=x_bs_2, y_bs=y_bs_2)
# if this is not beamforming, there is no precoder:
if (self.use_beamforming):
received_power = pt_bs1 * abs(np.dot(h_1.conj(), self.F[:, self.f_n_bs1])) ** 2
interference_power = pt_bs2 * abs(np.dot(h_2.conj(), self.F[:, self.f_n_bs2])) ** 2
else: # the gain is ||h||^2
received_power = pt_bs1 * LA.norm(h_1, ord=2) ** 2
interference_power = pt_bs2 * LA.norm(h_2, ord=2) ** 2
else:
x_bs_1, y_bs_1 = self.x_bs_1, self.y_bs_1
x_bs_2, y_bs_2 = self.x_bs_2, self.y_bs_2
# Now the channel h, which is a vector in beamforming.
# This computes the channel for user in serving BS from the serving BS.
h_1 = self._compute_channel(x_ue, y_ue, x_bs=x_bs_2, y_bs=y_bs_2)
# This computes the channel for user in serving BS from the interfering BS.
h_2 = self._compute_channel(x_ue, y_ue, x_bs=x_bs_1, y_bs=y_bs_1)
# if this is not beamforming, there is no precoder:
if (self.use_beamforming):
received_power = pt_bs2 * abs(np.dot(h_1.conj(), self.F[:, self.f_n_bs2])) ** 2
interference_power = pt_bs1 * abs(np.dot(h_2.conj(), self.F[:, self.f_n_bs1])) ** 2
else: # the gain is ||h||^2
received_power = pt_bs2 * LA.norm(h_1, ord=2) ** 2
interference_power = pt_bs1 * LA.norm(h_2, ord=2) ** 2
interference_plus_noise_power = interference_power + noise_power
received_sinr = 10*np.log10(received_power / interference_plus_noise_power)
return [received_power, interference_power, received_sinr]
# https://ieeexplore-ieee-org.ezproxy.lib.utexas.edu/stamp/stamp.jsp?tp=&arnumber=7522613
def _path_loss_mmWave(self, x, y, PLE, x_bs=0, y_bs=0):
# These are the parameters for f = 28000 MHz.
c = 3e8 # speed of light
wavelength = c / self.f_c
A = 0.0671
Nr = self.M_ULA
sigma_sf = 9.1
#PLE = 3.812
d = math.sqrt((x - x_bs)**2 + (y - y_bs)**2) # in meters
fspl = 10 * np.log10(((4*math.pi*d) / wavelength) ** 2)
pl = fspl + 10 * np.log10(d ** PLE) * (1 - A*np.log2(Nr))
chi_sigma = np.random.normal(0, sigma_sf) # log-normal shadowing
L = pl + chi_sigma
return L # in dB
def _path_loss_sub6(self, x, y, x_bs=0, y_bs=0):
f_c = self.f_c
c = 3e8 # speed of light
d = math.sqrt((x - x_bs)**2 + (y - y_bs)**2)
h_B = 20
h_R = 1.5
# print('Distance from cell site is: {} km'.format(d/1000.))
# FSPL
L_fspl = -10*np.log10((4.*math.pi*c/f_c / d) ** 2)
# COST231
C = 3
a = (1.1 * np.log10(f_c/1e6) - 0.7)*h_R - (1.56*np.log10(f_c/1e6) - 0.8)
L_cost231 = 46.3 + 33.9 * np.log10(f_c/1e6) + 13.82 * np.log10(h_B) - a + (44.9 - 6.55 * np.log10(h_B)) * np.log10(d/1000.) + C
L = L_cost231
return L # in dB
| [
"[email protected]"
] | |
d68496f8b528022a5ce09200a302dc3c2569c5b0 | 2293c76c3d18e2fcd44ded90bd40113d26285663 | /pyeccodes/defs/grib2/tables/25/4_244_table.py | a1c940653667995441617ed12e107d0f12316077 | [
"Apache-2.0"
] | permissive | ecmwf/pyeccodes | b1f121dbddf68d176a03805ed5144ba0b37ac211 | dce2c72d3adcc0cb801731366be53327ce13a00b | refs/heads/master | 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | def load(h):
return ({'abbr': 0, 'code': 0, 'title': 'No quality information available'},
{'abbr': 1, 'code': 1, 'title': 'Failed'},
{'abbr': 2, 'code': 2, 'title': 'Passed'},
{'abbr': None, 'code': 255, 'title': 'Missing'})
| [
"[email protected]"
] | |
7139040a4e2346cd3caee6871414bf5ab8b75d35 | a861a32ffdb368a721ff9e9006268f3dffbecc71 | /app/firestore_service.py | 4f3db3becefbccfafb97fa57a3a2f5575971f14a | [
"MIT"
] | permissive | agomusa/flask-taskapp | 039450080f551ff4ba9a7796835543290f8583ce | 3c456b0dcfe8c30be44954b6c280d843653b8a3e | refs/heads/main | 2023-05-30T17:16:57.256763 | 2021-06-14T20:04:22 | 2021-06-14T20:04:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
credential = credentials.ApplicationDefault()
firebase_admin.initialize_app(credential)
db = firestore.client()
def get_users():
return db.collection("users").get()
def get_user(user_id):
return db.collection("users").document(user_id).get()
def user_put(user_data):
user_ref = db.collection("users").document(user_data.username)
user_ref.set({"password": user_data.password})
def get_todos(user_id):
return db.collection("users").document(user_id).collection("todos").get()
def put_todo(user_id, description):
todos_collection_ref = db.collection(
"users").document(user_id).collection("todos")
todos_collection_ref.add({"description": description, "done": False})
def delete_todo(user_id, todo_id):
todo_ref = _get_todo_ref(user_id, todo_id)
todo_ref.delete()
def update_todo(user_id, todo_id, done):
todo_done = not bool(done)
todo_ref = _get_todo_ref(user_id, todo_id)
todo_ref.update({"done": todo_done})
def _get_todo_ref(user_id, todo_id):
return db.document("users/{}/todos/{}".format(user_id, todo_id))
| [
"[email protected]"
] | |
97da63adc46cd749c1cb8b35727cfa54ba4e723b | b0cdbad299f6174bfdb0fba173dbcf3889b82209 | /Modules/os/gettextfiles.py | 198d01bcec7b9bcc12a7a7c25a9bdee70cf4e946 | [] | no_license | deesaw/PythonD-06 | a33e676f1e0cfc13b4ea645c8b60547b198239ac | 3c6f065d7be2e3e10cafb6cef79d6cae9d55a7fa | refs/heads/master | 2023-03-18T08:24:42.030935 | 2021-03-02T14:15:09 | 2021-03-02T14:15:09 | 343,797,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | import os
for (dirname, subdir, files) in os.walk('c:\\'):
for myfile in files:
if (myfile.endswith('.txt')):
            print(os.path.join(dirname, myfile)) | [
"[email protected]"
] | |
6ca4acfa15bb6d2e30f65bb4f97c459c35411d48 | 8a1987a6c762d1440f7ce1b60e6dbb02491db9f1 | /huntserver/migrations/0022_switch_to_utf8mb4_columns.py | aa1812f0fc8a5510d0598d437bea1ade6056f017 | [
"MIT"
] | permissive | dlareau/puzzlehunt_server | fd9807f074cbdc95ad46730e931da86a54b78f45 | 44f87cc5cfe8bb23a8e04fddee187b9056407741 | refs/heads/master | 2022-12-27T18:48:43.883587 | 2021-08-17T23:55:36 | 2021-08-17T23:55:36 | 37,299,424 | 20 | 23 | MIT | 2022-05-22T00:16:21 | 2015-06-12T03:12:51 | Python | UTF-8 | Python | false | false | 6,041 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import sys
fwd_operations = [
'ALTER TABLE `auth_user` MODIFY `password` varchar(128) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `username` varchar(30) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `first_name` varchar(30) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `last_name` varchar(30) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `auth_user` MODIFY `email` varchar(254) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `django_admin_log` MODIFY `object_id` longtext CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `django_admin_log` MODIFY `object_repr` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `django_admin_log` MODIFY `change_message` longtext CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_hunt` MODIFY `hunt_name` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_hunt` MODIFY `location` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_hunt` MODIFY `template` longtext CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_huntassetfile` MODIFY `file` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_message` MODIFY `text` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_person` MODIFY `phone` varchar(20) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_person` MODIFY `comments` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_person` MODIFY `allergies` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_name` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_id` varchar(8) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `answer` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_puzzle` MODIFY `link` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_response` MODIFY `regex` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_response` MODIFY `text` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_submission` MODIFY `submission_text` varchar(100) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_submission` MODIFY `response_text` varchar(400) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_team` MODIFY `team_name` varchar(200) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_team` MODIFY `location` varchar(80) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_team` MODIFY `join_code` varchar(5) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content_type` varchar(3) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content` varchar(500) CHARSET utf8mb4 COLLATE utf8mb4_unicode_ci ',
]
reverse_operations = [
'ALTER TABLE `auth_user` MODIFY `password` varchar(128) ',
'ALTER TABLE `auth_user` MODIFY `username` varchar(30) ',
'ALTER TABLE `auth_user` MODIFY `first_name` varchar(30) ',
'ALTER TABLE `auth_user` MODIFY `last_name` varchar(30) ',
'ALTER TABLE `auth_user` MODIFY `email` varchar(254) ',
'ALTER TABLE `django_admin_log` MODIFY `object_id` longtext ',
'ALTER TABLE `django_admin_log` MODIFY `object_repr` varchar(200) ',
'ALTER TABLE `django_admin_log` MODIFY `change_message` longtext ',
'ALTER TABLE `huntserver_hunt` MODIFY `hunt_name` varchar(200) ',
'ALTER TABLE `huntserver_hunt` MODIFY `location` varchar(100) ',
'ALTER TABLE `huntserver_hunt` MODIFY `template` longtext ',
'ALTER TABLE `huntserver_huntassetfile` MODIFY `file` varchar(100) ',
'ALTER TABLE `huntserver_message` MODIFY `text` varchar(400) ',
'ALTER TABLE `huntserver_person` MODIFY `phone` varchar(20) ',
'ALTER TABLE `huntserver_person` MODIFY `comments` varchar(400) ',
'ALTER TABLE `huntserver_person` MODIFY `allergies` varchar(400) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_name` varchar(200) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `puzzle_id` varchar(8) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `answer` varchar(100) ',
'ALTER TABLE `huntserver_puzzle` MODIFY `link` varchar(200) ',
'ALTER TABLE `huntserver_response` MODIFY `regex` varchar(400) ',
'ALTER TABLE `huntserver_response` MODIFY `text` varchar(400) ',
'ALTER TABLE `huntserver_submission` MODIFY `submission_text` varchar(100) ',
'ALTER TABLE `huntserver_submission` MODIFY `response_text` varchar(400) ',
'ALTER TABLE `huntserver_team` MODIFY `team_name` varchar(200) ',
'ALTER TABLE `huntserver_team` MODIFY `location` varchar(80) ',
'ALTER TABLE `huntserver_team` MODIFY `join_code` varchar(5) ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content_type` varchar(3) ',
'ALTER TABLE `huntserver_unlockable` MODIFY `content` varchar(500) ',
]
def forwards(apps, schema_editor):
if not schema_editor.connection.vendor.startswith('mysql'):
return
for command in fwd_operations:
schema_editor.execute(command)
def backwards(apps, schema_editor):
if not schema_editor.connection.vendor.startswith('mysql'):
return
    for command in reverse_operations:  # unapplying the migration should run the reverse statements
schema_editor.execute(command)
class Migration(migrations.Migration):
dependencies = [
('huntserver', '0021_auto_20180402_2224'),
]
operations = [
migrations.RunPython(forwards, backwards, atomic=False)
]
| [
"[email protected]"
] | |
4f1f35cd3e6246382f9d1003ac1d2188b27d3137 | 14a913fce4b538b22f28409645cd6abe3455808f | /bigquery_storage/to_dataframe/main_test.py | 8335b437063c827cd6d43c4af690752455ae19dd | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | iamLoi/Python-Random-Number-Generator | 8da7dbd37cb13a01232c8ed49b9df35a99c63d73 | 7579e8b15130802aaf519979e475c6c75c403eda | refs/heads/master | 2022-08-29T19:05:32.649931 | 2019-09-14T14:48:58 | 2019-09-14T14:48:58 | 208,454,877 | 2 | 1 | Apache-2.0 | 2022-08-05T21:57:49 | 2019-09-14T14:51:05 | Python | UTF-8 | Python | false | false | 5,502 | py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
@pytest.fixture
def clients():
# [START bigquerystorage_pandas_tutorial_all]
# [START bigquerystorage_pandas_tutorial_create_client]
import google.auth
from google.cloud import bigquery
from google.cloud import bigquery_storage_v1beta1
# Explicitly create a credentials object. This allows you to use the same
# credentials for both the BigQuery and BigQuery Storage clients, avoiding
# unnecessary API calls to fetch duplicate authentication tokens.
credentials, your_project_id = google.auth.default(
scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
# Make clients.
bqclient = bigquery.Client(
credentials=credentials,
project=your_project_id,
)
bqstorageclient = bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=credentials
)
# [END bigquerystorage_pandas_tutorial_create_client]
# [END bigquerystorage_pandas_tutorial_all]
return bqclient, bqstorageclient
def test_table_to_dataframe(capsys, clients):
from google.cloud import bigquery
bqclient, bqstorageclient = clients
# [START bigquerystorage_pandas_tutorial_all]
# [START bigquerystorage_pandas_tutorial_read_table]
# Download a table.
table = bigquery.TableReference.from_string(
"bigquery-public-data.utility_us.country_code_iso"
)
rows = bqclient.list_rows(
table,
selected_fields=[
bigquery.SchemaField("country_name", "STRING"),
bigquery.SchemaField("fips_code", "STRING"),
],
)
dataframe = rows.to_dataframe(bqstorage_client=bqstorageclient)
print(dataframe.head())
# [END bigquerystorage_pandas_tutorial_read_table]
# [END bigquerystorage_pandas_tutorial_all]
out, _ = capsys.readouterr()
assert "country_name" in out
def test_query_to_dataframe(capsys, clients):
bqclient, bqstorageclient = clients
# [START bigquerystorage_pandas_tutorial_all]
# [START bigquerystorage_pandas_tutorial_read_query_results]
# Download query results.
query_string = """
SELECT
CONCAT(
'https://stackoverflow.com/questions/',
CAST(id as STRING)) as url,
view_count
FROM `bigquery-public-data.stackoverflow.posts_questions`
WHERE tags like '%google-bigquery%'
ORDER BY view_count DESC
"""
dataframe = (
bqclient.query(query_string)
.result()
.to_dataframe(bqstorage_client=bqstorageclient)
)
print(dataframe.head())
# [END bigquerystorage_pandas_tutorial_read_query_results]
# [END bigquerystorage_pandas_tutorial_all]
out, _ = capsys.readouterr()
assert "stackoverflow" in out
def test_session_to_dataframe(capsys, clients):
from google.cloud import bigquery_storage_v1beta1
bqclient, bqstorageclient = clients
your_project_id = bqclient.project
# [START bigquerystorage_pandas_tutorial_all]
# [START bigquerystorage_pandas_tutorial_read_session]
table = bigquery_storage_v1beta1.types.TableReference()
table.project_id = "bigquery-public-data"
table.dataset_id = "new_york_trees"
table.table_id = "tree_species"
# Select columns to read with read options. If no read options are
# specified, the whole table is read.
read_options = bigquery_storage_v1beta1.types.TableReadOptions()
read_options.selected_fields.append("species_common_name")
read_options.selected_fields.append("fall_color")
parent = "projects/{}".format(your_project_id)
session = bqstorageclient.create_read_session(
table,
parent,
read_options=read_options,
# This API can also deliver data serialized in Apache Avro format.
# This example leverages Apache Arrow.
format_=bigquery_storage_v1beta1.enums.DataFormat.ARROW,
# We use a LIQUID strategy in this example because we only read from a
# single stream. Consider BALANCED if you're consuming multiple streams
# concurrently and want more consistent stream sizes.
sharding_strategy=(
bigquery_storage_v1beta1.enums.ShardingStrategy.LIQUID
),
)
# This example reads from only a single stream. Read from multiple streams
# to fetch data faster. Note that the session may not contain any streams
# if there are no rows to read.
stream = session.streams[0]
position = bigquery_storage_v1beta1.types.StreamPosition(stream=stream)
reader = bqstorageclient.read_rows(position)
# Parse all Avro blocks and create a dataframe. This call requires a
# session, because the session contains the schema for the row blocks.
dataframe = reader.to_dataframe(session)
print(dataframe.head())
# [END bigquerystorage_pandas_tutorial_read_session]
# [END bigquerystorage_pandas_tutorial_all]
out, _ = capsys.readouterr()
assert "species_common_name" in out
| [
"[email protected]"
] | |
e6313fe1285bba0a56b05db426f9c6c6861bde1e | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/op_cmp_bool-94.py | 6c2f718b85e4de8c6dbed4cf3746b6a697bd5dc4 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | print(True == True)
print(True == False)
print(False == True)
print(False == False)
print(True != True)
print(True != False)
print(False != True)
print($Exp)
| [
"[email protected]"
] | |
369376d38046b8a203a95ecd37ed2f1dfe746ebb | c585583d366924d8977462035b631161094241a9 | /redbot/message/headers/_header.tpl | 31561e68d04690bc85b5ca29ebfd1dc659b8138a | [
"MIT"
] | permissive | QPC-database/redbot | a289248bbb24f8cc378001e38cb633e6f1aff098 | f05dd7754cd6f6ba005ae44beeb8ed21516a93c8 | refs/heads/main | 2023-05-07T00:13:59.733511 | 2021-05-28T02:42:02 | 2021-05-28T02:42:02 | 382,878,629 | 1 | 0 | MIT | 2021-07-04T14:57:42 | 2021-07-04T14:57:42 | null | UTF-8 | Python | false | false | 875 | tpl | #!/usr/bin/env python
from redbot.message import headers
from redbot.speak import Note, categories, levels
from redbot.type import AddNoteMethodType
class SHORT_NAME(headers.HttpHeader):
canonical_name = "SHORT_NAME"
description = """\
FIXME
"""
reference = None
syntax = False
list_header = False
deprecated = False
valid_in_requests = False
valid_in_responses = True
def parse(self, field_value: str, add_note: AddNoteMethodType) -> ...:
return field_value
def evaluate(self, add_note: AddNoteMethodType) -> None:
return
class SHORT_NAME_NOTE(Note):
category = categories.GENERAL
level = levels.INFO
summary = "FIXME"
text = """\
FIXME"""
class SHORT_NAMETest(headers.HeaderTest):
name = 'SHORT_NAME'
inputs = ['FIXME']
expected_out = ('FIXME')
expected_err = [] # type: ignore | [
"[email protected]"
] | |
8b702529300a28ebc932d16a695f5311094c469d | 48ffde5f19dce603afb5caffe2e71d752c526a52 | /tests/PyroTests/test_serialize.py | c740011d7425daf3732284e6fc5e82dcd0d82f30 | [
"MIT"
] | permissive | pevogam/Pyro4 | 704b5aec18e1ade7457830d1c7fcc406c4d464f5 | 4d009f6a111c071d22c21e1ab7ba43c5e9310b56 | refs/heads/master | 2020-03-25T07:46:31.978715 | 2018-08-04T12:24:51 | 2018-08-04T12:24:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,946 | py | """
Tests for the data serializer.
Pyro - Python Remote Objects. Copyright by Irmen de Jong ([email protected]).
"""
import array
import sys
import collections
import copy
import pprint
import pickle
import base64
import unittest
import serpent
import math
import uuid
import Pyro4.util
import Pyro4.errors
import Pyro4.core
import Pyro4.futures
from Pyro4.configuration import config
from testsupport import *
class SerializeTests_pickle(unittest.TestCase):
SERIALIZER = "pickle"
def setUp(self):
self.previous_serializer = config.SERIALIZER
config.SERIALIZER = self.SERIALIZER
self.ser = Pyro4.util.get_serializer(config.SERIALIZER)
config.REQUIRE_EXPOSE = True
def tearDown(self):
config.SERIALIZER = self.previous_serializer
def testSerItself(self):
s = Pyro4.util.get_serializer(config.SERIALIZER)
p, _ = self.ser.serializeData(s)
s2 = self.ser.deserializeData(p)
self.assertEqual(s, s2)
self.assertTrue(s == s2)
self.assertFalse(s != s2)
def testSerUnicode(self):
data = unicode("x")
self.ser.serializeData(data)
self.ser.serializeCall(data, unicode("method"), [], {})
def testSerCompression(self):
d1, c1 = self.ser.serializeData("small data", compress=True)
d2, c2 = self.ser.serializeData("small data", compress=False)
self.assertFalse(c1)
self.assertEqual(d1, d2)
bigdata = "x" * 1000
d1, c1 = self.ser.serializeData(bigdata, compress=False)
d2, c2 = self.ser.serializeData(bigdata, compress=True)
self.assertFalse(c1)
self.assertTrue(c2)
self.assertTrue(len(d2) < len(d1))
self.assertEqual(bigdata, self.ser.deserializeData(d1, compressed=False))
self.assertEqual(bigdata, self.ser.deserializeData(d2, compressed=True))
def testSerErrors(self):
e1 = Pyro4.errors.NamingError(unicode("x"))
e1._pyroTraceback = ["this is the remote traceback"]
orig_e = copy.copy(e1)
e2 = Pyro4.errors.PyroError(unicode("x"))
e3 = Pyro4.errors.ProtocolError(unicode("x"))
if sys.platform == "cli":
Pyro4.util.fixIronPythonExceptionForPickle(e1, True)
p, _ = self.ser.serializeData(e1)
e = self.ser.deserializeData(p)
if sys.platform == "cli":
Pyro4.util.fixIronPythonExceptionForPickle(e, False)
self.assertIsInstance(e, Pyro4.errors.NamingError)
self.assertEqual(repr(orig_e), repr(e))
self.assertEqual(["this is the remote traceback"], e._pyroTraceback, "remote traceback info should be present")
p, _ = self.ser.serializeData(e2)
e = self.ser.deserializeData(p)
self.assertIsInstance(e, Pyro4.errors.PyroError)
self.assertEqual(repr(e2), repr(e))
p, _ = self.ser.serializeData(e3)
e = self.ser.deserializeData(p)
self.assertIsInstance(e, Pyro4.errors.ProtocolError)
self.assertEqual(repr(e3), repr(e))
def testSerializeExceptionWithAttr(self):
ex = ZeroDivisionError("test error")
ex._pyroTraceback = ["test traceback payload"]
Pyro4.util.fixIronPythonExceptionForPickle(ex, True) # hack for ironpython
data, compressed = self.ser.serializeData(ex)
ex2 = self.ser.deserializeData(data, compressed)
Pyro4.util.fixIronPythonExceptionForPickle(ex2, False) # hack for ironpython
self.assertEqual(ZeroDivisionError, type(ex2))
self.assertTrue(hasattr(ex2, "_pyroTraceback"))
self.assertEqual(["test traceback payload"], ex2._pyroTraceback)
def testSerCoreOffline(self):
uri = Pyro4.core.URI("PYRO:[email protected]:4444")
p, _ = self.ser.serializeData(uri)
uri2 = self.ser.deserializeData(p)
self.assertEqual(uri, uri2)
self.assertEqual("PYRO", uri2.protocol)
self.assertEqual("9999", uri2.object)
self.assertEqual("host.com:4444", uri2.location)
self.assertEqual(4444, uri2.port)
self.assertIsNone(uri2.sockname)
uri = Pyro4.core.URI("PYRO:12345@./u:/tmp/socketname")
p, _ = self.ser.serializeData(uri)
uri2 = self.ser.deserializeData(p)
self.assertEqual(uri, uri2)
self.assertEqual("PYRO", uri2.protocol)
self.assertEqual("12345", uri2.object)
self.assertEqual("./u:/tmp/socketname", uri2.location)
self.assertIsNone(uri2.port)
self.assertEqual("/tmp/socketname", uri2.sockname)
proxy = Pyro4.core.Proxy("PYRO:[email protected]:4444")
proxy._pyroTimeout = 42
proxy._pyroMaxRetries = 78
self.assertIsNone(proxy._pyroConnection)
p, _ = self.ser.serializeData(proxy)
proxy2 = self.ser.deserializeData(p)
self.assertIsNone(proxy._pyroConnection)
self.assertIsNone(proxy2._pyroConnection)
self.assertEqual(proxy2._pyroUri, proxy._pyroUri)
self.assertEqual(0, proxy2._pyroTimeout, "must be reset to defaults")
self.assertEqual(0, proxy2._pyroMaxRetries, "must be reset to defaults")
def testNested(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
uri1 = Pyro4.core.URI("PYRO:[email protected]:111")
uri2 = Pyro4.core.URI("PYRO:[email protected]:222")
_ = self.ser.serializeData(uri1)
data = [uri1, uri2]
p, _ = self.ser.serializeData(data)
[u1, u2] = self.ser.deserializeData(p)
self.assertEqual(uri1, u1)
self.assertEqual(uri2, u2)
def testSerDaemonHack(self):
# This tests the hack that a Daemon should be serializable,
# but only to support serializing Pyro objects.
# The serialized form of a Daemon should be empty (and thus, useless)
with Pyro4.core.Daemon(port=0) as daemon:
d, _ = self.ser.serializeData(daemon)
d2 = self.ser.deserializeData(d)
self.assertTrue(len(d2.__dict__) == 0, "deserialized daemon should be empty")
self.assertTrue("Pyro4.core.Daemon" in repr(d2))
self.assertTrue("unusable" in repr(d2))
try:
config.AUTOPROXY = False
obj = pprint.PrettyPrinter(stream="dummy", width=42)
obj.name = "hello"
daemon.register(obj)
o, _ = self.ser.serializeData(obj)
if self.SERIALIZER in ("pickle", "cloudpickle", "dill"):
# only pickle, cloudpickle and dill can deserialize the PrettyPrinter class without the need of explicit deserialization function
o2 = self.ser.deserializeData(o)
self.assertEqual("hello", o2.name)
self.assertEqual(42, o2._width)
finally:
config.AUTOPROXY = True
def testPyroClasses(self):
uri = Pyro4.core.URI("PYRO:object@host:4444")
s, c = self.ser.serializeData(uri)
x = self.ser.deserializeData(s, c)
self.assertIsInstance(x, Pyro4.core.URI)
self.assertEqual(uri, x)
self.assertTrue("Pyro4.core.URI" in repr(uri))
self.assertEqual("PYRO:object@host:4444", str(uri))
uri = Pyro4.core.URI("PYRO:12345@./u:/tmp/socketname")
s, c = self.ser.serializeData(uri)
x = self.ser.deserializeData(s, c)
self.assertIsInstance(x, Pyro4.core.URI)
self.assertEqual(uri, x)
proxy = Pyro4.core.Proxy(uri)
proxy._pyroAttrs = set("abc")
proxy._pyroMethods = set("def")
proxy._pyroOneway = set("ghi")
proxy._pyroTimeout = 42
proxy._pyroHmacKey = b"secret"
proxy._pyroHandshake = "apples"
proxy._pyroMaxRetries = 78
proxy._pyroSerializer = "serializer"
s, c = self.ser.serializeData(proxy)
x = self.ser.deserializeData(s, c)
self.assertIsInstance(x, Pyro4.core.Proxy)
self.assertEqual(proxy._pyroUri, x._pyroUri)
self.assertEqual(set("abc"), x._pyroAttrs)
self.assertEqual(set("def"), x._pyroMethods)
self.assertEqual(set("ghi"), x._pyroOneway)
self.assertEqual(b"secret", x._pyroHmacKey)
self.assertEqual("apples", x._pyroHandshake)
self.assertEqual("serializer", x._pyroSerializer)
self.assertEqual(0, x._pyroTimeout, "must be reset to defaults")
self.assertEqual(0, x._pyroMaxRetries, "must be reset to defaults")
self.assertTrue("Pyro4.core.Proxy" in repr(x))
self.assertTrue("Pyro4.core.Proxy" in str(x))
daemon = Pyro4.core.Daemon()
s, c = self.ser.serializeData(daemon)
x = self.ser.deserializeData(s, c)
self.assertIsInstance(x, Pyro4.core.Daemon)
self.assertTrue("Pyro4.core.Daemon" in repr(x))
self.assertTrue("unusable" in repr(x))
self.assertTrue("Pyro4.core.Daemon" in str(x))
self.assertTrue("unusable" in str(x))
wrapper = Pyro4.futures._ExceptionWrapper(ZeroDivisionError("divided by zero"))
s, c = self.ser.serializeData(wrapper)
x = self.ser.deserializeData(s, c)
self.assertIsInstance(x, Pyro4.futures._ExceptionWrapper)
self.assertEqual("divided by zero", str(x.exception))
self.assertTrue("ExceptionWrapper" in repr(x))
self.assertTrue("ExceptionWrapper" in str(x))
def testPyroClassesForDict(self):
uri = Pyro4.core.URI("PYRO:object@host:4444")
state = uri.__getstate_for_dict__()
self.assertEqual(('PYRO', 'object', None, 'host', 4444), state)
uri2 = Pyro4.core.URI("PYRONAME:xxx")
uri2.__setstate_from_dict__(state)
self.assertEqual(uri, uri2)
proxy = Pyro4.core.Proxy(uri)
proxy._pyroAttrs = set("abc")
proxy._pyroMethods = set("def")
proxy._pyroOneway = set("ghi")
proxy._pyroTimeout = 42
proxy._pyroHmacKey = b"secret"
proxy._pyroHandshake = "apples"
proxy._pyroMaxRetries = 78
proxy._pyroSerializer = "serializer"
state = proxy.__getstate_for_dict__()
b64_secret = "b64:"+base64.b64encode(b"secret").decode("utf-8")
self.assertEqual(('PYRO:object@host:4444', tuple(set("ghi")), tuple(set("def")), tuple(set("abc")), 42, b64_secret, "apples", 78, "serializer"), state)
proxy2 = Pyro4.core.Proxy("PYRONAME:xxx")
proxy2.__setstate_from_dict__(state)
self.assertEqual(proxy, proxy2)
self.assertEqual(proxy._pyroUri, proxy2._pyroUri)
self.assertEqual(proxy._pyroAttrs, proxy2._pyroAttrs)
self.assertEqual(proxy._pyroMethods, proxy2._pyroMethods)
self.assertEqual(proxy._pyroOneway, proxy2._pyroOneway)
self.assertEqual(proxy._pyroHmacKey, proxy2._pyroHmacKey)
self.assertEqual(proxy._pyroHandshake, proxy2._pyroHandshake)
self.assertEqual(proxy._pyroSerializer, proxy2._pyroSerializer)
self.assertEqual(0, proxy2._pyroTimeout, "must be reset to defaults")
self.assertEqual(0, proxy2._pyroMaxRetries, "must be reset to defaults")
daemon = Pyro4.core.Daemon()
state = daemon.__getstate_for_dict__()
self.assertEqual(tuple(), state)
daemon2 = Pyro4.core.Daemon()
daemon2.__setstate_from_dict__(state)
def testProxySerializationCompat(self):
proxy = Pyro4.core.Proxy("PYRO:object@host:4444")
proxy._pyroSerializer = "serializer"
pickle_state = proxy.__getstate__()
self.assertEqual(9, len(pickle_state))
pickle_state = pickle_state[:8]
proxy.__setstate__(pickle_state)
self.assertIsNone(proxy._pyroSerializer)
proxy._pyroSerializer = "serializer"
serpent_state = proxy.__getstate_for_dict__()
self.assertEqual(9, len(serpent_state))
serpent_state = serpent_state[:8]
proxy.__setstate_from_dict__(serpent_state)
self.assertIsNone(proxy._pyroSerializer)
def testAutoProxyPartlyExposed(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
self.ser.register_type_replacement(MyThingPartlyExposed, Pyro4.core.pyroObjectToAutoProxy)
t1 = MyThingPartlyExposed("1")
t2 = MyThingPartlyExposed("2")
with Pyro4.core.Daemon() as d:
d.register(t1, "thingy1")
d.register(t2, "thingy2")
data = [t1, ["apple", t2]]
s, c = self.ser.serializeData(data)
data = self.ser.deserializeData(s, c)
self.assertEqual("apple", data[1][0])
p1 = data[0]
p2 = data[1][1]
self.assertIsInstance(p1, Pyro4.core.Proxy)
self.assertIsInstance(p2, Pyro4.core.Proxy)
self.assertEqual("thingy1", p1._pyroUri.object)
self.assertEqual("thingy2", p2._pyroUri.object)
self.assertEqual({"prop1", "readonly_prop1"}, p1._pyroAttrs)
self.assertEqual({"exposed", "oneway"}, p1._pyroMethods)
self.assertEqual({'oneway'}, p1._pyroOneway)
def testAutoProxyFullExposed(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
self.ser.register_type_replacement(MyThingPartlyExposed, Pyro4.core.pyroObjectToAutoProxy)
t1 = MyThingFullExposed("1")
t2 = MyThingFullExposed("2")
with Pyro4.core.Daemon() as d:
d.register(t1, "thingy1")
d.register(t2, "thingy2")
data = [t1, ["apple", t2]]
s, c = self.ser.serializeData(data)
data = self.ser.deserializeData(s, c)
self.assertEqual("apple", data[1][0])
p1 = data[0]
p2 = data[1][1]
self.assertIsInstance(p1, Pyro4.core.Proxy)
self.assertIsInstance(p2, Pyro4.core.Proxy)
self.assertEqual("thingy1", p1._pyroUri.object)
self.assertEqual("thingy2", p2._pyroUri.object)
self.assertEqual({"prop1", "prop2", "readonly_prop1"}, p1._pyroAttrs)
self.assertEqual({'classmethod', 'method', 'oneway', 'staticmethod', 'exposed', "__dunder__"}, p1._pyroMethods)
self.assertEqual({'oneway'}, p1._pyroOneway)
def testRegisterTypeReplacementSanity(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
self.ser.register_type_replacement(int, lambda: None)
with self.assertRaises(ValueError):
self.ser.register_type_replacement(type, lambda: None)
with self.assertRaises(ValueError):
self.ser.register_type_replacement(42, lambda: None)
def testCustomClassFail(self):
if self.SERIALIZER in ("pickle", "cloudpickle", "dill"):
self.skipTest("pickle, cloudpickle and dill simply serialize custom classes")
o = pprint.PrettyPrinter(stream="dummy", width=42)
s, c = self.ser.serializeData(o)
try:
_ = self.ser.deserializeData(s, c)
self.fail("error expected, shouldn't deserialize unknown class")
except Pyro4.errors.ProtocolError:
pass
def testCustomClassOk(self):
if self.SERIALIZER in ("pickle", "cloudpickle", "dill"):
self.skipTest("pickle, cloudpickle and dill simply serialize custom classes just fine")
o = MyThingPartlyExposed("test")
Pyro4.util.SerializerBase.register_class_to_dict(MyThingPartlyExposed, mything_dict)
Pyro4.util.SerializerBase.register_dict_to_class("CUSTOM-Mythingymabob", mything_creator)
s, c = self.ser.serializeData(o)
o2 = self.ser.deserializeData(s, c)
self.assertIsInstance(o2, MyThingPartlyExposed)
self.assertEqual("test", o2.name)
# unregister the deserializer
Pyro4.util.SerializerBase.unregister_dict_to_class("CUSTOM-Mythingymabob")
try:
self.ser.deserializeData(s, c)
self.fail("must fail")
except Pyro4.errors.ProtocolError:
pass # ok
# unregister the serializer
Pyro4.util.SerializerBase.unregister_class_to_dict(MyThingPartlyExposed)
s, c = self.ser.serializeData(o)
try:
self.ser.deserializeData(s, c)
self.fail("must fail")
except Pyro4.errors.SerializeError as x:
msg = str(x)
self.assertIn(msg, ["unsupported serialized class: testsupport.MyThingPartlyExposed",
"unsupported serialized class: PyroTests.testsupport.MyThingPartlyExposed"])
def testData(self):
data = [42, "hello"]
ser, compressed = self.ser.serializeData(data)
self.assertFalse(compressed)
data2 = self.ser.deserializeData(ser, compressed=False)
self.assertEqual(data, data2)
def testUnicodeData(self):
data = u"euro\u20aclowbytes\u0000\u0001\u007f\u0080\u00ff"
ser, compressed = self.ser.serializeData(data)
data2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual(data, data2)
def testUUID(self):
data = uuid.uuid1()
ser, compressed = self.ser.serializeData(data)
data2 = self.ser.deserializeData(ser, compressed=compressed)
uuid_as_str = str(data)
self.assertTrue(data2==data or data2==uuid_as_str)
def testSet(self):
data = {111, 222, 333}
ser, compressed = self.ser.serializeData(data)
data2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual(data, data2)
def testCircular(self):
data = [42, "hello", Pyro4.core.Proxy("PYRO:dummy@dummy:4444")]
data.append(data)
ser, compressed = self.ser.serializeData(data)
data2 = self.ser.deserializeData(ser, compressed)
self.assertIs(data2, data2[3])
self.assertEqual(42, data2[0])
def testCallPlain(self):
ser, compressed = self.ser.serializeCall("object", "method", ("vargs1", "vargs2"), {"kwargs": 999})
self.assertFalse(compressed)
obj, method, vargs, kwargs = self.ser.deserializeCall(ser, compressed=False)
self.assertEqual("object", obj)
self.assertEqual("method", method)
self.assertTrue(len(vargs) == 2)
self.assertTrue(vargs[0] == "vargs1")
self.assertTrue(vargs[1] == "vargs2")
self.assertDictEqual({"kwargs": 999}, kwargs)
def testCallPyroObjAsArg(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
uri = Pyro4.core.URI("PYRO:555@localhost:80")
ser, compressed = self.ser.serializeCall("object", "method", [uri], {"thing": uri})
self.assertFalse(compressed)
obj, method, vargs, kwargs = self.ser.deserializeCall(ser, compressed=False)
self.assertEqual("object", obj)
self.assertEqual("method", method)
self.assertEqual([uri], vargs)
self.assertEqual({"thing": uri}, kwargs)
def testCallCustomObjAsArg(self):
if self.SERIALIZER == "marshal":
self.skipTest("marshal can't serialize custom objects")
e = ZeroDivisionError("hello")
ser, compressed = self.ser.serializeCall("object", "method", [e], {"thing": e})
self.assertFalse(compressed)
obj, method, vargs, kwargs = self.ser.deserializeCall(ser, compressed=False)
self.assertEqual("object", obj)
self.assertEqual("method", method)
self.assertIsInstance(vargs, list)
self.assertIsInstance(vargs[0], ZeroDivisionError)
self.assertEqual("hello", str(vargs[0]))
self.assertIsInstance(kwargs["thing"], ZeroDivisionError)
self.assertEqual("hello", str(kwargs["thing"]))
def testSerializeException(self):
e = ZeroDivisionError()
d, c = self.ser.serializeData(e)
e2 = self.ser.deserializeData(d, c)
self.assertIsInstance(e2, ZeroDivisionError)
self.assertEqual("", str(e2))
e = ZeroDivisionError("hello")
d, c = self.ser.serializeData(e)
e2 = self.ser.deserializeData(d, c)
self.assertIsInstance(e2, ZeroDivisionError)
self.assertEqual("hello", str(e2))
e = ZeroDivisionError("hello", 42)
d, c = self.ser.serializeData(e)
e2 = self.ser.deserializeData(d, c)
self.assertIsInstance(e2, ZeroDivisionError)
self.assertIn(str(e2), ("('hello', 42)", "(u'hello', 42)"))
e.custom_attribute = 999
if sys.platform == "cli":
Pyro4.util.fixIronPythonExceptionForPickle(e, True)
ser, compressed = self.ser.serializeData(e)
e2 = self.ser.deserializeData(ser, compressed)
if sys.platform == "cli":
Pyro4.util.fixIronPythonExceptionForPickle(e2, False)
self.assertIsInstance(e2, ZeroDivisionError)
self.assertIn(str(e2), ("('hello', 42)", "(u'hello', 42)"))
self.assertEqual(999, e2.custom_attribute)
def testSerializeSpecialException(self):
self.assertIn("GeneratorExit", Pyro4.util.all_exceptions)
e = GeneratorExit()
d, c = self.ser.serializeData(e)
e2 = self.ser.deserializeData(d, c)
self.assertIsInstance(e2, GeneratorExit)
def testRecreateClasses(self):
self.assertEqual([1, 2, 3], self.ser.recreate_classes([1, 2, 3]))
d = {"__class__": "invalid"}
try:
self.ser.recreate_classes(d)
self.fail("error expected")
except Pyro4.errors.ProtocolError:
pass # ok
d = {"__class__": "Pyro4.core.URI", "state": ['PYRO', '555', None, 'localhost', 80]}
uri = self.ser.recreate_classes(d)
self.assertEqual(Pyro4.core.URI("PYRO:555@localhost:80"), uri)
number, uri = self.ser.recreate_classes([1, {"uri": d}])
self.assertEqual(1, number)
self.assertEqual(Pyro4.core.URI("PYRO:555@localhost:80"), uri["uri"])
def testProtocolVersion(self):
self.assertGreaterEqual(config.PICKLE_PROTOCOL_VERSION, 2)
self.assertEqual(pickle.HIGHEST_PROTOCOL, config.PICKLE_PROTOCOL_VERSION)
def testUriSerializationWithoutSlots(self):
orig_protocol = config.PICKLE_PROTOCOL_VERSION
config.PICKLE_PROTOCOL_VERSION = 2
try:
u = Pyro4.core.URI("PYRO:obj@localhost:1234")
d, compr = self.ser.serializeData(u)
self.assertFalse(compr)
import pickletools
d = pickletools.optimize(d)
result1 = b'\x80\x02cPyro4.core\nURI\n)\x81(U\x04PYROU\x03objNU\tlocalhostM\xd2\x04tb.'
result2 = b'\x80\x02cPyro4.core\nURI\n)\x81(X\x04\x00\x00\x00PYROX\x03\x00\x00\x00objNX\t\x00\x00\x00localhostM\xd2\x04tb.'
self.assertTrue(d in (result1, result2))
finally:
config.PICKLE_PROTOCOL_VERSION = orig_protocol
def testFloatPrecision(self):
f1 = 1482514078.54635912345
f2 = 9876543212345.12345678987654321
f3 = 11223344.556677889988776655e33
floats = [f1, f2, f3]
d, compr = self.ser.serializeData(floats)
v = self.ser.deserializeData(d, compr)
self.assertEqual(floats, v, "float precision must not be compromised in any serializer")
def testSourceByteTypes_deserialize(self):
# uncompressed
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3], {"kwarg": 42}, False)
ser, _ = self.ser.serializeData([4, 5, 6], False)
_, _, vargs, _ = self.ser.deserializeCall(bytearray(call_ser), False)
self.assertEqual([1, 2, 3], vargs)
d = self.ser.deserializeData(bytearray(ser), False)
self.assertEqual([4, 5, 6], d)
if sys.version_info < (3, 0):
_, _, vargs, _ = self.ser.deserializeCall(buffer(call_ser), False)
self.assertEqual([1, 2, 3], vargs)
d = self.ser.deserializeData(buffer(ser), False)
self.assertEqual([4, 5, 6], d)
# compressed
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3]*100, {"kwarg": 42}, True)
ser, _ = self.ser.serializeData([4, 5, 6]*100, True)
_, _, vargs, _ = self.ser.deserializeCall(bytearray(call_ser), True)
self.assertEqual(300, len(vargs))
d = self.ser.deserializeData(bytearray(ser), True)
self.assertEqual(300, len(d))
if sys.version_info < (3, 0):
_, _, vargs, _ = self.ser.deserializeCall(buffer(call_ser), True)
self.assertEqual(300, len(vargs))
d = self.ser.deserializeData(buffer(ser), True)
self.assertEqual(300, len(d))
@unittest.skipIf(sys.platform == "cli", "ironpython can't properly create memoryviews from serialized data")
def testSourceByteTypes_deserialize_memoryview(self):
# uncompressed
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3], {"kwarg": 42}, False)
ser, _ = self.ser.serializeData([4, 5, 6], False)
_, _, vargs, _ = self.ser.deserializeCall(memoryview(call_ser), False)
self.assertEqual([1, 2, 3], vargs)
d = self.ser.deserializeData(memoryview(ser), False)
self.assertEqual([4, 5, 6], d)
# compressed
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3]*100, {"kwarg": 42}, True)
ser, _ = self.ser.serializeData([4, 5, 6]*100, True)
_, _, vargs, _ = self.ser.deserializeCall(memoryview(call_ser), True)
self.assertEqual(300, len(vargs))
d = self.ser.deserializeData(memoryview(ser), True)
self.assertEqual(300, len(d))
def testSourceByteTypes_loads(self):
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3], {"kwarg": 42}, False)
ser, _ = self.ser.serializeData([4, 5, 6], False)
_, _, vargs, _ = self.ser.loadsCall(bytearray(call_ser))
self.assertEqual([1, 2, 3], vargs)
d = self.ser.loads(bytearray(ser))
self.assertEqual([4, 5, 6], d)
if sys.version_info < (3, 0):
_, _, vargs, _ = self.ser.loadsCall(buffer(call_ser))
self.assertEqual([1, 2, 3], vargs)
d = self.ser.loads(buffer(ser))
self.assertEqual([4, 5, 6], d)
@unittest.skipIf(sys.platform == "cli", "ironpython can't properly create memoryviews from serialized data")
def testSourceByteTypes_loads_memoryview(self):
call_ser, _ = self.ser.serializeCall("object", "method", [1, 2, 3], {"kwarg": 42}, False)
ser, _ = self.ser.serializeData([4, 5, 6], False)
_, _, vargs, _ = self.ser.loadsCall(memoryview(call_ser))
self.assertEqual([1, 2, 3], vargs)
d = self.ser.loads(memoryview(ser))
self.assertEqual([4, 5, 6], d)
def testSerializeDumpsAndDumpsCall(self):
self.ser.dumps(uuid.uuid4())
self.ser.dumps(Pyro4.URI("PYRO:test@test:4444"))
self.ser.dumps(Pyro4.Proxy("PYRONAME:foobar"))
self.ser.dumpsCall("obj", "method", (1, 2, 3), {"arg1": 999})
self.ser.dumpsCall("obj", "method", (1, 2, array.array('i', [1, 2, 3])), {"arg1": 999})
self.ser.dumpsCall("obj", "method", (1, 2, array.array('i', [1, 2, 3])), {"arg1": array.array('i', [1, 2, 3])})
self.ser.dumpsCall("obj", "method", (1, 2, Pyro4.URI("PYRO:test@test:4444")), {"arg1": 999})
self.ser.dumpsCall("obj", "method", (1, 2, Pyro4.URI("PYRO:test@test:4444")), {"arg1": Pyro4.URI("PYRO:test@test:4444")})
self.ser.dumpsCall("obj", "method", (1, 2, Pyro4.Proxy("PYRONAME:foobar")), {"arg1": 999})
self.ser.dumpsCall("obj", "method", (1, 2, Pyro4.Proxy("PYRONAME:foobar")), {"arg1": Pyro4.Proxy("PYRONAME:foobar")})
class SerializeTests_cloudpickle(SerializeTests_pickle):
SERIALIZER = "cloudpickle"
@unittest.skip('not implemented')
def testUriSerializationWithoutSlots(self):
pass
def testSerializeLambda(self):
l = lambda x: x * x
ser, compressed = self.ser.serializeData(l)
l2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual(l2(3.), 9.)
def testSerializeLocalFunction(self):
def f(x):
return x * x
ser, compressed = self.ser.serializeData(f)
f2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual(f2(3.), 9.)
is_ironpython_without_dill = False
try:
import dill
except ImportError:
if sys.platform == "cli":
is_ironpython_without_dill = True
@unittest.skipIf(is_ironpython_without_dill, "dill with ironpython has issues so it's fine if we don't test this")
class SerializeTests_dill(SerializeTests_pickle):
SERIALIZER = "dill"
def testProtocolVersion(self):
import dill
self.assertEqual(dill.HIGHEST_PROTOCOL, config.DILL_PROTOCOL_VERSION)
@unittest.skip('not implemented')
def testUriSerializationWithoutSlots(self):
pass
def testSerializeLambda(self):
l = lambda x: x * x
ser, compressed = self.ser.serializeData(l)
l2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual(l2(3.), 9.)
def testSerializeLocalFunction(self):
def f(x):
return x * x
ser, compressed = self.ser.serializeData(f)
f2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual(f2(3.), 9.)
class SerializeTests_serpent(SerializeTests_pickle):
SERIALIZER = "serpent"
def testCircular(self):
with self.assertRaises(ValueError): # serpent doesn't support object graphs (since serpent 1.7 reports ValueError instead of crashing)
super(SerializeTests_serpent, self).testCircular()
def testSet(self):
# serpent serializes a set into a tuple on older python versions, so we override this
data = {111, 222, 333}
ser, compressed = self.ser.serializeData(data)
data2 = self.ser.deserializeData(ser, compressed=compressed)
if serpent.can_use_set_literals:
self.assertEqual(data, data2)
else:
self.assertEqual(tuple(data), data2)
def testDeque(self):
# serpent converts a deque into a primitive list
deq = collections.deque([1, 2, 3, 4])
ser, compressed = self.ser.serializeData(deq)
data2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual([1, 2, 3, 4], data2)
@unittest.skipIf(sys.version_info < (2, 7), "ordereddict is in Python 2.7+")
def testOrderedDict(self):
od = collections.OrderedDict()
od["a"] = 1
od["b"] = 2
od["c"] = 3
def recreate_OrderedDict(name, values):
self.assertEqual("collections.OrderedDict", name)
return collections.OrderedDict(values["items"])
Pyro4.util.SerializerBase.register_dict_to_class("collections.OrderedDict", recreate_OrderedDict)
ser, compressed = self.ser.serializeData(od)
self.assertIn(b"collections.OrderedDict", ser)
self.assertIn(b"[('a',1),('b',2),('c',3)]", ser)
data2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual(od, data2)
def testUriSerializationWithoutSlots(self):
u = Pyro4.core.URI("PYRO:obj@localhost:1234")
d, compr = self.ser.serializeData(u)
self.assertFalse(compr)
result1 = b"# serpent utf-8 python3.2\n{'__class__':'Pyro4.core.URI','state':('PYRO','obj',None,'localhost',1234)}"
result2 = b"# serpent utf-8 python3.2\n{'state':('PYRO','obj',None,'localhost',1234),'__class__':'Pyro4.core.URI'}"
result3 = b"# serpent utf-8 python2.6\n{'state':('PYRO','obj',None,'localhost',1234),'__class__':'Pyro4.core.URI'}"
result4 = b"# serpent utf-8 python2.6\n{'__class__':'Pyro4.core.URI','state':('PYRO','obj',None,'localhost',1234)}"
self.assertTrue(d in (result1, result2, result3, result4))
class SerializeTests_json(SerializeTests_pickle):
SERIALIZER = "json"
def testCircular(self):
with self.assertRaises(ValueError): # json doesn't support object graphs
super(SerializeTests_json, self).testCircular()
def testSet(self):
# json serializes a set into a list, so we override this
data = {111, 222, 333}
ser, compressed = self.ser.serializeData(data)
data2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual(list(data), data2)
def testUriSerializationWithoutSlots(self):
u = Pyro4.core.URI("PYRO:obj@localhost:1234")
d, compr = self.ser.serializeData(u)
self.assertFalse(compr)
result1 = b'{"__class__": "Pyro4.core.URI", "state": ["PYRO", "obj", null, "localhost", 1234]}'
result2 = b'{"state": ["PYRO", "obj", null, "localhost", 1234], "__class__": "Pyro4.core.URI"}'
self.assertTrue(d in (result1, result2))
class SerializeTests_marshal(SerializeTests_pickle):
SERIALIZER = "marshal"
def testCircular(self):
with self.assertRaises(ValueError): # marshal doesn't support object graphs
super(SerializeTests_marshal, self).testCircular()
@unittest.skip("marshaling is implementation dependent")
def testUriSerializationWithoutSlots(self):
pass
class SerializeTests_msgpack(SerializeTests_pickle):
SERIALIZER = "msgpack"
@unittest.skip("circular will crash msgpack")
def testCircular(self):
pass
def testSet(self):
# msgpack serializes a set into a list, so we override this
data = {111, 222, 333}
ser, compressed = self.ser.serializeData(data)
data2 = self.ser.deserializeData(ser, compressed=compressed)
self.assertEqual(list(data), data2)
@unittest.skip("msgpack is implementation dependent")
def testUriSerializationWithoutSlots(self):
pass
class GenericTests(unittest.TestCase):
def testSerializersAvailable(self):
Pyro4.util.get_serializer("pickle")
Pyro4.util.get_serializer("marshal")
try:
import json
Pyro4.util.get_serializer("json")
except ImportError:
pass
try:
import serpent
Pyro4.util.get_serializer("serpent")
except ImportError:
pass
try:
import cloudpickle
Pyro4.util.get_serializer("cloudpickle")
except ImportError:
pass
try:
import dill
Pyro4.util.get_serializer("dill")
except ImportError:
pass
def testAssignedSerializerIds(self):
self.assertEqual(1, Pyro4.util.SerpentSerializer.serializer_id)
self.assertEqual(2, Pyro4.util.JsonSerializer.serializer_id)
self.assertEqual(3, Pyro4.util.MarshalSerializer.serializer_id)
self.assertEqual(4, Pyro4.util.PickleSerializer.serializer_id)
self.assertEqual(5, Pyro4.util.DillSerializer.serializer_id)
self.assertEqual(6, Pyro4.util.MsgpackSerializer.serializer_id)
self.assertEqual(7, Pyro4.util.CloudpickleSerializer.serializer_id)
def testSerializersAvailableById(self):
Pyro4.util.get_serializer_by_id(1) # serpent
Pyro4.util.get_serializer_by_id(2) # json
Pyro4.util.get_serializer_by_id(3) # marshal
Pyro4.util.get_serializer_by_id(4) # pickle
# ids 5, 6 and 7 (dill, msgpack, cloudpickle) are not always available, so we skip those.
self.assertRaises(Pyro4.errors.SerializeError, lambda: Pyro4.util.get_serializer_by_id(0))
self.assertRaises(Pyro4.errors.SerializeError, lambda: Pyro4.util.get_serializer_by_id(8))
def testDictClassFail(self):
o = pprint.PrettyPrinter(stream="dummy", width=42)
d = Pyro4.util.SerializerBase.class_to_dict(o)
self.assertEqual(42, d["_width"])
self.assertEqual("pprint.PrettyPrinter", d["__class__"])
try:
_ = Pyro4.util.SerializerBase.dict_to_class(d)
self.fail("error expected")
except Pyro4.errors.ProtocolError:
pass
def testDictException(self):
x = ZeroDivisionError("hello", 42)
expected = {
"__class__": None,
"__exception__": True,
"args": ("hello", 42),
"attributes": {}
}
if sys.version_info < (3, 0):
expected["__class__"] = "exceptions.ZeroDivisionError"
else:
expected["__class__"] = "builtins.ZeroDivisionError"
d = Pyro4.util.SerializerBase.class_to_dict(x)
self.assertEqual(expected, d)
x.custom_attribute = 999
expected["attributes"] = {"custom_attribute": 999}
d = Pyro4.util.SerializerBase.class_to_dict(x)
self.assertEqual(expected, d)
def testDictClassOk(self):
uri = Pyro4.core.URI("PYRO:object@host:4444")
d = Pyro4.util.SerializerBase.class_to_dict(uri)
self.assertEqual("Pyro4.core.URI", d["__class__"])
self.assertIn("state", d)
x = Pyro4.util.SerializerBase.dict_to_class(d)
self.assertIsInstance(x, Pyro4.core.URI)
self.assertEqual(uri, x)
self.assertEqual(4444, x.port)
uri = Pyro4.core.URI("PYRO:12345@./u:/tmp/socketname")
d = Pyro4.util.SerializerBase.class_to_dict(uri)
self.assertEqual("Pyro4.core.URI", d["__class__"])
self.assertIn("state", d)
x = Pyro4.util.SerializerBase.dict_to_class(d)
self.assertIsInstance(x, Pyro4.core.URI)
self.assertEqual(uri, x)
self.assertEqual("/tmp/socketname", x.sockname)
def testCustomDictClass(self):
o = MyThingPartlyExposed("test")
Pyro4.util.SerializerBase.register_class_to_dict(MyThingPartlyExposed, mything_dict)
Pyro4.util.SerializerBase.register_dict_to_class("CUSTOM-Mythingymabob", mything_creator)
d = Pyro4.util.SerializerBase.class_to_dict(o)
self.assertEqual("CUSTOM-Mythingymabob", d["__class__"])
self.assertEqual("test", d["name"])
x = Pyro4.util.SerializerBase.dict_to_class(d)
self.assertIsInstance(x, MyThingPartlyExposed)
self.assertEqual("test", x.name)
# unregister the conversion functions and try again
Pyro4.util.SerializerBase.unregister_class_to_dict(MyThingPartlyExposed)
Pyro4.util.SerializerBase.unregister_dict_to_class("CUSTOM-Mythingymabob")
d_orig = Pyro4.util.SerializerBase.class_to_dict(o)
clsname = d_orig["__class__"]
self.assertTrue(clsname.endswith("testsupport.MyThingPartlyExposed"))
try:
_ = Pyro4.util.SerializerBase.dict_to_class(d)
self.fail("should crash")
except Pyro4.errors.ProtocolError:
pass # ok
def testExceptionNamespacePy2(self):
data = {'__class__': 'exceptions.ZeroDivisionError',
'__exception__': True,
'args': ('hello', 42),
'attributes': {"test_attribute": 99}}
exc = Pyro4.util.SerializerBase.dict_to_class(data)
self.assertIsInstance(exc, ZeroDivisionError)
self.assertEqual("ZeroDivisionError('hello', 42)", repr(exc))
self.assertEqual(99, exc.test_attribute)
def testExceptionNamespacePy3(self):
data = {'__class__': 'builtins.ZeroDivisionError',
'__exception__': True,
'args': ('hello', 42),
'attributes': {"test_attribute": 99}}
exc = Pyro4.util.SerializerBase.dict_to_class(data)
self.assertIsInstance(exc, ZeroDivisionError)
self.assertEqual("ZeroDivisionError('hello', 42)", repr(exc))
self.assertEqual(99, exc.test_attribute)
def testExceptionNotTagged(self):
data = {'__class__': 'builtins.ZeroDivisionError',
'args': ('hello', 42),
'attributes': {}}
with self.assertRaises(Pyro4.errors.SerializeError) as cm:
_ = Pyro4.util.SerializerBase.dict_to_class(data)
self.assertEqual("unsupported serialized class: builtins.ZeroDivisionError", str(cm.exception))
def testWeirdFloats(self):
ser = Pyro4.util.get_serializer(config.SERIALIZER)
p, _ = ser.serializeData([float("+inf"), float("-inf"), float("nan")])
s2 = ser.deserializeData(p)
self.assertTrue(math.isinf(s2[0]))
self.assertEqual(1.0, math.copysign(1, s2[0]))
self.assertTrue(math.isinf(s2[1]))
self.assertEqual(-1.0, math.copysign(1, s2[1]))
self.assertTrue(math.isnan(s2[2]))
def mything_dict(obj):
return {
"__class__": "CUSTOM-Mythingymabob",
"name": obj.name
}
def mything_creator(classname, d):
assert classname == "CUSTOM-Mythingymabob"
assert d["__class__"] == "CUSTOM-Mythingymabob"
return MyThingPartlyExposed(d["name"])
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| [
"[email protected]"
] | |
cf363c988b6badef51b74389c99ca6acff643e5a | 97543ae8e1ad7bf3d17dd87171aaac04f6737b5f | /bibliopixel/drivers/ledtype.py | b1a962f06ec533a9bbfeac352c4d4ccbe0cf78b5 | [
"MIT"
] | permissive | dr-aryone/BiblioPixel | a3c630bf1cd5db2b014b86775d283c61565a193e | fd97e6c651a4bbcade64733847f4eec8f7704b7c | refs/heads/master | 2020-05-27T16:19:15.043592 | 2019-03-23T08:52:37 | 2019-03-25T11:10:39 | 188,698,414 | 2 | 1 | MIT | 2019-05-26T15:12:38 | 2019-05-26T15:12:37 | null | UTF-8 | Python | false | false | 588 | py | from enum import IntEnum
class LEDTYPE(IntEnum):
"""Enumerated LED type names to be used with
:py:mod:`bibliopixel.drivers.serial` and
:py:mod:`bibliopixel.drivers.SPI`
"""
GENERIC = 0 # Use if the serial device only supports one chipset
LPD8806 = 1
WS2801 = 2
# These are all the same
WS2811 = 3
WS2812 = 3
WS2812B = 3
NEOPIXEL = 3
APA104 = 3
# 400khz variant of above
WS2811_400 = 4
TM1809 = 5
TM1804 = 5
TM1803 = 6
UCS1903 = 7
SM16716 = 8
APA102 = 9
SK9822 = 9
LPD1886 = 10
P9813 = 11
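# Minimal usage sketch: members are plain IntEnum values, so the chipset aliases
# above (e.g. WS2811 / WS2812B / NEOPIXEL) name the same member. The driver keyword
# in the comment below is an assumption; check the signatures in drivers.serial/SPI.
if __name__ == "__main__":
    assert LEDTYPE.NEOPIXEL is LEDTYPE.WS2812B
    assert LEDTYPE.APA102 == LEDTYPE.SK9822 == 9
    # e.g. (assumed keyword): Serial(ledtype=LEDTYPE.APA102, num=32)
    print(LEDTYPE.WS2812B)  # -> LEDTYPE.WS2811 (the canonical alias)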
| [
"[email protected]"
] | |
edd2690a5b80bee5f27437cef21e1a4995e9a870 | 9a4755588bbe924270e0d92e04d3409281fbaf5b | /main/displayer.py | a0d3b2201298b0f2f77b759cc72098e188f45c3e | [] | no_license | chaobai-li/authenticated-info-displayer | 209488a8229d17b9d67371435e4aa576ef0bb0b3 | c19c6d477a3b96cda3d65f1833d28ade07aff7ba | refs/heads/master | 2021-01-25T11:48:49.936003 | 2018-03-03T11:59:48 | 2018-03-03T11:59:48 | 123,431,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | __pragma__("alias", "S", "$")
class Displayer:
def __init__(self, authenticator):
self.authenticator = authenticator
self.authenticator.eventLogin.append(self.__initialize)
self.authenticator.eventLogin.append(lambda: self.__toggle(True))
self.authenticator.eventLogout.append(lambda: self.__toggle(False))
self.initialized = False
def __toggle(self, v):
S('[data-auth-display-toggle]').toggle(v)
def __initialize(self):
if self.initialized: return
self.database = firebase.database()
interests = list(S('[data-auth-display]'))
for each in interests:
path = S(each).attr("data-auth-display")
template = S(each).attr("data-auth-display-template")
targetAttr = S(each).attr("data-auth-display-attribute")
useHtml = S(each).attr("data-auth-display-html")
self.__bindListener(each, path, template, targetAttr, useHtml)
self.initialized = True
def __bindListener(self, domObj, path, template, targetAttr, useHtml):
if not template:
template = "{}"
def updater(dbValue):
text = template.format(dbValue.val())
if targetAttr:
S(domObj).attr(targetAttr, text)
else:
if useHtml:
S(domObj).html(text)
else:
S(domObj).text(text)
self.database.ref(path).on("value", updater)
| [
"[email protected]"
] | |
78ac77bbaba347ba0643688428339f03ef0ddee3 | 02b04b202550248a2b78ed069d94b7607640c866 | /DataTypes/Numbers/max.py | 5c3f7d5ee91951b547605f887089c82a0ca3b66a | [] | no_license | keshavkummari/python-nit-7am | c391fe96783c224b44419a258738168230e182cd | 0bc867ad673e40ad401d7473aab4791f21ee1945 | refs/heads/master | 2020-03-30T15:05:18.376222 | 2018-11-05T02:30:44 | 2018-11-05T02:30:44 | 151,347,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | #!/usr/bin/python
"""9. max(x1, x2,...) : The largest of its arguments: the value closest to positive infinity
Python Number max() Method:"""
print "max(80, 100, 1000) : ", max(80, 100, 1000)
print "max(-20, 100, 400) : ", max(-20, 100, 400)
print "max(-80, -20, -10) : ", max(-80, -20, -10)
print "max(0, 100, -400) : ", max(0, 100, -400)
#max() Method; min() Method
print "max method", max(30, -30, 40, 50)
print "min method", min(30, -30, 40, 50)
#!/usr/bin/python
#10. min(x1, x2,...): The smallest of its arguments: the value closest to negative infinity
#Python Number min() Method:
print "min(-20, 100, 400) : ", min(-20, 100, 400)
print "min(80, 100, 1000) : ", min(80, 100, 1000)
print "min(-80, -20, -10) : ", min(-80, -20, -10)
print "min(0, 100, -400) : ", min(0, 100, -400)
| [
"[email protected]"
] | |
959b36ffc39fe17b4ec4cb1d925ad67bca447215 | 0452408a98e03408508b4889ed68a8d0f2d9f8cf | /alphatwirl/roottree/Events.py | dcd99a451aa2f0d07fe62f0b79f023eb3c2325ed | [
"BSD-3-Clause"
] | permissive | benkrikler/alphatwirl | 3318e79b89ce0e79c4a4399c7a40c789531f0e60 | cda7d12fec21291ea33af23234fc08be19430934 | refs/heads/master | 2021-01-23T12:54:05.101466 | 2018-09-26T13:13:18 | 2018-09-26T13:13:18 | 93,210,643 | 0 | 0 | BSD-3-Clause | 2018-03-19T12:27:16 | 2017-06-02T23:18:59 | Python | UTF-8 | Python | false | false | 2,629 | py | # Tai Sakuma <[email protected]>
##__________________________________________________________________||
class Events(object):
"""An iterative object for events.
You can use this class to iterate over entries in a ROOT TTree.
    You can instantiate this class with a TTree object and,
    optionally, a maximum number of entries to loop over::
inputFile = ROOT.TFile.Open(inputPath)
tree = inputFile.Get(treeName)
events = Events(tree)
Then, the "for" loop for the tree entries can be::
for event in events:
Note: "event" and "events" are the same object. In each iteration,
"event" (and "events") is loaded with the next entry in the tree.
A content of the tree, e.g., a branch, can be accessed as an
attribute of "event"::
event.jet_pt
In order to access to a particular entry, you can use an index.
For example, to get 11th entry (the index for the first entry is
0)::
event = events[10]
Note: Again "event" and "events" are the same object.
"""
def __init__(self, tree, maxEvents=-1, start=0):
if start < 0:
raise ValueError("start must be greater than or equal to zero: {} is given".format(start))
self.tree = tree
nEventsInTree = self.tree.GetEntries()
start = min(nEventsInTree, start)
if maxEvents > -1:
self.nEvents = min(nEventsInTree - start, maxEvents)
else:
self.nEvents = nEventsInTree - start
self.maxEvents = maxEvents
self.start = start
self.iEvent = -1
def __len__(self):
return self.nEvents
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
self._repr_contents()
)
def _repr_contents(self):
return 'tree = {!r}, maxEvents={!r}, start={!r}, nEvents={!r}, iEvent={!r}'.format(
self.tree,
self.maxEvents,
self.start,
self.nEvents,
self.iEvent
)
def __getitem__(self, i):
if i >= self.nEvents:
self.iEvent = -1
raise IndexError("the index is out of range: " + str(i))
self.iEvent = i
self.tree.GetEntry(self.start + self.iEvent)
return self
def __iter__(self):
for self.iEvent in range(self.nEvents):
self.tree.GetEntry(self.start + self.iEvent)
yield self
self.iEvent = -1
def __getattr__(self, name):
return getattr(self.tree, name)
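# Minimal usage sketch (kept as comments: it needs ROOT, and the file path, tree
# name and branch name below are placeholders). It illustrates the `start` and
# `maxEvents` arguments and the `iEvent` counter described in the docstring.
#
#     import ROOT
#     inputFile = ROOT.TFile.Open("example.root")
#     events = Events(inputFile.Get("tree"), maxEvents=100, start=1000)
#     for event in events:
#         print(event.iEvent, event.jet_pt)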
##__________________________________________________________________||
| [
"[email protected]"
] | |
584017b8c603df166692fd584d0144e09d4a261b | 42a0befb594a6c081f3e788016c53889cfa2a9fb | /Codechef/factorial_easy_problem.py | e88719072ec1628291baff54eafc1b59d0b7f57f | [] | no_license | Laksh8/competitive-programming | f436e8c525220ad95ef1c7a9d3aa98b4689d4f92 | 14c20e5cc32263c89a73524ab596efbbba2cc85a | refs/heads/master | 2022-12-24T23:54:16.313515 | 2020-09-08T06:59:11 | 2020-09-08T06:59:11 | 293,727,288 | 2 | 1 | null | 2020-10-04T14:15:38 | 2020-09-08T06:57:35 | Python | UTF-8 | Python | false | false | 207 | py | testcase = int(input())
while testcase > 0:
num = int(input())
sum=0
divisor=5
while (num)>=5:
num = num // divisor
sum = sum + num
print(sum)
testcase = testcase - 1
| [
"[email protected]"
] | |
cf550ea8b5a7f3638b9bec4ef4e8ec1e243f0ce3 | 3740de0d6e43ea140fc09ab314e4c492603ba185 | /functions_legacy/FitVAR1.py | 348409cfa3620731799498087218091ba4892c20 | [
"MIT"
] | permissive | s0ap/arpmRes | 29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | refs/heads/master | 2022-02-16T05:01:22.118959 | 2019-08-20T16:45:02 | 2019-08-20T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | import matplotlib.pyplot as plt
from numpy import ones, diff, eye
from RobustLassoFPReg import RobustLassoFPReg
def FitVAR1(X, p=None, nu=10**9, lambda_beta=0, lambda_phi=0, flag_rescale=0):
# This function estimates the 1-step parameters of the VAR[0] process via lasso regression (on first differences)
# INPUTS
# X : [matrix] (n_ x t_end) historical series of independent variables
# p : [vector] (1 x t_end) flexible probabilities
# nu : [scalar] degrees of freedom of multivariate Student t
# lambda_beta : [scalar] lasso regression parameter for loadings
# lambda_phi : [scalar] lasso regression parameter for covariance matrix
# flag_rescale : [boolean flag] if 0 (default), the series is not rescaled before estimation
# OPS
# output1 : [vector](n_ x 1) output1 = alpha
# output2 : [matrix](n_ x n_) output2 = b
# output3 : [matrix](n_ x n_) output3 = sig2_U
## Code
dX = diff(X,1,1)
n_, t_ = dX.shape
if p is None:
p = ones((1,t_))/t_
# robust lasso + glasso regression
alpha, beta, sig2_U = RobustLassoFPReg(dX, X[:,:-1], p, nu, 10**-6, lambda_beta, lambda_phi, flag_rescale)
output1 = alpha
output2 = (eye(n_)+beta)
output3 = sig2_U
return output1, output2, output3
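# Minimal usage sketch (kept as comments; x is a placeholder (n_ x t_) array).
# The fitted one-step relation on the levels is  x[:, t] ~ alpha + b @ x[:, t-1],
# with sig2_u the residual covariance estimated on the first differences.
#
#     alpha, b, sig2_u = FitVAR1(x)
#     x_pred = alpha + b @ x[:, -1]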
| [
"[email protected]"
] | |
a86f4e04fd293b02902b13f84e13a6a1da39451e | 8b2af3cff75ba2a6f8557cdea0d852b9076ff6a3 | /day014/venv/Scripts/easy_install-script.py | 765b7c6bbfc60d30e4d3962d2ae52fb465a43cb6 | [] | no_license | AlexYangLong/Foundations-of-Python | 98e5eaf7e7348120049f1ff4bb3d31393ad05592 | bcf3a1fe526140fd2b05283c104488698ebc99fd | refs/heads/master | 2020-03-16T21:45:34.232670 | 2018-05-11T10:19:21 | 2018-05-11T10:19:21 | 133,013,526 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | #!"D:\for python\0413\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
8c5bc5f4bd79e3341f8f0e73ae8eb742781ec259 | 4d05be863b63a56a90b4c46b15069827b33ecaae | /Algorithms/leetcode_charlie/001_two_sum.py | 620d566dbd384fec815ccab50c6a4b01c5519abe | [] | no_license | leeo1116/PyCharm | e532fa9754056019508cc454214ee1a8ad9b26a9 | b6942c05c27556e5fe47879e8b823845c84c5430 | refs/heads/master | 2022-11-06T00:43:14.882453 | 2017-07-13T04:50:00 | 2017-07-13T04:50:00 | 36,851,636 | 0 | 1 | null | 2022-10-20T10:44:39 | 2015-06-04T06:09:09 | Python | UTF-8 | Python | false | false | 946 | py | __doc__ = """
Given an array of integers, find two numbers such that they add up to a specific target number.The function twoSum
should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
Please note that your returned answers (both index1 and index2) are not zero-based.You may assume that each input would
have exactly one solution.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
"""
class Solution(object):
def __init__(self, index):
self.index = index
def two_sum(self, nums, target):
print('#{0} Solution:\n'.format(self.index))
num_scanned = {}
for i, num in enumerate(nums):
if num_scanned.get(target-num, None) is not None:
return num_scanned[target-num]+1, i+1
else:
num_scanned[num] = i
s = Solution(1)
solution = s.two_sum([0, 4, 3, 0], 0)
print(solution)
| [
"[email protected]"
] | |
55d2099c22b2ef1df7eed3cdac7b86d9e3c15d97 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_230/ch33_2020_03_30_20_03_55_797460.py | 27b68ff699defdc00de4fdca5d880421d1e22da1 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | def eh_primo(n):
div=3
if n%2==0 and n!=2 or n==1 or n==0:
return False
while n > div:
if n%div==0:
return False
div +=2
return True
def primos_entre(a,b):
n_primos=0
while a<=b:
if eh_primo(a):
n_primos +=1
        a += 1
return n_primos | [
"[email protected]"
] | |
d8c80ee0a2954ef4a10f0ebfbf034248dcc2d365 | a8fb5d37de019221e5897a98bd176c566037f813 | /Playground/objgraph_/obj_graph.py | 0890b1611b57af3cdb6b08c6f9339df38174a04e | [] | no_license | tdworowy/PythonPlayground | b743dc2b870d1681b24e654e2af3fe5957710265 | ff090fb44046c9c37501f5dbbcb08d56552540d4 | refs/heads/master | 2023-01-05T00:28:55.725894 | 2022-12-27T10:06:40 | 2022-12-27T10:06:40 | 72,983,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | import objgraph
class Staff:
def __init__(self, ele):
self.ele = ele
def get(self):
return self.ele
def example(count):
x = range(3)
y = [Staff(i) for i in x]
if count == 0:
return Staff(y)
else:
return example(count - 1)
def example2():
y = 1
for i in range(10):
y = Staff(y)
return y
def example3():
l = []
l1 = []
for x in range(7):
z = example(5)
q = example2()
l.append(z)
l.append(q)
l.append((z, q))
l1.append(l)
l.append(l1)
return Staff(l)
def test1():
objgraph.show_refs(example(3), filename="obj.png", refcounts=True)
def test2():
x = range(100)
y = map(example, x)
objgraph.show_refs(y, filename="obj2.png", refcounts=True)
def test3():
objgraph.show_refs(example2(), filename="obj3.png", refcounts=True, max_depth=5, too_many=10)
def test4():
"""Take lot of time"""
objgraph.show_refs(example3(), filename="obj4.png", refcounts=True, max_depth=10, too_many=100)
def test5():
objgraph.show_refs(example3(), filename="obj5.png", refcounts=True, max_depth=10, too_many=20)
if __name__ == "__main__":
test5()
| [
"[email protected]"
] | |
17b37f4a03a4049d3afd2397497d08fa832d5305 | dcc62f725e8d1fdebc3be5192960584198d19813 | /meiduo_mall/meiduo_mall/utils/category.py | 535a51cb88960d742c97a2c71d02a628b6f21fb7 | [] | no_license | 1923488289/youprojects | e51cbb7958963fb8a3a82405f5df18e9a066b1ee | ebd1856dab02e45db69d2d5307473f0f22855988 | refs/heads/master | 2022-12-11T12:40:55.832289 | 2019-09-24T15:31:34 | 2019-09-24T15:31:34 | 210,625,080 | 0 | 0 | null | 2022-12-08T01:49:05 | 2019-09-24T14:36:24 | HTML | UTF-8 | Python | false | false | 1,745 | py | from goods.models import GoodsChannel
def get_category():
    # 1. Query the channels
    channels = GoodsChannel.objects.order_by('group_id', 'sequence')
    categories = {}
    # 2. Iterate over the channels to collect first-level and second-level category data
    for channel in channels:
        # 3. Check whether the channel group already exists
        if channel.group_id not in categories:
            # If it does not exist, create a new channel-group dict
            categories[channel.group_id] = {
                'channels': [],  # first-level categories
                'sub_cats': []  # second-level categories
            }
        # 3.1 Get the channel-group dict
        channel_dict = categories[channel.group_id]
        # 4. Add the first-level category to the channel group
        channel_dict['channels'].append({
            'name': channel.category.name,  # first-level category name
            'url': channel.url  # channel link
        })
        # 5. Add the second-level categories to the channel group
        catetory2s = channel.category.subs.all()
        # 6. Iterate and add the second-level categories one by one
        for catetory2 in catetory2s:
            channel_dict['sub_cats'].append({
                'name': catetory2.name,  # second-level category name
                'sub_cats': catetory2.subs.all()  # third-level categories
            })
    '''
    {
        1: {
            'channels': [phones, cameras, digital],
            'sub_cats': [
                {
                    'name': 'mobile phones & communication',
                    'sub_cats': [phones, gaming phones, ...]
                },
                {
                    ....
                }
            ]
        },
        2: {
            'channels': [computers, office],
            'sub_cats': []
        }
    }
    '''
return categories
| [
"[email protected]"
] | |
b9554abc231f4c37bde663fe622fb5a85107a16d | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/compute/v20190701/virtual_machine_scale_set_extension.py | ee871d7ddaf93e66afd1b6cd43de3dd831e08f3e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,493 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['VirtualMachineScaleSetExtensionArgs', 'VirtualMachineScaleSetExtension']
@pulumi.input_type
class VirtualMachineScaleSetExtensionArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
vm_scale_set_name: pulumi.Input[str],
auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protected_settings: Optional[Any] = None,
provision_after_extensions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
publisher: Optional[pulumi.Input[str]] = None,
settings: Optional[Any] = None,
type: Optional[pulumi.Input[str]] = None,
type_handler_version: Optional[pulumi.Input[str]] = None,
vmss_extension_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VirtualMachineScaleSetExtension resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] vm_scale_set_name: The name of the VM scale set where the extension should be create or updated.
:param pulumi.Input[bool] auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
:param pulumi.Input[str] force_update_tag: If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.
:param pulumi.Input[str] name: The name of the extension.
:param Any protected_settings: The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
:param pulumi.Input[Sequence[pulumi.Input[str]]] provision_after_extensions: Collection of extension names after which this extension needs to be provisioned.
:param pulumi.Input[str] publisher: The name of the extension handler publisher.
:param Any settings: Json formatted public settings for the extension.
:param pulumi.Input[str] type: Specifies the type of the extension; an example is "CustomScriptExtension".
:param pulumi.Input[str] type_handler_version: Specifies the version of the script handler.
:param pulumi.Input[str] vmss_extension_name: The name of the VM scale set extension.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "vm_scale_set_name", vm_scale_set_name)
if auto_upgrade_minor_version is not None:
pulumi.set(__self__, "auto_upgrade_minor_version", auto_upgrade_minor_version)
if force_update_tag is not None:
pulumi.set(__self__, "force_update_tag", force_update_tag)
if name is not None:
pulumi.set(__self__, "name", name)
if protected_settings is not None:
pulumi.set(__self__, "protected_settings", protected_settings)
if provision_after_extensions is not None:
pulumi.set(__self__, "provision_after_extensions", provision_after_extensions)
if publisher is not None:
pulumi.set(__self__, "publisher", publisher)
if settings is not None:
pulumi.set(__self__, "settings", settings)
if type is not None:
pulumi.set(__self__, "type", type)
if type_handler_version is not None:
pulumi.set(__self__, "type_handler_version", type_handler_version)
if vmss_extension_name is not None:
pulumi.set(__self__, "vmss_extension_name", vmss_extension_name)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="vmScaleSetName")
def vm_scale_set_name(self) -> pulumi.Input[str]:
"""
The name of the VM scale set where the extension should be create or updated.
"""
return pulumi.get(self, "vm_scale_set_name")
@vm_scale_set_name.setter
def vm_scale_set_name(self, value: pulumi.Input[str]):
pulumi.set(self, "vm_scale_set_name", value)
@property
@pulumi.getter(name="autoUpgradeMinorVersion")
def auto_upgrade_minor_version(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
"""
return pulumi.get(self, "auto_upgrade_minor_version")
@auto_upgrade_minor_version.setter
def auto_upgrade_minor_version(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_upgrade_minor_version", value)
@property
@pulumi.getter(name="forceUpdateTag")
def force_update_tag(self) -> Optional[pulumi.Input[str]]:
"""
If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.
"""
return pulumi.get(self, "force_update_tag")
@force_update_tag.setter
def force_update_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "force_update_tag", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the extension.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="protectedSettings")
def protected_settings(self) -> Optional[Any]:
"""
The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
"""
return pulumi.get(self, "protected_settings")
@protected_settings.setter
def protected_settings(self, value: Optional[Any]):
pulumi.set(self, "protected_settings", value)
@property
@pulumi.getter(name="provisionAfterExtensions")
def provision_after_extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Collection of extension names after which this extension needs to be provisioned.
"""
return pulumi.get(self, "provision_after_extensions")
@provision_after_extensions.setter
def provision_after_extensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "provision_after_extensions", value)
@property
@pulumi.getter
def publisher(self) -> Optional[pulumi.Input[str]]:
"""
The name of the extension handler publisher.
"""
return pulumi.get(self, "publisher")
@publisher.setter
def publisher(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "publisher", value)
@property
@pulumi.getter
def settings(self) -> Optional[Any]:
"""
Json formatted public settings for the extension.
"""
return pulumi.get(self, "settings")
@settings.setter
def settings(self, value: Optional[Any]):
pulumi.set(self, "settings", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the type of the extension; an example is "CustomScriptExtension".
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="typeHandlerVersion")
def type_handler_version(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the version of the script handler.
"""
return pulumi.get(self, "type_handler_version")
@type_handler_version.setter
def type_handler_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type_handler_version", value)
@property
@pulumi.getter(name="vmssExtensionName")
def vmss_extension_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the VM scale set extension.
"""
return pulumi.get(self, "vmss_extension_name")
@vmss_extension_name.setter
def vmss_extension_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vmss_extension_name", value)
class VirtualMachineScaleSetExtension(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protected_settings: Optional[Any] = None,
provision_after_extensions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
publisher: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
settings: Optional[Any] = None,
type: Optional[pulumi.Input[str]] = None,
type_handler_version: Optional[pulumi.Input[str]] = None,
vm_scale_set_name: Optional[pulumi.Input[str]] = None,
vmss_extension_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Describes a Virtual Machine Scale Set Extension.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_upgrade_minor_version: Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
:param pulumi.Input[str] force_update_tag: If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.
:param pulumi.Input[str] name: The name of the extension.
:param Any protected_settings: The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
:param pulumi.Input[Sequence[pulumi.Input[str]]] provision_after_extensions: Collection of extension names after which this extension needs to be provisioned.
:param pulumi.Input[str] publisher: The name of the extension handler publisher.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param Any settings: Json formatted public settings for the extension.
:param pulumi.Input[str] type: Specifies the type of the extension; an example is "CustomScriptExtension".
:param pulumi.Input[str] type_handler_version: Specifies the version of the script handler.
:param pulumi.Input[str] vm_scale_set_name: The name of the VM scale set where the extension should be create or updated.
:param pulumi.Input[str] vmss_extension_name: The name of the VM scale set extension.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VirtualMachineScaleSetExtensionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Describes a Virtual Machine Scale Set Extension.
:param str resource_name: The name of the resource.
:param VirtualMachineScaleSetExtensionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VirtualMachineScaleSetExtensionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_upgrade_minor_version: Optional[pulumi.Input[bool]] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protected_settings: Optional[Any] = None,
provision_after_extensions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
publisher: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
settings: Optional[Any] = None,
type: Optional[pulumi.Input[str]] = None,
type_handler_version: Optional[pulumi.Input[str]] = None,
vm_scale_set_name: Optional[pulumi.Input[str]] = None,
vmss_extension_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VirtualMachineScaleSetExtensionArgs.__new__(VirtualMachineScaleSetExtensionArgs)
__props__.__dict__["auto_upgrade_minor_version"] = auto_upgrade_minor_version
__props__.__dict__["force_update_tag"] = force_update_tag
__props__.__dict__["name"] = name
__props__.__dict__["protected_settings"] = protected_settings
__props__.__dict__["provision_after_extensions"] = provision_after_extensions
__props__.__dict__["publisher"] = publisher
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["settings"] = settings
__props__.__dict__["type"] = type
__props__.__dict__["type_handler_version"] = type_handler_version
if vm_scale_set_name is None and not opts.urn:
raise TypeError("Missing required property 'vm_scale_set_name'")
__props__.__dict__["vm_scale_set_name"] = vm_scale_set_name
__props__.__dict__["vmss_extension_name"] = vmss_extension_name
__props__.__dict__["provisioning_state"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/v20190701:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20170330:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20170330:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20171201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20171201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20180401:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20180401:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20180601:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20181001:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20181001:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20190301:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20191201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20191201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20200601:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20200601:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20201201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20201201:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-native:compute/v20210301:VirtualMachineScaleSetExtension"), pulumi.Alias(type_="azure-nextgen:compute/v20210301:VirtualMachineScaleSetExtension")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualMachineScaleSetExtension, __self__).__init__(
'azure-native:compute/v20190701:VirtualMachineScaleSetExtension',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualMachineScaleSetExtension':
"""
Get an existing VirtualMachineScaleSetExtension resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VirtualMachineScaleSetExtensionArgs.__new__(VirtualMachineScaleSetExtensionArgs)
__props__.__dict__["auto_upgrade_minor_version"] = None
__props__.__dict__["force_update_tag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["protected_settings"] = None
__props__.__dict__["provision_after_extensions"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["publisher"] = None
__props__.__dict__["settings"] = None
__props__.__dict__["type"] = None
__props__.__dict__["type_handler_version"] = None
return VirtualMachineScaleSetExtension(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoUpgradeMinorVersion")
def auto_upgrade_minor_version(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true.
"""
return pulumi.get(self, "auto_upgrade_minor_version")
@property
@pulumi.getter(name="forceUpdateTag")
def force_update_tag(self) -> pulumi.Output[Optional[str]]:
"""
If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed.
"""
return pulumi.get(self, "force_update_tag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the extension.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="protectedSettings")
def protected_settings(self) -> pulumi.Output[Optional[Any]]:
"""
The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
"""
return pulumi.get(self, "protected_settings")
@property
@pulumi.getter(name="provisionAfterExtensions")
def provision_after_extensions(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Collection of extension names after which this extension needs to be provisioned.
"""
return pulumi.get(self, "provision_after_extensions")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def publisher(self) -> pulumi.Output[Optional[str]]:
"""
The name of the extension handler publisher.
"""
return pulumi.get(self, "publisher")
@property
@pulumi.getter
def settings(self) -> pulumi.Output[Optional[Any]]:
"""
Json formatted public settings for the extension.
"""
return pulumi.get(self, "settings")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="typeHandlerVersion")
def type_handler_version(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the version of the script handler.
"""
return pulumi.get(self, "type_handler_version")
| [
"[email protected]"
] | |
f8940de643087082e5912d2288535fcea3c528d7 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/1073585/snippet.py | 3b5fdb9cca782aeebdcb2fd67a5527bed28bd730 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 306 | py | import os
import re
def proc_starttime(pid):
p = re.compile(r"^btime (\d+)$", re.MULTILINE)
m = p.search(open("/proc/stat").read())
btime = int(m.groups()[0])
clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
stime = int(open("/proc/%d/stat" % pid).read().split()[21]) / clk_tck
return btime + stime | [
"[email protected]"
] | |
3c84cf1aa382ae73435312ccf759eef54d752f84 | 845f627d3b28f88e7a5367ba8bf3b669cf5a6eae | /script/report/report.py | 36a843d34ee338301436cb85b89184b33530581b | [] | no_license | quanrd/nf-reseq-om | 42e5066c99326c30e6aa650acbdc0ab2d4e52683 | 1ed90fff58fba5095f3454be07b803e82ced98b6 | refs/heads/master | 2022-11-18T22:03:49.556357 | 2020-01-06T06:40:13 | 2020-01-06T06:40:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,136 | py | import os
import glob
import sys
import jinja2
import fire
import pandas as pd
from io import StringIO
from pathlib import Path, PurePath
pd.set_option('precision', 3)
script_dir, _ = os.path.split(os.path.abspath(__file__))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(
searchpath='{}/template'.format(script_dir)
)
)
template = env.get_template('index.html')
# code that fills in display_dictionary with the values to send to the template
def format_map_df(map_df):
t_map_df = map_df.T
t_map_df.columns = [each.rstrip(':') for each in t_map_df.columns]
int_df = t_map_df.iloc[:, [0, 1, 4, 3]]
float_df = t_map_df.iloc[:, -3:]
int_df = int_df.astype('int')
int_df = int_df.applymap(lambda x: f'{x:,}')
clean_df = pd.concat([int_df, float_df], axis=1)
clean_df.index.name = 'Item'
return clean_df
def format_reads_df(reads_df):
int_df = reads_df.iloc[:, 0:4]
float_df = reads_df.iloc[:, -5:]
float_df = float_df.applymap(lambda x: f'{x:.3f}')
int_df = int_df.astype('int').applymap(lambda x: f'{x:,}')
clean_df = pd.concat([int_df, float_df], axis=1)
clean_df.index.name = 'Item'
return clean_df
def table2dict(table_file, name, sep='\t', format_func=None):
table_dict = dict()
if table_file.is_file():
table_df = pd.read_csv(table_file, sep=sep, index_col=0)
if format_func is not None:
table_df = format_func(table_df)
table_df.sort_index(inplace=True)
table_df = table_df.reset_index()
for idx_i in table_df.index:
table_dict.setdefault(
f'{name}_body', []).append(list(table_df.loc[idx_i]))
table_dict[f'{name}_header'] = list(table_df.columns)
if 'snp' in name:
table_dict['snp'] = True
return table_dict
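# The sketch below is illustrative only: it shows the header/body dict shape that
# table2dict() hands to the Jinja template, using a tiny made-up two-column table
# for a template section named 'seq' (the temporary file location is arbitrary).
def _demo_table2dict():
    """Illustrative only: build a toy summary table and convert it for the template."""
    import tempfile
    demo_csv = Path(tempfile.mkdtemp()) / 'demo_qc.csv'
    pd.DataFrame(
        {'Sample': ['S1', 'S2'], 'Reads': [100, 200]}
    ).set_index('Sample').to_csv(demo_csv)
    result = table2dict(demo_csv, 'seq', sep=',')
    # Expected shape:
    # {'seq_body': [['S1', 100], ['S2', 200]], 'seq_header': ['Sample', 'Reads']}
    return result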
def plot2report(plot_path, outpath, plot_flag, plot_name=None):
plot_dict = dict()
plots = glob.glob(str(plot_path))
outpath = PurePath(outpath)
if plots:
plot = plots[0]
plot_path = PurePath(plot)
if plot_name is None:
plot_name = plot_path.stem
outfile_path = outpath / f'{plot_name}{plot_path.suffix}'
os.system(f'cp {plot_path} {outfile_path}')
plot_dict[plot_flag] = True
return plot_dict
def plotlist2report(plot_list, outpath, plot_flag):
plot_dict = dict()
if plot_list:
for plot in plot_list:
os.system(f'cp {plot} {outpath}')
plot_dict[plot_flag] = [PurePath(each).name for each in plot_list]
return plot_dict
def exom_report(result_dir, proj_name, report_dir=None):
result_dir = Path(result_dir)
if report_dir is None:
report_dir = result_dir / 'report'
else:
report_dir = Path(report_dir)
if report_dir.is_dir():
os.system(f'rm -r {report_dir}')
display_dictionary = {}
display_dictionary['project_name'] = proj_name
# add fastqc table
qc_table = result_dir / 'qc/data.summary.csv'
display_dictionary.update(
table2dict(qc_table, 'seq', sep=',', format_func=format_reads_df))
# add aligment table
align_table = result_dir / 'alignment/mapping.summary.csv'
display_dictionary.update(
table2dict(
align_table, 'align', sep=',', format_func=format_map_df))
# snp stats
# summary
snp_summary_table = result_dir / 'snp/overall.varSummary.txt'
display_dictionary.update(
table2dict(snp_summary_table, 'snp_summary'))
snp_number_table = result_dir / 'snp/overall.varNum.txt'
display_dictionary.update(
table2dict(snp_number_table, 'snp_number'))
snp_impact_table = result_dir / 'snp/overall.varImpact.txt'
display_dictionary.update(
table2dict(snp_impact_table, 'snp_impact'))
snp_effect_table = result_dir / 'snp/overall.varEffects.txt'
display_dictionary.update(
table2dict(snp_effect_table, 'snp_effect'))
snp_region_table = result_dir / 'snp/overall.varRegion.txt'
display_dictionary.update(
table2dict(snp_region_table, 'snp_region'))
report_dir.mkdir(parents=True, exist_ok=True)
os.system('cp -r {script_dir}/template/* {report_dir}'.format(
script_dir=script_dir,
report_dir=report_dir
))
# plots
report_plot_path = report_dir / 'imgs'
mapping_plot = result_dir / 'plot/alignment/Mapping_stats.png'
display_dictionary.update(
plot2report(mapping_plot, report_plot_path, 'mapping_plot'))
# genome_cov_plot = result_dir / 'plot/alignment/Reads_coverage_genome.png'
# display_dictionary.update(
# plot2report(genome_cov_plot, report_plot_path, 'genome_cov_plot')
# )
exon_cov_plot = result_dir / 'plot/alignment/Reads_coverage_exon.png'
display_dictionary.update(
plot2report(exon_cov_plot, report_plot_path, 'exon_cov_plot')
)
variant_summary_plot = result_dir / \
'plot/variants/Variant_stats_summary.png'
if variant_summary_plot.exists():
display_dictionary.update(
plot2report(variant_summary_plot,
report_plot_path, 'variant_summary')
)
variant_summary_plot_dir = result_dir / 'plot/variants/'
for dir_i in variant_summary_plot_dir.iterdir():
if dir_i.is_dir():
example_sample = dir_i.name
varType_plot = glob.glob(f'{result_dir}/plot/variants/*/*_varType.png')
display_dictionary.update(
plotlist2report(varType_plot, report_plot_path,
'varType_plots'))
varRegion_plot = glob.glob(
f'{result_dir}/plot/variants/*/*_varRegion.png')
display_dictionary.update(
plotlist2report(varRegion_plot, report_plot_path,
'varRegion_plots'))
varImpact_plot = glob.glob(
f'{result_dir}/plot/variants/*/*_varImpact.png')
display_dictionary.update(
plotlist2report(varImpact_plot,
report_plot_path, 'varImpact_plots'))
# varEffects_high_plot = result_dir / \
# f'plot/variants/{example_sample}/{example_sample}_varEffects-HIGH.png'
# display_dictionary.update(
# plot2report(varEffects_high_plot, report_plot_path,
# 'variant_effect_high', 'varEffects-HIGH'))
# varEffects_moderate_plot = result_dir / \
# f'plot/variants/{example_sample}/{example_sample}_varEffects-MODERATE.png'
# display_dictionary.update(
# plot2report(varEffects_moderate_plot,
# report_plot_path,
# 'variant_effect_moderate', 'varEffects-MODERATE'))
# varEffects_low_plot = result_dir / \
# f'plot/variants/{example_sample}/{example_sample}_varEffects-LOW.png'
# display_dictionary.update(
# plot2report(varEffects_low_plot, report_plot_path,
# 'variant_effect_low', 'varEffects-LOW'))
# varEffects_modifier_plot = result_dir / \
# f'plot/variants/{example_sample}/{example_sample}_varEffects-MODIFIER.png'
# display_dictionary.update(
# plot2report(varEffects_modifier_plot,
# report_plot_path,
# 'variant_effect_modifier', 'varEffects-MODIFIER'))
# deltaSNP_plot = result_dir / 'mapping/*deltaSNP.png'
# Gprime_plot = result_dir / 'mapping/*Gprime.png'
# negLog10Pval_plot = result_dir / 'mapping/*negLog10Pval.png'
# plot2report(deltaSNP_plot, report_plot_path, 'deltaSNP')
# plot2report(Gprime_plot, report_plot_path, 'Gprime')
# plot2report(negLog10Pval_plot, report_plot_path, 'negLog10Pval')
# display_dictionary.update({'pca': True, 'snp_index': True})
display_html = template.render(display_dictionary)
report_html = report_dir / 'index.html'
with open(report_html, 'w') as out_inf:
out_inf.write(display_html)
os.system(f'tar -zcf {report_dir}.tar.gz -C {report_dir.parent} {report_dir.name}')
if __name__ == '__main__':
fire.Fire(exom_report)
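# Because fire.Fire(exom_report) exposes the function arguments as a command line,
# a typical invocation (paths and project name below are placeholders) would look
# roughly like:
#
#   python report.py --result_dir /path/to/results --proj_name MyProject
#
# report_dir defaults to <result_dir>/report, and the finished report is also
# packed into <report_dir>.tar.gz at the end of exom_report() above.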
| [
"[email protected]"
] | |
de693df1430585e4e82e8e60b7a7241ff863692c | 20c979fc8a88dc893692c3d83c9907c928c78074 | /prog9.py | 0b026f3b1e73d7694c124def464e67c57bac49f8 | [] | no_license | ParulProgrammingHub/assignment-1-kheniparth1998 | 57edba326325af3b6dfbc6aea59e701ff5634d6c | 8c277dfb8c4a4cdf25ad7f1851d1247a6a3dc86d | refs/heads/master | 2021-01-19T09:14:42.309237 | 2017-02-15T17:24:07 | 2017-02-15T17:24:07 | 82,086,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | a=input("enter maximum marks for a subject: ")
total_marks=a*5.0
sub1=input("enter marks of subject 1: ")
sub2=input("enter marks of subject 2: ")
sub3=input("enter marks of subject 3: ")
sub4=input("enter marks of subject 4: ")
sub5=input("enter marks of subject 5: ")
obtain_marks=sub1+sub2+sub3+sub4+sub5
avg_marks=obtain_marks/5.0
percent=(obtain_marks*100)/total_marks
print "average is :",avg_marks
print "percentage is :",percent
if percent<35:
print "FAIL"
else:
print "PASS"
| [
"[email protected]"
] | |
7c3a5292dbdf6072cb25a109cfdd13c7983d7548 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/common/types/click_location.py | 89822bb622bd68c49742d2af6245686e64e28b2b | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.common',
marshal='google.ads.googleads.v7',
manifest={
'ClickLocation',
},
)
class ClickLocation(proto.Message):
r"""Location criteria associated with a click.
Attributes:
city (str):
The city location criterion associated with
the impression.
country (str):
The country location criterion associated
with the impression.
metro (str):
The metro location criterion associated with
the impression.
most_specific (str):
The most specific location criterion
associated with the impression.
region (str):
The region location criterion associated with
the impression.
"""
city = proto.Field(
proto.STRING,
number=6,
optional=True,
)
country = proto.Field(
proto.STRING,
number=7,
optional=True,
)
metro = proto.Field(
proto.STRING,
number=8,
optional=True,
)
most_specific = proto.Field(
proto.STRING,
number=9,
optional=True,
)
region = proto.Field(
proto.STRING,
number=10,
optional=True,
)
__all__ = tuple(sorted(__protobuf__.manifest))
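# ClickLocation is a plain proto-plus message, so instances can be constructed with
# keyword arguments matching the fields documented above; the values below are
# placeholders chosen only for illustration.
def _example_click_location() -> "ClickLocation":
    """Illustrative only: build a ClickLocation with placeholder criterion values."""
    return ClickLocation(
        city="example-city-criterion",
        country="example-country-criterion",
        metro="example-metro-criterion",
        most_specific="example-most-specific-criterion",
        region="example-region-criterion",
    )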
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
6c32637f146447fc95c3941386d8534c7c68f874 | 73d9e70adfbc6043ecdb8de2ea1b2339007ea5e9 | /tests/features/stdin_input_steps.py | 3256a36e7707594433db8cc6b255a844a6491819 | [
"Apache-2.0"
] | permissive | cheesinglee/bigmler | e147df8d98bcc0624b325fccf381577e74e62b1e | cda58f6149e211897c931300083c6b1b3686ff11 | refs/heads/master | 2020-04-06T07:01:11.195760 | 2015-02-12T23:14:31 | 2015-02-12T23:14:31 | 20,578,762 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | import os
import time
import csv
import json
from lettuce import step, world
from subprocess import check_call, CalledProcessError
from bigml.api import check_resource
from bigmler.checkpoint import file_number_of_lines
from common_steps import check_debug
from basic_test_prediction_steps import shell_execute
@step(r'I create BigML resources uploading train "(.*)" file to test "(.*)" read from stdin and log predictions in "(.*)"$')
def i_create_all_resources_to_test_from_stdin(step, data=None, test=None, output=None):
if data is None or test is None or output is None:
assert False
command = ("cat " + test + "|bigmler --train " + data +
" --test --store --output " + output + " --max-batch-models 1")
shell_execute(command, output, test=test)
@step(r'I create a BigML source from stdin using train "(.*)" file and logging in "(.*)"$')
def i_create_source_from_stdin(step, data=None, output_dir=None):
if data is None or output_dir is None:
assert False
command = ("cat " + data + "|bigmler --train " +
"--store --no-dataset --no-model --output-dir " +
output_dir + " --max-batch-models 1")
shell_execute(command, output_dir + "/test", test=None)
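# For illustration, with data='data/iris.csv' and output_dir='scenario1' (placeholder
# paths), the step above assembles and runs roughly:
#
#   cat data/iris.csv |bigmler --train --store --no-dataset --no-model \
#       --output-dir scenario1 --max-batch-models 1
#
# i.e. the training file is piped to bigmler on stdin and resource creation stops
# after the source is built.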
| [
"[email protected]"
] | |
9e9bad4bc3e7dda2714a737d0825c060b870f001 | 55540f3e86f1d5d86ef6b5d295a63518e274efe3 | /toolchain/riscv/Darwin/python/lib/python3.7/pickle.py | bfa3c0361b73c6ba406beff72276e2433c78dfc2 | [
"Apache-2.0",
"Python-2.0"
] | permissive | bouffalolab/bl_iot_sdk | bc5eaf036b70f8c65dd389439062b169f8d09daa | b90664de0bd4c1897a9f1f5d9e360a9631d38b34 | refs/heads/master | 2023-08-31T03:38:03.369853 | 2023-08-16T08:50:33 | 2023-08-18T09:13:27 | 307,347,250 | 244 | 101 | Apache-2.0 | 2023-08-28T06:29:02 | 2020-10-26T11:16:30 | C | UTF-8 | Python | false | false | 57,994 | py | """Create portable serialized representations of Python objects.
See module copyreg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
from types import FunctionType
from copyreg import dispatch_table
from copyreg import _extension_registry, _inverted_registry, _extension_cache
from itertools import islice
from functools import partial
import sys
from sys import maxsize
from struct import pack, unpack
import re
import io
import codecs
import _compat_pickle
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# Shortcut for use in isinstance testing
bytes_types = (bytes, bytearray)
# These are purely informational; no code uses these.
format_version = "4.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
"3.0", # Protocol 3
"4.0", # Protocol 4
] # Old format versions we can read
# This is the highest protocol number we know how to read.
HIGHEST_PROTOCOL = 4
# The protocol we write by default. May be less than HIGHEST_PROTOCOL.
# We intentionally write a protocol that Python 2.x cannot read;
# there are too many issues with that.
DEFAULT_PROTOCOL = 3
class PickleError(Exception):
"""A common base class for the other pickling exceptions."""
pass
class PicklingError(PickleError):
"""This exception is raised when an unpicklable object is passed to the
dump() method.
"""
pass
class UnpicklingError(PickleError):
"""This exception is raised when there is a problem unpickling an object,
such as a security violation.
Note that other exceptions may also be raised during unpickling, including
(but not necessarily limited to) AttributeError, EOFError, ImportError,
and IndexError.
"""
pass
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = b'(' # push special markobject on stack
STOP = b'.' # every pickle ends with STOP
POP = b'0' # discard topmost stack item
POP_MARK = b'1' # discard stack top through topmost markobject
DUP = b'2' # duplicate top stack item
FLOAT = b'F' # push float object; decimal string argument
INT = b'I' # push integer or bool; decimal string argument
BININT = b'J' # push four-byte signed int
BININT1 = b'K' # push 1-byte unsigned int
LONG = b'L' # push long; decimal string argument
BININT2 = b'M' # push 2-byte unsigned int
NONE = b'N' # push None
PERSID = b'P' # push persistent object; id is taken from string arg
BINPERSID = b'Q' # " " " ; " " " " stack
REDUCE = b'R' # apply callable to argtuple, both on stack
STRING = b'S' # push string; NL-terminated string argument
BINSTRING = b'T' # push string; counted binary string argument
SHORT_BINSTRING= b'U' # " " ; " " " " < 256 bytes
UNICODE = b'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = b'X' # " " " ; counted UTF-8 string argument
APPEND = b'a' # append stack top to list below it
BUILD = b'b' # call __setstate__ or __dict__.update()
GLOBAL = b'c' # push self.find_class(modname, name); 2 string args
DICT = b'd' # build a dict from stack items
EMPTY_DICT = b'}' # push empty dict
APPENDS = b'e' # extend list on stack by topmost stack slice
GET = b'g' # push item from memo on stack; index is string arg
BINGET = b'h' # " " " " " " ; " " 1-byte arg
INST = b'i' # build & push class instance
LONG_BINGET = b'j' # push item from memo on stack; index is 4-byte arg
LIST = b'l' # build list from topmost stack items
EMPTY_LIST = b']' # push empty list
OBJ = b'o' # build & push class instance
PUT = b'p' # store stack top in memo; index is string arg
BINPUT = b'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = b'r' # " " " " " ; " " 4-byte arg
SETITEM = b's' # add key+value pair to dict
TUPLE = b't' # build tuple from topmost stack items
EMPTY_TUPLE = b')' # push empty tuple
SETITEMS = b'u' # modify dict by adding topmost key+value pairs
BINFLOAT = b'G' # push float; arg is 8-byte float encoding
TRUE = b'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = b'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = b'\x80' # identify pickle protocol
NEWOBJ = b'\x81' # build object by applying cls.__new__ to argtuple
EXT1 = b'\x82' # push object from extension registry; 1-byte index
EXT2 = b'\x83' # ditto, but 2-byte index
EXT4 = b'\x84' # ditto, but 4-byte index
TUPLE1 = b'\x85' # build 1-tuple from stack top
TUPLE2 = b'\x86' # build 2-tuple from two topmost stack items
TUPLE3 = b'\x87' # build 3-tuple from three topmost stack items
NEWTRUE = b'\x88' # push True
NEWFALSE = b'\x89' # push False
LONG1 = b'\x8a' # push long from < 256 bytes
LONG4 = b'\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
# Protocol 3 (Python 3.x)
BINBYTES = b'B' # push bytes; counted binary string argument
SHORT_BINBYTES = b'C' # " " ; " " " " < 256 bytes
# Protocol 4
SHORT_BINUNICODE = b'\x8c' # push short string; UTF-8 length < 256 bytes
BINUNICODE8 = b'\x8d' # push very long string
BINBYTES8 = b'\x8e' # push very long bytes string
EMPTY_SET = b'\x8f' # push empty set on the stack
ADDITEMS = b'\x90' # modify set by adding topmost stack items
FROZENSET = b'\x91' # build frozenset from topmost stack items
NEWOBJ_EX = b'\x92' # like NEWOBJ but work with keyword only arguments
STACK_GLOBAL = b'\x93' # same as GLOBAL but using names on the stacks
MEMOIZE = b'\x94' # store top of the stack in memo
FRAME = b'\x95' # indicate the beginning of a new frame
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$", x)])
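# The opcode constants above map directly onto what the standard-library pickletools
# module displays; the sketch below (illustrative only, never called at import time)
# disassembles a small protocol-4 pickle so opcodes such as FRAME, MEMOIZE and
# SHORT_BINUNICODE are visible.
def _show_opcodes_sketch():
    """Illustrative only: print the opcode stream of a tiny pickle."""
    import pickletools
    pickletools.dis(dumps({'key': [1, 2]}, protocol=4))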
class _Framer:
_FRAME_SIZE_MIN = 4
_FRAME_SIZE_TARGET = 64 * 1024
def __init__(self, file_write):
self.file_write = file_write
self.current_frame = None
def start_framing(self):
self.current_frame = io.BytesIO()
def end_framing(self):
if self.current_frame and self.current_frame.tell() > 0:
self.commit_frame(force=True)
self.current_frame = None
def commit_frame(self, force=False):
if self.current_frame:
f = self.current_frame
if f.tell() >= self._FRAME_SIZE_TARGET or force:
data = f.getbuffer()
write = self.file_write
if len(data) >= self._FRAME_SIZE_MIN:
# Issue a single call to the write method of the underlying
# file object for the frame opcode with the size of the
# frame. The concatenation is expected to be less expensive
# than issuing an additional call to write.
write(FRAME + pack("<Q", len(data)))
# Issue a separate call to write to append the frame
# contents without concatenation to the above to avoid a
# memory copy.
write(data)
# Start the new frame with a new io.BytesIO instance so that
# the file object can have delayed access to the previous frame
# contents via an unreleased memoryview of the previous
# io.BytesIO instance.
self.current_frame = io.BytesIO()
def write(self, data):
if self.current_frame:
return self.current_frame.write(data)
else:
return self.file_write(data)
def write_large_bytes(self, header, payload):
write = self.file_write
if self.current_frame:
# Terminate the current frame and flush it to the file.
self.commit_frame(force=True)
# Perform direct write of the header and payload of the large binary
# object. Be careful not to concatenate the header and the payload
# prior to calling 'write' as we do not want to allocate a large
# temporary bytes object.
# We intentionally do not insert a protocol 4 frame opcode to make
# it possible to optimize file.read calls in the loader.
write(header)
write(payload)
class _Unframer:
def __init__(self, file_read, file_readline, file_tell=None):
self.file_read = file_read
self.file_readline = file_readline
self.current_frame = None
def read(self, n):
if self.current_frame:
data = self.current_frame.read(n)
if not data and n != 0:
self.current_frame = None
return self.file_read(n)
if len(data) < n:
raise UnpicklingError(
"pickle exhausted before end of frame")
return data
else:
return self.file_read(n)
def readline(self):
if self.current_frame:
data = self.current_frame.readline()
if not data:
self.current_frame = None
return self.file_readline()
if data[-1] != b'\n'[0]:
raise UnpicklingError(
"pickle exhausted before end of frame")
return data
else:
return self.file_readline()
def load_frame(self, frame_size):
if self.current_frame and self.current_frame.read() != b'':
raise UnpicklingError(
"beginning of a new frame before end of current frame")
self.current_frame = io.BytesIO(self.file_read(frame_size))
# Tools used for pickling.
def _getattribute(obj, name):
for subpath in name.split('.'):
if subpath == '<locals>':
raise AttributeError("Can't get local attribute {!r} on {!r}"
.format(name, obj))
try:
parent = obj
obj = getattr(obj, subpath)
except AttributeError:
raise AttributeError("Can't get attribute {!r} on {!r}"
.format(name, obj)) from None
return obj, parent
def whichmodule(obj, name):
"""Find the module an object belong to."""
module_name = getattr(obj, '__module__', None)
if module_name is not None:
return module_name
# Protect the iteration by using a list copy of sys.modules against dynamic
# modules that trigger imports of other modules upon calls to getattr.
for module_name, module in list(sys.modules.items()):
if module_name == '__main__' or module is None:
continue
try:
if _getattribute(module, name)[0] is obj:
return module_name
except AttributeError:
pass
return '__main__'
def encode_long(x):
r"""Encode a long to a two's complement little-endian binary string.
Note that 0 is a special case, returning an empty string, to save a
byte in the LONG1 pickling context.
>>> encode_long(0)
b''
>>> encode_long(255)
b'\xff\x00'
>>> encode_long(32767)
b'\xff\x7f'
>>> encode_long(-256)
b'\x00\xff'
>>> encode_long(-32768)
b'\x00\x80'
>>> encode_long(-128)
b'\x80'
>>> encode_long(127)
b'\x7f'
>>>
"""
if x == 0:
return b''
nbytes = (x.bit_length() >> 3) + 1
result = x.to_bytes(nbytes, byteorder='little', signed=True)
if x < 0 and nbytes > 1:
if result[-1] == 0xff and (result[-2] & 0x80) != 0:
result = result[:-1]
return result
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
>>> decode_long(b'')
0
>>> decode_long(b"\xff\x00")
255
>>> decode_long(b"\xff\x7f")
32767
>>> decode_long(b"\x00\xff")
-256
>>> decode_long(b"\x00\x80")
-32768
>>> decode_long(b"\x80")
-128
>>> decode_long(b"\x7f")
127
"""
return int.from_bytes(data, byteorder='little', signed=True)
# Pickling machinery
class _Pickler:
def __init__(self, file, protocol=None, *, fix_imports=True):
"""This takes a binary file for writing a pickle data stream.
The optional *protocol* argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2, 3 and 4. The
default protocol is 3; a backward-incompatible protocol designed
for Python 3.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The *file* argument must have a write() method that accepts a
single bytes argument. It can thus be a file object opened for
binary writing, an io.BytesIO instance, or any other custom
object that meets this interface.
If *fix_imports* is True and *protocol* is less than 3, pickle
will try to map the new Python 3 names to the old module names
used in Python 2, so that the pickle data stream is readable
with Python 2.
"""
if protocol is None:
protocol = DEFAULT_PROTOCOL
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
try:
self._file_write = file.write
except AttributeError:
raise TypeError("file must have a 'write' attribute")
self.framer = _Framer(self._file_write)
self.write = self.framer.write
self._write_large_bytes = self.framer.write_large_bytes
self.memo = {}
self.proto = int(protocol)
self.bin = protocol >= 1
self.fast = 0
self.fix_imports = fix_imports and protocol < 3
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects
are pickled by reference and not by value. This method is
useful when re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
# Check whether Pickler was initialized correctly. This is
# only needed to mimic the behavior of _pickle.Pickler.dump().
if not hasattr(self, "_file_write"):
raise PicklingError("Pickler.__init__() was not called by "
"%s.__init__()" % (self.__class__.__name__,))
if self.proto >= 2:
self.write(PROTO + pack("<B", self.proto))
if self.proto >= 4:
self.framer.start_framing()
self.save(obj)
self.write(STOP)
self.framer.end_framing()
def memoize(self, obj):
"""Store an object in the memo."""
# The Pickler memo is a dictionary mapping object ids to 2-tuples
# that contain the Unpickler memo key and the object being memoized.
# The memo key is written to the pickle and will become
# the key in the Unpickler's memo. The object is stored in the
# Pickler memo so that transient objects are kept alive during
# pickling.
# The use of the Unpickler memo length as the memo key is just a
# convention. The only requirement is that the memo values be unique.
# But there appears no advantage to any other scheme, and this
# scheme allows the Unpickler memo to be implemented as a plain (but
# growable) array, indexed by memo key.
if self.fast:
return
assert id(obj) not in self.memo
idx = len(self.memo)
self.write(self.put(idx))
self.memo[id(obj)] = idx, obj
# Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
def put(self, idx):
if self.proto >= 4:
return MEMOIZE
elif self.bin:
if idx < 256:
return BINPUT + pack("<B", idx)
else:
return LONG_BINPUT + pack("<I", idx)
else:
return PUT + repr(idx).encode("ascii") + b'\n'
# Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
def get(self, i):
if self.bin:
if i < 256:
return BINGET + pack("<B", i)
else:
return LONG_BINGET + pack("<I", i)
return GET + repr(i).encode("ascii") + b'\n'
def save(self, obj, save_persistent_id=True):
self.framer.commit_frame()
# Check for persistent id (defined by a subclass)
pid = self.persistent_id(obj)
if pid is not None and save_persistent_id:
self.save_pers(pid)
return
# Check the memo
x = self.memo.get(id(obj))
if x is not None:
self.write(self.get(x[0]))
return
# Check the type dispatch table
t = type(obj)
f = self.dispatch.get(t)
if f is not None:
f(self, obj) # Call unbound method with explicit self
return
# Check private dispatch table if any, or else copyreg.dispatch_table
reduce = getattr(self, 'dispatch_table', dispatch_table).get(t)
if reduce is not None:
rv = reduce(obj)
else:
# Check for a class with a custom metaclass; treat as regular class
try:
issc = issubclass(t, type)
except TypeError: # t is not a class (old Boost; see SF #502085)
issc = False
if issc:
self.save_global(obj)
return
# Check for a __reduce_ex__ method, fall back to __reduce__
reduce = getattr(obj, "__reduce_ex__", None)
if reduce is not None:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce is not None:
rv = reduce()
else:
raise PicklingError("Can't pickle %r object: %r" %
(t.__name__, obj))
# Check for string returned by reduce(), meaning "save as global"
if isinstance(rv, str):
self.save_global(obj, rv)
return
# Assert that reduce() returned a tuple
if not isinstance(rv, tuple):
raise PicklingError("%s must return string or tuple" % reduce)
# Assert that it returned an appropriately sized tuple
l = len(rv)
if not (2 <= l <= 5):
raise PicklingError("Tuple returned by %s must have "
"two to five elements" % reduce)
# Save the reduce() output and finally memoize the object
self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid, save_persistent_id=False)
self.write(BINPERSID)
else:
try:
self.write(PERSID + str(pid).encode("ascii") + b'\n')
except UnicodeEncodeError:
raise PicklingError(
"persistent IDs in protocol 0 must be ASCII strings")
def save_reduce(self, func, args, state=None, listitems=None,
dictitems=None, obj=None):
# This API is called by some subclasses
if not isinstance(args, tuple):
raise PicklingError("args from save_reduce() must be a tuple")
if not callable(func):
raise PicklingError("func from save_reduce() must be callable")
save = self.save
write = self.write
func_name = getattr(func, "__name__", "")
if self.proto >= 2 and func_name == "__newobj_ex__":
cls, args, kwargs = args
if not hasattr(cls, "__new__"):
raise PicklingError("args[0] from {} args has no __new__"
.format(func_name))
if obj is not None and cls is not obj.__class__:
raise PicklingError("args[0] from {} args has the wrong class"
.format(func_name))
if self.proto >= 4:
save(cls)
save(args)
save(kwargs)
write(NEWOBJ_EX)
else:
func = partial(cls.__new__, cls, *args, **kwargs)
save(func)
save(())
write(REDUCE)
elif self.proto >= 2 and func_name == "__newobj__":
# A __reduce__ implementation can direct protocol 2 or newer to
# use the more efficient NEWOBJ opcode, while still
# allowing protocol 0 and 1 to work normally. For this to
# work, the function returned by __reduce__ should be
# called __newobj__, and its first argument should be a
# class. The implementation for __newobj__
# should be as follows, although pickle has no way to
# verify this:
#
# def __newobj__(cls, *args):
# return cls.__new__(cls, *args)
#
# Protocols 0 and 1 will pickle a reference to __newobj__,
# while protocol 2 (and above) will pickle a reference to
# cls, the remaining args tuple, and the NEWOBJ code,
# which calls cls.__new__(cls, *args) at unpickling time
# (see load_newobj below). If __reduce__ returns a
# three-tuple, the state from the third tuple item will be
# pickled regardless of the protocol, calling __setstate__
# at unpickling time (see load_build below).
#
# Note that no standard __newobj__ implementation exists;
# you have to provide your own. This is to enforce
# compatibility with Python 2.2 (pickles written using
# protocol 0 or 1 in Python 2.3 should be unpicklable by
# Python 2.2).
cls = args[0]
if not hasattr(cls, "__new__"):
raise PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
save(args)
write(NEWOBJ)
else:
save(func)
save(args)
write(REDUCE)
if obj is not None:
# If the object is already in the memo, this means it is
# recursive. In this case, throw away everything we put on the
# stack, and fetch the object back from the memo.
if id(obj) in self.memo:
write(POP + self.get(self.memo[id(obj)][0]))
else:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(BUILD)
# Methods below this point are dispatched through the dispatch table
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[type(None)] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(NEWTRUE if obj else NEWFALSE)
else:
self.write(TRUE if obj else FALSE)
dispatch[bool] = save_bool
def save_long(self, obj):
if self.bin:
# If the int is small enough to fit in a signed 4-byte 2's-comp
# format, we can store it more efficiently than the general
# case.
# First one- and two-byte unsigned ints:
if obj >= 0:
if obj <= 0xff:
self.write(BININT1 + pack("<B", obj))
return
if obj <= 0xffff:
self.write(BININT2 + pack("<H", obj))
return
# Next check for 4-byte signed ints:
if -0x80000000 <= obj <= 0x7fffffff:
self.write(BININT + pack("<i", obj))
return
if self.proto >= 2:
encoded = encode_long(obj)
n = len(encoded)
if n < 256:
self.write(LONG1 + pack("<B", n) + encoded)
else:
self.write(LONG4 + pack("<i", n) + encoded)
return
if -0x80000000 <= obj <= 0x7fffffff:
self.write(INT + repr(obj).encode("ascii") + b'\n')
else:
self.write(LONG + repr(obj).encode("ascii") + b'L\n')
dispatch[int] = save_long
def save_float(self, obj):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj).encode("ascii") + b'\n')
dispatch[float] = save_float
def save_bytes(self, obj):
if self.proto < 3:
if not obj: # bytes object is empty
self.save_reduce(bytes, (), obj=obj)
else:
self.save_reduce(codecs.encode,
(str(obj, 'latin1'), 'latin1'), obj=obj)
return
n = len(obj)
if n <= 0xff:
self.write(SHORT_BINBYTES + pack("<B", n) + obj)
elif n > 0xffffffff and self.proto >= 4:
self._write_large_bytes(BINBYTES8 + pack("<Q", n), obj)
elif n >= self.framer._FRAME_SIZE_TARGET:
self._write_large_bytes(BINBYTES + pack("<I", n), obj)
else:
self.write(BINBYTES + pack("<I", n) + obj)
self.memoize(obj)
dispatch[bytes] = save_bytes
def save_str(self, obj):
if self.bin:
encoded = obj.encode('utf-8', 'surrogatepass')
n = len(encoded)
if n <= 0xff and self.proto >= 4:
self.write(SHORT_BINUNICODE + pack("<B", n) + encoded)
elif n > 0xffffffff and self.proto >= 4:
self._write_large_bytes(BINUNICODE8 + pack("<Q", n), encoded)
elif n >= self.framer._FRAME_SIZE_TARGET:
self._write_large_bytes(BINUNICODE + pack("<I", n), encoded)
else:
self.write(BINUNICODE + pack("<I", n) + encoded)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\0", "\\u0000")
obj = obj.replace("\n", "\\u000a")
obj = obj.replace("\r", "\\u000d")
obj = obj.replace("\x1a", "\\u001a") # EOF on DOS
self.write(UNICODE + obj.encode('raw-unicode-escape') +
b'\n')
self.memoize(obj)
dispatch[str] = save_str
def save_tuple(self, obj):
if not obj: # tuple is empty
if self.bin:
self.write(EMPTY_TUPLE)
else:
self.write(MARK + TUPLE)
return
n = len(obj)
save = self.save
memo = self.memo
if n <= 3 and self.proto >= 2:
for element in obj:
save(element)
# Subtle. Same as in the big comment below.
if id(obj) in memo:
get = self.get(memo[id(obj)][0])
self.write(POP * n + get)
else:
self.write(_tuplesize2code[n])
self.memoize(obj)
return
# proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
# has more than 3 elements.
write = self.write
write(MARK)
for element in obj:
save(element)
if id(obj) in memo:
# Subtle. d was not in memo when we entered save_tuple(), so
# the process of saving the tuple's elements must have saved
# the tuple itself: the tuple is recursive. The proper action
# now is to throw away everything we put on the stack, and
# simply GET the tuple (it's already constructed). This check
# could have been done in the "for element" loop instead, but
# recursive tuples are a rare thing.
get = self.get(memo[id(obj)][0])
if self.bin:
write(POP_MARK + get)
else: # proto 0 -- POP_MARK not available
write(POP * (n+1) + get)
return
# No recursion.
write(TUPLE)
self.memoize(obj)
dispatch[tuple] = save_tuple
def save_list(self, obj):
if self.bin:
self.write(EMPTY_LIST)
else: # proto 0 -- can't use EMPTY_LIST
self.write(MARK + LIST)
self.memoize(obj)
self._batch_appends(obj)
dispatch[list] = save_list
_BATCHSIZE = 1000
def _batch_appends(self, items):
# Helper to batch up APPENDS sequences
save = self.save
write = self.write
if not self.bin:
for x in items:
save(x)
write(APPEND)
return
it = iter(items)
while True:
tmp = list(islice(it, self._BATCHSIZE))
n = len(tmp)
if n > 1:
write(MARK)
for x in tmp:
save(x)
write(APPENDS)
elif n:
save(tmp[0])
write(APPEND)
# else tmp is empty, and we're done
if n < self._BATCHSIZE:
return
def save_dict(self, obj):
if self.bin:
self.write(EMPTY_DICT)
else: # proto 0 -- can't use EMPTY_DICT
self.write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(obj.items())
dispatch[dict] = save_dict
if PyStringMap is not None:
dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
# Helper to batch up SETITEMS sequences; proto >= 1 only
save = self.save
write = self.write
if not self.bin:
for k, v in items:
save(k)
save(v)
write(SETITEM)
return
it = iter(items)
while True:
tmp = list(islice(it, self._BATCHSIZE))
n = len(tmp)
if n > 1:
write(MARK)
for k, v in tmp:
save(k)
save(v)
write(SETITEMS)
elif n:
k, v = tmp[0]
save(k)
save(v)
write(SETITEM)
# else tmp is empty, and we're done
if n < self._BATCHSIZE:
return
def save_set(self, obj):
save = self.save
write = self.write
if self.proto < 4:
self.save_reduce(set, (list(obj),), obj=obj)
return
write(EMPTY_SET)
self.memoize(obj)
it = iter(obj)
while True:
batch = list(islice(it, self._BATCHSIZE))
n = len(batch)
if n > 0:
write(MARK)
for item in batch:
save(item)
write(ADDITEMS)
if n < self._BATCHSIZE:
return
dispatch[set] = save_set
def save_frozenset(self, obj):
save = self.save
write = self.write
if self.proto < 4:
self.save_reduce(frozenset, (list(obj),), obj=obj)
return
write(MARK)
for item in obj:
save(item)
if id(obj) in self.memo:
# If the object is already in the memo, this means it is
# recursive. In this case, throw away everything we put on the
# stack, and fetch the object back from the memo.
write(POP_MARK + self.get(self.memo[id(obj)][0]))
return
write(FROZENSET)
self.memoize(obj)
dispatch[frozenset] = save_frozenset
def save_global(self, obj, name=None):
write = self.write
memo = self.memo
if name is None:
name = getattr(obj, '__qualname__', None)
if name is None:
name = obj.__name__
module_name = whichmodule(obj, name)
try:
__import__(module_name, level=0)
module = sys.modules[module_name]
obj2, parent = _getattribute(module, name)
except (ImportError, KeyError, AttributeError):
raise PicklingError(
"Can't pickle %r: it's not found as %s.%s" %
(obj, module_name, name)) from None
else:
if obj2 is not obj:
raise PicklingError(
"Can't pickle %r: it's not the same object as %s.%s" %
(obj, module_name, name))
if self.proto >= 2:
code = _extension_registry.get((module_name, name))
if code:
assert code > 0
if code <= 0xff:
write(EXT1 + pack("<B", code))
elif code <= 0xffff:
write(EXT2 + pack("<H", code))
else:
write(EXT4 + pack("<i", code))
return
lastname = name.rpartition('.')[2]
if parent is module:
name = lastname
# Non-ASCII identifiers are supported only with protocols >= 3.
if self.proto >= 4:
self.save(module_name)
self.save(name)
write(STACK_GLOBAL)
elif parent is not module:
self.save_reduce(getattr, (parent, lastname))
elif self.proto >= 3:
write(GLOBAL + bytes(module_name, "utf-8") + b'\n' +
bytes(name, "utf-8") + b'\n')
else:
if self.fix_imports:
r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
if (module_name, name) in r_name_mapping:
module_name, name = r_name_mapping[(module_name, name)]
elif module_name in r_import_mapping:
module_name = r_import_mapping[module_name]
try:
write(GLOBAL + bytes(module_name, "ascii") + b'\n' +
bytes(name, "ascii") + b'\n')
except UnicodeEncodeError:
raise PicklingError(
"can't pickle global identifier '%s.%s' using "
"pickle protocol %i" % (module, name, self.proto)) from None
self.memoize(obj)
def save_type(self, obj):
if obj is type(None):
return self.save_reduce(type, (None,), obj=obj)
elif obj is type(NotImplemented):
return self.save_reduce(type, (NotImplemented,), obj=obj)
elif obj is type(...):
return self.save_reduce(type, (...,), obj=obj)
return self.save_global(obj)
dispatch[FunctionType] = save_global
dispatch[type] = save_type
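# persistent_id() above is intended to be overridden; a minimal subclass that pickles
# objects known to an external registry by reference might look like this (the
# registry mapping of id(obj) -> string key is an assumption made for illustration).
class _RegistryRefPickler(_Pickler):
    """Illustrative only: emit persistent IDs for objects found in a registry."""
    def __init__(self, file, registry, protocol=None):
        super().__init__(file, protocol)
        self._registry = registry  # mapping of id(obj) -> stable string key
    def persistent_id(self, obj):
        # Return the registry key for known objects, None for ordinary pickling.
        return self._registry.get(id(obj))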
# Unpickling machinery
class _Unpickler:
def __init__(self, file, *, fix_imports=True,
encoding="ASCII", errors="strict"):
"""This takes a binary file for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so
no proto argument is needed.
        The argument *file* must have two methods, a read() method that
        takes an integer argument, and a readline() method that requires
        no arguments. Both methods should return bytes. Thus *file*
        can be a binary file object opened for reading, an io.BytesIO
        object, or any other custom object that meets this interface.
Optional keyword arguments are *fix_imports*, *encoding* and
*errors*, which are used to control compatibility support for
pickle stream generated by Python 2. If *fix_imports* is True,
pickle will try to map the old Python 2 names to the new names
used in Python 3. The *encoding* and *errors* tell pickle how
to decode 8-bit string instances pickled by Python 2; these
default to 'ASCII' and 'strict', respectively. *encoding* can be
        'bytes' to read these 8-bit string instances as bytes objects.
"""
self._file_readline = file.readline
self._file_read = file.read
self.memo = {}
self.encoding = encoding
self.errors = errors
self.proto = 0
self.fix_imports = fix_imports
def load(self):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
# Check whether Unpickler was initialized correctly. This is
        # only needed to mimic the behavior of _pickle.Unpickler.load().
if not hasattr(self, "_file_read"):
raise UnpicklingError("Unpickler.__init__() was not called by "
"%s.__init__()" % (self.__class__.__name__,))
self._unframer = _Unframer(self._file_read, self._file_readline)
self.read = self._unframer.read
self.readline = self._unframer.readline
self.metastack = []
self.stack = []
self.append = self.stack.append
self.proto = 0
read = self.read
dispatch = self.dispatch
try:
while True:
key = read(1)
if not key:
raise EOFError
assert isinstance(key, bytes_types)
dispatch[key[0]](self)
except _Stop as stopinst:
return stopinst.value
# Return a list of items pushed in the stack after last MARK instruction.
def pop_mark(self):
items = self.stack
self.stack = self.metastack.pop()
self.append = self.stack.append
return items
def persistent_load(self, pid):
raise UnpicklingError("unsupported persistent id encountered")
dispatch = {}
def load_proto(self):
proto = self.read(1)[0]
if not 0 <= proto <= HIGHEST_PROTOCOL:
raise ValueError("unsupported pickle protocol: %d" % proto)
self.proto = proto
dispatch[PROTO[0]] = load_proto
def load_frame(self):
frame_size, = unpack('<Q', self.read(8))
if frame_size > sys.maxsize:
raise ValueError("frame size > sys.maxsize: %d" % frame_size)
self._unframer.load_frame(frame_size)
dispatch[FRAME[0]] = load_frame
def load_persid(self):
try:
pid = self.readline()[:-1].decode("ascii")
except UnicodeDecodeError:
raise UnpicklingError(
"persistent IDs in protocol 0 must be ASCII strings")
self.append(self.persistent_load(pid))
dispatch[PERSID[0]] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID[0]] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE[0]] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE[0]] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE[0]] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
val = int(data, 0)
self.append(val)
dispatch[INT[0]] = load_int
def load_binint(self):
self.append(unpack('<i', self.read(4))[0])
dispatch[BININT[0]] = load_binint
def load_binint1(self):
self.append(self.read(1)[0])
dispatch[BININT1[0]] = load_binint1
def load_binint2(self):
self.append(unpack('<H', self.read(2))[0])
dispatch[BININT2[0]] = load_binint2
def load_long(self):
val = self.readline()[:-1]
if val and val[-1] == b'L'[0]:
val = val[:-1]
self.append(int(val, 0))
dispatch[LONG[0]] = load_long
def load_long1(self):
n = self.read(1)[0]
data = self.read(n)
self.append(decode_long(data))
dispatch[LONG1[0]] = load_long1
def load_long4(self):
n, = unpack('<i', self.read(4))
if n < 0:
# Corrupt or hostile pickle -- we never write one like this
raise UnpicklingError("LONG pickle has negative byte count")
data = self.read(n)
self.append(decode_long(data))
dispatch[LONG4[0]] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT[0]] = load_float
def load_binfloat(self):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT[0]] = load_binfloat
def _decode_string(self, value):
# Used to allow strings from Python 2 to be decoded either as
# bytes or Unicode strings. This should be used only with the
# STRING, BINSTRING and SHORT_BINSTRING opcodes.
if self.encoding == "bytes":
return value
else:
return value.decode(self.encoding, self.errors)
def load_string(self):
data = self.readline()[:-1]
# Strip outermost quotes
if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'':
data = data[1:-1]
else:
raise UnpicklingError("the STRING opcode argument must be quoted")
self.append(self._decode_string(codecs.escape_decode(data)[0]))
dispatch[STRING[0]] = load_string
def load_binstring(self):
# Deprecated BINSTRING uses signed 32-bit length
len, = unpack('<i', self.read(4))
if len < 0:
raise UnpicklingError("BINSTRING pickle has negative byte count")
data = self.read(len)
self.append(self._decode_string(data))
dispatch[BINSTRING[0]] = load_binstring
def load_binbytes(self):
len, = unpack('<I', self.read(4))
if len > maxsize:
raise UnpicklingError("BINBYTES exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(self.read(len))
dispatch[BINBYTES[0]] = load_binbytes
def load_unicode(self):
self.append(str(self.readline()[:-1], 'raw-unicode-escape'))
dispatch[UNICODE[0]] = load_unicode
def load_binunicode(self):
len, = unpack('<I', self.read(4))
if len > maxsize:
raise UnpicklingError("BINUNICODE exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[BINUNICODE[0]] = load_binunicode
def load_binunicode8(self):
len, = unpack('<Q', self.read(8))
if len > maxsize:
raise UnpicklingError("BINUNICODE8 exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[BINUNICODE8[0]] = load_binunicode8
def load_binbytes8(self):
len, = unpack('<Q', self.read(8))
if len > maxsize:
raise UnpicklingError("BINBYTES8 exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(self.read(len))
dispatch[BINBYTES8[0]] = load_binbytes8
def load_short_binstring(self):
len = self.read(1)[0]
data = self.read(len)
self.append(self._decode_string(data))
dispatch[SHORT_BINSTRING[0]] = load_short_binstring
def load_short_binbytes(self):
len = self.read(1)[0]
self.append(self.read(len))
dispatch[SHORT_BINBYTES[0]] = load_short_binbytes
def load_short_binunicode(self):
len = self.read(1)[0]
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[SHORT_BINUNICODE[0]] = load_short_binunicode
def load_tuple(self):
items = self.pop_mark()
self.append(tuple(items))
dispatch[TUPLE[0]] = load_tuple
def load_empty_tuple(self):
self.append(())
dispatch[EMPTY_TUPLE[0]] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1[0]] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2[0]] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3[0]] = load_tuple3
def load_empty_list(self):
self.append([])
dispatch[EMPTY_LIST[0]] = load_empty_list
def load_empty_dictionary(self):
self.append({})
dispatch[EMPTY_DICT[0]] = load_empty_dictionary
def load_empty_set(self):
self.append(set())
dispatch[EMPTY_SET[0]] = load_empty_set
def load_frozenset(self):
items = self.pop_mark()
self.append(frozenset(items))
dispatch[FROZENSET[0]] = load_frozenset
def load_list(self):
items = self.pop_mark()
self.append(items)
dispatch[LIST[0]] = load_list
def load_dict(self):
items = self.pop_mark()
d = {items[i]: items[i+1]
for i in range(0, len(items), 2)}
self.append(d)
dispatch[DICT[0]] = load_dict
# INST and OBJ differ only in how they get a class object. It's not
# only sensible to do the rest in a common routine, the two routines
# previously diverged and grew different bugs.
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, args):
if (args or not isinstance(klass, type) or
hasattr(klass, "__getinitargs__")):
try:
value = klass(*args)
except TypeError as err:
raise TypeError("in constructor for %s: %s" %
(klass.__name__, str(err)), sys.exc_info()[2])
else:
value = klass.__new__(klass)
self.append(value)
def load_inst(self):
module = self.readline()[:-1].decode("ascii")
name = self.readline()[:-1].decode("ascii")
klass = self.find_class(module, name)
self._instantiate(klass, self.pop_mark())
dispatch[INST[0]] = load_inst
def load_obj(self):
# Stack is ... markobject classobject arg1 arg2 ...
args = self.pop_mark()
cls = args.pop(0)
self._instantiate(cls, args)
dispatch[OBJ[0]] = load_obj
def load_newobj(self):
args = self.stack.pop()
cls = self.stack.pop()
obj = cls.__new__(cls, *args)
self.append(obj)
dispatch[NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
dispatch[NEWOBJ_EX[0]] = load_newobj_ex
def load_global(self):
module = self.readline()[:-1].decode("utf-8")
name = self.readline()[:-1].decode("utf-8")
klass = self.find_class(module, name)
self.append(klass)
dispatch[GLOBAL[0]] = load_global
def load_stack_global(self):
name = self.stack.pop()
module = self.stack.pop()
if type(name) is not str or type(module) is not str:
raise UnpicklingError("STACK_GLOBAL requires str")
self.append(self.find_class(module, name))
dispatch[STACK_GLOBAL[0]] = load_stack_global
def load_ext1(self):
code = self.read(1)[0]
self.get_extension(code)
dispatch[EXT1[0]] = load_ext1
def load_ext2(self):
code, = unpack('<H', self.read(2))
self.get_extension(code)
dispatch[EXT2[0]] = load_ext2
def load_ext4(self):
code, = unpack('<i', self.read(4))
self.get_extension(code)
dispatch[EXT4[0]] = load_ext4
def get_extension(self, code):
nil = []
obj = _extension_cache.get(code, nil)
if obj is not nil:
self.append(obj)
return
key = _inverted_registry.get(code)
if not key:
if code <= 0: # note that 0 is forbidden
# Corrupt or hostile pickle.
raise UnpicklingError("EXT specifies code <= 0")
raise ValueError("unregistered extension code %d" % code)
obj = self.find_class(*key)
_extension_cache[code] = obj
self.append(obj)
def find_class(self, module, name):
# Subclasses may override this.
if self.proto < 3 and self.fix_imports:
if (module, name) in _compat_pickle.NAME_MAPPING:
module, name = _compat_pickle.NAME_MAPPING[(module, name)]
elif module in _compat_pickle.IMPORT_MAPPING:
module = _compat_pickle.IMPORT_MAPPING[module]
__import__(module, level=0)
if self.proto >= 4:
return _getattribute(sys.modules[module], name)[0]
else:
return getattr(sys.modules[module], name)
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
stack[-1] = func(*args)
dispatch[REDUCE[0]] = load_reduce
def load_pop(self):
if self.stack:
del self.stack[-1]
else:
self.pop_mark()
dispatch[POP[0]] = load_pop
def load_pop_mark(self):
self.pop_mark()
dispatch[POP_MARK[0]] = load_pop_mark
def load_dup(self):
self.append(self.stack[-1])
dispatch[DUP[0]] = load_dup
def load_get(self):
i = int(self.readline()[:-1])
self.append(self.memo[i])
dispatch[GET[0]] = load_get
def load_binget(self):
i = self.read(1)[0]
self.append(self.memo[i])
dispatch[BINGET[0]] = load_binget
def load_long_binget(self):
i, = unpack('<I', self.read(4))
self.append(self.memo[i])
dispatch[LONG_BINGET[0]] = load_long_binget
def load_put(self):
i = int(self.readline()[:-1])
if i < 0:
raise ValueError("negative PUT argument")
self.memo[i] = self.stack[-1]
dispatch[PUT[0]] = load_put
def load_binput(self):
i = self.read(1)[0]
if i < 0:
raise ValueError("negative BINPUT argument")
self.memo[i] = self.stack[-1]
dispatch[BINPUT[0]] = load_binput
def load_long_binput(self):
i, = unpack('<I', self.read(4))
if i > maxsize:
raise ValueError("negative LONG_BINPUT argument")
self.memo[i] = self.stack[-1]
dispatch[LONG_BINPUT[0]] = load_long_binput
def load_memoize(self):
memo = self.memo
memo[len(memo)] = self.stack[-1]
dispatch[MEMOIZE[0]] = load_memoize
def load_append(self):
stack = self.stack
value = stack.pop()
list = stack[-1]
list.append(value)
dispatch[APPEND[0]] = load_append
def load_appends(self):
items = self.pop_mark()
list_obj = self.stack[-1]
try:
extend = list_obj.extend
except AttributeError:
pass
else:
extend(items)
return
# Even if the PEP 307 requires extend() and append() methods,
# fall back on append() if the object has no extend() method
# for backward compatibility.
append = list_obj.append
for item in items:
append(item)
dispatch[APPENDS[0]] = load_appends
def load_setitem(self):
stack = self.stack
value = stack.pop()
key = stack.pop()
dict = stack[-1]
dict[key] = value
dispatch[SETITEM[0]] = load_setitem
def load_setitems(self):
items = self.pop_mark()
dict = self.stack[-1]
for i in range(0, len(items), 2):
dict[items[i]] = items[i + 1]
dispatch[SETITEMS[0]] = load_setitems
def load_additems(self):
items = self.pop_mark()
set_obj = self.stack[-1]
if isinstance(set_obj, set):
set_obj.update(items)
else:
add = set_obj.add
for item in items:
add(item)
dispatch[ADDITEMS[0]] = load_additems
def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
setstate = getattr(inst, "__setstate__", None)
if setstate is not None:
setstate(state)
return
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
inst_dict = inst.__dict__
intern = sys.intern
for k, v in state.items():
if type(k) is str:
inst_dict[intern(k)] = v
else:
inst_dict[k] = v
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
dispatch[BUILD[0]] = load_build
def load_mark(self):
self.metastack.append(self.stack)
self.stack = []
self.append = self.stack.append
dispatch[MARK[0]] = load_mark
def load_stop(self):
value = self.stack.pop()
raise _Stop(value)
dispatch[STOP[0]] = load_stop
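# find_class() above is the usual hook for restricting which globals a pickle stream
# may load; a minimal allow-list unpickler could look like this (the allowed
# module/name pairs are examples only, not a recommendation).
class _AllowListUnpickler(_Unpickler):
    """Illustrative only: refuse to resolve globals outside a small allow-list."""
    _ALLOWED = {('builtins', 'range'), ('collections', 'OrderedDict')}
    def find_class(self, module, name):
        if (module, name) not in self._ALLOWED:
            raise UnpicklingError("global '%s.%s' is forbidden" % (module, name))
        return super().find_class(module, name)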
# Shorthands
def _dump(obj, file, protocol=None, *, fix_imports=True):
_Pickler(file, protocol, fix_imports=fix_imports).dump(obj)
def _dumps(obj, protocol=None, *, fix_imports=True):
f = io.BytesIO()
_Pickler(f, protocol, fix_imports=fix_imports).dump(obj)
res = f.getvalue()
assert isinstance(res, bytes_types)
return res
def _load(file, *, fix_imports=True, encoding="ASCII", errors="strict"):
return _Unpickler(file, fix_imports=fix_imports,
encoding=encoding, errors=errors).load()
def _loads(s, *, fix_imports=True, encoding="ASCII", errors="strict"):
if isinstance(s, str):
raise TypeError("Can't load pickle from unicode string")
file = io.BytesIO(s)
return _Unpickler(file, fix_imports=fix_imports,
encoding=encoding, errors=errors).load()
# Use the faster _pickle if possible
try:
from _pickle import (
PickleError,
PicklingError,
UnpicklingError,
Pickler,
Unpickler,
dump,
dumps,
load,
loads
)
except ImportError:
Pickler, Unpickler = _Pickler, _Unpickler
dump, dumps, load, loads = _dump, _dumps, _load, _loads
# Doctest
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='display contents of the pickle files')
parser.add_argument(
'pickle_file', type=argparse.FileType('br'),
nargs='*', help='the pickle file')
parser.add_argument(
'-t', '--test', action='store_true',
help='run self-test suite')
parser.add_argument(
'-v', action='store_true',
help='run verbosely; only affects self-test run')
args = parser.parse_args()
if args.test:
_test()
else:
if not args.pickle_file:
parser.print_help()
else:
import pprint
for f in args.pickle_file:
obj = load(f)
pprint.pprint(obj)
| [
"[email protected]"
] | |
28d39c6dea81506b8dd1e1f53230c70c25166d80 | 6536e42c9a336c80d370d7f07cc4260e4055f683 | /wsgitest.py | f50c5fd0888492f0b48b4b2933233864a3b2cb8a | [
"BSD-2-Clause"
] | permissive | jonashaag/WSGITest | 4ca01144b6217b4769020c0597d075dd03d4549a | fb6f6981f8cc8192b2207a803c078a03bab31a84 | refs/heads/master | 2020-05-20T09:41:52.598794 | 2011-01-17T18:09:21 | 2011-01-17T18:09:21 | 818,200 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | import optparse
parser = optparse.OptionParser()
parser.add_option('-d', dest='default_tests',
action='store_true', default=None)
if __name__ == '__main__':
from wsgitest.run import run_tests
options, files = parser.parse_args()
if not files:
if options.default_tests is None:
options.default_tests = True
if options.default_tests:
from wsgitest import DEFAULT_TESTS_DIR
files.append(DEFAULT_TESTS_DIR)
result = run_tests(files)
print result.summary()
| [
"[email protected]"
] | |
e2ff3e605f5d643adb4a22ce53b2aa918cf781f4 | e5c9fc4dc73536e75cf4ab119bbc642c28d44591 | /src/leetcodepython/array/day_week_1185.py | d66cac9a636edc8be0acebb4fb26b98b46b0000b | [
"MIT"
] | permissive | zhangyu345293721/leetcode | 0a22034ac313e3c09e8defd2d351257ec9f285d0 | 50f35eef6a0ad63173efed10df3c835b1dceaa3f | refs/heads/master | 2023-09-01T06:03:18.231266 | 2023-08-31T15:23:03 | 2023-08-31T15:23:03 | 163,050,773 | 101 | 29 | null | 2020-12-09T06:26:35 | 2018-12-25T05:58:16 | Java | UTF-8 | Python | false | false | 1,803 | py | # -*- coding:utf-8 -*-
'''
/**
* This is the solution of No. 1185 problem in the LeetCode,
* the website of the problem is as follow:
* https://leetcode-cn.com/problems/day-of-the-week
*
* The description of problem is as follow:
* ==========================================================================================================
 * Given a date, design an algorithm to determine which day of the week it falls on.
 *
 * The input is three integers: day, month and year, representing the day, the month and the year.
 *
 * The returned result must be one of the following values: {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}.
 * Example 1:
 *
 * Input: day = 31, month = 8, year = 2019
 * Output: "Saturday"
 * Example 2:
 *
 * Input: day = 18, month = 7, year = 1999
 * Output: "Sunday"
 * Example 3:
 *
 * Input: day = 15, month = 8, year = 1993
 * Output: "Sunday"
 *
 * Source: LeetCode (力扣)
* ==========================================================================================================
*
* @author zhangyu ([email protected])
*/
'''
import datetime
class Solution:
def day_of_the_week(self, day: int, month: int, year: int) -> str:
'''
        Determine which day of the week a given date falls on.
        Args:
            day: day of the month
            month: month
            year: year
        Returns:
            the name of the weekday as a string
'''
lis = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
dic = dict(enumerate(lis))
w = datetime.date(year, month, day)
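        # date.weekday() returns 0 for Monday through 6 for Sunday, matching the
        # ordering of lis above, so the result can be looked up directly by index.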
return dic[w.weekday()]
if __name__ == '__main__':
y, m, d = 2020, 2, 8
solution = Solution()
day_of_week = solution.day_of_the_week(d, m, y)
print(day_of_week)
assert day_of_week=='Saturday'
| [
"[email protected]"
] | |
fdaa58c6c8d679b9e5214b8455713c37be838bfc | fa097257d8ec4167db24b17076a38e60dbbb0b36 | /Code/27. Quadratic primes.py | fe1e7e6ad3f76dd53febf405107f55c1dfee5f04 | [] | no_license | SergeyShk/Project-Euler | 5e0d5bb3f03e2baaa25bd895f53603026fb147c7 | 6f3019ca88a545bf85e714526aa6ca661f89e4a9 | refs/heads/master | 2021-08-16T03:04:28.000466 | 2020-04-15T20:13:29 | 2020-04-15T20:13:29 | 159,189,991 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | '''
Euler discovered the remarkable quadratic formula:
n^2+n+41
It turns out that the formula will produce 40 primes for the consecutive integer values 0≤n≤39. However, when n=40,40^2+40+41=40(40+1)+41 is divisible by 41, and certainly when n=41,41^2+41+41 is clearly divisible by 41.
The incredible formula n^2−79n+1601 was discovered, which produces 80 primes for the consecutive values 0≤n≤79. The product of the coefficients, −79 and 1601, is −126479.
Considering quadratics of the form:
n^2+an+b, where |a|<1000 and |b|≤1000
where |n| is the modulus/absolute value of n
e.g. |11|=11 and |−4|=4
Find the product of the coefficients, a and b, for the quadratic expression that produces the maximum number of primes for consecutive values of n, starting with n=0.
'''
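# Quick sanity check of the statement above (illustrative): n*n + n + 41 is prime
# for every n in 0..39, a streak of 40, so the winning (a, b) pair must do better.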
def problem_27(a_limit, b_limit):
    max_primes = {'a': 0, 'b': 0, 'n': 0}
    for a in range(-a_limit + 1, a_limit):        # |a| < a_limit (strict)
        for b in range(-b_limit, b_limit + 1):    # |b| <= b_limit
            n = 0
            while True:
                num = n**2 + a * n + b
                # count the streak only while num is a (positive) prime
                if num > 1 and all(num % i for i in range(2, int(num**0.5) + 1)):
                    n += 1
                else:
                    break
            if n > max_primes['n']:
                max_primes['a'] = a
                max_primes['b'] = b
                max_primes['n'] = n
    return max_primes['a'] * max_primes['b']
print(problem_27(1000, 1000)) | [
"[email protected]"
] | |
3f2d82a98c7780670df6738341d6c92a64e95c4f | 8b71aaab38dbe1adac0c3dfa97bd39997272e0d1 | /main.py | 35138ba0d0bd7dd7909fbd7bb9db47ccdd44538f | [
"MIT"
] | permissive | sreekesari-vangeepuram/visual-card-generator | 39486d0d0565d8400c3d1e4f2b6f77ea8a1d2add | f39b253c21d98119e44ab741d992bde7987354c3 | refs/heads/main | 2023-07-16T17:03:04.148380 | 2021-09-07T15:41:33 | 2021-09-07T15:41:33 | 339,816,805 | 1 | 1 | null | 2021-08-11T08:28:56 | 2021-02-17T18:22:34 | Python | UTF-8 | Python | false | false | 5,388 | py | """
Copyright © 2021
Vangeepuram Sreekesari
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from sys import argv
from PIL import Image, ImageDraw, ImageFilter, ImageFont
# If you are developing an API
# just change the parameters
# in your convenient way!
global vc_size, profile_pic_size, overlay_location, watermark_location, uname_fntsz, other_fntsz
vc_size, profile_pic_size = (<int>, <int>), (<int>, <int>)
overlay_location = (vc_size[0] // 2 - profile_pic_size[0] // 2,
vc_size[1] // 2 - profile_pic_size[1] // 2)
uname_fntsz, other_fntsz = <int>, <int>
profile_pic_path = argv[1]
color = argv[2]
# --------------------------------------------------
username = "<username>"
user_handle = f"@{'<userhandle>'}"
user_location = "<user-location>"
# --------------------------------------------------
def crop_center(pil_img, crop_width, crop_height):
img_width, img_height = pil_img.size
offset = 5
return pil_img.crop(((img_width - crop_width) // 2 + offset,
(img_height - crop_height) // 2 + offset,
(img_width + crop_width) // 2 + offset,
(img_height + crop_height) // 2 + offset))
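# Helper: crop the largest centred square from an image, so the later resize to
# profile_pic_size keeps the aspect ratio instead of stretching the picture.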
crop_max_square = lambda pil_img: crop_center(pil_img, min(pil_img.size), min(pil_img.size))
def mask_circle_transparent(pil_img, blur_radius, offset=0):
"Returns a card after masking the profile pic"
offset += blur_radius * 2
mask = Image.new("L", pil_img.size, 0)
draw = ImageDraw.Draw(mask)
draw.ellipse((offset, offset, pil_img.size[0] - offset, pil_img.size[1] - offset), fill = 255)
mask = mask.filter(ImageFilter.GaussianBlur(blur_radius)) # Filtering the mask
result = pil_img.copy() # Buffer of same type to add alpha-gradient with mask
result.putalpha(mask)
return result
def render_text(image, text, text_location, font_size):
"Returns a card by rendering the given text"
card = ImageDraw.Draw(image)
font_path = "./etc/font.ttf"
if "|" not in text:
card.text(text_location, text, font=ImageFont.truetype(font_path, font_size))
else:
card.text(text_location, text.split("|")[0], font=ImageFont.truetype(font_path, font_size))
width, height = card.textsize(text.split("|")[0], font=ImageFont.truetype(font_path, font_size))
n_spaces = width // len(text.split("|")[0]) + 2 # since word-size is diff. based on font-style
card.text((text_location[0] + width + n_spaces, text_location[1] + height // 5),
text.split("|")[1], font=ImageFont.truetype(font_path, other_fntsz))
return image
def create_border(image, y):
"Returns a card by rendering border line to text"
card = ImageDraw.Draw(image)
x1, x2 = 0, vc_size[0] # To vary the length of the border-line
y1 = y2 = y # To drag down the border-line
line_segment, line_color = [(x1, y1), (x2, y2)], (255,255,255,128)
card.line(line_segment, fill = line_color, width=1)
return image
def stamp_watermark(image, filepath_of_watermark):
"Returns the card by stamping the watermark at bottom right corner"
offset = 10 # Distance between image border and watermark
watermark = Image.open(filepath_of_watermark).convert("RGBA")
wm_size = (watermark.size[0] // (offset + 5), watermark.size[1] // (offset + 5))
watermark = watermark.resize(wm_size)
watermark_location = (vc_size[0] - wm_size[0] - offset,
vc_size[1] - wm_size[1] - offset) # Bottom right corner
image.paste(watermark, watermark_location, mask=watermark)
watermark.close()
return image
visual_card = Image.new("RGBA", vc_size, color)
visual_card = stamp_watermark(visual_card, "<watermark-filepath>")
profile_pic = Image.open(profile_pic_path)
profile_pic = crop_max_square(profile_pic).resize((profile_pic_size), Image.LANCZOS)
# In fn-call of `mask_circle_transparent`, increase 2nd arg to create blur effect at border
profile_pic = mask_circle_transparent(profile_pic, 0)
visual_card.paste(profile_pic, overlay_location, mask=profile_pic) # Overlay profile-pic on visual-card
visual_card = render_text(visual_card, f'{username}|{user_handle}', (uname_fntsz - 10, 10), uname_fntsz)
visual_card = render_text(visual_card, user_location, (uname_fntsz - 10, 35), other_fntsz)
visual_card = create_border(visual_card, 60)
visual_card.show()
#visual_card.save("./visual_card.png")
| [
"[email protected]"
] | |
72efc772d005b199ba2344008550607a08ac3f5d | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Mastering_Probabilistic_Graphical_Models_Using_Python/pgmpy/factors/JointProbabilityDistribution.py | 4e8ceff854544881423360c5c64189a912298156 | [
"MIT"
] | permissive | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 9,729 | py | #!/usr/bin/env python3
from itertools import product
import numpy as np
from pgmpy.factors import Factor
from pgmpy.independencies import Independencies
class JointProbabilityDistribution(Factor):
"""
Base class for Joint Probability Distribution
Public Methods
--------------
conditional_distribution(values)
create_bayesian_model()
get_independencies()
pmap()
marginal_distribution(variables)
minimal_imap()
"""
def __init__(self, variables, cardinality, values):
"""
Initialize a Joint Probability Distribution class.
Defined above, we have the following mapping from variable
assignments to the index of the row vector in the value field:
+-----+-----+-----+-------------------------+
| x1 | x2 | x3 | P(x1, x2, x2) |
+-----+-----+-----+-------------------------+
| x1_0| x2_0| x3_0| P(x1_0, x2_0, x3_0) |
+-----+-----+-----+-------------------------+
| x1_1| x2_0| x3_0| P(x1_1, x2_0, x3_0) |
+-----+-----+-----+-------------------------+
| x1_0| x2_1| x3_0| P(x1_0, x2_1, x3_0) |
+-----+-----+-----+-------------------------+
| x1_1| x2_1| x3_0| P(x1_1, x2_1, x3_0) |
+-----+-----+-----+-------------------------+
| x1_0| x2_0| x3_1| P(x1_0, x2_0, x3_1) |
+-----+-----+-----+-------------------------+
| x1_1| x2_0| x3_1| P(x1_1, x2_0, x3_1) |
+-----+-----+-----+-------------------------+
| x1_0| x2_1| x3_1| P(x1_0, x2_1, x3_1) |
+-----+-----+-----+-------------------------+
| x1_1| x2_1| x3_1| P(x1_1, x2_1, x3_1) |
+-----+-----+-----+-------------------------+
Parameters
----------
variables: list
List of scope of Joint Probability Distribution.
cardinality: list, array_like
List of cardinality of each variable
value: list, array_like
List or array of values of factor.
A Joint Probability Distribution's values are stored in a row
vector in the value using an ordering such that the left-most
variables as defined in the variable field cycle through their
values the fastest.
Examples
--------
>>> from pgmpy.factors import JointProbabilityDistribution
>>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8)/8)
>>> print(prob)
x1 x2 x3 P(x1, x2, x3)
x1_0 x2_0 x3_0 0.125
x1_0 x2_0 x3_1 0.125
x1_0 x2_1 x3_0 0.125
x1_0 x2_1 x3_1 0.125
x1_1 x2_0 x3_0 0.125
x1_1 x2_0 x3_1 0.125
x1_1 x2_1 x3_0 0.125
x1_1 x2_1 x3_1 0.125
"""
if np.isclose(np.sum(values), 1):
Factor.__init__(self, variables, cardinality, values)
else:
raise ValueError("The probability values doesn't sum to 1.")
def __repr__(self):
var_card = ", ".join(['{var}:{card}'.format(var=var, card=card)
for var, card in zip(self.variables, self.cardinality)])
return "<Joint Distribution representing P({var_card}) at {address}>".format(address=hex(id(self)),
var_card=var_card)
def __str__(self):
return self._str(phi_or_p='P')
def marginal_distribution(self, variables, inplace=True):
"""
Returns the marginal distribution over variables.
Parameters
----------
variables: string, list, tuple, set, dict
Variable or list of variables over which marginal distribution needs
to be calculated
Examples
--------
>>> from pgmpy.factors import JointProbabilityDistribution
>>> values = np.random.rand(12)
        >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], values/np.sum(values))
>>> prob.marginal_distribution(['x1', 'x2'])
>>> print(prob)
x1 x2 P(x1, x2)
x1_0 x2_0 0.290187723512
x1_0 x2_1 0.203569992198
x1_0 x2_2 0.00567786144202
x1_1 x2_0 0.116553704043
x1_1 x2_1 0.108469538521
x1_1 x2_2 0.275541180284
"""
return self.marginalize(list(set(list(self.variables)) -
set(variables if isinstance(
variables, (list, set, dict, tuple)) else [variables])),
inplace=inplace)
def check_independence(self, event1, event2, event3=None):
"""
Check if the Joint Probability Distribution satisfies the given independence condition.
Parameters
----------
event1: list or string
random variable whose independence is to be checked.
event2: list or string
random variable from which event1 is independent.
event3: list or string
event1 is independent of event2 given event3.
For random variables say X, Y, Z to check if X is independent of Y given Z.
event1 should be either X or Y.
event2 should be either Y or X.
event3 should Z.
Examples
--------
>>> from pgmpy.factors import JointProbabilityDistribution
>>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)
>>> prob.check_independence('x1', 'x2')
True
>>> prob.check_independence(['x1'], ['x2'], 'x3')
True
"""
if event3:
self.conditional_distribution(event3)
for variable_pair in product(event1, event2):
if (self.marginal_distribution(variable_pair, inplace=False) !=
self.marginal_distribution(variable_pair[0], inplace=False) *
self.marginal_distribution(variable_pair[1], inplace=False)):
return False
return True
def get_independencies(self, condition=None):
"""
Returns the independent variables in the joint probability distribution.
Returns marginally independent variables if condition=None.
Returns conditionally independent variables if condition!=None
Parameter
---------
condition: array_like
Random Variable on which to condition the Joint Probability Distribution.
Examples
--------
>>> from pgmpy.factors import JointProbabilityDistribution
        >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)
>>> prob.get_independencies()
"""
if condition:
self.conditional_distribution(condition)
independencies = Independencies()
from itertools import combinations
for variable_pair in combinations(list(self.variables), 2):
from copy import deepcopy
if JointProbabilityDistribution.marginal_distribution(deepcopy(self), variable_pair) == \
JointProbabilityDistribution.marginal_distribution(deepcopy(self), variable_pair[0]) * \
JointProbabilityDistribution.marginal_distribution(deepcopy(self), variable_pair[1]):
independencies.add_assertions(variable_pair)
return independencies
def conditional_distribution(self, values):
"""
Returns Conditional Probability Distribution after setting values to 1.
Parameters
----------
values: string or array_like
The values on which to condition the Joint Probability Distribution.
Examples
--------
>>> from pgmpy.factors import JointProbabilityDistribution
>>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8)/8)
>>> prob.conditional_distribution(('x1', 1))
>>> print(prob)
        x2    x3    P(x2, x3)
x2_0 x3_0 0.25
x2_0 x3_1 0.25
x2_1 x3_0 0.25
x2_1 x3_1 0.25
"""
self.reduce(values)
self.normalize()
def minimal_imap(self, order):
"""
Returns a Bayesian Model which is minimal IMap of the Joint Probability Distribution
considering the order of the variables.
Parameters
----------
order: array-like
The order of the random variables.
Examples
--------
>>> from pgmpy.factors import JointProbabilityDistribution
>>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12)/12)
>>> bayesian_model = prob.minimal_imap(order=['x2', 'x1', 'x3'])
>>> bayesian_model
<pgmpy.models.models.models at 0x7fd7440a9320>
"""
from pgmpy import models as bm
import itertools
def combinations(u):
for r in range(len(u) + 1):
for i in itertools.combinations(u, r):
yield i
G = bm.BayesianModel()
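        # For each variable X (taken in the supplied order), search the subsets S of
        # its predecessors u: if X is independent of u \ S given S, the members of S
        # become parents of X. Repeating this for every variable yields a minimal
        # I-map of the distribution for this particular ordering.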
for variable_index in range(len(order)):
u = order[:variable_index]
for subset in combinations(u):
if self.check_independence(order[variable_index], set(u)-set(subset), subset):
G.add_edges_from([(variable, order[variable_index]) for variable in subset])
return G
def pmap(self):
pass
| [
"[email protected]"
] | |
24628a937c4bb015580dcf7db437fbac6c5eb40d | 13696a9691b173d75b11b4aee22b79d4ea6b7c0b | /test/test_order_line_item.py | 0f2cd0135271ef7e6096299470663b97c0befed0 | [
"Apache-2.0"
] | permissive | square/connect-python-sdk | 410613bc4b04f0f70176275591a16c9e49e25ede | e00e2889b2dd2c55048219cbe64db79962a68633 | refs/heads/master | 2023-06-15T09:24:17.190416 | 2019-08-15T17:44:41 | 2019-08-15T17:44:41 | 64,772,029 | 53 | 45 | Apache-2.0 | 2020-12-20T18:41:31 | 2016-08-02T16:07:17 | Python | UTF-8 | Python | false | false | 1,186 | py | # coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import squareconnect
from squareconnect.rest import ApiException
from squareconnect.models.order_line_item import OrderLineItem
class TestOrderLineItem(unittest.TestCase):
""" OrderLineItem unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testOrderLineItem(self):
"""
Test OrderLineItem
"""
model = squareconnect.models.order_line_item.OrderLineItem()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
1e616727b698fb933c3679722bfecdc53bf353af | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/kid/way_team_business/family/face_issue_city/number/year/health_kerberos_back.py | 655af272c1deb6d1ca6522de2970d1ca8ae96cfa | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,934 | py | using System;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;
// Install Newtonsoft.Json with NuGet
using Newtonsoft.Json;
namespace translate_sample
{
class Program
{
private const string key_var = "TRANSLATOR_TEXT_SUBSCRIPTION_KEY";
private static readonly string subscriptionKey = "9264e06fc74e856e6ad7039efbb924c4";
private const string endpoint_var = "TRANSLATOR_TEXT_ENDPOINT";
private static readonly string endpoint = Environment.GetEnvironmentVariable(endpoint_var);
static Program()
{
if (null == subscriptionKey)
{
throw new Exception("Please set/export the environment variable: " + key_var);
}
if (null == endpoint)
{
throw new Exception("Please set/export the environment variable: " + endpoint_var);
}
}
// The code in the next section goes here.
// This sample requires C# 7.1 or later for async/await.
// Async call to the Translator Text API
static public async Task TranslateTextRequest(string subscriptionKey, string endpoint, string route, string inputText)
{
object[] body = new object[] { new { Text = inputText } };
var requestBody = JsonConvert.SerializeObject(body);
using (var client = new HttpClient())
using (var request = new HttpRequestMessage())
{
// Build the request.
// Set the method to Post.
request.Method = HttpMethod.Post;
// Construct the URI and add headers.
request.RequestUri = new Uri(endpoint + route);
request.Content = new StringContent(requestBody, Encoding.UTF8, "application/json");
request.Headers.Add("a19ae802003b91b483269a2d3ca373a1", subscriptionKey);
// Send the request and get response.
HttpResponseMessage response = await client.SendAsync(request).ConfigureAwait(false);
// Read response as a string.
string result = await response.Content.ReadAsStringAsync();
// Deserialize the response using the classes created earlier.
TranslationResult[] deserializedOutput = JsonConvert.DeserializeObject<TranslationResult[]>(result);
// Iterate over the deserialized results.
foreach (TranslationResult o in deserializedOutput)
{
// Print the detected input language and confidence score.
Console.WriteLine("Detected input language: {0}\nConfidence score: {1}\n", o.DetectedLanguage.Language, o.DetectedLanguage.Score);
// Iterate over the results and print each translation.
foreach (Translation t in o.Translations)
{
Console.WriteLine("Translated to {0}: {1}", t.To, t.Text);
}
}
}
}
static async Task Main(string[] args)
{
// This is our main function.
// Output languages are defined in the route.
// For a complete list of options, see API reference.
// https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate
string route = "/translate?api-version=3.0&to=de&to=it&to=ja&to=th";
// Prompts you for text to translate. If you'd prefer, you can
// provide a string as textToTranslate.
Console.Write("Type the phrase you'd like to translate? ");
string textToTranslate = Console.ReadLine();
await TranslateTextRequest(subscriptionKey, endpoint, route, textToTranslate);
Console.WriteLine("Press any key to continue.");
Console.ReadKey();
}
}
}
| [
"[email protected]"
] | |
91c25e9e1439da3790676816b093c0f9a27f9de5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03559/s351538346.py | 07b30fc56f836903884b21d2c40f39a3645029b7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | import bisect
N=int(input().strip())
a=list(map(int, input().split()))
b=list(map(int, input().split()))
c=list(map(int, input().split()))
a.sort()
#b.sort(reverse=True)
c.sort()
ans=0
#for i in range(len(b)):
# la=bisect.bisect_left(a, b[i])
# ra=bisect.bisect_right(c, b[i])
# ans+=la*(len(c)-ra)
# print(la,(len(c)-ra))
for i in range(len(b)):
la=bisect.bisect_left(a, b[i])
ra=bisect.bisect_right(c, b[i])
#print(la*(len(c)-ra))
ans+=la*(len(c)-ra)
#print(ans)
#la=bisect.bisect_left(a, 8)
#ra=bisect.bisect_right(a, 8)
#print(la,len(a)-ra)
print(ans) | [
"[email protected]"
] | |
6b0f05b24305838a791d7539f7b5f6e7fa6c8395 | 850d778687e3692ab2a38d4d2227391d92c21e6b | /atcoder.jp/arc008/arc008_4/Main.py | 1bc7f5184d2c991ab49a12ce1a26ad20d78090fc | [] | no_license | Valkyrja3607/AtCoder | 77e2e5e66c0e8e12bb902c35f679119c6576fad7 | 9218a50b1eb83e4498845d15d9dda41fab90ed73 | refs/heads/master | 2023-07-15T20:38:52.911301 | 2018-05-30T17:56:22 | 2018-05-30T17:56:22 | 294,980,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | n,m=[int(j) for j in input().split()]
pab=[[float(j) for j in input().split()] for i in range(m)]
ll=[]
for p,a,b in pab:
ll.append(p)
ll.sort()
from collections import Counter
l=Counter()
for i in range(m):
l[ll[i]]=i
# Change A[k] to x, O(logN)  (here the leaf stores the pair (x, y))
def update(k,x,y):
k += num
seg_min[k] = x
seg_max[k] = y
while k>1:
k //= 2
seg_min[k] = seg_min[k*2]*seg_min[k*2+1]
seg_max[k] = seg_max[k*2]*seg_min[k*2+1]+seg_max[k*2+1]
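# Despite the min/max names, this segment tree composes linear maps v -> a*v + b
# over the events ordered by threshold p: seg_min holds the product of the slopes
# and seg_max the accumulated offset (left child applied before the right one), so
# seg_min[1] + seg_max[1] below evaluates the full composition at a starting value of 1.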
num=2**(m-1).bit_length()
seg_min=[1]*2*num
seg_max=[0]*2*num
ans1=1
ans2=1
for p,a,b in pab:
update(l[p],a,b)
t=seg_min[1]+seg_max[1]
ans1=min(ans1,t)
ans2=max(ans2,t)
print(ans1)
print(ans2)
| [
"[email protected]"
] | |
925db904b3e3bce5fd3d07069328ae9b575f7401 | 1e99d202f94d26d8af5405a8c8284a5ffc345bba | /user/models.py | 09b73ff470a86207cf8f06d317e689aca1d5b450 | [] | no_license | madlad33/drf_pythonmate | 889b6a057ab9ac60b1e1138c2eb4ebc96d873e7c | 0b47ed41e847b0e0a7920e008867cdf971bddd6c | refs/heads/master | 2023-02-18T09:02:35.313419 | 2021-01-11T17:03:19 | 2021-01-11T17:03:19 | 328,583,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.base_user import BaseUserManager
# Create your models here.
class UserManager(BaseUserManager):
def create_user(self,email,password=None,**extrafields):
if not email:
raise ValueError("Email is a required field")
user = self.model(email=self.normalize_email(email),**extrafields)
user.set_password(password)
user.save(using= self._db)
return user
def create_superuser(self,email,password,**extrafields):
user = self.create_user(email,password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class CustomUser(AbstractUser):
username = None
email = models.EmailField(max_length=255,unique=True)
USERNAME_FIELD = 'email'
objects = UserManager()
REQUIRED_FIELDS = ['password']
def __str__(self):
return self.email
class Client(models.Model):
user = models.OneToOneField(CustomUser,on_delete=models.CASCADE) | [
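# Usage sketch (illustrative): with the manager above, accounts are created by email,
# e.g. CustomUser.objects.create_user(email="[email protected]", password="secret").
# Activating this model would normally also require AUTH_USER_MODEL = "user.CustomUser"
# in settings.py (the app label "user" is assumed from the repository layout).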
"[email protected]"
] | |
a4a5d7c166a9d300707f6a1c1407b5a9c15b1ace | 14e3a6d5d5ef1a7fc576c0670361fc908630b495 | /python/clx/eda/__init__.py | f5615c081f99808a87e4ee8d070f7f3c2db6964d | [
"Apache-2.0"
] | permissive | rapidsai/clx | 3b6e49b53704de7f81fcd923ae88148a6ed5f031 | 68c14f460b5d3ab41ade9b2450126db0d2536745 | refs/heads/branch-23.04 | 2023-05-25T09:37:15.553353 | 2023-05-19T16:07:00 | 2023-05-19T16:07:00 | 186,716,715 | 169 | 93 | Apache-2.0 | 2023-05-19T16:07:02 | 2019-05-14T23:47:32 | Jupyter Notebook | UTF-8 | Python | false | false | 42 | py | from clx.eda.eda import EDA # noqa: F401
| [
"[email protected]"
] | |
4f6edf20cdebe577b8864010fef1e297df7d682c | e18a353582609732c795401f1a01bc762bd939f2 | /top/python/test_reweighting.py | 091f786c198a373f5f95cb41e3770088680d9081 | [] | no_license | s-farry/workspaces | 06741807bb464bb0712d52108c2d1b7ae62b1353 | 0dcf3868dcbe110206ea88ff5c9e04a3b44b1ca1 | refs/heads/master | 2020-04-03T00:45:39.152227 | 2017-06-15T16:33:33 | 2017-06-15T16:33:33 | 64,213,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,370 | py | from Jawa import MWTemplate
from ROOT import TFile, TTree, TCut, TPaveText, TLine
f = TFile("/user2/sfarry/workspaces/top/output.root")
t = f.Get("topTuple")
mwt = MWTemplate("top_eft")
mwt.AddTree(t)
mwt.ApplyCut()
mwt.AddVar("top_rap", "abs(top_rap)", 50, 0, 4)
mwt.AddVar("antitop_rap", "abs(antitop_rap)", 50, 0, 4)
for i in range(18):
mwt.AddWeight("rwgt_"+str(i+1), "rwgt_"+str(i+1))
mwt.FillVars()
mwt.Scale(1.0/t.GetEntries())
mwt.ScaleAllWeights(1.0/t.GetEntries())
mwt.SaveToFile()
fwd = TCut("abs(top_rap) > abs(antitop_rap)")
bwd = TCut("abs(top_rap) < abs(antitop_rap)")
lhcb = TCut("top_rap > 2.0 && top_rap < 4.5 && antitop_rap > 2.0 && antitop_rap < 4.5")
gpd = TCut("abs(top_rap) < 2.5 && abs(antitop_rap) < 2.5")
c13qq_label = TPaveText(0.2,0.38,0.33, 0.5, 'NDC')
c13qq_label.AddText('C^{(1,3)}_{qq}')
line = TLine(0.0, 1.0, 5.0, 1.0)
line.SetLineStyle(2)
line.SetLineColor(1)
'''
mwt_fwd = MWTemplate("top_eft_fwd")
mwt_fwd.AddTree(f.Get("topTuple"))
mwt_fwd.ApplyCut()
mwt_fwd.AddVar("top_rap", "abs(top_rap)", 100, 0, 5)
mwt_fwd.AddWeight("central", "w")
for i in range(24):
mwt_fwd.AddWeight("rwgt_"+str(i+1), "rwgt_"+str(i+1))
mwt_fwd.FillVars()
mwt_fwd.SaveToFile()
'''
from Style import *
SetLHCbStyle()
from PlotTools import *
p = Plot([ mwt.GetWeightHist("top_rap", "rwgt_"+str(i+1)) for i in range(11)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'test.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 4, 6, 9, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(12)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] })
p.setProp('ycomplims', [0.8, 1.15])
p.setProp('ylabel', '#sigma [pb]')
p.setProp('xlabel', 'y (top)')
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)) for i in (1,2,3)])
for pp in p.plots: pp.UseCurrentStyle()
p.AutoYlims(ylow=0.000001)
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_C13qq.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.5, 1.49])
#p.setProp('extraObjs', [ c13qq_label])
p.setProp('compObjs', [ line])
p.setProp('labels', ['C^{1,3}_{qq} = 0', 'C^{1,3}_{qq} = +4', 'C^{1,3}_{qq} = -4'])
p.setProp('ylabel', '#sigma [pb]')
p.setProp('xlabel', 'y (top)')
p.setProp('leglims', [0.7, 0.6, 0.9, 0.9])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)) for i in (1,4,5)])
for pp in p.plots: pp.UseCurrentStyle()
p.AutoYlims(ylow=0.000001)
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_C81qq.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.setProp('compObjs', [ line])
p.setProp('labels', ['C^{8,1}_{qq} = 0', 'C^{8,1}_{qq} = +4', 'C^{8,1}_{qq} = -4'])
p.setProp('ylabel', '#sigma [pb]')
p.setProp('xlabel', 'y (top)')
p.setProp('leglims', [0.7, 0.6, 0.9, 0.9])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)) for i in (1,6,7)])
for pp in p.plots: pp.UseCurrentStyle()
p.AutoYlims(ylow=0.000001)
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_C83qq.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.setProp('compObjs', [ line])
p.setProp('labels', ['C^{8,3}_{qq} = 0', 'C^{8,3}_{qq} = +4', 'C^{8,3}_{qq} = -4'])
p.setProp('ylabel', '#sigma [pb]')
p.setProp('xlabel', 'y (top)')
p.setProp('leglims', [0.7, 0.6, 0.9, 0.9])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)) for i in (1,8,9)])
for pp in p.plots: pp.UseCurrentStyle()
p.AutoYlims(ylow=0.000001)
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_C8ut.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.setProp('compObjs', [ line])
p.setProp('labels', ['C^{8}_{ut} = 0', 'C^{8}_{ut} = +4', 'C^{8}_{ut} = -4'])
p.setProp('ylabel', '#sigma [pb]')
p.setProp('xlabel', 'y (top)')
p.setProp('leglims', [0.7, 0.6, 0.9, 0.9])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)) for i in (1,10,11)])
for pp in p.plots: pp.UseCurrentStyle()
p.AutoYlims(ylow=0.000001)
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_C8dt.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.setProp('compObjs', [ line])
p.setProp('labels', ['C^{8}_{dt} = 0', 'C^{8}_{dt} = +4', 'C^{8}_{dt} = -4'])
p.setProp('ylabel', '#sigma [pb]')
p.setProp('xlabel', 'y (top)')
p.setProp('leglims', [0.7, 0.6, 0.9, 0.9])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)) for i in (1,12,13)])
for pp in p.plots: pp.UseCurrentStyle()
p.AutoYlims(ylow=0.000001)
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_C1qu.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.setProp('compObjs', [ line])
p.setProp('labels', ['C^{1}_{qu} = 0', 'C^{1}_{qu} = +4', 'C^{1}_{qu} = -4'])
p.setProp('ylabel', '#sigma [pb]')
p.setProp('xlabel', 'y (top)')
p.setProp('leglims', [0.7, 0.6, 0.9, 0.9])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)) for i in (1,14,15)])
for pp in p.plots: pp.UseCurrentStyle()
p.AutoYlims(ylow=0.000001)
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_C1qd.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.setProp('compObjs', [ line])
p.setProp('labels', ['C^{1}_{qd} = 0', 'C^{1}_{qd} = +4', 'C^{1}_{qd} = -4'])
p.setProp('ylabel', '#sigma [pb]')
p.setProp('xlabel', 'y (top)')
p.setProp('leglims', [0.7, 0.6, 0.9, 0.9])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)) for i in (1,16,17)])
for pp in p.plots: pp.UseCurrentStyle()
p.AutoYlims(ylow=0.000001)
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_C1qt.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.setProp('compObjs', [ line])
p.setProp('labels', ['C^{1}_{qt} = 0', 'C^{1}_{qt} = +4', 'C^{1}_{qt} = -4'])
p.setProp('ylabel', '#sigma [pb]')
p.setProp('xlabel', 'y (top)')
p.setProp('leglims', [0.7, 0.6, 0.9, 0.9])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,2,3)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C13qq.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,4,5)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C81qq.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,6,7)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C83qq.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,8,9)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C8ut.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,10,11)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C8dt.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,12,13)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C1qu.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,14,15)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C1qd.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
p = Plot([mwt.GetWeightHist("top_rap", "rwgt_"+str(i)).GetAsymmetry(mwt.GetWeightHist("antitop_rap", "rwgt_"+str(i))) for i in (1,16,17)])
for pp in p.plots: pp.UseCurrentStyle()
p.setProp('forcestyle', True)
p.setProp('filename', 'topeft_asymm_C1qt.pdf')
p.setProp('location', '/user2/sfarry/workspaces/top/figures')
p.setProp('colors', [1, 2, 2, 4, 4, 6, 6, 9, 9, 40, 40, 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 30, 31, 32, 33])
p.setProp('drawOpts', ['hist' for i in range(17)])
p.setProp('fillstyles', 0)
p.setProp('toCompare', { 0 : [1,2] })
p.setProp('ycomplims', [0.8, 1.2])
p.drawROOT()
| [
"[email protected]"
] | |
36b8165874527ec6b6a038f2526a3b40284cad6c | 80075edf813fa1c7ef3126b153e7ab2f6c42f0be | /xml/Reading_XML_File_From_Python_Code.py | 992076a09487b050d0949ebe21ed811ab5e7f2c2 | [] | no_license | keshavkummari/python-nit-930pm | a7e16701d981145f8fdc27e741169ef76616bc8a | aa3bb7654c091e3d04098483525768e287604c38 | refs/heads/master | 2020-04-01T15:00:20.366890 | 2018-11-29T17:14:41 | 2018-11-29T17:14:41 | 153,316,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | py | # A simple XML file, later parse it with Python minidom.
'''
staff.xml
<?xml version="1.0"?>
<company>
<name>Mkyong Enterprise</name>
<staff id="1001">
<nickname>mkyong</nickname>
<salary>100,000</salary>
</staff>
<staff id="1002">
<nickname>yflow</nickname>
<salary>200,000</salary>
</staff>
<staff id="1003">
<nickname>alex</nickname>
<salary>20,000</salary>
</staff>
</company>
'''
"""
<?xml version="1.0"?>
<company>
<name>Online Ucator</name>
<staff id="1001">
<nickname>Minnu</nickname>
<salary>100,000</salary>
</staff>
<staff id="1002">
<nickname>Keshav</nickname>
<salary>200,000</salary>
</staff>
<staff id="1003">
<nickname>Jessi</nickname>
<salary>20,000</salary>
</staff>
</company>
"""
#2. DOM Example 1
#A simple Python minidom example.
# dom-example.py
from xml.dom import minidom
doc = minidom.parse("staff.xml")
# doc.getElementsByTagName returns NodeList
name = doc.getElementsByTagName("name")[0]
print(name.firstChild.data)
staffs = doc.getElementsByTagName("staff")
for staff in staffs:
sid = staff.getAttribute("id")
nickname = staff.getElementsByTagName("nickname")[0]
salary = staff.getElementsByTagName("salary")[0]
print("id:%s, nickname:%s, salary:%s" %
(sid, nickname.firstChild.data, salary.firstChild.data))
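# Expected output when parsing the first staff.xml listed above (illustrative):
#   Mkyong Enterprise
#   id:1001, nickname:mkyong, salary:100,000
#   id:1002, nickname:yflow, salary:200,000
#   id:1003, nickname:alex, salary:20,000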
| [
"[email protected]"
] | |
9a5c1c8d4f358589a5a518cc0e191b06f084541c | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/Accounting/FPythonCode/FAccountingRollForward.py | aadcf523a93669d6ecde20daab2b4b7f22aa7ead | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,284 | py | """ Compiled: 2020-09-18 10:38:46 """
#__src_file__ = "extensions/accounting/etc/FAccountingRollForward.py"
# operations
from FOperationsCollectionUtils import PopObject
# accounting
from FAccountingEngineEOFY import IAccountingEOFYEngine
from FAccountingCreation import CreateRollForwardJournals
from FAccountingPairReverser import PerformCancellation
from FAccountingReader import ReadRollForwardPairs
from FAccountingDRCRPairGenerator import GenerateDRCRPairs
from FAccountingCalculations import IsAmountZero
#-------------------------------------------------------------------------
class BalanceRollForward(IAccountingEOFYEngine.IRollForwardProvider):
#-------------------------------------------------------------------------
def __init__(self, fiscalYear):
self.__provider = None
self.__toRollForwardBalances = dict()
#-------------------------------------------------------------------------
def PO_Init(self, provider):
self.__provider = provider
#-------------------------------------------------------------------------
def PO_Clear(self):
self.__toRollForwardBalances.clear()
#-------------------------------------------------------------------------
def RFP_IsValidForRollForward(self, balance):
return balance.ChartOfAccount().HasActiveRollForwardTAccount() and \
(not IsAmountZero(balance.Amount()) or not IsAmountZero(balance.BaseAmount()))
#-------------------------------------------------------------------------
def RFP_AddForRollForward(self, key, balance):
self.__toRollForwardBalances[key] = balance
#-------------------------------------------------------------------------
def RFP_RollForward(self, book, fiscalYear, endPeriod, keyFunc):
accountMapper = self.__provider.LKMP_TAccountLedgerKeyMapper()
oldPairs = dict((self.__FindKey(pair, keyFunc), pair) for pair in ReadRollForwardPairs(book, fiscalYear))
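        # For every balance registered for roll-forward: look up any previously booked
        # roll-forward pair for the same key, compute the residual amount still to be
        # carried forward, create a fresh DR/CR journal pair on the roll-forward
        # T-account for the end period, and cancel/replace the old pair as needed.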
for key, balance in self.__toRollForwardBalances.items():
oldPair = PopObject(oldPairs, key)
rollforwardAmount, rollforwardBaseAmount = self.__CalculateRollForwardAmount(balance, oldPair)
newPair = next(GenerateDRCRPairs(CreateRollForwardJournals(rollforwardAmount, rollforwardBaseAmount, balance, endPeriod, accountMapper), True))
self.__ProcessPairs(oldPair, newPair, keyFunc, balance.AccountingPeriod())
#-------------------------------------------------------------------------
def __FindKey(self, pair, keyFunc):
for journal in pair.Journals():
if journal.Account().RollForwardTAccount():
return keyFunc(journal)
return None
#-------------------------------------------------------------------------
def __CalculateRollForwardAmount(self, balance, oldPair):
rollforwardAmount = balance.Amount()
rollforwardBaseAmount = balance.BaseAmount()
if oldPair:
for journal in oldPair.Journals():
if journal.Balance() == balance.Original():
rollforwardAmount -= journal.Amount()
rollforwardBaseAmount -= journal.BaseAmount()
return rollforwardAmount, rollforwardBaseAmount
#-------------------------------------------------------------------------
def __ProcessPairs(self, oldPair, newPair, keyFunc, startPeriod):
if newPair and oldPair:
connectedPairs = [pair for pair in PerformCancellation(oldPair, None, None)]
connectedPairs.append(newPair)
for pair in connectedPairs:
self.__SetBalanceRef(pair, keyFunc, startPeriod)
self.__provider.STPUP_AddConnectedPairsForUpdate(connectedPairs)
elif newPair:
self.__SetBalanceRef(newPair, keyFunc, startPeriod)
self.__provider.STPUP_AddPairForUpdate(newPair)
#-------------------------------------------------------------------------
def __SetBalanceRef(self, pair, keyFunc, startPeriod):
for journal in pair.Journals():
key = keyFunc(journal)
balanceForKey = self.__provider.BC_GetOrCreateBalanceForKey(key, journal, startPeriod)
journal.Balance(balanceForKey)
| [
"[email protected]"
] | |
bab242cced1e1ad5251f1876544fa92f2c8f4c73 | 8ee5dcbdbd407eb5f294d430813b16eca22f571c | /data/HW3/hw3_359.py | 802a68fe38d1f46796648e9870bd99992298710a | [] | no_license | MAPLE-Robot-Subgoaling/IPT | 5e60e2ee4d0a5688bc8711ceed953e76cd2ad5d9 | f512ea3324bfdceff8df63b4c7134b5fcbb0514e | refs/heads/master | 2021-01-11T12:31:00.939051 | 2018-08-13T23:24:19 | 2018-08-13T23:24:19 | 79,373,489 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | temp = float(input("Please enter the temperature: "))
scale = input("Please enter 'C' for Celsius, or 'K' for Kelvin: ")
MELTING_POINT_C = 32
BOILING_POINT_C = 100
MELTING_POINT_K = 273.15
BOILING_POINT_K = 373.15
def main():
if scale == "C":
if temp >= 0 and temp < MELTING_POINT_C:
print("At this temperature, water is a (frozen) solid.")
elif temp >= MELTING_POINT_C and temp < BOILING_POINT_C:
print("At this temperature, water is a liquid.")
elif temp >= BOILING_POINT_C:
print("At this temperature, water is a gas.")
else:
if temp >= 0 and temp < MELTING_POINT_K:
print("At this temperature, water is a (frozen) solid.")
elif temp >= MELTING_POINT_K and temp < BOILING_POINT_K:
print("At this temperature, water is a liquid.")
elif temp >= BOILING_POINT_K:
print("At this temperature, water is a gas.")
main()
| [
"[email protected]"
] | |
bd3f59a9d11388780a80aad702971349799580c5 | 0320ac4a623f9153468952a64af9093430801dcb | /tests/callbacks/learning_rate_test.py | 9503c866bcca0a3f3d20446c89f8d9a9d3d4676a | [
"MIT"
] | permissive | carmocca/PyLaia | 330629610569f9347de5cb3eb479c2ed5abaceb6 | 65b0dde6211f96d061ce6264e50ba316e8f0e7f3 | refs/heads/master | 2023-02-25T06:23:51.755052 | 2021-01-24T13:16:48 | 2021-01-24T13:16:48 | 277,486,482 | 1 | 1 | MIT | 2020-12-02T03:08:13 | 2020-07-06T08:32:49 | Python | UTF-8 | Python | false | false | 1,556 | py | import pytest
import torch
from laia.callbacks import LearningRate
from laia.dummies import DummyEngine, DummyLoggingPlugin, DummyMNIST, DummyTrainer
def test_learning_rate_warns(tmpdir):
trainer = DummyTrainer(
default_root_dir=tmpdir,
max_epochs=1,
callbacks=[LearningRate()],
)
with pytest.warns(RuntimeWarning, match=r"You are using LearningRateMonitor.*"):
trainer.fit(DummyEngine(), datamodule=DummyMNIST())
class __TestEngine(DummyEngine):
def configure_optimizers(self):
optimizer = super().configure_optimizers()
return [optimizer], [torch.optim.lr_scheduler.StepLR(optimizer, 1)]
@pytest.mark.parametrize("num_processes", (1, 2))
def test_learning_rate(tmpdir, num_processes):
log_filepath = tmpdir / "log"
trainer = DummyTrainer(
default_root_dir=tmpdir,
max_epochs=3,
callbacks=[LearningRate()],
accelerator="ddp_cpu" if num_processes > 1 else None,
num_processes=num_processes,
plugins=[DummyLoggingPlugin(log_filepath)],
)
trainer.fit(__TestEngine(), datamodule=DummyMNIST())
if num_processes > 1:
log_filepath_rank1 = tmpdir.join("log.rank1")
assert log_filepath_rank1.exists()
assert not log_filepath_rank1.read_text("utf-8")
assert log_filepath.exists()
lines = [l.strip() for l in log_filepath.readlines()]
for e in range(1, trainer.max_epochs):
expected = f"E{e}: lr-Adam 1.000e-0{e + 2} ⟶ 1.000e-0{e + 3}"
assert lines.count(expected) == 1
| [
"[email protected]"
] | |
c32161bd88210e1a6c87cb5395adf9e602d68732 | 61aa319732d3fa7912e28f5ff7768498f8dda005 | /src/cpu/testers/gpu_ruby_test/ProtocolTester.py | cf24aec71ce40a0c2c4589ca6fb05c77686a5dd2 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] | permissive | TeCSAR-UNCC/gem5-SALAM | 37f2f7198c93b4c18452550df48c1a2ab14b14fb | c14c39235f4e376e64dc68b81bd2447e8a47ff65 | refs/heads/main | 2023-06-08T22:16:25.260792 | 2023-05-31T16:43:46 | 2023-05-31T16:43:46 | 154,335,724 | 62 | 22 | BSD-3-Clause | 2023-05-31T16:43:48 | 2018-10-23T13:45:44 | C++ | UTF-8 | Python | false | false | 3,587 | py | # Copyright (c) 2017-2021 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from m5.objects.ClockedObject import ClockedObject
from m5.params import *
from m5.proxy import *
class ProtocolTester(ClockedObject):
type = 'ProtocolTester'
cxx_header = "cpu/testers/gpu_ruby_test/protocol_tester.hh"
cxx_class = 'gem5::ProtocolTester'
cpu_ports = VectorRequestPort("Ports for CPUs")
dma_ports = VectorRequestPort("Ports for DMAs")
cu_vector_ports = VectorRequestPort("Vector ports for GPUs")
cu_sqc_ports = VectorRequestPort("SQC ports for GPUs")
cu_scalar_ports = VectorRequestPort("Scalar ports for GPUs")
cu_token_ports = VectorRequestPort("Token ports for GPU")
cus_per_sqc = Param.Int(4, "Number of CUs per SQC")
cus_per_scalar = Param.Int(4, "Number of CUs per scalar cache")
wavefronts_per_cu = Param.Int(1, "Number of wavefronts per CU")
workitems_per_wavefront = Param.Int(64, "Number of workitems per wf")
max_cu_tokens = Param.Int(4, "Maximum number of tokens, i.e., the number"
" of instructions that can be uncoalesced"
" before back-pressure occurs from the"
" coalescer.")
cpu_threads = VectorParam.CpuThread("All cpus")
dma_threads = VectorParam.DmaThread("All DMAs")
wavefronts = VectorParam.GpuWavefront("All wavefronts")
num_atomic_locations = Param.Int(2, "Number of atomic locations")
num_normal_locs_per_atomic = Param.Int(1000, \
"Number of normal locations per atomic")
episode_length = Param.Int(10, "Number of actions per episode")
max_num_episodes = Param.Int(20, "Maximum number of episodes")
debug_tester = Param.Bool(False, "Are we debugging the tester?")
random_seed = Param.Int(0, "Random seed number. Default value (0) means \
using runtime-specific value.")
log_file = Param.String("Log file's name")
system = Param.System(Parent.any, "System we belong to")
| [
"[email protected]"
] | |
2678427304e86a98502f35d1db2967dda840a57b | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/can-you-eat-your-favorite-candy-on-your-favorite-day.py | bdb18cca50672369c8859fc0b27ba4afe75dc6ed | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 429 | py | # Time: O(n)
# Space: O(n)
class Solution(object):
def canEat(self, candiesCount, queries):
"""
:type candiesCount: List[int]
:type queries: List[List[int]]
:rtype: List[bool]
"""
prefix = [0]*(len(candiesCount)+1)
for i, c in enumerate(candiesCount):
prefix[i+1] = prefix[i]+c
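        # For a query (t, d, c): after day d (0-indexed) we have eaten at least d+1
        # candies (one per day) and at most (d+1)*c. Type t is reachable on day d iff
        # eating at the maximum rate gets past all earlier types (prefix[t] < (d+1)*c,
        # i.e. prefix[t]//c < d+1) and eating at the minimum rate has not already gone
        # past type t (d+1 <= prefix[t+1]).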
return [prefix[t]//c < d+1 <= prefix[t+1]//1 for t, d, c in queries]
| [
"[email protected]"
] | |
d306243861c84fe774348629e0f6ea3b171e152b | e3b9aa9b17ebb55e53dbc4fa9d1f49c3a56c6488 | /duo_admin/komand_duo_admin/actions/enroll_user/__init__.py | 7def71a20edba019a1102a426347c6cd09323cbe | [
"MIT"
] | permissive | OSSSP/insightconnect-plugins | ab7c77f91c46bd66b10db9da1cd7571dfc048ab7 | 846758dab745170cf1a8c146211a8bea9592e8ff | refs/heads/master | 2023-04-06T23:57:28.449617 | 2020-03-18T01:24:28 | 2020-03-18T01:24:28 | 248,185,529 | 1 | 0 | MIT | 2023-04-04T00:12:18 | 2020-03-18T09:14:53 | null | UTF-8 | Python | false | false | 71 | py | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import EnrollUser
| [
"[email protected]"
] | |
ab874f54709718eb18eb3c5f718ae9204a92281a | b4bb9a937e0904db89c6496389f49ae555258fc5 | /apps/messages.py | b446216ae5a1105af91ba51c24960a4feb5e9fa3 | [] | no_license | vitoralves/python-api | 3e1f5f77ba61e0df2770c9d24240b46ee9c37449 | 125172ee7906392c49884f8e8fdf21bc9aa60c2c | refs/heads/master | 2020-05-24T04:21:48.857073 | 2019-05-22T16:19:52 | 2019-05-22T16:19:52 | 187,090,895 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | MSG_FIELD_REQUIRED = 'Campo obrigatório.'
MSG_INVALID_DATA = 'Ocorreu um erro nos campos informados.'
MSG_DOES_NOT_EXIST = 'Este(a) {} não existe.'
MSG_EXCEPTION = 'Ocorreu um erro no servidor. Contate o administrador.'
MSG_ALREADY_EXISTS = 'Já existe um(a) {} com estes dados.'
MSG_NO_DATA = "Os dados não podem ser nulos."
MSG_PASSWORD_WRONG = "As senhas não conferem."
MSG_RESOURCE_CREATED = "{} criado com sucesso."
MSG_RESOURCE_FETCHED_PAGINATED = 'Lista os/as {} paginados(as).'
MSG_RESOURCE_FETCHED = '{} retornado(a).'
MSG_RESOURCE_UPDATED = '{} atualizado(a).'
MSG_RESOURCE_DELETED = '{} deletado(a).'
MSG_TOKEN_CREATED = 'Token criado.'
MSG_INVALID_CREDENTIALS = 'As credenciais estão inválidas para log in.'
MSG_TOKEN_EXPIRED = 'Token expirou.'
MSG_PERMISSION_DENIED = 'Permissão negada.'
| [
"="
] | = |
fe31ab89f3e3accf47cecdd7b82dfdfe1dc82ed0 | 66e6360325b781ed0791868765f1fd8a6303726f | /TB2009/WorkDirectory/5161 Profile Check/Profile_108541.py | 2258d75bb8e0e950291c863f0631348a9989fb97 | [] | no_license | alintulu/FHead2011PhysicsProject | c969639b212d569198d8fce2f424ce866dcfa881 | 2568633d349810574354ad61b0abab24a40e510e | refs/heads/master | 2022-04-28T14:19:30.534282 | 2020-04-23T17:17:32 | 2020-04-23T17:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,101 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("ProfileCleanedMIP")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("HcalTBSource",
fileNames = cms.untracked.vstring("file:/tmp/chenyi/HTB_108541.root"),
streams = cms.untracked.vstring('Chunk699', 'HCAL_Trigger', 'HCAL_SlowData', 'HCAL_QADCTDC', 'HCAL_DCC021')
)
process.tbunpack = cms.EDFilter("HcalTBObjectUnpacker",
#IncludeUnmatchedHits = cms.untracked.bool(False),
HcalTriggerFED = cms.untracked.int32(1),
HcalVLSBFED = cms.untracked.int32(699),
HcalTDCFED = cms.untracked.int32(8),
HcalQADCFED = cms.untracked.int32(8),
HcalSlowDataFED = cms.untracked.int32(3),
ConfigurationFile = cms.untracked.string('configQADCTDC_TB2009.txt')
)
process.vlsbinfo = cms.EDProducer("VLSBInformationProducer",
minSample = cms.untracked.uint32(0),
maxSample = cms.untracked.uint32(31),
baselineSamples = cms.untracked.uint32(2),
useMotherBoard0 = cms.untracked.bool(True),
useMotherBoard1 = cms.untracked.bool(True),
useMotherBoard2 = cms.untracked.bool(False),
useMotherBoard3 = cms.untracked.bool(True),
usePedestalMean = cms.untracked.bool(True),
pedestalMean = cms.untracked.string('PedestalMean_108541.txt'),
mip = cms.untracked.string('SecondaryMIP.txt'),
roughmip = cms.untracked.string('PercentageCorrectedGeV.txt'),
secondaryShift = cms.untracked.string("PercentageCorrectedGeV_SecondaryShift.txt"),
beamEnergy = cms.untracked.double(150),
adcMap = cms.untracked.string('FinalAdcMapping_All.txt'),
lowestSampleSubtraction = cms.untracked.bool(True),
numberOfSamplesForSubtraction = cms.untracked.int32(16),
numberOfSamplesToSkip = cms.untracked.int32(16)
)
process.averagecharge = cms.EDAnalyzer("FillAverageChargeLayerAnalyzer",
output = cms.untracked.string("TotalEnergy_108541.root"),
textOutput = cms.untracked.bool(True),
interpolate = cms.untracked.bool(False) # interpolate for missing channels by averaging neighboring channels
)
process.averagecharge_interpolated = cms.EDAnalyzer("FillAverageChargeLayerAnalyzer",
output = cms.untracked.string("TotalEnergy_Interpolated_108541.root"),
textOutput = cms.untracked.bool(True),
interpolate = cms.untracked.bool(True)
)
process.filladc = cms.EDAnalyzer("FillAdcDistributionAnalyzer",
invert = cms.untracked.bool(False),
highdef = cms.untracked.bool(True),
divideMIP = cms.untracked.bool(False),
baselineSubtraction = cms.untracked.bool(True),
output = cms.untracked.string("AdcDistribution_108541.root")
)
process.ABCcut = cms.EDFilter("SingleTowerParticleFilter")
process.MessageLogger = cms.Service("MessageLogger",
default = cms.untracked.PSet(
reportEvery = cms.untracked.int32(239)
)
)
process.muonveto = cms.EDFilter("MuonVetoFilter")
process.p = cms.Path(
process.tbunpack *
process.ABCcut *
process.vlsbinfo *
process.muonveto *
process.averagecharge
# process.averagecharge_interpolated *
# process.filladc
)
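# Hedged usage note (not part of the original configuration): a cmsRun-style
# config like this one would normally be executed from a CMSSW environment as
#   cmsRun Profile_108541.py
# after pointing fileNames above at a local copy of HTB_108541.root.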
| [
"[email protected]"
] | |
43a44eb94d4c3cdc0eb12a66ca6aeb7e6f8ab7c6 | 49253f12cea4b2ec1df4d68876c3c330fec3f52b | /001_数据结构相关/001_set集合_交集_并集_差集_对称差集.py | f1a21ae3614ae074ef531ce11370b4832eeadf37 | [] | no_license | FelixZFB/Python_development_skills_summary | b2877652a5396936a28d5c65fb407df201ffa158 | 998679496de8385bda34734f83d927a7d340876a | refs/heads/master | 2020-06-09T16:58:51.242686 | 2020-02-27T07:02:48 | 2020-02-27T07:02:48 | 193,472,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # -*- coding:utf-8 -*-
#
# Sets support a series of standard operations, including union, intersection,
# difference and symmetric difference, for example:
# a = t | s  # union of t and s
# b = t & s  # intersection of t and s
# c = t - s  # difference (items in t that are not in s)
# d = t ^ s  # symmetric difference (items in t or s, but not in both)
a = [1, 5, 10, 15, 10]
b = [1, 5, 10, 9, 12]
# Building a set drops duplicate elements
print(set(a))
print("*" * 50)
# Union
c1 = set(a) | set(b)
print(c1)
# Intersection
c2 = set(a) & set(b)
print(c2)
# Difference
c3 = set(a) - set(b)
print(c3)
c3 = set(b) - set(a)
print(c3)
# Symmetric difference
c4 = set(a) ^ set(b)
print(c4) | [
"[email protected]"
] | |
81baef8090682ce775be599e4786806b1672e33f | 8a7abed7c441600a66bf2ef9135ff3a367ac0eb2 | /website/goals/migrations/0001_initial.py | 00da811bea795906390ec6595dd4df58f5432e91 | [] | no_license | mrooney/mikesgoals | 094d30160817879243b7539df5a3759d19583edc | dd0b0aee7ce20d43852cf694bc1ecb5af23dde94 | refs/heads/master | 2023-04-09T16:10:16.008923 | 2022-07-07T17:33:00 | 2022-07-07T17:33:00 | 4,474,379 | 2 | 0 | null | 2023-03-31T14:38:43 | 2012-05-28T20:30:43 | Python | UTF-8 | Python | false | false | 1,019 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Goal'
db.create_table('goals_goal', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
('frequency', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('goals', ['Goal'])
def backwards(self, orm):
# Deleting model 'Goal'
db.delete_table('goals_goal')
models = {
'goals.goal': {
'Meta': {'object_name': 'Goal'},
'frequency': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['goals'] | [
"[email protected]"
] | |
6009fe56a5b567eb3751301b21273398f872f28d | b13c57843cb8886c6f5d630ca099ad9130b26f25 | /python/장고/first.py | 94cc29b0619ffd718f3cfc6ee9a510900562b741 | [] | no_license | schw240/07.27-12.1_CLOUD | 6b563318f7208b843a13634a1cf46206197d6dfc | 8b4dc2d31e5d2ba96bde143116aba3ba0dad7a49 | refs/heads/master | 2023-03-25T15:44:03.555567 | 2021-03-30T02:09:32 | 2021-03-30T02:09:32 | 282,791,349 | 4 | 0 | null | 2021-03-19T15:00:00 | 2020-07-27T04:10:56 | Jupyter Notebook | UTF-8 | Python | false | false | 1,297 | py | from http.server import BaseHTTPRequestHandler, HTTPServer
import datetime
class HelloHandler(BaseHTTPRequestHandler):
def do_GET(self):
print(self.path)
if self.path == '/my':
self.send_response(200)
self.end_headers()
self.wfile.write("MyPage!".encode('utf-8'))
elif self.path == '/portfolio':
self.send_response(200)
self.end_headers()
self.wfile.write("Portfolio!".encode('utf-8'))
elif self.path == '/':
html = f"""
<html>
<head>
<title>나의홈페이지</title>
</head>
<body>
<h1>안녕하세요~ 저의 웹사이트에 오신걸 환영합니다.</h1>
<h2>{datetime.datetime.now()}</h2>
</body>
</html>
"""
self.send_response(200)
self.send_header("content-type", "text/html; charset=UTF-8")
self.end_headers()
self.wfile.write(html.encode('utf-8'))
else:
self.send_response(404)
self.end_headers()
self.wfile.write("404".encode('utf-8'))
if __name__ == '__main__':
server = HTTPServer(('', 8888), HelloHandler)
print("Start Server")
server.serve_forever() | [
"[email protected]"
] | |
1f59f228ff0e99901be7116e146557c93e7e6de9 | 8957f0b42ba945399a2eeb71f796c11c9eb35b06 | /lib/shutil.py | 9fbc746cde5e9927a18068b6eab8284c354c82f8 | [] | no_license | notro/tmp_CircuitPython_stdlib | 4de177cbb45b2209f07171c27f844c7d377dffc9 | 641727294039a9441c35ba1a1d22de403664b710 | refs/heads/master | 2020-03-27T18:26:33.544047 | 2019-02-15T20:49:34 | 2019-02-15T20:49:34 | 146,922,496 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 41,703 | py | #"""Utility functions for copying and archiving files and directory trees.
#
#XXX The functions here don't copy the resource fork or other metadata on Mac.
#
#"""
#
import os
import sys
import stat
from os.path import abspath
import fnmatch
#import collections
#import errno
#import tarfile
#
#try:
# import bz2
# del bz2
# _BZ2_SUPPORTED = True
#except ImportError:
# _BZ2_SUPPORTED = False
#
#try:
# from pwd import getpwnam
#except ImportError:
# getpwnam = None
#
#try:
# from grp import getgrnam
#except ImportError:
# getgrnam = None
#
#__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
# "copytree", "move", "rmtree", "Error", "SpecialFileError",
# "ExecError", "make_archive", "get_archive_formats",
# "register_archive_format", "unregister_archive_format",
# "get_unpack_formats", "register_unpack_format",
# "unregister_unpack_format", "unpack_archive",
# "ignore_patterns", "chown", "which", "get_terminal_size",
# "SameFileError"]
# # disk_usage is added later, if available on the platform
#
class Error(OSError):
pass
class SameFileError(Error):
"""Raised when source and destination are the same file."""
class SpecialFileError(OSError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(OSError):
"""Raised when a command could not be executed"""
class ReadError(OSError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
    and unpacking registries fails"""
#def copyfileobj(fsrc, fdst, length=16*1024):
def copyfileobj(fsrc, fdst, length=128): ###
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
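# Hedged note (not an upstream shutil comment): the default chunk size above was
# cut from 16*1024 to 128 bytes by this port (see the ### marker), presumably to
# suit small-memory targets; the call pattern itself is unchanged, e.g.
#   with open('a.bin', 'rb') as fsrc, open('b.bin', 'wb') as fdst:
#       copyfileobj(fsrc, fdst)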
def _samefile(src, dst):
# # Macintosh, Unix.
# if hasattr(os.path, 'samefile'):
# try:
# return os.path.samefile(src, dst)
# except OSError:
# return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst, *, follow_symlinks=True):
"""Copy data from src to dst.
If follow_symlinks is not set and src is a symbolic link, a new
symlink will be created instead of copying the file it points to.
"""
if _samefile(src, dst):
raise SameFileError("{!r} and {!r} are the same file".format(src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
# else:
# # XXX What about other special files? (sockets, devices...)
# if stat.S_ISFIFO(st.st_mode):
# raise SpecialFileError("`%s` is a named pipe" % fn)
# if not follow_symlinks and os.path.islink(src):
# os.symlink(os.readlink(src), dst)
# else:
if True: ###
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
return dst
#def copymode(src, dst, *, follow_symlinks=True):
# """Copy mode bits from src to dst.
#
# If follow_symlinks is not set, symlinks aren't followed if and only
# if both `src` and `dst` are symlinks. If `lchmod` isn't available
# (e.g. Linux) this method does nothing.
#
# """
# if not follow_symlinks and os.path.islink(src) and os.path.islink(dst):
# if hasattr(os, 'lchmod'):
# stat_func, chmod_func = os.lstat, os.lchmod
# else:
# return
# elif hasattr(os, 'chmod'):
# stat_func, chmod_func = os.stat, os.chmod
# else:
# return
#
# st = stat_func(src)
# chmod_func(dst, stat.S_IMODE(st.st_mode))
#
#if hasattr(os, 'listxattr'):
# def _copyxattr(src, dst, *, follow_symlinks=True):
# """Copy extended filesystem attributes from `src` to `dst`.
#
# Overwrite existing attributes.
#
# If `follow_symlinks` is false, symlinks won't be followed.
#
# """
#
# try:
# names = os.listxattr(src, follow_symlinks=follow_symlinks)
# except OSError as e:
# if e.errno not in (errno.ENOTSUP, errno.ENODATA):
# raise
# return
# for name in names:
# try:
# value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
# os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
# except OSError as e:
# if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
# raise
#else:
# def _copyxattr(*args, **kwargs):
# pass
#
#def copystat(src, dst, *, follow_symlinks=True):
# """Copy all stat info (mode bits, atime, mtime, flags) from src to dst.
#
# If the optional flag `follow_symlinks` is not set, symlinks aren't followed if and
# only if both `src` and `dst` are symlinks.
#
# """
# def _nop(*args, ns=None, follow_symlinks=None):
# pass
#
# # follow symlinks (aka don't not follow symlinks)
# follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
# if follow:
# # use the real function if it exists
# def lookup(name):
# return getattr(os, name, _nop)
# else:
# # use the real function only if it exists
# # *and* it supports follow_symlinks
# def lookup(name):
# fn = getattr(os, name, _nop)
# if fn in os.supports_follow_symlinks:
# return fn
# return _nop
#
# st = lookup("stat")(src, follow_symlinks=follow)
# mode = stat.S_IMODE(st.st_mode)
# lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
# follow_symlinks=follow)
# try:
# lookup("chmod")(dst, mode, follow_symlinks=follow)
# except NotImplementedError:
# # if we got a NotImplementedError, it's because
# # * follow_symlinks=False,
# # * lchown() is unavailable, and
# # * either
# # * fchownat() is unavailable or
# # * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW.
# # (it returned ENOSUP.)
# # therefore we're out of options--we simply cannot chown the
# # symlink. give up, suppress the error.
# # (which is what shutil always did in this circumstance.)
# pass
# if hasattr(st, 'st_flags'):
# try:
# lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
# except OSError as why:
# for err in 'EOPNOTSUPP', 'ENOTSUP':
# if hasattr(errno, err) and why.errno == getattr(errno, err):
# break
# else:
# raise
# _copyxattr(src, dst, follow_symlinks=follow)
#
def copy(src, dst, *, follow_symlinks=True):
"""Copy data and mode bits ("cp src dst"). Return the file's destination.
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
If source and destination are the same file, a SameFileError will be
raised.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
# copymode(src, dst, follow_symlinks=follow_symlinks)
return dst
def copy2(src, dst, *, follow_symlinks=True):
"""Copy data and all stat info ("cp -p src dst"). Return the file's
destination."
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
# copystat(src, dst, follow_symlinks=follow_symlinks)
return dst
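# Hedged observation (not an upstream comment): with copystat() commented out in
# this port, copy2() above behaves the same as copy(): the data is copied but no
# mode bits or timestamps are carried over.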
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
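# Hedged illustration (not part of the upstream docstring): ignore_patterns()
# only builds the callable that copytree() consults once per directory, e.g.
#   copytree(src, dst, ignore=ignore_patterns('*.pyc', 'tmp*'))
# and that callable can be exercised on its own:
#   ignore_patterns('*.pyc')('some/dir', ['a.py', 'b.pyc'])   # -> {'b.pyc'}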
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
# if os.path.islink(srcname):
# linkto = os.readlink(srcname)
# if symlinks:
# # We can't just leave it to `copy_function` because legacy
# # code with a custom `copy_function` may rely on copytree
# # doing the right thing.
# os.symlink(linkto, dstname)
# copystat(srcname, dstname, follow_symlinks=not symlinks)
# else:
# # ignore dangling symlink if the flag is on
# if not os.path.exists(linkto) and ignore_dangling_symlinks:
# continue
# # otherwise let the copy occurs. copy2 will raise an error
# if os.path.isdir(srcname):
# copytree(srcname, dstname, symlinks, ignore,
# copy_function)
# else:
# copy_function(srcname, dstname)
# elif os.path.isdir(srcname):
if os.path.isdir(srcname): ###
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except OSError as why:
errors.append((srcname, dstname, str(why)))
# try:
# copystat(src, dst)
# except OSError as why:
# # Copying file access times may fail on Windows
# if getattr(why, 'winerror', None) is None:
# errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
return dst
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
# try:
# if os.path.islink(path):
# # symlinks to directories are forbidden, see bug #1669
# raise OSError("Cannot call rmtree on a symbolic link")
# except OSError:
# onerror(os.path.islink, path, sys.exc_info())
# # can't continue even if onerror hook returns
# return
names = []
try:
names = os.listdir(path)
except OSError:
if onerror is None: ###
raise ###
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
os.unlink(fullname)
except OSError:
if onerror is None: ###
raise ###
onerror(os.unlink, fullname, sys.exc_info())
try:
os.rmdir(path)
except OSError:
if onerror is None: ###
raise ###
onerror(os.rmdir, path, sys.exc_info())
## Version using fd-based APIs to protect against races
#def _rmtree_safe_fd(topfd, path, onerror):
# names = []
# try:
# names = os.listdir(topfd)
# except OSError as err:
# err.filename = path
# onerror(os.listdir, path, sys.exc_info())
# for name in names:
# fullname = os.path.join(path, name)
# try:
# orig_st = os.stat(name, dir_fd=topfd, follow_symlinks=False)
# mode = orig_st.st_mode
# except OSError:
# mode = 0
# if stat.S_ISDIR(mode):
# try:
# dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd)
# except OSError:
# onerror(os.open, fullname, sys.exc_info())
# else:
# try:
# if os.path.samestat(orig_st, os.fstat(dirfd)):
# _rmtree_safe_fd(dirfd, fullname, onerror)
# try:
# os.rmdir(name, dir_fd=topfd)
# except OSError:
# onerror(os.rmdir, fullname, sys.exc_info())
# else:
# try:
# # This can only happen if someone replaces
# # a directory with a symlink after the call to
# # stat.S_ISDIR above.
# raise OSError("Cannot call rmtree on a symbolic "
# "link")
# except OSError:
# onerror(os.path.islink, fullname, sys.exc_info())
# finally:
# os.close(dirfd)
# else:
# try:
# os.unlink(name, dir_fd=topfd)
# except OSError:
# onerror(os.unlink, fullname, sys.exc_info())
#
#_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
# os.supports_dir_fd and
# os.listdir in os.supports_fd and
# os.stat in os.supports_follow_symlinks)
#
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
# elif onerror is None:
# def onerror(*args):
# raise
# if _use_fd_functions:
# # While the unsafe rmtree works fine on bytes, the fd based does not.
# if isinstance(path, bytes):
# path = os.fsdecode(path)
# # Note: To guard against symlink races, we use the standard
# # lstat()/open()/fstat() trick.
# try:
# orig_st = os.lstat(path)
# except Exception:
# onerror(os.lstat, path, sys.exc_info())
# return
# try:
# fd = os.open(path, os.O_RDONLY)
# except Exception:
# onerror(os.lstat, path, sys.exc_info())
# return
# try:
# if os.path.samestat(orig_st, os.fstat(fd)):
# _rmtree_safe_fd(fd, path, onerror)
# try:
# os.rmdir(path)
# except OSError:
# onerror(os.rmdir, path, sys.exc_info())
# else:
# try:
# # symlinks to directories are forbidden, see bug #1669
# raise OSError("Cannot call rmtree on a symbolic link")
# except OSError:
# onerror(os.path.islink, path, sys.exc_info())
# finally:
# os.close(fd)
# else:
# return _rmtree_unsafe(path, onerror)
return _rmtree_unsafe(path, onerror) ###
## Allow introspection of whether or not the hardening against symlink
## attacks is supported on the current platform
#rmtree.avoids_symlink_attacks = _use_fd_functions
#
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
sep = os.path.sep + (os.path.altsep or '')
return os.path.basename(path.rstrip(sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command. Return the file or directory's
destination.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed. Symlinks are
recreated under the new name if os.rename() fails because of cross
filesystem renames.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
# if os.path.islink(src):
# linkto = os.readlink(src)
# os.symlink(linkto, real_dst)
# os.unlink(src)
# elif os.path.isdir(src):
if os.path.isdir(src): ###
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
return real_dst
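# Hedged usage sketch (not an upstream comment): in this stripped-down port,
# move() still covers the common cases, e.g.
#   move('old.txt', 'archive/')        # into an existing directory
#   move('tmp_dir', 'tmp_dir_backup')  # rename, or copytree+rmtree across filesystems
# but the symlink branches are commented out above, so symbolic links are not
# recreated when a cross-filesystem move falls back to copying.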
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
#def _get_gid(name):
# """Returns a gid, given a group name."""
# if getgrnam is None or name is None:
# return None
# try:
# result = getgrnam(name)
# except KeyError:
# result = None
# if result is not None:
# return result[2]
# return None
#
#def _get_uid(name):
# """Returns an uid, given a user name."""
# if getpwnam is None or name is None:
# return None
# try:
# result = getpwnam(name)
# except KeyError:
# result = None
# if result is not None:
# return result[2]
# return None
#
#def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
# owner=None, group=None, logger=None):
# """Create a (possibly compressed) tar file from all the files under
# 'base_dir'.
#
# 'compress' must be "gzip" (the default), "bzip2", or None.
#
# 'owner' and 'group' can be used to define an owner and a group for the
# archive that is being built. If not provided, the current owner and group
# will be used.
#
# The output tar file will be named 'base_name' + ".tar", possibly plus
# the appropriate compression extension (".gz", or ".bz2").
#
# Returns the output filename.
# """
# tar_compression = {'gzip': 'gz', None: ''}
# compress_ext = {'gzip': '.gz'}
#
# if _BZ2_SUPPORTED:
# tar_compression['bzip2'] = 'bz2'
# compress_ext['bzip2'] = '.bz2'
#
# # flags for compression program, each element of list will be an argument
# if compress is not None and compress not in compress_ext:
# raise ValueError("bad value for 'compress', or compression format not "
# "supported : {0}".format(compress))
#
# archive_name = base_name + '.tar' + compress_ext.get(compress, '')
# archive_dir = os.path.dirname(archive_name)
#
# if archive_dir and not os.path.exists(archive_dir):
# if logger is not None:
# logger.info("creating %s", archive_dir)
# if not dry_run:
# os.makedirs(archive_dir)
#
# # creating the tarball
# if logger is not None:
# logger.info('Creating tar archive')
#
# uid = _get_uid(owner)
# gid = _get_gid(group)
#
# def _set_uid_gid(tarinfo):
# if gid is not None:
# tarinfo.gid = gid
# tarinfo.gname = group
# if uid is not None:
# tarinfo.uid = uid
# tarinfo.uname = owner
# return tarinfo
#
# if not dry_run:
# tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
# try:
# tar.add(base_dir, filter=_set_uid_gid)
# finally:
# tar.close()
#
# return archive_name
#
#def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# # XXX see if we want to keep an external call here
# if verbose:
# zipoptions = "-r"
# else:
# zipoptions = "-rq"
# from distutils.errors import DistutilsExecError
# from distutils.spawn import spawn
# try:
# spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
# except DistutilsExecError:
# # XXX really should distinguish between "couldn't find
# # external 'zip' command" and "zip failed".
# raise ExecError("unable to create zip file '%s': "
# "could neither import the 'zipfile' module nor "
# "find a standalone zip utility") % zip_filename
#
#def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
# """Create a zip file from all the files under 'base_dir'.
#
# The output zip file will be named 'base_name' + ".zip". Uses either the
# "zipfile" Python module (if available) or the InfoZIP "zip" utility
# (if installed and found on the default search path). If neither tool is
# available, raises ExecError. Returns the name of the output zip
# file.
# """
# zip_filename = base_name + ".zip"
# archive_dir = os.path.dirname(base_name)
#
# if archive_dir and not os.path.exists(archive_dir):
# if logger is not None:
# logger.info("creating %s", archive_dir)
# if not dry_run:
# os.makedirs(archive_dir)
#
# # If zipfile module is not available, try spawning an external 'zip'
# # command.
# try:
# import zipfile
# except ImportError:
# zipfile = None
#
# if zipfile is None:
# _call_external_zip(base_dir, zip_filename, verbose, dry_run)
# else:
# if logger is not None:
# logger.info("creating '%s' and adding '%s' to it",
# zip_filename, base_dir)
#
# if not dry_run:
# with zipfile.ZipFile(zip_filename, "w",
# compression=zipfile.ZIP_DEFLATED) as zf:
# path = os.path.normpath(base_dir)
# zf.write(path, path)
# if logger is not None:
# logger.info("adding '%s'", path)
# for dirpath, dirnames, filenames in os.walk(base_dir):
# for name in sorted(dirnames):
# path = os.path.normpath(os.path.join(dirpath, name))
# zf.write(path, path)
# if logger is not None:
# logger.info("adding '%s'", path)
# for name in filenames:
# path = os.path.normpath(os.path.join(dirpath, name))
# if os.path.isfile(path):
# zf.write(path, path)
# if logger is not None:
# logger.info("adding '%s'", path)
#
# return zip_filename
#
#_ARCHIVE_FORMATS = {
# 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
# 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
# 'zip': (_make_zipfile, [], "ZIP file")
# }
#
#if _BZ2_SUPPORTED:
# _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
# "bzip2'ed tar-file")
#
#def get_archive_formats():
# """Returns a list of supported formats for archiving and unarchiving.
#
# Each element of the returned sequence is a tuple (name, description)
# """
# formats = [(name, registry[2]) for name, registry in
# _ARCHIVE_FORMATS.items()]
# formats.sort()
# return formats
#
#def register_archive_format(name, function, extra_args=None, description=''):
# """Registers an archive format.
#
# name is the name of the format. function is the callable that will be
# used to create archives. If provided, extra_args is a sequence of
# (name, value) tuples that will be passed as arguments to the callable.
# description can be provided to describe the format, and will be returned
# by the get_archive_formats() function.
# """
# if extra_args is None:
# extra_args = []
# if not callable(function):
# raise TypeError('The %s object is not callable' % function)
# if not isinstance(extra_args, (tuple, list)):
# raise TypeError('extra_args needs to be a sequence')
# for element in extra_args:
# if not isinstance(element, (tuple, list)) or len(element) !=2:
# raise TypeError('extra_args elements are : (arg_name, value)')
#
# _ARCHIVE_FORMATS[name] = (function, extra_args, description)
#
#def unregister_archive_format(name):
# del _ARCHIVE_FORMATS[name]
#
#def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
# dry_run=0, owner=None, group=None, logger=None):
# """Create an archive file (eg. zip or tar).
#
# 'base_name' is the name of the file to create, minus any format-specific
# extension; 'format' is the archive format: one of "zip", "tar", "bztar"
# or "gztar".
#
# 'root_dir' is a directory that will be the root directory of the
# archive; ie. we typically chdir into 'root_dir' before creating the
# archive. 'base_dir' is the directory where we start archiving from;
# ie. 'base_dir' will be the common prefix of all files and
# directories in the archive. 'root_dir' and 'base_dir' both default
# to the current directory. Returns the name of the archive file.
#
# 'owner' and 'group' are used when creating a tar archive. By default,
# uses the current owner and group.
# """
# save_cwd = os.getcwd()
# if root_dir is not None:
# if logger is not None:
# logger.debug("changing into '%s'", root_dir)
# base_name = os.path.abspath(base_name)
# if not dry_run:
# os.chdir(root_dir)
#
# if base_dir is None:
# base_dir = os.curdir
#
# kwargs = {'dry_run': dry_run, 'logger': logger}
#
# try:
# format_info = _ARCHIVE_FORMATS[format]
# except KeyError:
# raise ValueError("unknown archive format '%s'" % format)
#
# func = format_info[0]
# for arg, val in format_info[1]:
# kwargs[arg] = val
#
# if format != 'zip':
# kwargs['owner'] = owner
# kwargs['group'] = group
#
# try:
# filename = func(base_name, base_dir, **kwargs)
# finally:
# if root_dir is not None:
# if logger is not None:
# logger.debug("changing back to '%s'", save_cwd)
# os.chdir(save_cwd)
#
# return filename
#
#
#def get_unpack_formats():
# """Returns a list of supported formats for unpacking.
#
# Each element of the returned sequence is a tuple
# (name, extensions, description)
# """
# formats = [(name, info[0], info[3]) for name, info in
# _UNPACK_FORMATS.items()]
# formats.sort()
# return formats
#
#def _check_unpack_options(extensions, function, extra_args):
# """Checks what gets registered as an unpacker."""
# # first make sure no other unpacker is registered for this extension
# existing_extensions = {}
# for name, info in _UNPACK_FORMATS.items():
# for ext in info[0]:
# existing_extensions[ext] = name
#
# for extension in extensions:
# if extension in existing_extensions:
# msg = '%s is already registered for "%s"'
# raise RegistryError(msg % (extension,
# existing_extensions[extension]))
#
# if not callable(function):
# raise TypeError('The registered function must be a callable')
#
#
#def register_unpack_format(name, extensions, function, extra_args=None,
# description=''):
# """Registers an unpack format.
#
# `name` is the name of the format. `extensions` is a list of extensions
# corresponding to the format.
#
# `function` is the callable that will be
# used to unpack archives. The callable will receive archives to unpack.
# If it's unable to handle an archive, it needs to raise a ReadError
# exception.
#
# If provided, `extra_args` is a sequence of
# (name, value) tuples that will be passed as arguments to the callable.
# description can be provided to describe the format, and will be returned
# by the get_unpack_formats() function.
# """
# if extra_args is None:
# extra_args = []
# _check_unpack_options(extensions, function, extra_args)
# _UNPACK_FORMATS[name] = extensions, function, extra_args, description
#
#def unregister_unpack_format(name):
# """Removes the pack format from the registery."""
# del _UNPACK_FORMATS[name]
#
#def _ensure_directory(path):
# """Ensure that the parent directory of `path` exists"""
# dirname = os.path.dirname(path)
# if not os.path.isdir(dirname):
# os.makedirs(dirname)
#
#def _unpack_zipfile(filename, extract_dir):
# """Unpack zip `filename` to `extract_dir`
# """
# try:
# import zipfile
# except ImportError:
# raise ReadError('zlib not supported, cannot unpack this archive.')
#
# if not zipfile.is_zipfile(filename):
# raise ReadError("%s is not a zip file" % filename)
#
# zip = zipfile.ZipFile(filename)
# try:
# for info in zip.infolist():
# name = info.filename
#
# # don't extract absolute paths or ones with .. in them
# if name.startswith('/') or '..' in name:
# continue
#
# target = os.path.join(extract_dir, *name.split('/'))
# if not target:
# continue
#
# _ensure_directory(target)
# if not name.endswith('/'):
# # file
# data = zip.read(info.filename)
# f = open(target, 'wb')
# try:
# f.write(data)
# finally:
# f.close()
# del data
# finally:
# zip.close()
#
#def _unpack_tarfile(filename, extract_dir):
# """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
# """
# try:
# tarobj = tarfile.open(filename)
# except tarfile.TarError:
# raise ReadError(
# "%s is not a compressed or uncompressed tar file" % filename)
# try:
# tarobj.extractall(extract_dir)
# finally:
# tarobj.close()
#
#_UNPACK_FORMATS = {
# 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
# 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
# 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
# }
#
#if _BZ2_SUPPORTED:
# _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
# "bzip2'ed tar-file")
#
#def _find_unpack_format(filename):
# for name, info in _UNPACK_FORMATS.items():
# for extension in info[0]:
# if filename.endswith(extension):
# return name
# return None
#
#def unpack_archive(filename, extract_dir=None, format=None):
# """Unpack an archive.
#
# `filename` is the name of the archive.
#
# `extract_dir` is the name of the target directory, where the archive
# is unpacked. If not provided, the current working directory is used.
#
# `format` is the archive format: one of "zip", "tar", or "gztar". Or any
# other registered format. If not provided, unpack_archive will use the
# filename extension and see if an unpacker was registered for that
# extension.
#
# In case none is found, a ValueError is raised.
# """
# if extract_dir is None:
# extract_dir = os.getcwd()
#
# if format is not None:
# try:
# format_info = _UNPACK_FORMATS[format]
# except KeyError:
# raise ValueError("Unknown unpack format '{0}'".format(format))
#
# func = format_info[1]
# func(filename, extract_dir, **dict(format_info[2]))
# else:
# # we need to look at the registered unpackers supported extensions
# format = _find_unpack_format(filename)
# if format is None:
# raise ReadError("Unknown archive format '{0}'".format(filename))
#
# func = _UNPACK_FORMATS[format][1]
# kwargs = dict(_UNPACK_FORMATS[format][2])
# func(filename, extract_dir, **kwargs)
#
#
#if hasattr(os, 'statvfs'):
#
# __all__.append('disk_usage')
# _ntuple_diskusage = collections.namedtuple('usage', 'total used free')
#
# def disk_usage(path):
# """Return disk usage statistics about the given path.
#
# Returned value is a named tuple with attributes 'total', 'used' and
# 'free', which are the amount of total, used and free space, in bytes.
# """
# st = os.statvfs(path)
# free = st.f_bavail * st.f_frsize
# total = st.f_blocks * st.f_frsize
# used = (st.f_blocks - st.f_bfree) * st.f_frsize
# return _ntuple_diskusage(total, used, free)
#
#elif os.name == 'nt':
#
# import nt
# __all__.append('disk_usage')
# _ntuple_diskusage = collections.namedtuple('usage', 'total used free')
#
# def disk_usage(path):
# """Return disk usage statistics about the given path.
#
# Returned values is a named tuple with attributes 'total', 'used' and
# 'free', which are the amount of total, used and free space, in bytes.
# """
# total, free = nt._getdiskusage(path)
# used = total - free
# return _ntuple_diskusage(total, used, free)
#
#
#def chown(path, user=None, group=None):
# """Change owner user and group of the given path.
#
# user and group can be the uid/gid or the user/group names, and in that case,
# they are converted to their respective uid/gid.
# """
#
# if user is None and group is None:
# raise ValueError("user and/or group must be set")
#
# _user = user
# _group = group
#
# # -1 means don't change it
# if user is None:
# _user = -1
# # user can either be an int (the uid) or a string (the system username)
# elif isinstance(user, str):
# _user = _get_uid(user)
# if _user is None:
# raise LookupError("no such user: {!r}".format(user))
#
# if group is None:
# _group = -1
# elif not isinstance(group, int):
# _group = _get_gid(group)
# if _group is None:
# raise LookupError("no such group: {!r}".format(group))
#
# os.chown(path, _user, _group)
#
#def get_terminal_size(fallback=(80, 24)):
# """Get the size of the terminal window.
#
# For each of the two dimensions, the environment variable, COLUMNS
# and LINES respectively, is checked. If the variable is defined and
# the value is a positive integer, it is used.
#
# When COLUMNS or LINES is not defined, which is the common case,
# the terminal connected to sys.__stdout__ is queried
# by invoking os.get_terminal_size.
#
# If the terminal size cannot be successfully queried, either because
# the system doesn't support querying, or because we are not
# connected to a terminal, the value given in fallback parameter
# is used. Fallback defaults to (80, 24) which is the default
# size used by many terminal emulators.
#
# The value returned is a named tuple of type os.terminal_size.
# """
# # columns, lines are the working values
# try:
# columns = int(os.environ['COLUMNS'])
# except (KeyError, ValueError):
# columns = 0
#
# try:
# lines = int(os.environ['LINES'])
# except (KeyError, ValueError):
# lines = 0
#
# # only query if necessary
# if columns <= 0 or lines <= 0:
# try:
# size = os.get_terminal_size(sys.__stdout__.fileno())
# except (NameError, OSError):
# size = os.terminal_size(fallback)
# if columns <= 0:
# columns = size.columns
# if lines <= 0:
# lines = size.lines
#
# return os.terminal_size((columns, lines))
#
#def which(cmd, mode=os.F_OK | os.X_OK, path=None):
# """Given a command, mode, and a PATH string, return the path which
# conforms to the given mode on the PATH, or None if there is no such
# file.
#
# `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
# of os.environ.get("PATH"), or can be overridden with a custom search
# path.
#
# """
# # Check that a given file can be accessed with the correct mode.
# # Additionally check that `file` is not a directory, as on Windows
# # directories pass the os.access check.
# def _access_check(fn, mode):
# return (os.path.exists(fn) and os.access(fn, mode)
# and not os.path.isdir(fn))
#
# # If we're given a path with a directory part, look it up directly rather
# # than referring to PATH directories. This includes checking relative to the
# # current directory, e.g. ./script
# if os.path.dirname(cmd):
# if _access_check(cmd, mode):
# return cmd
# return None
#
# if path is None:
# path = os.environ.get("PATH", os.defpath)
# if not path:
# return None
# path = path.split(os.pathsep)
#
# if sys.platform == "win32":
# # The current directory takes precedence on Windows.
# if not os.curdir in path:
# path.insert(0, os.curdir)
#
# # PATHEXT is necessary to check on Windows.
# pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# # See if the given file matches any of the expected path extensions.
# # This will allow us to short circuit when given "python.exe".
# # If it does match, only test that one, otherwise we have to try
# # others.
# if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
# files = [cmd]
# else:
# files = [cmd + ext for ext in pathext]
# else:
# # On other platforms you don't have things like PATHEXT to tell you
# # what file suffixes are executable, so just pass on cmd as-is.
# files = [cmd]
#
# seen = set()
# for dir in path:
# normdir = os.path.normcase(dir)
# if not normdir in seen:
# seen.add(normdir)
# for thefile in files:
# name = os.path.join(dir, thefile)
# if _access_check(name, mode):
# return name
# return None
| [
"[email protected]"
] | |
393d712064d56ab0df11650c6d6f49b01aafb3b7 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-9296.py | 9ce70cba78da7c66126b2610ec5adcf9316b16a0 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,754 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
        j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
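# Build the vector [2, n), then repeatedly delete any later element that is
# divisible by an earlier surviving element, leaving the primes below n,
# which are printed one per line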
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
ad6cc0a08e8ba3d2ad47ab45d0395df6b071594b | 006341ca12525aa0979d6101600e78c4bd9532ab | /CMS/Zope-3.2.1/Dependencies/zope.app-Zope-3.2.1/zope.app/i18n/filters.py | 2807d5ca114aaa6b7749be72ef8b4ab16fdbd8fe | [
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] | permissive | germanfriday/code-examples-sandbox | d0f29e20a3eed1f8430d06441ac2d33bac5e4253 | 4c538584703754c956ca66392fdcecf0a0ca2314 | refs/heads/main | 2023-05-30T22:21:57.918503 | 2021-06-15T15:06:47 | 2021-06-15T15:06:47 | 377,200,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,811 | py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Translation Domain Message Export and Import Filters
$Id: filters.py 38178 2005-08-30 21:50:19Z mj $
"""
__docformat__ = 'restructuredtext'
import time, re
from types import StringTypes
from zope.interface import implements
from zope.i18n.interfaces import IMessageExportFilter, IMessageImportFilter
from zope.app.i18n.interfaces import ILocalTranslationDomain
class ParseError(Exception):
def __init__(self, state, lineno):
Exception.__init__(self, state, lineno)
self.state = state
self.lineno = lineno
def __str__(self):
return "state %s, line %s" % (self.state, self.lineno)
class GettextExportFilter(object):
implements(IMessageExportFilter)
__used_for__ = ILocalTranslationDomain
def __init__(self, domain):
self.domain = domain
def exportMessages(self, languages):
'See IMessageExportFilter'
domain = self.domain.domain
if isinstance(languages, StringTypes):
language = languages
elif len(languages) == 1:
language = languages[0]
else:
raise TypeError(
'Only one language at a time is supported for gettext export.')
dt = time.time()
dt = time.localtime(dt)
dt = time.strftime('%Y/%m/%d %H:%M', dt)
output = _file_header %(dt, language.encode('UTF-8'),
domain.encode('UTF-8'))
for msgid in self.domain.getMessageIds():
msgstr = self.domain.translate(msgid, target_language=language)
msgstr = msgstr.encode('UTF-8')
msgid = msgid.encode('UTF-8')
output += _msg_template %(msgid, msgstr)
return output
class GettextImportFilter(object):
implements(IMessageImportFilter)
__used_for__ = ILocalTranslationDomain
def __init__(self, domain):
self.domain = domain
def importMessages(self, languages, file):
'See IMessageImportFilter'
if isinstance(languages, StringTypes):
language = languages
elif len(languages) == 1:
language = languages[0]
else:
raise TypeError(
'Only one language at a time is supported for gettext export.')
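        # The entry with an empty msgid holds the PO header block; read the
        # charset from its Content-Type line and drop it from the translations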
result = parseGetText(file.readlines())[3]
headers = parserHeaders(''.join(result[('',)][1]))
del result[('',)]
charset = extractCharset(headers['content-type'])
for msg in result.items():
msgid = unicode(''.join(msg[0]), charset)
msgid = msgid.replace('\\n', '\n')
msgstr = unicode(''.join(msg[1][1]), charset)
msgstr = msgstr.replace('\\n', '\n')
self.domain.addMessage(msgid, msgstr, language)
def extractCharset(header):
charset = header.split('charset=')[-1]
return charset.lower()
def parserHeaders(headers_text):
headers = {}
for line in headers_text.split('\\n'):
name = line.split(':')[0]
value = ''.join(line.split(':')[1:])
headers[name.lower()] = value
return headers
def parseGetText(content):
# The regular expressions
com = re.compile('^#.*')
msgid = re.compile(r'^ *msgid *"(.*?[^\\]*)"')
msgstr = re.compile(r'^ *msgstr *"(.*?[^\\]*)"')
re_str = re.compile(r'^ *"(.*?[^\\])"')
blank = re.compile(r'^\s*$')
trans = {}
pointer = 0
state = 0
COM, MSGID, MSGSTR = [], [], []
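    # Simple state machine over the .po lines:
    #   state 0 - expecting the start of a new entry (comment or msgid)
    #   state 1 - reading leading '#' comment lines
    #   state 2 - reading the (possibly multi-line) msgid string
    #   state 3 - reading the (possibly multi-line) msgstr string; a new
    #             comment or msgid closes the entry and returns to state 0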
while pointer < len(content):
line = content[pointer]
#print 'STATE:', state
#print 'LINE:', line, content[pointer].strip()
if state == 0:
COM, MSGID, MSGSTR = [], [], []
if com.match(line):
COM.append(line.strip())
state = 1
pointer = pointer + 1
elif msgid.match(line):
MSGID.append(msgid.match(line).group(1))
state = 2
pointer = pointer + 1
elif blank.match(line):
pointer = pointer + 1
else:
raise ParseError(0, pointer + 1)
elif state == 1:
if com.match(line):
COM.append(line.strip())
state = 1
pointer = pointer + 1
elif msgid.match(line):
MSGID.append(msgid.match(line).group(1))
state = 2
pointer = pointer + 1
elif blank.match(line):
pointer = pointer + 1
else:
raise ParseError(1, pointer + 1)
elif state == 2:
if com.match(line):
COM.append(line.strip())
state = 2
pointer = pointer + 1
elif re_str.match(line):
MSGID.append(re_str.match(line).group(1))
state = 2
pointer = pointer + 1
elif msgstr.match(line):
MSGSTR.append(msgstr.match(line).group(1))
state = 3
pointer = pointer + 1
elif blank.match(line):
pointer = pointer + 1
else:
raise ParseError(2, pointer + 1)
elif state == 3:
if com.match(line) or msgid.match(line):
# print "\nEn", language, "detected", MSGID
trans[tuple(MSGID)] = (COM, MSGSTR)
state = 0
elif re_str.match(line):
MSGSTR.append(re_str.match(line).group(1))
state = 3
pointer = pointer + 1
elif blank.match(line):
pointer = pointer + 1
else:
raise ParseError(3, pointer + 1)
# the last also goes in
if tuple(MSGID):
trans[tuple(MSGID)] = (COM, MSGSTR)
return COM, MSGID, MSGSTR, trans
_file_header = '''
msgid ""
msgstr ""
"Project-Id-Version: Zope 3\\n"
"PO-Revision-Date: %s\\n"
"Last-Translator: Zope 3 Gettext Export Filter\\n"
"Zope-Language: %s\\n"
"Zope-Domain: %s\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
'''
_msg_template = '''
msgid "%s"
msgstr "%s"
'''
| [
"[email protected]"
] | |
666734fbab3cb22c4ae127e2c5eb2dfdc12998ad | e8f99a162207cba82d4e0f969d7bcdb2b9d8b522 | /bilibili/__init__.py | 8575f7bf75a23b0ea81613856cde28d78cc161cf | [] | no_license | TesterCC/Python3Scripts | edb5446278ebf13edb64336001081941ca27d67d | 58be67e1ffc74ef50289a885aa4ad05f58e2c383 | refs/heads/master | 2023-08-30T21:16:38.328045 | 2023-08-17T11:23:08 | 2023-08-17T11:23:08 | 93,401,996 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'MFC'
__time__ = '2020-04-22 06:50' | [
"[email protected]"
] | |
018a186845663f611cb74d50a4a07985de62ac46 | 1a093cb59320db7327d665910d8ed36ea5bba95b | /banana/analysis/mri/dwi.py | 06c6f5ca8cf63784e5d819b09cb611a69248c58d | [
"Apache-2.0"
] | permissive | MonashBI/banana | 23bfa1aff12fe0ded709c99679a1c0d9687e4ffa | 37364243b520ab14ac1243005dbd465f824542b4 | refs/heads/master | 2022-07-23T08:13:33.191149 | 2022-06-15T02:17:18 | 2022-06-15T02:17:18 | 134,526,663 | 3 | 3 | Apache-2.0 | 2019-11-13T23:00:15 | 2018-05-23T06:53:38 | Python | UTF-8 | Python | false | false | 49,248 | py | import os
from logging import getLogger
import tempfile
import subprocess as sp
from nipype.interfaces.utility import Merge, IdentityInterface
from nipype.interfaces.fsl import (
TOPUP, ApplyTOPUP, BET, FUGUE, Merge as FslMerge)
from nipype.interfaces.utility import Merge as merge_lists
from nipype.interfaces.fsl.epi import PrepareFieldmap, EddyQuad # , EddySquad
from nipype.interfaces.mrtrix3 import ResponseSD, Tractography
from nipype.interfaces.mrtrix3.utils import BrainMask, TensorMetrics
from nipype.interfaces.mrtrix3.reconst import (
FitTensor, ConstrainedSphericalDeconvolution)
# from nipype.workflows.dwi.fsl.tbss import create_tbss_all
# from banana.interfaces.noddi import (
# CreateROI, BatchNODDIFitting, SaveParamsAsNIfTI)
from nipype.interfaces import fsl, mrtrix3, utility
from arcana.utils.interfaces import MergeTuple, Chain
from arcana.data import FilesetSpec, InputFilesetSpec
from arcana.utils.interfaces import SelectSession
from arcana.analysis import ParamSpec, SwitchSpec
from arcana.exceptions import ArcanaMissingDataException, ArcanaNameError
from arcana.data.file_format import pdf_format
from banana.interfaces.motion_correction import GenTopupConfigFiles
from banana.interfaces.mrtrix import (
DWIPreproc, MRCat, ExtractDWIorB0, MRMath, DWIBiasCorrect, DWIDenoise,
MRCalc, DWIIntensityNorm, AverageResponse, DWI2Mask, MergeFslGrads)
from banana.requirement import (
fsl_req, mrtrix_req, ants_req)
from banana.interfaces.mrtrix import MRConvert, ExtractFSLGradients
from banana.analysis import AnalysisMetaClass
from banana.interfaces.motion_correction import (
PrepareDWI, AffineMatrixGeneration)
from banana.interfaces.dwi import TransformGradients, SelectShell
from banana.interfaces.utility import AppendPath
from banana.analysis.base import Analysis
from banana.bids_ import BidsInputs, BidsAssocInputs
from banana.exceptions import BananaUsageError
from banana.citation import (
mrtrix_cite, fsl_cite, eddy_cite, topup_cite, distort_correct_cite,
n4_cite, dwidenoise_cites, eddy_repol_cite)
from banana.file_format import (
mrtrix_image_format, nifti_gz_format, nifti_gz_x_format, fsl_bvecs_format,
fsl_bvals_format, text_format, dicom_format, eddy_par_format,
mrtrix_track_format, motion_mats_format, text_matrix_format,
directory_format, csv_format, zip_format, STD_IMAGE_FORMATS, json_format)
from .base import MriAnalysis
from .epi import EpiSeriesAnalysis, EpiAnalysis
logger = getLogger('banana')
class DwiAnalysis(EpiSeriesAnalysis, metaclass=AnalysisMetaClass):
desc = "Diffusion-weighted MRI contrast"
add_data_specs = [
InputFilesetSpec('anat_5tt', mrtrix_image_format,
desc=("A co-registered segmentation image taken from "
"freesurfer output and simplified into 5 tissue"
" types. Used in ACT streamlines tractography"),
optional=True),
InputFilesetSpec('anat_fs_recon_all', zip_format, optional=True,
desc=("Co-registered freesurfer recon-all output. "
"Used in building the connectome")),
InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),
FilesetSpec('grad_dirs', fsl_bvecs_format, 'preprocess_pipeline'),
FilesetSpec('grad_dirs_coreg', fsl_bvecs_format,
'series_coreg_pipeline',
desc=("The gradient directions coregistered to the "
"orientation of the coreg reference")),
FilesetSpec('bvalues', fsl_bvals_format, 'preprocess_pipeline',
desc=("")),
FilesetSpec('eddy_par', eddy_par_format, 'preprocess_pipeline',
desc=("Parameters used by Eddy preprocessing tool")),
FilesetSpec('eddy_qc', zip_format, 'preprocess_pipeline',
desc=("QC output generated by Eddy preprocessing tool")),
FilesetSpec('eddy_qc_summary', json_format, 'eddy_qc_summary_pipeline',
desc=("Study-wise database containing quality metrics and "
"data info."), frequency='per_visit'),
FilesetSpec('eddy_qc_report', pdf_format, 'eddy_qc_summary_pipeline',
desc=("Study-wise database containing quality metrics and "
"data info."), frequency='per_visit'),
FilesetSpec('noise_residual', mrtrix_image_format,
'preprocess_pipeline',
desc=("")),
FilesetSpec('tensor', nifti_gz_format, 'tensor_pipeline',
desc=("")),
FilesetSpec('tensor_residual', nifti_gz_format,
'residual_pipeline',
desc=("The residual signal after the tensor has been "
"fit to the signal")),
FilesetSpec('fa', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('adc', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('wm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('gm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('csf_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('avg_response', text_format, 'average_response_pipeline',
desc=("")),
FilesetSpec('wm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('gm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('csf_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('norm_intensity', mrtrix_image_format,
'intensity_normalisation_pipeline',
desc=("")),
FilesetSpec('norm_intens_fa_template', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_dataset',
desc=("")),
FilesetSpec('norm_intens_wm_mask', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_dataset',
desc=("")),
FilesetSpec('global_tracks', mrtrix_track_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('wm_mask', mrtrix_image_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('connectome', csv_format, 'connectome_pipeline',
desc=(""))]
add_param_specs = [
ParamSpec('pe_dir', None, dtype=str,
desc=("Phase-encoding direction of DW series")),
ParamSpec('intra_moco_parts', 0, dtype=int,
desc=("Number of partitions within a volume to motion "
"correct w.r.t the volume. If == 0, intra-volume MoCo "
"is disabled. Intra-volume MoCo requires slice timings"
" to be found in 'series' header")),
SwitchSpec('eddy_moco_by_suscep', False,
desc="Use susceptibility to determine motion correction"),
SwitchSpec('force_shelled', False,
desc=("Force eddy to treat gradient encoding scheme as "
"being shelled")),
ParamSpec('eddy_model', 'none',
choices=('none', 'linear', 'quadratic'),
desc=("Model for how diffusion gradients generate eddy "
"currents.")),
ParamSpec('tbss_skel_thresh', 0.2,
desc=("")),
ParamSpec('fsl_mask_f', 0.25,
desc=("")),
ParamSpec('bet_robust', True,
desc=("")),
ParamSpec('bet_f_threshold', 0.2,
desc=("")),
ParamSpec('bet_reduce_bias', False,
desc=("")),
ParamSpec('num_global_tracks', int(1e9),
desc=("")),
ParamSpec('global_tracks_cutoff', 0.05,
desc=("")),
SwitchSpec('preproc_denoise', False,
desc=("")),
SwitchSpec('response_algorithm', 'tax',
('tax', 'dhollander', 'msmt_5tt'),
desc=("")),
ParamSpec('num_shells', None, desc=('Number of b-value shells')),
MriAnalysis.param_spec('bet_method').with_new_choices('mrtrix'),
SwitchSpec('reorient2std', False,
desc=(""))]
primary_bids_input = BidsInputs(
spec_name='series', type='dwi',
valid_formats=(nifti_gz_x_format, nifti_gz_format))
default_bids_inputs = [primary_bids_input,
BidsAssocInputs(
spec_name='bvalues',
primary=primary_bids_input,
association='grads',
type='bval',
format=fsl_bvals_format),
BidsAssocInputs(
spec_name='grad_dirs',
primary=primary_bids_input,
association='grads',
type='bvec',
format=fsl_bvecs_format),
BidsAssocInputs(
spec_name='reverse_phase',
primary=primary_bids_input,
association='epi',
format=nifti_gz_format,
drop_if_missing=True)]
RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM = 5
primary_scan_name = 'series'
def b_shells(self):
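        """Return the sorted set of distinct b-values (rounded to the nearest
        10) found across all sessions, reading them from the provided
        'bvalues' inputs or, failing that, extracting them from the 'series'
        headers with mrconvert"""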
bpaths = []
if 'bvalues' in self.input_names:
bpaths = [f.path for f in self.spec('bvalues').slice]
elif 'series' in self.input_names:
mrtrix_ver = self.environment.satisfy(mrtrix_req.v(3.0))
tmp_dir = tempfile.mkdtemp()
self.environment.load(mrtrix_ver)
try:
bpaths = []
for fileset in self.spec('series').slice:
bpath = os.path.join(
tmp_dir, '{}__{}'.format(fileset.subject_id,
fileset.visit_id))
try:
sp.check_call(
('mrconvert {} {} -export_grad_fsl {} {}'
.format(fileset.path,
bpath + '.mif', bpath + '.bvec',
bpath + '.bval')), shell=True)
except sp.CalledProcessError as e:
logger.error(
("Could not extract bvalues from series file "
"'%s'"), fileset.path)
raise e
bpaths.append(bpath + '.bval')
finally:
self.environment.unload(mrtrix_ver)
else:
raise BananaUsageError(
"b-values not provided to study, required to determine "
"number of shells")
bvalues = set()
for bpath in bpaths:
with open(bpath) as f:
bvalues.update(round(float(b), -1)
for b in f.read().split())
return sorted(bvalues)
@property
def multi_tissue(self):
return self.branch('response_algorithm',
('msmt_5tt', 'dhollander'))
@property
def fod_algorithm(self):
if self.parameter('response_algorithm') == 'msmt_5tt':
algorithm = 'msmt_csd'
else:
algorithm = 'csd'
return algorithm
def fsl_grads(self, pipeline, coregistered=True):
"Adds and returns a node to the pipeline to merge the FSL grads and "
"bvecs"
try:
grad_fsl = pipeline.node('grad_fsl')
except ArcanaNameError:
if self.is_coregistered and coregistered:
grad_dirs = 'grad_dirs_coreg'
else:
grad_dirs = 'grad_dirs'
# Gradient merge node
grad_fsl = pipeline.add(
"grad_fsl",
MergeFslGrads(),
inputs={
'grad_dirs': (grad_dirs, fsl_bvecs_format),
'bvals': ('bvalues', fsl_bvals_format)})
return (grad_fsl, 'out')
def extract_magnitude_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
'extract_magnitude',
desc="Extracts the first b==0 volume from the series",
citations=[],
name_maps=name_maps)
dwiextract = pipeline.add(
'dwiextract',
ExtractDWIorB0(
bzero=True,
out_ext='.nii.gz'),
inputs={
'in_file': ('series', nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},
requirements=[mrtrix_req.v('3.0rc3')])
pipeline.add(
"extract_first_vol",
MRConvert(
coord=(3, 0),
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_file': (dwiextract, 'out_file')},
outputs={
'magnitude': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def preprocess_pipeline(self, **name_maps):
"""
Performs a series of FSL preprocessing steps, including Eddy and Topup
Parameters
----------
phase_dir : str{AP|LR|IS}
The phase encode direction
"""
# Determine whether we can correct for distortion, i.e. if reference
# scans are provided
# Include all references
references = [fsl_cite, eddy_cite, topup_cite, eddy_repol_cite,
distort_correct_cite, n4_cite]
if self.branch('preproc_denoise'):
references.extend(dwidenoise_cites)
pipeline = self.new_pipeline(
name='preprocess',
name_maps=name_maps,
desc=(
"Preprocess dMRI studies using distortion correction"),
citations=references)
dw_series = ('series', mrtrix_image_format)
# Denoise the dwi-scan
if self.branch('preproc_denoise'):
# Run denoising
denoise = pipeline.add(
'denoise',
DWIDenoise(
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_file': dw_series},
requirements=[mrtrix_req.v('3.0rc3')])
# Calculate residual noise
subtract_operands = pipeline.add(
'subtract_operands',
Merge(2),
inputs={
'in1': dw_series,
'in2': (denoise, 'noise')})
pipeline.add(
'subtract',
MRCalc(
operation='subtract',
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'operands': (subtract_operands, 'out')},
outputs={
'noise_residual': ('out_file', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
denoised = (denoise, 'out_file')
else:
denoised = dw_series
# Preproc kwargs
preproc_inputs = {'in_file': denoised}
preproc_kwargs = {}
if self.provided('grad_dirs') and self.provided('bvalues'):
# Gradient merge node
grad_fsl = pipeline.add(
"grad_fsl",
MergeFslGrads(),
inputs={
'grad_dirs': ('grad_dirs', fsl_bvecs_format),
'bvals': ('bvalues', fsl_bvals_format)})
preproc_inputs['grad_fsl'] = (grad_fsl, 'out')
elif self.spec('series').format not in (dicom_format,
mrtrix_image_format):
raise BananaUsageError(
"Either input 'series' image needs to gradient directions and "
"b-values in its header or they need to be explicitly "
"provided to 'grad_dirs' and 'bvalues' {}".format(self))
if self.provided('reverse_phase'):
if self.provided('magnitude', default_okay=False):
dwi_reference = ('magnitude', mrtrix_image_format)
else:
# Extract b=0 volumes
dwiextract = pipeline.add(
'dwiextract',
ExtractDWIorB0(
bzero=True,
out_ext='.mif'),
inputs=preproc_inputs,
requirements=[mrtrix_req.v('3.0rc3')])
# Get first b=0 from dwi b=0 volumes
extract_first_b0 = pipeline.add(
"extract_first_vol",
MRConvert(
coord=(3, 0),
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_file': (dwiextract, 'out_file')},
requirements=[mrtrix_req.v('3.0rc3')])
dwi_reference = (extract_first_b0, 'out_file')
merge_rphase = pipeline.add(
'merge_rphase',
Merge(2),
inputs={
'in1': dwi_reference,
'in2': ('reverse_phase', mrtrix_image_format)})
# Concatenate extracted forward rpe with reverse rpe
combined_images = pipeline.add(
'combined_images',
MRCat(
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0),
axis=3),
inputs={
'input_files': (merge_rphase, 'out')},
requirements=[mrtrix_req.v('3.0rc3')])
# Create node to extract the phase-encoding direction
# prep_dwi = pipeline.add(
# 'prepare_dwi',
# PrepareDWI(),
# inputs={
# 'pe_dir': ('ped', float),
# 'ped_polarity': ('pe_angle', float)})
preproc_kwargs['rpe_pair'] = True
# distortion_correction = True
preproc_inputs['se_epi'] = (combined_images, 'out_file')
else:
# distortion_correction = False
preproc_kwargs['rpe_none'] = True
if self.parameter('pe_dir') is not None:
preproc_kwargs['pe_dir'] = self.parameter('pe_dir')
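        # Eddy options: --repol replaces outlier slices, --cnr_maps writes
        # contrast-to-noise maps and --slm sets the second-level model for the
        # eddy-current field; --mporder (added below) enables intra-volume
        # (slice-to-volume) motion correction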
eddy_parameters = '--repol --cnr_maps --slm={}'.format(
self.parameter('eddy_model'))
if self.parameter('intra_moco_parts') > 0:
eddy_parameters += ' --mporder={}'.format(
self.parameter('intra_moco_parts'))
if self.branch('eddy_moco_by_suscep'):
eddy_parameters += ' --estimate_move_by_susceptibility'
if self.branch('force_shelled'):
eddy_parameters += ' --data_is_shelled '
preproc = pipeline.add(
'dwipreproc',
DWIPreproc(
no_clean_up=True,
out_file_ext='.mif',
eddy_parameters=eddy_parameters,
eddyqc_all='qc-all',
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0),
**preproc_kwargs),
inputs=preproc_inputs,
outputs={
'eddy_par': ('eddy_parameters', eddy_par_format),
'eddy_qc': ('eddyqc_all', directory_format)},
requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('6.0.1')],
wall_time=60)
# if distortion_correction:
# pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')
mask = pipeline.add(
'dwi2mask',
BrainMask(
out_file='brainmask.mif',
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_file': (preproc, 'out_file')},
requirements=[mrtrix_req.v('3.0rc3')])
# Create bias correct node
bias_correct = pipeline.add(
"bias_correct",
DWIBiasCorrect(
algorithm='ants'),
inputs={
'in_file': (preproc, 'out_file'),
'mask': (mask, 'out_file')},
outputs={
'series_preproc': ('out_file', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])
# Extract gradient directions that have been motion-corrected
# by dwipreproc
pipeline.add(
"extract_moco_grad",
ExtractFSLGradients(),
inputs={
'in_file': (bias_correct, 'out_file')},
outputs={
'grad_dirs': ('bvecs_file', fsl_bvecs_format),
'bvalues': ('bvals_file', fsl_bvals_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# Create node to reorient preproc out_file
if self.branch('reorient2std'):
raise NotImplementedError(
"Reorientation to standard isn't handle at this stage because "
"gradients would also need to be rotated accordingly by a "
"bespoke interface")
# reorient = pipeline.add(
# 'fslreorient2std',
# fsl.utils.Reorient2Std(
# output_type='NIFTI_GZ'),
# inputs={
# 'in_file': ('series', nifti_gz_format)},
# requirements=[fsl_req.v('5.0.9')])
# reoriented = (reorient, 'out_file')
else:
pass
# reoriented = ('series', nifti_gz_format)
return pipeline
def eddy_qc_summary_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='eddy_qc_summary',
name_maps=name_maps,
desc=("Run group-wise analysis of Eddy QC output"))
pipeline.add(
'eddy_squad',
EddySquad(),
inputs={
'quad_dirs': ('eddy_qc', directory_format)},
outputs={
'eddy_qc_summary': ('group_db_json', json_format),
'eddy_qc_report': ('group_qc_pdf', pdf_format)},
requirements=[fsl_req.v('5.0.11')],
joinfield=['quad_dirs'],
joinsource=self.SUBJECT_ID)
return pipeline
def brain_extraction_pipeline(self, **name_maps):
"""
Generates a whole brain mask using MRtrix's 'dwi2mask' command
Parameters
----------
mask_tool: Str
Can be either 'bet' or 'dwi2mask' depending on which mask tool you
want to use
"""
if self.branch('bet_method', 'mrtrix'):
pipeline = self.new_pipeline(
'brain_extraction',
desc="Generate brain mask from b0 images",
citations=[mrtrix_cite],
name_maps=name_maps)
if self.provided('coreg_ref'):
series = 'series_coreg'
else:
series = 'series_preproc'
# Create mask node
masker = pipeline.add(
'dwi2mask',
BrainMask(
out_file='brain_mask.nii.gz',
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_file': (series, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},
outputs={
'brain_mask': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
merge = pipeline.add(
'merge_operands',
Merge(2),
inputs={
'in1': ('mag_preproc', nifti_gz_format),
'in2': (masker, 'out_file')})
pipeline.add(
'apply_mask',
MRCalc(
operation='multiply',
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'operands': (merge, 'out')},
outputs={
'brain': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
else:
pipeline = super().brain_extraction_pipeline(**name_maps)
return pipeline
def series_coreg_pipeline(self, **name_maps):
pipeline = super().series_coreg_pipeline(**name_maps)
# Apply coregistration transform to gradients
pipeline.add(
'transform_grads',
TransformGradients(),
inputs={
'gradients': ('grad_dirs', fsl_bvecs_format),
'transform': ('coreg_fsl_mat', text_matrix_format)},
outputs={
'grad_dirs_coreg': ('transformed', fsl_bvecs_format)})
return pipeline
def intensity_normalisation_pipeline(self, **name_maps):
if self.num_sessions < 2:
raise ArcanaMissingDataException(
"Cannot normalise intensities of DWI images as analysis only "
"contains a single session")
elif self.num_sessions < self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM:
logger.warning(
"The number of sessions in the analysis ({}) is less than the "
"recommended number for intensity normalisation ({}). The "
"results may be unreliable".format(
self.num_sessions,
self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM))
pipeline = self.new_pipeline(
name='intensity_normalization',
desc="Corrects for B1 field inhomogeneity",
citations=[mrtrix_req.v('3.0rc3')],
name_maps=name_maps)
mrconvert = pipeline.add(
'mrconvert',
MRConvert(
out_ext='.mif',
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline)},
requirements=[mrtrix_req.v('3.0rc3')])
# Pair subject and visit ids together, expanding so they can be
# joined and chained together
session_ids = pipeline.add(
'session_ids',
utility.IdentityInterface(
['subject_id', 'visit_id']),
inputs={
'subject_id': (Analysis.SUBJECT_ID, int),
'visit_id': (Analysis.VISIT_ID, int)})
# Set up join nodes
join_fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']
join_over_subjects = pipeline.add(
'join_over_subjects',
utility.IdentityInterface(
join_fields),
inputs={
'masks': (self.brain_mask_spec_name, nifti_gz_format),
'dwis': (mrconvert, 'out_file'),
'subject_ids': (session_ids, 'subject_id'),
'visit_ids': (session_ids, 'visit_id')},
joinsource=self.SUBJECT_ID,
joinfield=join_fields)
join_over_visits = pipeline.add(
'join_over_visits',
Chain(
join_fields),
inputs={
'dwis': (join_over_subjects, 'dwis'),
'masks': (join_over_subjects, 'masks'),
'subject_ids': (join_over_subjects, 'subject_ids'),
'visit_ids': (join_over_subjects, 'visit_ids')},
joinsource=self.VISIT_ID,
joinfield=join_fields)
# Intensity normalization
intensity_norm = pipeline.add(
'dwiintensitynorm',
DWIIntensityNorm(
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_files': (join_over_visits, 'dwis'),
'masks': (join_over_visits, 'masks')},
outputs={
'norm_intens_fa_template': ('fa_template',
mrtrix_image_format),
'norm_intens_wm_mask': ('wm_mask', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# Set up expand nodes
pipeline.add(
'expand', SelectSession(),
inputs={
'subject_ids': (join_over_visits, 'subject_ids'),
'visit_ids': (join_over_visits, 'visit_ids'),
'inlist': (intensity_norm, 'out_files'),
'subject_id': (Analysis.SUBJECT_ID, int),
'visit_id': (Analysis.VISIT_ID, int)},
outputs={
'norm_intensity': ('item', mrtrix_image_format)})
# Connect inputs
return pipeline
def tensor_pipeline(self, **name_maps):
"""
        Fits the apparent diffusion tensor (DT) to each voxel of the image
"""
pipeline = self.new_pipeline(
name='tensor',
desc=("Estimates the apparent diffusion tensor in each "
"voxel"),
citations=[],
name_maps=name_maps)
# Create tensor fit node
pipeline.add(
'dwi2tensor',
FitTensor(
out_file='dti.nii.gz',
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0),
predicted_signal='predicted.mif'),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'tensor': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def residual_pipeline(self, **name_maps):
"""
        Calculates the residual signal left after fitting the diffusion tensor to each b-value shell
"""
pipeline = self.new_pipeline(
name='residuals',
desc=("Calculates the residuals after fitting tensor to each "
"shell"),
citations=[],
name_maps=name_maps)
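        # For each non-zero b-value shell: extract that shell's volumes, fit a
        # tensor, subtract the tensor-predicted signal from the measured data,
        # take the maximum residual over volumes, mask it and concatenate the
        # per-shell maps into a single residual image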
b_shells = set(self.b_shells())
b_shells.remove(0.0)
iterate_shells = pipeline.add(
'iterate_shells',
IdentityInterface(
fields=['b']))
iterate_shells.iterables = ('b', b_shells)
select_shell = pipeline.add(
'select_shell',
SelectShell(
tol=5.0),
inputs={
'target': (iterate_shells, 'b'),
'bvals': ('bvalues', fsl_bvals_format)})
merge0 = pipeline.add(
'merge_axis_n_indices',
MergeTuple(2),
inputs={
'in2': (select_shell, 'indices')})
merge0.inputs.in1 = 3
split_shells = pipeline.add(
'split_shells',
MRConvert(
out_ext='.mif'),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline),
'coord': (merge0, 'out')},
requirements=[mrtrix_req.v('3.0')])
# Create tensor fit node
tensor = pipeline.add(
'dwi2tensor',
FitTensor(
out_file='dti.nii.gz',
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0),
predicted_signal='predicted.mif'),
inputs={
'in_file': (split_shells, 'out_file'),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0')])
merge1 = pipeline.add(
'merge_tensor_predicted',
Merge(2),
inputs={
'in1': (split_shells, 'out_file'),
'in2': (tensor, 'predicted_signal')})
residual = pipeline.add(
'residual',
MRCalc(
operation='subtract'),
inputs={
'operands': (merge1, 'out')})
max_residual = pipeline.add(
'max_residual',
MRMath(
operation='max',
axis=3),
inputs={
'in_files': (residual, 'out_file')})
merge3 = pipeline.add(
'merge_operands3',
Merge(2),
inputs={
'in1': (max_residual, 'out_file'),
'in2': (self.brain_mask_spec_name, nifti_gz_format)})
mask = pipeline.add(
'apply_mask',
MRCalc(
operation='multiply',
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'operands': (merge3, 'out')},
requirements=[mrtrix_req.v('3.0rc3')])
merge_shells = pipeline.add(
'merge_shells',
MRCat(
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0),
axis=3),
inputs={
'input_scans': (mask, 'out_file')},
outputs={
'tensor_residual': ('out_file', mrtrix_image_format)},
joinsource='iterate_shells',
joinfield=['input_scans'],
requirements=[mrtrix_req.v('3.0rc3')])
# mean = pipeline.add(
# 'mean',
# MRMath(
# operation='mean'),
# inputs={
# 'input_files': (merge_shells, 'out_file')})
# stddev = pipeline.add(
# 'stddev',
# MRMath(
# operation='std'),
# inputs={
# 'input_files': (merge_shells, 'out_file')})
return pipeline
def tensor_metrics_pipeline(self, **name_maps):
"""
        Calculates FA and ADC maps from the fitted diffusion tensor image
"""
pipeline = self.new_pipeline(
name='fa',
desc=("Calculates the FA and ADC from a tensor image"),
citations=[],
name_maps=name_maps)
# Create tensor fit node
pipeline.add(
'metrics',
TensorMetrics(
out_fa='fa.nii.gz',
out_adc='adc.nii.gz'),
inputs={
'in_file': ('tensor', nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'fa': ('out_fa', nifti_gz_format),
'adc': ('out_adc', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def response_pipeline(self, **name_maps):
"""
        Estimates the fibre response function used by constrained spherical
        deconvolution
Parameters
----------
response_algorithm : str
Algorithm used to estimate the response
"""
pipeline = self.new_pipeline(
name='response',
desc=("Estimates the fibre response function"),
citations=[mrtrix_cite],
name_maps=name_maps)
# Create fod fit node
response = pipeline.add(
'response',
ResponseSD(
algorithm=self.parameter('response_algorithm'),
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'wm_response': ('wm_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# Connect to outputs
if self.multi_tissue:
            response.inputs.gm_file = 'gm.txt'
            response.inputs.csf_file = 'csf.txt'
pipeline.connect_output('gm_response', response, 'gm_file',
text_format)
pipeline.connect_output('csf_response', response, 'csf_file',
text_format)
return pipeline
def average_response_pipeline(self, **name_maps):
"""
Averages the estimate response function over all subjects in the
project
"""
pipeline = self.new_pipeline(
name='average_response',
desc=(
"Averages the fibre response function over the project"),
citations=[mrtrix_cite],
name_maps=name_maps)
join_subjects = pipeline.add(
'join_subjects',
utility.IdentityInterface(['responses']),
inputs={
'responses': ('wm_response', text_format)},
outputs={},
joinsource=self.SUBJECT_ID,
joinfield=['responses'])
join_visits = pipeline.add(
'join_visits',
Chain(['responses']),
inputs={
'responses': (join_subjects, 'responses')},
joinsource=self.VISIT_ID,
joinfield=['responses'])
pipeline.add(
'avg_response',
AverageResponse(
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_files': (join_visits, 'responses')},
outputs={
'avg_response': ('out_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def fod_pipeline(self, **name_maps):
"""
Estimates the fibre orientation distribution (FOD) using constrained
spherical deconvolution
Parameters
----------
"""
pipeline = self.new_pipeline(
name='fod',
desc=("Estimates the fibre orientation distribution in each"
" voxel"),
citations=[mrtrix_cite],
name_maps=name_maps)
# Create fod fit node
dwi2fod = pipeline.add(
'dwi2fod',
ConstrainedSphericalDeconvolution(
algorithm=self.fod_algorithm,
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0),
predicted_signal='predicted.mif'),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'wm_txt': ('wm_response', text_format),
'mask_file': (self.brain_mask_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline)},
outputs={
'wm_odf': ('wm_odf', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.multi_tissue:
            dwi2fod.inputs.gm_odf = 'gm.mif'
            dwi2fod.inputs.csf_odf = 'csf.mif'
            pipeline.connect_input('gm_response', dwi2fod, 'gm_txt',
                                   text_format)
            pipeline.connect_input('csf_response', dwi2fod, 'csf_txt',
                                   text_format)
            pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf',
                                    nifti_gz_format)
            pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf',
                                    nifti_gz_format)
# Check inputs/output are connected
return pipeline
def extract_b0_pipeline(self, **name_maps):
"""
Extracts the b0 images from a DWI analysis and takes their mean
"""
pipeline = self.new_pipeline(
name='extract_b0',
desc="Extract b0 image from a DWI analysis",
citations=[mrtrix_cite],
name_maps=name_maps)
# Extraction node
extract_b0s = pipeline.add(
'extract_b0s',
ExtractDWIorB0(
bzero=True,
quiet=True),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# FIXME: Need a registration step before the mean
# Mean calculation node
mean = pipeline.add(
"mean",
MRMath(
axis=3,
operation='mean',
quiet=True,
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_files': (extract_b0s, 'out_file')},
requirements=[mrtrix_req.v('3.0rc3')])
# Convert to Nifti
pipeline.add(
"output_conversion",
MRConvert(
out_ext='.nii.gz',
quiet=True),
inputs={
'in_file': (mean, 'out_file')},
outputs={
'b0': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def global_tracking_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='global_tracking',
            desc="Whole-brain streamlines tractography seeded from a brain mask",
citations=[mrtrix_cite],
name_maps=name_maps)
mask = pipeline.add(
'mask',
DWI2Mask(
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
tracking = pipeline.add(
'tracking',
Tractography(
select=self.parameter('num_global_tracks'),
cutoff=self.parameter('global_tracks_cutoff'),
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'seed_image': (mask, 'out_file'),
'in_file': ('wm_odf', mrtrix_image_format)},
outputs={
'global_tracks': ('out_file', mrtrix_track_format)},
requirements=[mrtrix_req.v('3.0rc3')])
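        # When a co-registered 5TT segmentation is available, use it for
        # anatomically-constrained tractography (ACT)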
if self.provided('anat_5tt'):
pipeline.connect_input('anat_5tt', tracking, 'act_file',
mrtrix_image_format)
return pipeline
def intrascan_alignment_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='affine_mat_generation',
desc=("Generation of the affine matrices for the main dwi "
"sequence starting from eddy motion parameters"),
citations=[fsl_cite],
name_maps=name_maps)
pipeline.add(
'gen_aff_mats',
AffineMatrixGeneration(),
inputs={
'reference_image': ('mag_preproc', nifti_gz_format),
'motion_parameters': ('eddy_par', eddy_par_format)},
outputs={
'align_mats': ('affine_matrices', motion_mats_format)})
return pipeline
def connectome_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='connectome',
desc=("Generate a connectome from whole brain connectivity"),
citations=[],
name_maps=name_maps)
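        # Locate the aparc+aseg parcellation inside the FreeSurfer recon-all
        # output; it supplies the nodes for the connectome matrix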
aseg_path = pipeline.add(
'aseg_path',
AppendPath(
sub_paths=['mri', 'aparc+aseg.mgz']),
inputs={
'base_path': ('anat_fs_recon_all', directory_format)})
pipeline.add(
'connectome',
mrtrix3.BuildConnectome(
nthreads=(self.processor.cpus_per_task
if self.processor.cpus_per_task else 0)),
inputs={
'in_file': ('global_tracks', mrtrix_track_format),
'in_parc': (aseg_path, 'out_path')},
outputs={
'connectome': ('out_file', csv_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
class DwiRefAnalysis(EpiAnalysis, metaclass=AnalysisMetaClass):
add_data_specs = [
InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True)
]
desc = ("A special analysis used in the MR-PET motion correction algorithm to"
" perform distortion correction on the reverse-phase/reference b0 "
"scans by flipping it around and using the DWI series as the "
"reference")
def preprocess_pipeline(self, **name_maps):
if self.provided('reverse_phase'):
return self._topup_pipeline(**name_maps)
else:
return super().preprocess_pipeline(**name_maps)
def _topup_pipeline(self, **name_maps):
"""
Implementation of separate topup pipeline, moved from EPI analysis as it
is only really relevant for spin-echo DWI. Need to work out what to do
with it
"""
pipeline = self.new_pipeline(
name='preprocess_pipeline',
desc=("Topup distortion correction pipeline"),
citations=[fsl_cite],
name_maps=name_maps)
reorient_epi_in = pipeline.add(
'reorient_epi_in',
fsl.utils.Reorient2Std(),
inputs={
'in_file': ('magnitude', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
reorient_epi_opposite = pipeline.add(
'reorient_epi_opposite',
fsl.utils.Reorient2Std(),
inputs={
'in_file': ('reverse_phase', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
prep_dwi = pipeline.add(
'prepare_dwi',
PrepareDWI(
topup=True),
inputs={
'pe_dir': ('ped', str),
'ped_polarity': ('pe_angle', str),
'dwi': (reorient_epi_in, 'out_file'),
'dwi1': (reorient_epi_opposite, 'out_file')})
ped = pipeline.add(
'gen_config',
GenTopupConfigFiles(),
inputs={
'ped': (prep_dwi, 'pe')})
merge_outputs = pipeline.add(
'merge_files',
merge_lists(2),
inputs={
'in1': (prep_dwi, 'main'),
'in2': (prep_dwi, 'secondary')})
merge = pipeline.add(
'FslMerge',
FslMerge(
dimension='t',
output_type='NIFTI_GZ'),
inputs={
'in_files': (merge_outputs, 'out')},
requirements=[fsl_req.v('5.0.9')])
topup = pipeline.add(
'topup',
TOPUP(
output_type='NIFTI_GZ'),
inputs={
'in_file': (merge, 'merged_file'),
'encoding_file': (ped, 'config_file')},
requirements=[fsl_req.v('5.0.9')])
in_apply_tp = pipeline.add(
'in_apply_tp',
merge_lists(1),
inputs={
'in1': (reorient_epi_in, 'out_file')})
pipeline.add(
'applytopup',
ApplyTOPUP(
method='jac',
in_index=[1],
output_type='NIFTI_GZ'),
inputs={
'in_files': (in_apply_tp, 'out'),
'encoding_file': (ped, 'apply_topup_config'),
'in_topup_movpar': (topup, 'out_movpar'),
'in_topup_fieldcoef': (topup, 'out_fieldcoef')},
outputs={
'mag_preproc': ('out_corrected', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
return pipeline
| [
"[email protected]"
] | |
4c1fac6ffc39bfa3667bc5a2ef3b71ca0e4f0283 | bee2af5228232ce94f418b61810cecd93af62615 | /movies/tests.py | a6adfa1859a67fd17757470bea1d839c9c970cc3 | [] | no_license | thuitafaith/djangoapp | b64c2e1a05c67b1135d4d9dd7975c17522238a69 | e06280b34a7b1ec012d0baab6f0fb153875a39b4 | refs/heads/master | 2022-12-11T19:06:08.540528 | 2019-08-29T12:36:45 | 2019-08-29T12:36:45 | 203,321,071 | 0 | 0 | null | 2022-11-22T04:13:07 | 2019-08-20T07:15:28 | Python | UTF-8 | Python | false | false | 1,600 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from .models import Editor,Article,tags
import datetime as dt
# Create your tests here.
class EditorTestClass(TestCase):
# set up method
def setUp(self):
self.faith=Editor(first_name='faith',last_name='thuita',email='[email protected]')
# testing instance
def test_instance(self):
self.assertTrue(isinstance(self.faith,Editor))
# testing save method
def test_save_method(self):
self.faith.save_editor()
editors = Editor.objects.all()
self.assertTrue(len(editors)>0)
class ArticleTestClass(TestCase):
def setUp(self):
# creating a new editor and saving it
self.faith= Editor(first_name='faith',last_name='thuita',email='[email protected]')
self.faith.save_editor()
# creating a new tag saving it
self.new_tag = tags(name='testing')
self.new_tag.save()
self.new_article = Article(title='Test Article',post= 'this is a random test post',editor=self.faith)
self.new_article.save()
self.new_article.tags.add(self.new_tag)
def tearDown(self):
Editor.objects.all().delete()
tags.objects.all().delete()
Article.objects.all().delete()
def test_get_news_today(self):
today_news = Article.todays_news()
self.assertTrue(len(today_news) > 0)
def test_get_news_by_date(self):
        test_date = '2017-03-17'
        date = dt.datetime.strptime(test_date, '%Y-%m-%d').date()
        # days_news is assumed here as a by-date counterpart to todays_news
        news_by_date = Article.days_news(date)
        self.assertTrue(len(news_by_date) == 0)
| [
"[email protected]"
] | |
f287244a91e88664b5d41777c7749b04894158ea | f4b16d247195621a5413aab56919b4e623b604b8 | /src/faimes/urban/dataimport/opinionmakers/settings.py | ed4e0587d66bb5c2e6bf895523ee08c2b6023e75 | [] | no_license | IMIO/faimes.urban.dataimport | cc1a7e3050538f409c29e3031a175e8d1a96c7db | 67fcaa14a5951df7cbaf64b59794aab0a2b88f7f | refs/heads/master | 2021-01-10T17:52:51.975421 | 2017-03-16T13:27:27 | 2017-03-16T13:27:27 | 52,949,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | # -*- coding: utf-8 -*-
from faimes.urban.dataimport.opinionmakers.importer import OpinionMakersImporter
from imio.urban.dataimport.access.settings import AccessImporterFromImportSettings
class OpinionMakersImporterFromImportSettings(AccessImporterFromImportSettings):
""" """
def __init__(self, settings_form, importer_class=OpinionMakersImporter):
"""
"""
super(OpinionMakersImporterFromImportSettings, self).__init__(settings_form, importer_class)
def get_importer_settings(self):
"""
        Return the importer settings, including the Access file to read.
"""
settings = super(OpinionMakersImporterFromImportSettings, self).get_importer_settings()
access_settings = {
'db_name': 'Tab_Urba 97.mdb',
'table_name': 'CONSUL',
'key_column': 'Sigle',
}
settings.update(access_settings)
return settings
| [
"[email protected]"
] | |
e4b32b9e6339263746a7889bc6d5cedd7cda3845 | edad2e75198bcfdf75965e132e268b4ebbbd70e2 | /old/other_scripts_notneeded/main_scripts_for_yuva/python_utility3_remote_seed_yuva.py | f79d87f760065f05716ccbed7e8ca8662d20bc47 | [] | no_license | nandithaec/python-utility | d50a6e802bf3a9afadbaf3f5207efdba875c7e70 | b8b7377b87630375ff804c7204d37a1c7ecee826 | refs/heads/master | 2021-01-10T07:00:18.820227 | 2015-04-16T06:01:44 | 2015-04-16T06:01:44 | 36,727,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,448 | py |
#!/usr/bin/env python
#IMPORTANT: It is assumed that we are running parallel ngspice simulations on a remote 48-core cluster at 10.107.105.201. If this is not the case, you will need to modify this script to run it on this machine, by commenting out the scp and ssh commands.
#Example usage: python /home/external/iitb/nanditha/simulations/c499_ecat_yuva/python_utility3_remote_seed_yuva.py -m c499_clk -p /home/external/iitb/nanditha/simulations/c499_ecat_yuva -d c499_ecat_yuva -t 180 -n 10 --group 10 --clk 125 --std_lib osu018_stdcells_correct_vdd_gnd.sp
import optparse
import re,os
import glob,shutil,csv
import random
import subprocess, time
import random,sys
#import python_compare_remote
from optparse import OptionParser
parser = OptionParser('This script reads in the template spice file and the inputs to the script are listed as arguments below, which are all necessary arguments.\nAfter a previous script has copied the current working directory to a remote cluster, this script invokes several scripts inturn:\n1.perl_calculate_gates_clk.pl\n2.perl_calculate_drain.pl\n3.deckgen_remote_seed.pl\n4.python_GNUparallel_ngspice_remote.py\n5.python_compare_remote_seed.py\n6.python_count_flips_remote_seed.py\n\nThe tasks of these scripts will be described in the help section of the respective scripts. The current script needs pnr/reports/5.postRouteOpt_mult/mult_postRoute.slk as an input. The current script will calculate the number of gates in the design(spice) file, pick a random gate, calculate the number of distinct drains for this gate and pick a drain to introduce glitch it.The location of the glitch is calculated based on the timing/slack information from the SoC encounter output: (pnr/reports/5.postRouteOpt_mult/mult_postRoute.slk) for the particular design, so that we introduce glitch only after the input has changed in the clk period, and before the next rising edge of the clk (when the latch is open). It then invokes deckgen.pl to modify the template spice file to introduce the glitched version of the gate in the spice file. The deckgen creates multiple spice files which will contain different input conditions since they are generated at different clk cycles.\nThe python_GNUparallel_ngspice_remote.py will then distribute these spice files across the different machines in the cluster and simulate these decks using ngspice. The results are csv files which contain output node values after spice simulation.\nThe results are then concatenated into one file and compared against the expected reference outputs that were obtained by the RTL simulation. If the results match, then it means that there was no bit-flip, so a 0 is reported, else a 1 is reported for a bit-flip. The number of flips in a single simulation is counted. Finally, if there are multiple flips given atleast one flip, it is reported as a percentage.\nAuthor:Nanditha Rao([email protected])\n')
parser.add_option("-m", "--mod",dest='module', help='Enter the entity name(vhdl) or module name (verilog)')
parser.add_option("-n", "--num",dest='num', help='Enter the number of spice decks to be generated and simulated')
parser.add_option("-p", "--path", dest="path",help="Enter the ENTIRE path to your design folder (your working dir)- either this machine or remote machine. IF remote machine, enter ~/simulations/<design_folder_name>")
parser.add_option("-d", "--design", dest="design_folder",help="Enter the name of your design folder")
parser.add_option("-t", "--tech",dest='tech', help='Enter the technology node-for eg., For 180nm, enter 180')
parser.add_option("--group",dest='group', help='Enter the number of spice decks to be simulated at a time. For eg., if -n option is 10000, and say we want to run 100 at a time, then enter 100')
#parser.add_option("--backup",dest='backup', help='Enter the number of spice decks you want to backup/save per run. For ef., if you entered -n 1000 and --group 100, and if you want to save 2 decks per 100, enter 2 ')
#parser.add_option("-s", "--seed",dest='seed', help='Enter the random seed')
parser.add_option("-c", "--clk",dest='clk', help='Enter the clk freq in MHz')
parser.add_option("-l", "--std_lib",dest='std_lib', help='Enter the file name of the standard cell library (sp file)')
(options, args) = parser.parse_args()
module=options.module
num=options.num
path=options.path
design_folder=options.design_folder
tech=options.tech
num_at_a_time=options.group
#backup_per_run=options.backup
#seed=int(options.seed)
clk=(options.clk)
std_lib = options.std_lib
clk_period = (1.0/float(clk))*(0.000001)
half_clk_period = clk_period/2.0
change_time= half_clk_period/3.0
end_PWL= half_clk_period + change_time #in ns generally
#To determine when the glitch needs to be introduced, depends on the slack information
with open("%s/pnr/reports/5.postRouteOpt_%s/%s_postRoute.slk" %(path,module,module),"r") as f:
words=map(str.split, f)
line1=words[1] #2nd line after header
slack_read=line1[2]
print "\nSlack is: %s" %slack_read
slack_string=slack_read.replace("*/","")
slack_time=float(slack_string)
print "\nSlack is: %f ns" %slack_time
reqdtime_read=line1[1]
print "\nReqd time is: %s" %reqdtime_read
reqdtime_string=reqdtime_read.replace("*/","")
reqd_time=float(reqdtime_string)
print "\nReqd time is: %f ns" %reqd_time
arrival_time = reqd_time - slack_time
arrival_time_ns = arrival_time *(0.000000001)
print "\nArrival time is: %e " %arrival_time_ns
#What fraction of the clk period is the arrival time?
arrival_clk_part = arrival_time_ns / clk_period
print "\nArrival time is: %f clk periods" %arrival_clk_part
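#Worked example (illustrative numbers only, not taken from any real .slk report): with --clk 125
#the clock period is (1/125)*1e-6 = 8 ns, so end_PWL falls roughly 5.33 ns into the cycle.
#If the report gave a required time of 8 ns and a slack of 3 ns, arrival_time_ns would be 5 ns
#and arrival_clk_part = 5/8 = 0.625 clock periods.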
#Whatever number of decks to be simulated- is assumed to be more than or equal to 1000.
#At a time, only 1000 are generated and run- to save disk space. After collecting results, they are deleted
num_of_loops=(int(num)/int(num_at_a_time))
if os.path.exists('%s/spice_results' %path):
os.chdir('%s/spice_results' %path)
for f in glob.glob("count*.csv"):
os.remove(f)
if os.path.exists('%s/spice_results' %path):
os.chdir('%s/spice_results' %path)
for f in glob.glob("spice_rtl_*.csv"):
os.remove(f)
if os.path.exists('%s/spice_results' %path):
os.chdir('%s/spice_results' %path)
for f in glob.glob("final_results_spice_outputs_*.csv"):
os.remove(f)
if os.path.isfile('%s/spice_results/result_summary_flipcount.csv' %(path)):
os.remove('%s/spice_results/result_summary_flipcount.csv' %(path))
#Clear Back up directory
backup_dir = '%s/backup_spice_decks' %(path)
if os.path.exists(backup_dir):
shutil.rmtree(backup_dir)
if not os.path.exists(backup_dir):
os.mkdir(backup_dir)
print "Deleting the existing spice decks before creating new ones!\n"
os.system('rm -rf %s/spice_decks_*' %path)
start_loop=1
frand = open('%s/random_number_histogram.txt' %(path), 'w')
seed = random.randint(0, sys.maxint)
print "seed is: ", seed
frand.write("Seed:%d\n" %seed)
random.seed(seed) #Seeding the random number generator
clk_period = (1.0/float(clk))*(0.000001)
print "\nclk is ",clk
print "\nClk_period: ", clk_period
os.system('cat $PBS_NODEFILE > %s/nodes.txt' %path)
print "PBS NODEFILE contents....written to nodes.txt\n"
time.sleep(3)
os.system('python %s/python_ssh_addr_yuva.py -p %s' %(path,path))
os.system('cat %s/sshmachines.txt' %path)
print "Check contents of sshmachines.txt file....\n"
time.sleep(10)
#Uncomment this for future designs. For decoder example, decoder folder has already been created on desktop
#os.system('ssh [email protected] mkdir /home/nanditha/simulations/%s' %(design_folder))
###########################################Comment this out if not using desktop to run##################################
"""
print "\nCopying a python script to desktop machine!\n"
os.system('scp %s/python_desktop_copy.py %s/glitch_%s.sp %s/tsmc018.m [email protected]:/home/nanditha/simulations/%s/' %(path,path,std_lib,path,design_folder))
"""
######################################################################################################
#perl perl_calculate_gates_clk.pl -s reference_spice.sp -l glitch_osu018_stdcells_correct_vdd_gnd.sp -r decoder_behav_pnr_reference_out/tool_reference_out.txt -m decoder_behav_pnr -f /home/user1/simulations/decoder
os.system('perl %s/perl_calculate_gates_clk.pl -s %s/reference_spice.sp -l %s/glitch_%s.sp -r %s/%s_reference_out/tool_reference_out.txt -m %s -f %s ' %(path,path,path,std_lib,path,module,module,path))
fg = open('%s/tmp_random.txt' %(path), 'r')
gate_clk_data = [line.strip() for line in fg]
num_of_gates=int(gate_clk_data[0])
print "\nnum of gates is %d" %num_of_gates
num_of_clks=int(gate_clk_data[1])
print "\nnum of clocks is %d" %num_of_clks
fg.close()
#Fresh simulation
for loop in range(start_loop, (num_of_loops+1)):
#time.sleep(2)
#os.system('cd /home/user1/simulations/decoder ; ls; pwd;ls | wc -l' )
#time.sleep(5)
print "Now, creating multiple spice decks in spice_decks folder in current directory on the remote machine\n"
#os.system('python %s/python_repeat_deckgen_remote_seed.py -m %s -n %s -f %s -o %s -s %d' %(path,module,num_at_a_time,path,loop,seed_new))
#########################################repeat_deckgen copied starting from here#######################################
if os.path.isfile("%s/%s_reference_out/RTL.csv" %(path,module)):
print "****Removing the existing RTL.csv file in folder %s_reference_out ****\n" %(module)
os.remove("%s/%s_reference_out/RTL.csv" %(path,module))
#Now, we need the header in RTL.csv, so we create an RTL.csv and copy the headers from the RTL_backup.csv that we had saved from Netlstfrmt.pl
fout = open('%s/%s_reference_out/RTL.csv' %(path,module), 'w')
fin = open('%s/%s_reference_out/RTL_backup.csv' %(path,module), 'r')
in_data=fin.read()
fout.write(in_data)
fout.close()
fin.close()
if not os.path.exists('%s/spice_decks_%s' %(path,loop)):
os.mkdir('%s/spice_decks_%s' %(path,loop))
start= ((loop-1)*int(num_at_a_time)) + 1 # ((1-1)*10) +1 =1 , ((2-1)*10) +1 =11
end = (int(num_at_a_time))*loop #(10*1) = 10, (10*2)=20
print "***Inside repeat_deckgen. Executing deckgen to create decks and RTL.csv reference file\n***"
for loop_var in range(start, end+1):
rand_gate= int(random.randrange(num_of_gates)) #A random gate picked
#print "Random gate is: ",rand_gate
rand_clk= int(random.randrange(num_of_clks)) #A random clk picked
#print "Random clock cycle is: ",rand_clk
#perl perl_calculate_drain.pl -s reference_spice.sp -l glitch_osu018_stdcells_correct_vdd_gnd.sp -r decoder_behav_pnr_reference_out/tool_reference_out.txt -m decoder_behav_pnr -f /home/user1/simulations/decoder -g 27
os.system('perl %s/perl_calculate_drain.pl -s %s/reference_spice.sp -l %s/glitch_%s -r %s/%s_reference_out/tool_reference_out.txt -m %s -f %s -g %d ' %(path,path,path,std_lib,path,module,module,path,rand_gate))
fg = open('%s/tmp_random.txt' %(path), 'r')
drain_data = [line.strip() for line in fg]
num_of_drains=int(drain_data[0])
print "\nnum of drains is %d" %num_of_drains
fg.close()
#If num of drains is 2, randrange(2) returns 0 or 1,where as we want drain number 1 or drain number 2. so, doing +1
rand_drain= int(random.randrange(num_of_drains))+1 #A random drain picked.
#Arrival_time_part + initial_clk_part should add up to 1.5 clk periods
#The clk starts from low to high and then low, before the 2nd rising edge starts. The input is changed in the high period and the glitch is expected to arrive later on, and before the next rising edge (when the latch will open)
#In every iteration, a different random number needs to be picked. Hence, this is inside the for loop
initial_clk_part = 1.5 - arrival_clk_part
initial_clk_part_abs = initial_clk_part * clk_period
#This means, glitch "can" occur before the input changes in the clk period as well. So, force the glitch to start only after input has changed
if (initial_clk_part_abs < end_PWL) :
initial_clk_part = end_PWL/clk_period
#unif=random.uniform(0,arrival_clk_part*clk_period)
#srand_glitch= (initial_clk_part*clk_period) + unif #A random glitch picked
unif=random.uniform(0,0.05*clk_period)
rand_glitch= (1.45*clk_period) + unif #arrival_clk + initial_clk should add up to 1.5
print "\nglitch within clk cycle= ",unif
print "\nRandom gate: %d\nRandom drain: %d\nRandom clock cycle:%d\nRandom glitch location:%e\n " %(rand_gate,rand_drain,rand_clk,rand_glitch)
frand.write("%d, %d, %d,%e\n" %(rand_gate,rand_drain,rand_clk,rand_glitch))
#perl deckgen_remote_seed.pl -s reference_spice.sp -l glitch_osu018_stdcells_correct_vdd_gnd.sp -r decoder_behav_pnr_reference_out/tool_reference_out.txt -n 1 -m decoder_behav_pnr -f /home/user1/simulations/decoder -g 27 -d 2 -c 10 -i 1.42061344093991e-09 -o 1
#deckgen.pl will need to be remotely executed through python_repeat_deckgen.py multiple number of times
os.system('perl %s/deckgen_remote_seed.pl -s %s/reference_spice.sp -l %s/glitch_%s -r %s/%s_reference_out/tool_reference_out.txt -n %d -m %s -f %s -o %s -g %s -d %s -c %s -i %s' %(path,path,path,std_lib,path,module,loop_var,module,path,loop,rand_gate,rand_drain,rand_clk,rand_glitch))
##################Script repeat_deckgen copied ends here####################################
##################################Comment this out if not using desktop to run##################################
#delete existing files on desktop machine and copy new files for simulation
#os.system('ssh [email protected] python /home/nanditha/simulations/%s/python_desktop_copy.py -p %s -d %s -l %d' %(design_folder,path,design_folder,loop))
################################################################################################################
#print "\nmaster machine.. listing the files and pausing\n"
#os.system('cd /home/user1/simulations/decoder/spice_decks_%d ; ls; pwd;ls | wc -l' %loop)
#time.sleep(1)
#print "\nssh to slave.. listing the files and pausing\n"
#os.system('ssh [email protected] pwd; cd /home/user1/simulations/decoder/spice_decks_%d; pwd;ls;pwd;ls | wc -l' %loop)
#time.sleep(3)
print "Running GNU Parallel and ngspice on the created decks\n"
os.system('python %s/python_GNUparallel_ngspice_remote_yuva.py -n %s -d %s -o %s -p %s' %(path,num_at_a_time,design_folder,loop,path))
seed_new= int(random.randrange(100000)*random.random()) #Used by compare script to backup random decks
#seed_new=seed*loop
print "New seed every outer loop is ", seed_new
#python_results_compare.py will then need to be remotely executed
#Might need to execute these last 3 in a loop till the results are acceptable
print "Comparing the RTL and spice outputs\n"
os.system('python %s/python_compare_remote_seed.py -m %s -f %s -n %s -t %s -l %d' %(path,module,path,num_at_a_time,tech,loop))
##########################################################
spice_dir = '%s/spice_decks_%s' %(path,loop)
if os.path.exists(spice_dir):
shutil.rmtree(spice_dir)
########################################End of loop########################################################
#For validation of backup spice files
shutil.copy('%s/glitch_%s' %(path,std_lib), '%s/backup_spice_decks' %path )
shutil.copy('%s/tsmc018.m' %path, '%s/backup_spice_decks' %path )
print "Combining all rtl diff files\n"
os.system('python %s/python_count_flips_remote_seed.py -f %s -n %s --group %s -s %s' %(path,path,num,num_at_a_time,seed)) #To save the seed to results file
| [
"[email protected]"
] | |
9e20dbe06af05a8d7d2965f3a10c0d4cddb65dd4 | 6a928ba05fb0f0ff1c5fc2b16792299b8b0944d6 | /_Attic/btleclassifier_orig.py | 119176d090171d7c677fcde43c9a509d4e41efe9 | [
"MIT"
] | permissive | simsong/python-corebluetooth | 22b933c31f9aaaa614e76eafe6bfca679a50d626 | 29ef9e8d3e5ab8fd838fd254cd079449d8064441 | refs/heads/master | 2021-08-07T13:22:13.170086 | 2020-07-06T17:47:31 | 2020-07-06T17:47:31 | 197,768,612 | 3 | 3 | null | 2019-07-20T05:32:35 | 2019-07-19T12:25:17 | Python | UTF-8 | Python | false | false | 15,356 | py | # File: btleclassifier.py
# Author: Johannes K Becker <[email protected]>
# Date: 2019-01-29
# Last Modified Date: 2019-07-18
# Last Modified By: Johannes K Becker <[email protected]>
# Advertising Data Type (AD Type) Definitions here:
# https://www.bluetooth.com/specifications/assigned-numbers/generic-access-profile
#
# Data Type Value Data Type Name Reference for Definition
# 0x01 "Flags" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.3 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.3 and 18.1 (v4.0)Core Specification Supplement, Part A, section 1.3
# 0x02 "Incomplete List of 16-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.1 and 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x03 "Complete List of 16-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.1 and 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x04 "Incomplete List of 32-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, section 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x05 "Complete List of 32-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, section 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x06 "Incomplete List of 128-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.1 and 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x07 "Complete List of 128-bit Service Class UUIDs" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.1 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.1 and 18.2 (v4.0)Core Specification Supplement, Part A, section 1.1
# 0x08 "Shortened Local Name" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.2 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.2 and 18.4 (v4.0)Core Specification Supplement, Part A, section 1.2
# 0x09 "Complete Local Name" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.2 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.2 and 18.4 (v4.0)Core Specification Supplement, Part A, section 1.2
# 0x0A "Tx Power Level" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.5 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.5 and 18.3 (v4.0)Core Specification Supplement, Part A, section 1.5
# 0x0D "Class of Device" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.6 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.5 and 18.5 (v4.0)Core Specification Supplement, Part A, section 1.6
# 0x0E "Simple Pairing Hash C" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.6 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.5 and 18.5 (v4.0)
# 0x0E "Simple Pairing Hash C-192" Core Specification Supplement, Part A, section 1.6
# 0x0F "Simple Pairing Randomizer R" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.6 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.5 and 18.5 (v4.0)
# 0x0F "Simple Pairing Randomizer R-192" Core Specification Supplement, Part A, section 1.6
# 0x10 "Device ID" Device ID Profile v1.3 or later
# 0x10 "Security Manager TK Value" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.7 and 18.6 (v4.0)Core Specification Supplement, Part A, section 1.8
# 0x11 "Security Manager Out of Band Flags" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.6 and 18.7 (v4.0)Core Specification Supplement, Part A, section 1.7
# 0x12 "Slave Connection Interval Range" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.8 and 18.8 (v4.0)Core Specification Supplement, Part A, section 1.9
# 0x14 "List of 16-bit Service Solicitation UUIDs" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.9 and 18.9 (v4.0)Core Specification Supplement, Part A, section 1.10
# 0x15 "List of 128-bit Service Solicitation UUIDs" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.9 and 18.9 (v4.0)Core Specification Supplement, Part A, section 1.10
# 0x16 "Service Data" Bluetooth Core Specification:Vol. 3, Part C, sections 11.1.10 and 18.10 (v4.0)
# 0x16 "Service Data - 16-bit UUID" Core Specification Supplement, Part A, section 1.11
# 0x17 "Public Target Address" Bluetooth Core Specification:Core Specification Supplement, Part A, section 1.13
# 0x18 "Random Target Address" Bluetooth Core Specification:Core Specification Supplement, Part A, section 1.14
# 0x19 "Appearance" Bluetooth Core Specification:Core Specification Supplement, Part A, section 1.12
# 0x1A "Advertising Interval" Bluetooth Core Specification:Core Specification Supplement, Part A, section 1.15
# 0x1B "LE Bluetooth Device Address" Core Specification Supplement, Part A, section 1.16
# 0x1C "LE Role" Core Specification Supplement, Part A, section 1.17
# 0x1D "Simple Pairing Hash C-256" Core Specification Supplement, Part A, section 1.6
# 0x1E "Simple Pairing Randomizer R-256" Core Specification Supplement, Part A, section 1.6
# 0x1F "List of 32-bit Service Solicitation UUIDs" Core Specification Supplement, Part A, section 1.10
# 0x20 "Service Data - 32-bit UUID" Core Specification Supplement, Part A, section 1.11
# 0x21 "Service Data - 128-bit UUID" Core Specification Supplement, Part A, section 1.11
# 0x22 "LE Secure Connections Confirmation Value" Core Specification Supplement Part A, Section 1.6
# 0x23 "LE Secure Connections Random Value" Core Specification Supplement Part A, Section 1.6
# 0x24 "URI" Bluetooth Core Specification:Core Specification Supplement, Part A, section 1.18
# 0x25 "Indoor Positioning" Indoor Positioning Service v1.0 or later
# 0x26 "Transport Discovery Data" Transport Discovery Service v1.0 or later
# 0x27 "LE Supported Features" Core Specification Supplement, Part A, Section 1.19
# 0x28 "Channel Map Update Indication" Core Specification Supplement, Part A, Section 1.20
# 0x29 "PB-ADV" Mesh Profile Specification Section 5.2.1
# 0x2A "Mesh Message" Mesh Profile Specification Section 3.3.1
# 0x2B "Mesh Beacon" Mesh Profile Specification Section 3.9
# 0x3D "3D Information Data" 3D Synchronization Profile, v1.0 or later
# 0xFF "Manufacturer Specific Data" Bluetooth Core Specification:Vol. 3, Part C, section 8.1.4 (v2.1 + EDR, 3.0 + HS and 4.0)Vol. 3, Part C, sections 11.1.4 and 18.11 (v4.0)Core Specification Supplement, Part A, section 1.4
class BTLEAdvIDToken(object):
def __init__(self, token_key):
self.type = str(token_key)
self.parser = BTLEAdvIDToken.tokens[self.type]['parser']
self.pattern = BTLEAdvIDToken.tokens[self.type]['pattern']
self.tokens = BTLEAdvIDToken.tokens[self.type]['tokens']
@classmethod
def get_matched_tokens(cls, data):
for vendor in BTLEAdvIDToken.tokens.keys():
token = BTLEAdvIDToken(vendor)
if token.pattern in data['raw']:
return token
return None
tokens = {
'Apple': { 'parser': 'parse_token_apple', 'pattern': "ff4c00", 'tokens': ["handoff", "nearby"] },
'Microsoft': { 'parser': 'parse_token_microsoft', 'pattern': "ff0600", 'tokens': ["msdata"] }
}
# @classmethod
# def parse_token_apple(cls, data):
# result = {}
# id_tokens = ['handoff', 'nearby']
# if 'manufacturer-specific' in data.keys() \
# and isinstance(data['manufacturer-specific'], dict):
# for t in id_tokens:
# if t in data['manufacturer-specific'].keys() \
# and isinstance(data['manufacturer-specific'][t], str):
# result[t] = data['manufacturer-specific'][t]
# else:
# result[t] = None
# return result
# @classmethod
# def parse_token_microsoft(cls, data):
# print "Parsing Microsoft", data
# return []
# @classmethod
# def get_token_type(cls, data):
# return
class BTLEAdvClassifier(object):
@classmethod
def parse_data(cls, adv_data):
d = {}
d["raw"] = adv_data
while adv_data:
ad_len = int(adv_data[:2], 16)
ad_str = adv_data[2:2+2*ad_len]
d = cls.parse_ad_structure(d, ad_str)
adv_data = adv_data[2+2*ad_len:]
return d
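# Walkthrough (illustrative, using the second demo string from the __main__ block below):
# "02011a0aff4c0010050b1c6d9072" splits into two length-prefixed AD structures:
# 02 01 1a -> length 2, type 0x01 (flags), data 0x1a
# 0a ff 4c0010050b1c6d9072 -> length 10, type 0xff (manufacturer specific), company 0x004c plus TLV payload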
@classmethod
def parse_ad_structure(cls, d, ad_str):
try:
ad_type = int(ad_str[:2], 16)
ad_data = ad_str[2:]
if ad_type == 0x01:
d["flags"] = cls.parse_ad_type_0x01(ad_data)
elif ad_type == 0x11:
d["sec-mg-oob-flags"] = cls.parse_ad_type_0x11(ad_data)
elif ad_type == 0x16:
d["service-data"] = cls.parse_ad_type_0x16(ad_data)
elif ad_type == 0xff:
d["manufacturer-specific"] = cls.parse_ad_type_0xff(ad_data)
else:
d["unknown"] = (ad_type, ad_data)
except ValueError:
return d
return d
@classmethod
def parse_ad_type_0x01(cls, data):
""" Implementation of Bluetooth Specification Version 4.0 [Vol 3] Table 18.1: Flags
"""
ad_data = int(data, 16)
ad_flags = []
if ad_data & 0x01<<0:
ad_flags.append("'LE Limited Discoverable Mode'")
if ad_data & 0x01<<1:
ad_flags.append("'LE General Discoverable Mode'")
if ad_data & 0x01<<2:
ad_flags.append("'BR/EDR Not Supported (i.e. bit 37 of LMP Extended Feature bits Page 0)'")
if ad_data & 0x01<<3:
ad_flags.append("'Simultaneous LE and BR/EDR to Same Device Capable (Controller) (i.e. bit 49 of LMP Extended Feature bits Page 0)'")
if ad_data & 0x01<<4:
ad_flags.append("'Simultaneous LE and BR/EDR to Same Device Capable (Host) (i.e. bit 66 of LMP Extended Feature bits Page 1)'")
return ad_flags
@classmethod
def parse_ad_type_0x11(cls, data):
""" Implementation of Bluetooth Specification Version 4.0 [Vol 3] Table 18.7: Security Manager OOB Flags
"""
ad_data = int(data, 16)
ad_flags = []
if ad_data & 0x01<<0:
ad_flags.append("'OOB data present'")
else:
ad_flags.append("'OOB data not present'")
if ad_data & 0x01<<1:
ad_flags.append("'LE supported (Host) (i.e. bit 65 of LMP Extended Feature bits Page 1'")
if ad_data & 0x01<<2:
ad_flags.append("'Simultaneous LE and BR/EDR to Same Device Capable (Host) (i.e. bit 66 of LMP Extended Fea- ture bits Page 1)'")
if ad_data & 0x01<<3:
ad_flags.append("'Address Type: Random Address'")
else:
ad_flags.append("'Address Type: Public Address'")
return ad_flags
@classmethod
def parse_ad_type_0x16(cls, data):
""" Implementation of Bluetooth Specification Version 4.0 [Vol 3] Table 18.10: Service Data
and GATT Services list https://www.bluetooth.com/specifications/gatt/services
"""
service_uuid = int(data[2:4]+data[:2], 16) # First 2 octets contain the 16 bit service UUID, flip bytes around
service_data = data[4:] # additional service data
return (service_uuid, service_data)
apple_data_types = {
'02': 'ibeacon',
'05': 'airdrop',
'07': 'airpods',
'08': '(unknown)',
'09': 'airplay_dest',
'0a': 'airplay_src',
'0c': 'handoff',
'10': 'nearby',
}
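# Illustrative decode (derived from the first demo string in the __main__ block below): the
# manufacturer payload "4c000c0e00750f812422021c3e213d190f3310050b1c6d9072" carries company_id
# 0x004c (Apple) followed by two TLVs: type 0x0c ('handoff') with length 0x0e (14 data bytes),
# then type 0x10 ('nearby') with length 0x05 (5 data bytes).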
@classmethod
def parse_ad_type_0xff(cls, data):
""" Implementation of Bluetooth Specification Version 4.0 [Vol 3] Table 18.11: Manufacturer Specific Data
and Company Identifier List: https://www.bluetooth.com/specifications/assigned-numbers/company-identifiers
"""
company_id = int(data[2:4]+data[:2], 16) # First 2 octets contain the 16 bit service UUID, flip bytes around
man_specific_data = data[4:] # additional service data
d = {}
d["company_id"] = company_id
d["raw"] = man_specific_data
if company_id == 0x0006:
d["company_name"] = "Microsoft"
elif company_id == 0x004c:
d["company_name"] = "Apple"
# iBeacon: see format @ https://support.kontakt.io/hc/en-gb/articles/201492492-iBeacon-advertising-packet-structure
d["ibeacon"] = (0x1502 == int(man_specific_data[2:4]+man_specific_data[:2], 16))
while man_specific_data:
if man_specific_data[:2] in cls.apple_data_types:
apple_type = cls.apple_data_types[man_specific_data[:2]]
else:
apple_type = '(unknown)'
apple_len = int(man_specific_data[2:4], 16)
apple_data = man_specific_data[4:4+2*apple_len]
d[apple_type] = apple_data
man_specific_data = man_specific_data[4+2*apple_len:]
#print "###", data, apple_type, apple_len, apple_data, man_specific_data
return d
if __name__ == "__main__":
def print_r(d, level=0):
for k,v in d.items():
if isinstance(v, dict):
print(level*"\t" + k + ":")
print_r(v,level+1)
else:
print(level*"\t" + "%s: %s" % (k, v) )
example_data = ["02011a1aff4c000c0e00750f812422021c3e213d190f3310050b1c6d9072",
"02011a0aff4c0010050b1c6d9072"
]
print("Hi, this is just a demo:")
for data in example_data:
print("Parsing %s" % data)
print_r(BTLEAdvClassifier.parse_data(data), 1)
| [
"[email protected]"
] | |
06d163a2fe5ead35d5e572263a70fde2496f201a | 745197407e81606718c4cdbedb6a81b5e8edf50b | /tests/texttest/TestSelf/TestData/GUI/CopyTestPermission/TargetApp/printpermissions.py | 982669999d14a181bf22034492a6efd8f0066ec8 | [] | no_license | dineshkummarc/texttest-3.22 | 5b986c4f6cc11fd553dab173c7f2e90590e7fcf0 | 85c3d3627082cdc5860d9a8468687acb499a7293 | refs/heads/master | 2021-01-23T20:44:35.653866 | 2012-06-25T07:52:13 | 2012-06-25T07:52:13 | 4,779,248 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | #!/usr/bin/env python
import os
if os.name == "posix":
os.system("fake_executable.py 2> /dev/null")
else:
os.system("fake_executable.py 2> nul")
| [
"[email protected]"
] | |
41966c4c82d82d656d5fa42250f7a8267dfc0855 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3_neat/16_0_3_solomon_wzs_coin_jam2.py | dd210cf970f87c5ec2c5810a2df187cfd1dd819d | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 2,771 | py | #!/usr/bin/python2
import math
prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 113,
193, 241, 257, 337, 353, 401, 433, 449, 577, 593, 641,
673, 769, 881, 929, 977, 1009, 1153, 1201, 1217, 1249,
1297, 1361, 1409, 1489, 1553, 1601, 1697, 1777, 1873,
1889, 2017, 2081, 2113, 2129, 2161, 2273, 2417, 2593,
2609, 2657, 2689, 2753, 2801, 2833, 2897, 3041, 3089,
3121, 3137, 3169, 3217, 3313, 3329, 3361, 3457, 3617,
3697, 3761, 3793, 3889, 4001, 4049, 4129, 4177, 4241,
4273, 4289, 4337, 4481, 4513, 4561, 4657, 4673, 4721,
4801, 4817, 4993, 5009, 5153, 5233, 5281, 5297, 5393,
5441, 5521, 5569, 5857, 5953, 6113, 6257, 6337, 6353,
6449, 6481, 6529, 6577, 6673, 6689, 6737, 6833, 6961,
6977, 7057, 7121, 7297, 7393, 7457, 7489, 7537, 7649,
7681, 7793, 7841, 7873, 7937, 8017, 8081, 8161, 8209,
8273, 8353, 8369, 8513, 8609, 8641, 8689, 8737, 8753,
8849, 8929, 9041, 9137, 9281, 9377, 9473, 9521, 9601,
9649, 9697, 9857]
def montgomery(n, p, m):
    r = n % m
    k = 1
    while p > 1:
        if p & 1 != 0:
            k = (k * r) % m
        r = (r * r) % m
        p /= 2
    return (r * k) % m
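# Illustrative check of the square-and-multiply helper above: montgomery(n, p, m) returns
# n**p % m, e.g. montgomery(2, 10, 1000) == 24 because 2**10 == 1024 and 1024 % 1000 == 24.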
def is_prime(n):
    if n < 2:
        return False
    for i in xrange(len(prime_list)):
        if n % prime_list[i] == 0 or montgomery(prime_list[i], n - 1, n) != 1:
            return False
    return True
def f(n, j):
    res = ""
    for x in xrange(int("1%s1" % ("0" * (n - 2)), 2),
                    int("1%s1" % ("1" * (n - 2)), 2) + 1,
                    2):
        s = bin(x)[2:]
        ok = True
        for i in xrange(2, 11, 1):
            n = int(s, i)
            if is_prime(n):
                ok = False
                break
        if ok:
            l = [0] * 9
            for i in xrange(2, 11, 1):
                n = int(s, i)
                ok = False
                for k in xrange(2, min(int(math.sqrt(n)), 1000000)):
                    if n % k == 0:
                        ok = True
                        l[i - 2] = str(k)
                        break
                if not ok:
                    break
            if ok:
                res += "%s %s\n" % (s, " ".join(l))
                j -= 1
                if j == 0:
                    return res[0:len(res)-1]
import sys
fd = open(sys.argv[1], "rb")
t = int(fd.readline().strip())
for i in xrange(1, t + 1):
    line = fd.readline().strip()
    arr = line.split(" ")
    n = int(arr[0])
    j = int(arr[1])
    res = f(n, j)
    print "Case #%d:\n%s" % (i, res)
fd.close()
| [
"[[email protected]]"
] | |
c429802a9089f13c1454fc1561fb824738bee9ed | 35a2c7e6a01dc7f75116519e4521880416f2a9f2 | /tag/migrations/0002_value.py | 2824ada9ec86e752a45838963178796c90938761 | [] | no_license | engrogerio/edr-rest | ae977857d86aab3ef5b40e4d2be2e24abda97cb9 | a1115a1cd80c0531a85545681b0d3a70b97c529e | refs/heads/master | 2021-01-12T06:35:47.059448 | 2016-12-26T16:20:14 | 2016-12-26T16:20:14 | 77,392,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-07 15:42
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):

    dependencies = [
        ('form', '0002_auto_20160907_1542'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('tag', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Value',
            fields=[
                ('created_when', models.DateTimeField(default=datetime.datetime.now)),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('numeric', models.DecimalField(decimal_places=10, max_digits=20)),
                ('text', models.CharField(max_length=1000)),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('inspection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='form.Inspection')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tag.Tag')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"[email protected]"
] | |
2db93df279e2e7651e7f462a9d558dc444be41b7 | b42b8f2bfadd25c51cbb12054bc6df42943b7536 | /venv/Scripts/easy_install-3.7-script.py | d0dad9ea927377d5b3c3ccd6ddf55aeec430b305 | [] | no_license | sharikgrg/week4.Gazorpazorp | 4b785f281334a6060d6edc8a195a58c072fb5a75 | 0f168a0df81703a8950e375081cafd2e766595fb | refs/heads/master | 2020-08-03T22:41:43.373137 | 2019-09-30T15:35:29 | 2019-09-30T15:35:29 | 211,907,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | #!"C:\Users\Sharik Gurung\OneDrive - Sparta Global Limited\PYTHON\gazorpazorp-space-station\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
] | |
a6095634bed30033519406856e61a0c38e3efe78 | be5d1ababc8dee59d3ea7687d5feee791ea78821 | /.vEnv/lib/python3.5/keyword.py | 8ff824fb913bb4a45d41291215d8dcb855a930e8 | [] | no_license | duggalr2/Personal-RSS | 980083337b52d2b8a74257766d3f1f1f546cac44 | d4cc0d04fdb19ce957f74cfbc583662f0dc3e727 | refs/heads/master | 2021-08-24T07:11:09.036689 | 2017-12-08T15:17:34 | 2017-12-08T15:17:34 | 100,282,827 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | /Users/Rahul/anaconda/lib/python3.5/keyword.py | [
"[email protected]"
] | |
25fe704f5be77484a077c570572772385b9cdd39 | 27c27208a167f089bb8ce4027dedb3fcc72e8e8a | /ProjectEuler/Solutions/Problems 50-100/Q075.py | b1510ace6b6e74c60b66d6a3e138b7926017acc7 | [] | no_license | stankiewiczm/contests | fd4347e7b84c8c7ec41ba9746723036d86e2373c | 85ed40f91bd3eef16e02e8fd45fe1c9b2df2887e | refs/heads/master | 2021-05-10T16:46:41.993515 | 2018-02-16T09:04:15 | 2018-02-16T09:04:15 | 118,587,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | from numpy import *
# m^2+n^2, 2mn, m^2-n^2
LIM = 1500000; Count = zeros(LIM+1, int);
def GCD(a,b):
    while (b > 0):
        c = a-(a/b)*b;
        a = b;
        b = c;
    return a;
m = 1; M2 = 1;
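# Illustration of the loop below: for coprime m > n of opposite parity, Euclid's formula gives
# the primitive triple (m*m - n*n, 2*m*n, m*m + n*n) whose perimeter is 2*M2 + 2*m*n = 2*m*(m+n).
# For example m=2, n=1 yields (3, 4, 5) with perimeter 12, so Count[12], Count[24], ... are incremented.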
while (2*M2 < LIM):
    n = m%2+1;
    while (2*M2+2*m*n < LIM) and (n < m):
        if GCD(m,n) == 1:
            p = 2*M2+2*m*n;
            for k in range(1, LIM/p+1):
                Count[p*k] += 1;
        n += 2;
    m += 1;
    M2 = m*m;
print sum(Count==1)
| [
"[email protected]"
] | |
3f6d20b2b0368bc1fce9ed4428930b1693f2765e | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4134/codes/1723_2505.py | 8e06f5ecd7a38c37363f35f111d10476601ae390 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | from math import*
ang = eval(input("angulo:"))
k = int(input("numero de termos:"))
soma = 0
i = 0
fim = k - 1
while(i <= fim):
    soma = soma+(-1)**i*((ang**(2*i+1)/factorial(2*i+1)))
    i = i + 1
print(round(soma, 10)) | [
"[email protected]"
] | |
e31dea602a2885d6f6b29d64376f9e3e2a16e75e | 57391fbdde43c3d2e8628613d9003c65ff8abf9d | /Exercicios/ex050.py | c2deb6d939dc7f9a9690a0cbb5d9e7af53d18167 | [] | no_license | JoaolSoares/CursoEmVideo_python | 082a6aff52414cdcc7ee94d76c3af0ac2cb2aaf5 | aa9d6553ca890a6d9369e60504290193d1c0fb54 | refs/heads/main | 2023-07-15T07:39:57.299061 | 2021-08-26T20:04:22 | 2021-08-26T20:04:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | soma = 0
for c in range(1, 7):
    n1 = int(input('Diga um {}º numero: ' .format(c)))
    if n1 % 2 == 0:
        soma += n1
print('A soma de todos os numeros pares é de: \033[1;34m{}\033[m'. format(soma))
| [
"[email protected]"
] | |
347d0ea9561448fc30d4a289a796fa6453ad8a76 | 08120ee05b086d11ac46a21473f3b9f573ae169f | /gcloud/google-cloud-sdk/.install/.backup/lib/surface/projects/add_iam_policy_binding.py | c25bf659de28f724ec44d284cf9b7e902abe6009 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | harrystaley/TAMUSA_CSCI4349_Week9_Honeypot | 52f7d5b38af8612b7b0c02b48d0a41d707e0b623 | bd3eb7dfdcddfb267976e3abe4c6c8fe71e1772c | refs/heads/master | 2022-11-25T09:27:23.079258 | 2018-11-19T06:04:07 | 2018-11-19T06:04:07 | 157,814,799 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to add IAM policy binding for a resource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.util import http_retry
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam import iam_util
from googlecloudsdk.command_lib.projects import flags
from googlecloudsdk.command_lib.projects import util as command_lib_util
from googlecloudsdk.command_lib.resource_manager import completers
import six.moves.http_client
@base.ReleaseTracks(base.ReleaseTrack.GA)
class AddIamPolicyBinding(base.Command):
"""Add IAM policy binding for a project.
Adds a policy binding to the IAM policy of a project,
given a project ID and the binding.
"""
detailed_help = iam_util.GetDetailedHelpForAddIamPolicyBinding(
'project', 'example-project-id-1')
@staticmethod
def Args(parser):
flags.GetProjectFlag('add IAM policy binding to').AddToParser(parser)
iam_util.AddArgsForAddIamPolicyBinding(
parser,
role_completer=completers.ProjectsIamRolesCompleter)
@http_retry.RetryOnHttpStatus(six.moves.http_client.CONFLICT)
def Run(self, args):
project_ref = command_lib_util.ParseProject(args.id)
return projects_api.AddIamPolicyBinding(project_ref, args.member, args.role)
| [
"[email protected]"
] | |
c850b7ff62e072b79250924149b4b8d33658b86a | 1276051db6315e12459bd96f1af76ca9f03cb2b4 | /pyslet/blockstore.py | b74c8f5b1edc2ec316f6b28ceb8996475d0c8d75 | [
"BSD-3-Clause"
] | permissive | rcplay/pyslet | 587a08a3225322e44e9cdea22a9f752008ca5eff | 152b2f2a3368ecd35ce985aef1f100f46dc4ae6d | refs/heads/master | 2021-01-18T15:56:32.635146 | 2016-02-25T08:32:43 | 2016-02-25T08:32:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,255 | py | #! /usr/bin/env python
import hashlib
import os
import threading
import time
import random
import logging
import string
import io
from pyslet.vfs import OSFilePath as FilePath
from pyslet.iso8601 import TimePoint
import pyslet.http.params as params
import pyslet.odata2.csdl as edm
import pyslet.odata2.core as core
MAX_BLOCK_SIZE = 65536
"""The default maximum block size for block stores: 64K"""
def _magic():
"""Calculate a magic string used to identify an object."""
try:
magic = os.urandom(4)
except NotImplementedError:
logging.warn("weak magic: urandom not available, "
"falling back to random.randint")
magic = []
for i in xrange(4):
magic.append(unichr(random.randint(0, 255)))
magic = string.join(magic, '')
return magic.encode('hex')
class BlockSize(Exception):
"""Raised when an attempt is made to store a block exceeding the
maximum block size for the block store."""
pass
class BlockMissing(Exception):
"""Raised when an attempt is made to retrieve a block with an
unknown key."""
pass
class LockError(Exception):
"""Raised when a timeout occurs during by
:py:meth:`LockingBlockStore.lock`"""
pass
class BlockStore(object):
"""Abstract class representing storage for blocks of data.
max_block_size
The maximum block size the store can hold. Defaults to
:py:attr:`MAX_BLOCK_SIZE`.
hash_class
The hashing object to use when calculating block keys. Defaults
to hashlib.sha256."""
def __init__(
self,
max_block_size=MAX_BLOCK_SIZE,
hash_class=hashlib.sha256):
self.hash_class = hash_class
self.max_block_size = max_block_size
def key(self, data):
if isinstance(data, bytearray):
data = str(data)
return self.hash_class(data).hexdigest().lower()
def store(self, data):
"""Stores a block of data, returning the hash key
data
A binary string not exceeding the maximum block size"""
if len(data) > self.max_block_size:
raise BlockSize
else:
raise NotImplementedError
def retrieve(self, key):
"""Returns the block of data referenced by key
key
A hex string previously returned by :py:meth:`store`.
If there is no block with *key* :py:class:`BlockMissing` is
raised."""
raise BlockMissing(key)
def delete(self, key):
"""Deletes the block of data referenced by key
key
A hex string previously returned by :py:meth:`store`."""
raise NotImplementedError
class FileBlockStore(BlockStore):
"""Class for storing blocks of data in the file system.
Additional keyword arguments:
dpath
A :py:class:`FilePath` instance pointing to a directory in which
to store the data blocks. If this argument is omitted then a
temporary directory is created using the builtin mkdtemp.
Each block is saved as a single file but the hash key is decomposed
into 3 components to reduce the number of files in a single
directory. For example, if the hash key is 'ABCDEF123' then the
file would be stored at the path: 'AB/CD/EF123'"""
def __init__(self, dpath=None, **kwargs):
super(FileBlockStore, self).__init__(**kwargs)
if dpath is None:
# create a temporary directory
self.dpath = FilePath.mkdtemp('.d', 'pyslet_blockstore-')
else:
self.dpath = dpath
self.tmpdir = self.dpath.join('tmp')
if not self.tmpdir.exists():
try:
self.tmpdir.mkdir()
except OSError:
# catch race condition where someone already created it
pass
self.magic = _magic()
def store(self, data):
# calculate the key
key = self.key(data)
parent = self.dpath.join(key[0:2], key[2:4])
path = parent.join(key[4:])
if path.exists():
return key
elif len(data) > self.max_block_size:
raise BlockSize
else:
tmp_path = self.tmpdir.join(
"%s_%i_%s" %
(self.magic, threading.current_thread().ident, key[
0:32]))
with tmp_path.open(mode="wb") as f:
f.write(data)
if not parent.exists():
try:
parent.makedirs()
except OSError:
# possible race condition, ignore for now
pass
tmp_path.move(path)
return key
def retrieve(self, key):
path = self.dpath.join(key[0:2], key[2:4], key[4:])
if path.exists():
with path.open('rb') as f:
data = f.read()
return data
else:
raise BlockMissing
def delete(self, key):
path = self.dpath.join(key[0:2], key[2:4], key[4:])
if path.exists():
try:
path.remove()
except OSError:
# catch race condition where path is gone already
pass
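# Minimal usage sketch (illustrative addition, not part of the original API surface): store a
# block, read it back via its hash key and delete it again. FileBlockStore() with no arguments
# creates its own temporary directory.
def _example_file_blockstore():
    bs = FileBlockStore()
    key = bs.store(b"hello block")
    assert bs.retrieve(key) == b"hello block"
    bs.delete(key)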
class EDMBlockStore(BlockStore):
"""Class for storing blocks of data in an EDM-backed data service.
Additional keyword arguments:
entity_set
A :py:class:`pyslet.odata2.csdl.EntitySet` instance
Each block is saved as a single entity using the hash as the key.
The entity must have a string key property named *hash* large enough
to hold the hex strings generated by the selected hashing module.
It must also have a Binary *data* property capable of holding
max_block_size bytes."""
def __init__(self, entity_set, **kwargs):
super(EDMBlockStore, self).__init__(**kwargs)
self.entity_set = entity_set
def store(self, data):
key = self.key(data)
with self.entity_set.OpenCollection() as blocks:
if key in blocks:
return key
elif len(data) > self.max_block_size:
raise BlockSize
try:
block = blocks.new_entity()
block['hash'].set_from_value(key)
block['data'].set_from_value(data)
blocks.insert_entity(block)
except edm.ConstraintError:
# race condition, duplicate key
pass
return key
def retrieve(self, key):
with self.entity_set.OpenCollection() as blocks:
try:
block = blocks[key]
return block['data'].value
except KeyError:
raise BlockMissing
def delete(self, key):
with self.entity_set.OpenCollection() as blocks:
try:
del blocks[key]
except KeyError:
pass
class LockStoreContext(object):
def __init__(self, ls, hash_key):
self.ls = ls
self.hash_key = hash_key
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.ls.unlock(self.hash_key)
class LockStore(object):
"""Class for storing simple locks
entity_set
A :py:class:`pyslet.odata2.csdl.EntitySet` instance for the
locks.
lock_timeout
The maximum number of seconds that a lock is considered valid
for. If a lock is older than this time it will be reused
automatically. This value is a long-stop cut off which allows a
system to recover automatically from bugs causing stale locks.
Defaults to 180s (3 minutes)
This object is designed for use in conjunction with the basic block
store to provide locking. The locks are managed using an EDM entity
set.
The entity must have a string key property named *hash* large enough
to hold the hex strings generated by the block store - the hash
values are not checked and can be any ASCII string so the LockStore
class could be reused for other purposes if required.
The entity must also have a string field named *owner* capable of
holding an ASCII string up to 32 characters in length and a datetime
field named *created* for storing the UTC timestamp when each lock
is created. The created property is used for optimistic concurrency
control during updates and must be identified as having fixed
concurrency mode in the entity type's definition."""
def __init__(self, entity_set, lock_timeout=180):
self.entity_set = entity_set
self.lock_timeout = lock_timeout
self.magic = _magic()
def lock(self, hash_key, timeout=60):
"""Acquires the lock on hash_key or raises LockError
The return value is a context manager object that will
automatically release the lock on hash_key when it exits.
locks are not nestable, they can only be acquired once. If the
lock cannot be acquired a back-off strategy is implemented using
random waits up to a total maximum of *timeout* seconds. If the
lock still cannot be obtained :py:class:`LockError` is raised."""
owner = "%s_%i" % (self.magic, threading.current_thread().ident)
with self.entity_set.OpenCollection() as locks:
tnow = time.time()
tstop = tnow + timeout
twait = 0
while tnow < tstop:
time.sleep(twait)
lock = locks.new_entity()
lock['hash'].set_from_value(hash_key)
lock['owner'].set_from_value(owner)
lock['created'].set_from_value(TimePoint.from_now_utc())
try:
locks.insert_entity(lock)
return LockStoreContext(self, hash_key)
except edm.ConstraintError:
pass
try:
lock = locks[hash_key]
except KeyError:
# someone deleted the lock, go straight round again
twait = 0
tnow = time.time()
continue
# has this lock expired?
locktime = lock['created'].value.with_zone(zdirection=0)
if locktime.get_unixtime() + self.lock_timeout < tnow:
# use optimistic locking
lock['owner'].set_from_value(owner)
try:
locks.update_entity(lock)
logging.warn("LockingBlockStore removed stale lock "
"on %s", hash_key)
return LockStoreContext(self, hash_key)
except KeyError:
twait = 0
tnow = time.time()
continue
except edm.ConstraintError:
pass
twait = random.randint(0, timeout // 5)
tnow = time.time()
logging.warn("LockingBlockStore: timeout locking %s", hash_key)
raise LockError
def unlock(self, hash_key):
"""Releases the lock on *hash_key*
Typically called by the context manager object returned by
:py:meth:`lock` rather than called directly.
Stale locks are handled automatically but three possible warning
conditions may be logged. All stale locks indicate that the
process holding the lock was unexpectedly slow (or clients with
poorly synchronised clocks) so these warnings suggest the need
for increasing the lock_timeout.
stale lock reused
The lock was not released as it has been acquired by another
owner. Could indicate significant contention on this
hash_key.
stale lock detected
The lock was no longer present and has since been acquired
and released by another owner. Indicates a slow process
holding locks.
stale lock race
The lock timed out and was reused while we were removing it.
Unlikely but indicates both significant contention and a
slow process holding the lock."""
owner = "%s_%i" % (self.magic, threading.current_thread().ident)
with self.entity_set.OpenCollection() as locks:
try:
lock = locks[hash_key]
if lock['owner'].value == owner:
# this is our lock - delete it
# potential race condition here if we timeout between
# loading and deleting the entity so we check how
# close it is and buy more time if necessary
locktime = lock['created'].value.with_zone(zdirection=0)
if (locktime.get_unixtime() + self.lock_timeout <
time.time() + 1):
# less than 1 second left, buy more time
# triggers update of 'created' property using
# optimistic locking ensuring we still own
locks.update_entity(lock)
del locks[hash_key]
else:
# we're not the owner
logging.warn("LockingBlockStore: stale lock reused "
"on busy hash %s", hash_key)
except KeyError:
# someone deleted the lock already - timeout?
logging.warn("LockingBlockStore: stale lock detected "
"on hash %s", hash_key)
pass
except edm.ConstraintError:
logging.warn("LockingBlockStore: stale lock race "
"on busy hash %s", hash_key)
class StreamStore(object):
"""Class for storing stream objects
Streams are split in to blocks that are stored in the associated
BlockStore. Timed locks are used to minimise the risk of conflicts
during store and delete operations on each block but all other
operations are done without locks. As a result, it is possible to
delete or modify a stream while another client is using it.
The intended use case for this store is to read and write entire
streams - not for editing. The stream identifiers are simply
numbers so if you want to modify the stream associated with a
resource in your application upload a new stream, switch the
references in your application and then delete the old one.
bs
A :py:class:`BlockStore`: used to store the actual data. The
use of a block store to persist the data in the stream ensures
that duplicate streams have only a small impact on storage
requirements as the block references are all that is duplicated.
Larger block sizes reduce this overhead and speed up access at
the expense of keeping a larger portion of the stream in memory
during streaming operations. The block size is set when the
block store is created.
ls
A :py:class:`LockStore`: used to lock blocks during write and
delete operations.
entity_set
An :py:class:`~pyslet.odata2.csdl.EntitySet` to hold the Stream
entities.
The entity set must have the following properties:
streamID
An automatically generated integer stream identifier that is
also the key
mimetype
An ASCII string to hold the stream's mime type (at least 64
characters).
created
An Edm.DateTime property to hold the creation date.
modified
An Edm.DateTime property to hold the last modified date.
size
An Edm.Int64 to hold the stream's size
md5
An Edm.Binary field of fixed length 16 bytes to hold the
MD5 checksum of the stream.
Blocks
A 1..Many navigation property to a related entity set with the
following properties...
blockID
An automatically generated integer block identifier that is
also the key
num
A block sequence integer
hash
The hash key of the block in the block store"""
def __init__(self, bs, ls, entity_set):
self.bs = bs
self.ls = ls
self.stream_set = entity_set
self.block_set = entity_set.NavigationTarget('Blocks')
def new_stream(self,
mimetype=params.MediaType('application', 'octet-stream'),
created=None):
"""Creates a new stream in the store.
mimetype
A :py:class:`~pyslet.http.params.MediaType` object
Returns a stream entity which is an
:py:class:`~pyslet.odata2.csdl.Entity` instance.
The stream is identified by the stream entity's key which you
can store elsewhere as a reference and pass to
:py:meth:`get_stream` to retrieve the stream again later."""
with self.stream_set.OpenCollection() as streams:
stream = streams.new_entity()
if not isinstance(mimetype, params.MediaType):
mimetype = params.MediaType.from_str(mimetype)
stream['mimetype'].set_from_value(str(mimetype))
now = TimePoint.from_now_utc()
stream['size'].set_from_value(0)
if created is None:
stream['created'].set_from_value(now)
stream['modified'].set_from_value(now)
else:
created = created.shift_zone(0)
stream['created'].set_from_value(created)
stream['modified'].set_from_value(created)
stream['md5'].set_from_value(hashlib.md5().digest())
streams.insert_entity(stream)
return stream
def get_stream(self, stream_id):
"""Returns the stream with identifier *stream_id*.
Returns the stream entity as an
:py:class:`~pyslet.odata2.csdl.Entity` instance."""
with self.stream_set.OpenCollection() as streams:
stream = streams[stream_id]
return stream
def open_stream(self, stream, mode="r"):
"""Returns a file-like object for a stream.
Returns an object derived from io.RawIOBase.
stream
A stream entity
mode
Files are always opened in binary mode. The characters "r",
"w" and "+" and "a" are honoured.
Warning: read and write methods of the resulting objects do not
always return all requested bytes. In particular, read or write
operations never cross block boundaries in a single call."""
if stream is None:
raise ValueError
return BlockStream(self, stream, mode)
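# Minimal usage sketch (illustrative): write a stream in one pass and read it back, e.g.
#   stream = ss.new_stream(mimetype="text/plain")
#   dst = ss.open_stream(stream, "w"); dst.write(b"hello stream"); dst.close()
#   src = ss.open_stream(ss.get_stream(stream.key()), "r"); data = src.read(); src.close()
# Note that read()/write() on the returned BlockStream never cross block boundaries in one call.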
def delete_stream(self, stream):
"""Deletes a stream from the store.
Any data blocks that are orphaned by this deletion are
removed."""
with self.stream_set.OpenCollection() as streams:
self.delete_blocks(stream)
del streams[stream.key()]
stream.exists = False
def store_block(self, stream, block_num, data):
hash_key = self.bs.key(data)
with stream['Blocks'].OpenCollection() as blocks:
block = blocks.new_entity()
block['num'].set_from_value(block_num)
block['hash'].set_from_value(hash_key)
blocks.insert_entity(block)
# now ensure that the data is stored
with self.ls.lock(hash_key):
self.bs.store(data)
return block
def update_block(self, block, data):
hash_key = block['hash'].value
new_hash = self.bs.key(data)
if new_hash == hash_key:
return
filter = core.BinaryExpression(core.Operator.eq)
filter.AddOperand(core.PropertyExpression('hash'))
hash_value = edm.EDMValue.NewSimpleValue(edm.SimpleType.String)
filter.AddOperand(core.LiteralExpression(hash_value))
# filter is: hash eq <hash_value>
with self.block_set.OpenCollection() as base_coll:
with self.ls.lock(hash_key):
with self.ls.lock(new_hash):
self.bs.store(data)
block['hash'].set_from_value(new_hash)
base_coll.update_entity(block)
# is the old hash key used anywhere?
hash_value.set_from_value(hash_key)
base_coll.set_filter(filter)
if len(base_coll) == 0:
# remove orphan block from block store
self.bs.delete(hash_key)
def retrieve_blocklist(self, stream):
with stream['Blocks'].OpenCollection() as blocks:
blocks.set_orderby(
core.CommonExpression.OrderByFromString("num asc"))
for block in blocks.itervalues():
yield block
def retrieve_block(self, block):
return self.bs.retrieve(block['hash'].value)
def delete_blocks(self, stream, from_num=0):
blocks = list(self.retrieve_blocklist(stream))
filter = core.BinaryExpression(core.Operator.eq)
filter.AddOperand(core.PropertyExpression('hash'))
hash_value = edm.EDMValue.NewSimpleValue(edm.SimpleType.String)
filter.AddOperand(core.LiteralExpression(hash_value))
# filter is: hash eq <hash_value>
with self.block_set.OpenCollection() as base_coll:
for block in blocks:
if from_num and block['num'].value < from_num:
continue
hash_key = block['hash'].value
with self.ls.lock(hash_key):
del base_coll[block.key()]
# is this hash key used anywhere?
hash_value.set_from_value(hash_key)
base_coll.set_filter(filter)
if len(base_coll) == 0:
# remove orphan block from block store
self.bs.delete(hash_key)
class BlockStream(io.RawIOBase):
"""Provides a file-like interface to stored streams
Based on the new style io.RawIOBase these streams are always in
binary mode. They are seekable but lack efficiency if random access
    is used across block boundaries.  The main design criterion is to
ensure that no more than one block is kept in memory at any one
time."""
def __init__(self, ss, stream, mode="r"):
self.ss = ss
self.stream = stream
self.r = "r" in mode or "+" in mode
self.w = "w" in mode or "+" in mode
self.size = stream['size'].value
self.block_size = self.ss.bs.max_block_size
self._bdata = None
self._bnum = 0
self._bpos = 0
self._btop = 0
self._bdirty = False
self._md5 = None
if "a" in mode:
self.seek(self.size)
self.blocks = list(self.ss.retrieve_blocklist(self.stream))
else:
self.seek(0)
if "w" in mode:
self.ss.delete_blocks(self.stream)
self.blocks = []
self._md5 = hashlib.md5()
self._md5num = 0
else:
self.blocks = list(self.ss.retrieve_blocklist(self.stream))
def close(self):
super(BlockStream, self).close()
self.blocks = None
self.r = self.w = False
def readable(self):
return self.r
def writable(self):
return self.w
def seekable(self):
return True
def seek(self, offset, whence=io.SEEK_SET):
if whence == io.SEEK_SET:
self.pos = offset
elif whence == io.SEEK_CUR:
self.pos += offset
elif whence == io.SEEK_END:
self.pos = self.size + offset
else:
raise IOError("bad value for whence in seek")
new_bnum = self.pos // self.block_size
if new_bnum != self._bnum:
self.flush()
self._bdata = None
self._bnum = new_bnum
self._bpos = self.pos % self.block_size
self._set_btop()
def _set_btop(self):
if self.size // self.block_size == self._bnum:
# we're pointing to the last block
self._btop = self.size % self.block_size
else:
self._btop = self.block_size
def flush(self):
if self._bdirty:
# the current block is dirty, write it out
data = self._bdata[:self._btop]
if data:
block = self.blocks[self._bnum]
if block.exists:
self.ss.update_block(block, str(data))
else:
self.blocks[self._bnum] = self.ss.store_block(
self.stream, self._bnum, data)
if self._md5 is not None and self._bnum == self._md5num:
self._md5.update(str(data))
self._md5num += 1
else:
self._md5 = None
if self.size != self.stream['size'].value:
self.stream['size'].set_from_value(self.size)
now = TimePoint.from_now_utc()
self.stream['modified'].set_from_value(now)
if self._md5 is not None:
self.stream['md5'].set_from_value(self._md5.digest())
else:
self.stream['md5'].set_null()
self.stream.commit()
self._bdirty = False
def tell(self):
return self.pos
def readinto(self, b):
if not self.r:
raise IOError("stream not open for reading")
nbytes = self._btop - self._bpos
if nbytes <= 0:
# we must be at the file size limit
return 0
if self._bdata is None:
# load the data
if self.w:
# create a full size block in case we also write
self._bdata = bytearray(self.block_size)
data = self.ss.retrieve_block(self.blocks[self._bnum])
self._bdata[:len(data)] = data
else:
self._bdata = self.ss.retrieve_block(self.blocks[self._bnum])
if nbytes > len(b):
nbytes = len(b)
b[:nbytes] = self._bdata[self._bpos:self._bpos + nbytes]
self.seek(nbytes, io.SEEK_CUR)
return nbytes
def write(self, b):
if not self.w:
raise IOError("stream not open for writing")
# we can always write something in the block, nbytes > 0
nbytes = self.block_size - self._bpos
if self._bdata is None:
if self._btop <= 0:
                # add new empty blocks first
last_block = len(self.blocks)
while last_block < self._bnum:
self.blocks.append(self.ss.store_block(
self.stream, last_block, bytearray(self.block_size)))
last_block += 1
self.size = last_block * self.block_size
# force the new size to be written
self._bdata = bytearray(self.block_size)
self._bdirty = True
self.flush()
# finally add the last block, but don't store it yet
with self.stream['Blocks'].OpenCollection() as blist:
new_block = blist.new_entity()
new_block['num'].set_from_value(self._bnum)
self.blocks.append(new_block)
self.size = self.pos
self._set_btop()
if self._bpos:
self._bdirty = True
else:
self._bdata = bytearray(self.block_size)
data = self.ss.retrieve_block(self.blocks[self._bnum])
self._bdata[:len(data)] = data
if nbytes > len(b):
nbytes = len(b)
self._bdata[self._bpos:self._bpos + nbytes] = b[:nbytes]
self._bdirty = True
if self.pos + nbytes > self.size:
self.size = self.pos + nbytes
self._set_btop()
self.seek(nbytes, io.SEEK_CUR)
return nbytes
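# --- Illustrative usage sketch (not part of the original module) ---
# The open_stream() docstring above warns that read()/readinto() never cross
# block boundaries, so a single call may return fewer bytes than requested.
# The helper below shows one way a caller might copy a whole stored stream
# into a writable binary file object; the store object ``ss`` and the key
# ``stream_id`` are assumed to exist already.
def copy_stream_to_file(ss, stream_id, dst, chunk_size=65536):
    """Copy the stored stream identified by *stream_id* into *dst*,
    tolerating short reads at block boundaries."""
    stream = ss.get_stream(stream_id)
    src = ss.open_stream(stream, "r")
    try:
        while True:
            data = src.read(chunk_size)
            if not data:
                break  # EOF: read() returned an empty byte string
            dst.write(data)
    finally:
        src.close()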
| [
"[email protected]"
] | |
105b66682da75be919d969965dcd0c11bb4617ce | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /src/datadog_api_client/v1/model/monitor_search_response_counts.py | 94a81e62a22817294ef78c86f9fecc7290984a77 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 2,063 | py | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import Union, TYPE_CHECKING
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
unset,
UnsetType,
)
if TYPE_CHECKING:
from datadog_api_client.v1.model.monitor_search_count import MonitorSearchCount
class MonitorSearchResponseCounts(ModelNormal):
@cached_property
def openapi_types(_):
from datadog_api_client.v1.model.monitor_search_count import MonitorSearchCount
return {
"muted": (MonitorSearchCount,),
"status": (MonitorSearchCount,),
"tag": (MonitorSearchCount,),
"type": (MonitorSearchCount,),
}
attribute_map = {
"muted": "muted",
"status": "status",
"tag": "tag",
"type": "type",
}
def __init__(
self_,
muted: Union[MonitorSearchCount, UnsetType] = unset,
status: Union[MonitorSearchCount, UnsetType] = unset,
tag: Union[MonitorSearchCount, UnsetType] = unset,
type: Union[MonitorSearchCount, UnsetType] = unset,
**kwargs,
):
"""
The counts of monitors per different criteria.
:param muted: Search facets.
:type muted: MonitorSearchCount, optional
:param status: Search facets.
:type status: MonitorSearchCount, optional
:param tag: Search facets.
:type tag: MonitorSearchCount, optional
:param type: Search facets.
:type type: MonitorSearchCount, optional
"""
if muted is not unset:
kwargs["muted"] = muted
if status is not unset:
kwargs["status"] = status
if tag is not unset:
kwargs["tag"] = tag
if type is not unset:
kwargs["type"] = type
super().__init__(kwargs)
| [
"[email protected]"
] | |
ee7611e405952a6d724354ab56524138152af431 | f8d3f814067415485bb439d7fe92dc2bbe22a048 | /solem/pcv_book/graphcut.py | 242ac3449953f8cca3ec94fabb66d20ceecfa821 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | gmonkman/python | 2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3 | 9123aa6baf538b662143b9098d963d55165e8409 | refs/heads/master | 2023-04-09T15:53:29.746676 | 2022-11-26T20:35:21 | 2022-11-26T20:35:21 | 60,254,898 | 0 | 2 | null | 2023-03-24T22:58:39 | 2016-06-02T10:25:27 | Python | UTF-8 | Python | false | false | 3,413 | py | from pylab import *
from numpy import *
from pygraph.classes.digraph import digraph
from pygraph.algorithms.minmax import maximum_flow
import bayes
"""
Graph Cut image segmentation using max-flow/min-cut.
"""
def build_bayes_graph(im, labels, sigma=1e2, kappa=1):
""" Build a graph from 4-neighborhood of pixels.
Foreground and background is determined from
labels (1 for foreground, -1 for background, 0 otherwise)
and is modeled with naive Bayes classifiers."""
m, n = im.shape[:2]
# RGB vector version (one pixel per row)
vim = im.reshape((-1, 3))
# RGB for foreground and background
foreground = im[labels == 1].reshape((-1, 3))
background = im[labels == -1].reshape((-1, 3))
train_data = [foreground, background]
# train naive Bayes classifier
bc = bayes.BayesClassifier()
bc.train(train_data)
# get probabilities for all pixels
    bc_labels, prob = bc.classify(vim)
prob_fg = prob[0]
prob_bg = prob[1]
# create graph with m*n+2 nodes
gr = digraph()
gr.add_nodes(range(m * n + 2))
source = m * n # second to last is source
sink = m * n + 1 # last node is sink
# normalize
for i in range(vim.shape[0]):
vim[i] = vim[i] / (linalg.norm(vim[i]) + 1e-9)
# go through all nodes and add edges
for i in range(m * n):
# add edge from source
gr.add_edge((source, i), wt=(prob_fg[i] / (prob_fg[i] + prob_bg[i])))
# add edge to sink
gr.add_edge((i, sink), wt=(prob_bg[i] / (prob_fg[i] + prob_bg[i])))
# add edges to neighbors
if i % n != 0: # left exists
edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i - 1])**2) / sigma)
gr.add_edge((i, i - 1), wt=edge_wt)
if (i + 1) % n != 0: # right exists
edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i + 1])**2) / sigma)
gr.add_edge((i, i + 1), wt=edge_wt)
if i // n != 0: # up exists
edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i - n])**2) / sigma)
gr.add_edge((i, i - n), wt=edge_wt)
if i // n != m - 1: # down exists
edge_wt = kappa * exp(-1.0 * sum((vim[i] - vim[i + n])**2) / sigma)
gr.add_edge((i, i + n), wt=edge_wt)
return gr
def cut_graph(gr, imsize):
""" Solve max flow of graph gr and return binary
labels of the resulting segmentation."""
m, n = imsize
source = m * n # second to last is source
sink = m * n + 1 # last is sink
# cut the graph
flows, cuts = maximum_flow(gr, source, sink)
# convert graph to image with labels
res = zeros(m * n)
    for pos, label in list(cuts.items())[:-2]:  # don't add source/sink
res[pos] = label
return res.reshape((m, n))
def save_as_pdf(gr, filename, show_weights=False):
from pygraph.readwrite.dot import write
import gv
dot = write(gr, weighted=show_weights)
gvv = gv.readstring(dot)
gv.layout(gvv, 'fdp')
gv.render(gvv, 'pdf', filename)
def show_labeling(im, labels):
""" Show image with foreground and background areas.
labels = 1 for foreground, -1 for background, 0 otherwise."""
imshow(im)
contour(labels, [-0.5, 0.5])
contourf(labels, [-1, -0.5], colors='b', alpha=0.25)
contourf(labels, [0.5, 1], colors='r', alpha=0.25)
# axis('off')
xticks([])
yticks([])
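# --- Illustrative usage sketch (not part of the original module) ---
# Ties the functions above together: build the Bayes-weighted graph from an
# RGB image plus a training-label array, solve the min-cut, and display the
# result. The image is assumed to be an (m, n, 3) float array and ``labels``
# an (m, n) array with 1 for marked foreground, -1 for background, 0 elsewhere.
def segment_example(im, labels, kappa=1):
    """Segment im using the scribbles in labels and show the result."""
    gr = build_bayes_graph(im, labels, kappa=kappa)
    res = cut_graph(gr, im.shape[:2])
    figure()
    show_labeling(im, labels)
    figure()
    imshow(res)
    gray()
    show()
    return res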
| [
"[email protected]"
] | |
59aeb4698e5be1a9660b979dcf41c2e3880deca6 | 14bb0b5d7478d3a8740cbc15cc7870fcd1fa8207 | /tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/common_v1.py | c8dcb7ba231cf3f57f8b8a5dd3782e2a124fbac7 | [
"Apache-2.0"
] | permissive | terigrossheim/tensorflow | 2be34891c99e0fcf88cf8418632f24676f1620a7 | ed9d45f096097c77664815c361c75e73af4f32d4 | refs/heads/master | 2022-11-06T12:08:10.099807 | 2020-06-29T12:10:56 | 2020-06-29T12:35:24 | 275,867,898 | 1 | 0 | Apache-2.0 | 2020-06-29T16:21:41 | 2020-06-29T16:21:39 | null | UTF-8 | Python | false | false | 4,320 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Serves as a common "main" function for all the SavedModel tests.
There is a fair amount of setup needed to initialize tensorflow and get it
into a proper TF2 execution mode. This hides that boilerplate.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from tensorflow.python import pywrap_mlir # pylint: disable=g-direct-tensorflow-import
# Use /tmp to make debugging the tests easier (see README.md)
flags.DEFINE_string('save_model_path', '', 'Path to save the model to.')
FLAGS = flags.FLAGS
def set_tf_options():
# Default TF1.x uses reference variables that are not supported by SavedModel
# v1 Importer. To use SavedModel V1 Importer, resource variables should be
# enabled.
tf.enable_resource_variables()
tf.compat.v1.disable_eager_execution()
# This function needs to take a "create_module_fn", as opposed to just the
# module itself, because the creation of the module has to be delayed until
# after absl and tensorflow have run various initialization steps.
def do_test(signature_def_map,
init_op=None,
canonicalize=False,
show_debug_info=False):
"""Runs test.
1. Performs absl and tf "main"-like initialization that must run before almost
anything else.
2. Converts signature_def_map to SavedModel V1
3. Converts SavedModel V1 to MLIR
4. Prints the textual MLIR to stdout (it is expected that the caller will have
FileCheck checks in its file to check this output).
This is only for use by the MLIR SavedModel importer tests.
Args:
signature_def_map: A map from string key to signature_def. The key will be
used as function name in the resulting MLIR.
init_op: The initializer op for the saved model. If set, it will generate a
initializer graph in the resulting MLIR.
canonicalize: If true, canonicalizer will be run on the resulting MLIR.
show_debug_info: If true, shows debug locations in the resulting MLIR.
"""
# Make LOG(ERROR) in C++ code show up on the console.
# All `Status` passed around in the C++ API seem to eventually go into
# `LOG(ERROR)`, so this makes them print out by default.
logging.set_stderrthreshold('error')
def app_main(argv):
"""Function passed to absl.app.run."""
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if FLAGS.save_model_path:
save_model_path = FLAGS.save_model_path
else:
save_model_path = tempfile.mktemp(suffix='.saved_model')
sess = tf.Session()
sess.run(tf.initializers.global_variables())
builder = tf.saved_model.builder.SavedModelBuilder(save_model_path)
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map,
main_op=init_op,
strip_default_attrs=True)
builder.save()
logging.info('Saved model to: %s', save_model_path)
# TODO(b/153507667): Set the following boolean flag once the hoisting
# variables logic from SavedModel importer is removed.
lift_variables = False
mlir = pywrap_mlir.experimental_convert_saved_model_v1_to_mlir(
save_model_path, ','.join([tf.saved_model.tag_constants.SERVING]),
lift_variables, show_debug_info)
if canonicalize:
mlir = pywrap_mlir.experimental_run_pass_pipeline(mlir, 'canonicalize',
show_debug_info)
print(mlir)
app.run(app_main)
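# --- Illustrative usage sketch (not part of the original helper) ---
# A SavedModel importer test normally lives in its own file: it builds a small
# TF1 graph, wraps the inputs/outputs into a signature_def map, and passes that
# map to do_test(), which prints the resulting MLIR for FileCheck. The tiny
# matmul graph below is only a placeholder showing the expected call pattern;
# the tensor names and shapes are arbitrary.
def _example_signature_map():
  x = tf.placeholder(tf.float32, shape=(3, 1), name='input')
  w = tf.get_variable('w', shape=(1, 3), dtype=tf.float32)
  y = tf.matmul(w, x, name='result')
  return {
      'serving_default':
          tf.saved_model.signature_def_utils.predict_signature_def(
              inputs={'x': x}, outputs={'y': y}),
  }
if __name__ == '__main__':
  set_tf_options()
  do_test(_example_signature_map())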
| [
"[email protected]"
] | |
c30392e2bb7b8ca47fa86eecc06d3ba2ebbf67c5 | b6472217400cfce4d12e50a06cd5cfc9e4deee1f | /sites/top/api/rest/WlbItemDeleteRequest.py | 90181cc7828d9d8c9ed09c35a46a07e62a9e7a08 | [] | no_license | topwinner/topwinner | 2d76cab853b481a4963826b6253f3fb0e578a51b | 83c996b898cf5cfe6c862c9adb76a3d6a581f164 | refs/heads/master | 2021-01-22T22:50:09.653079 | 2012-08-26T19:11:16 | 2012-08-26T19:11:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | '''
Created by auto_sdk on 2012-08-26 16:43:44
'''
from top.api.base import RestApi
class WlbItemDeleteRequest(RestApi):
def __init__(self,domain,port):
RestApi.__init__(self,domain, port)
self.item_id = None
self.user_nick = None
def getapiname(self):
return 'taobao.wlb.item.delete'
| [
"[email protected]"
] | |
a33002ee62b9f1e34ed9eabcd27de694c1e05a29 | 00f1f01f218fddc30a4194e999f0b48c45c47012 | /elements/resources/migrations/0001_initial.py | fb4b807194a3f2905b8e8ba7d9f27baedea4299e | [] | no_license | mikpanko/grakon | 495659317c5933a95650b3f9000aab73e7335a13 | 6c64432c366a6ad44fb7227f22498335bd193f37 | refs/heads/master | 2020-12-26T00:19:52.799388 | 2013-07-28T02:33:19 | 2013-07-28T02:33:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,184 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_table('elements_entityresource', 'resources_entityresource')
if not db.dry_run:
# For permissions to work properly after migrating
orm['contenttypes.contenttype'].objects.filter(app_label='elements', model='EntityResource').update(app_label='elements.resources')
def backwards(self, orm):
db.rename_table('resources_entityresource', 'elements_entityresource')
if not db.dry_run:
# For permissions to work properly after migrating
orm['contenttypes.contenttype'].objects.filter(app_label='elements.resources', model='EntityResource').update(app_label='elements')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'resources.entityresource': {
'Meta': {'unique_together': "(('content_type', 'entity_id', 'resource'),)", 'object_name': 'EntityResource'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'})
}
}
complete_apps = ['resources'] | [
"[email protected]"
] | |
c24968234482ab1ffb8a859e5600f442fb2e4fff | 2fb75382cb8bb94ed8da382dc5843766ead6def2 | /python/xraydb/xraydb.py | 975f87f7bf9e39948a982fe26446153e5d8eb0da | [
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | mlevant/XrayDB | 16c677121364e163ce9c91a38c4880c0c3b6aa19 | cf2b405a60cefae961db220aca62f3fb7375b544 | refs/heads/master | 2021-05-18T01:46:21.303699 | 2019-10-09T18:38:10 | 2019-10-09T18:38:10 | 251,052,175 | 0 | 0 | NOASSERTION | 2020-03-29T14:29:43 | 2020-03-29T14:29:43 | null | UTF-8 | Python | false | false | 25,199 | py | #!/usr/bin/env python
"""
SQLAlchemy wrapping of x-ray database for data from
Elam et al, Chantler et al, Waasmaier and Kirfel
Main Class for full Database: xrayDB
"""
import os
import json
from collections import namedtuple
import numpy as np
from scipy.interpolate import UnivariateSpline
from sqlalchemy import MetaData, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import SingletonThreadPool
from .utils import elam_spline, as_ndarray
XrayEdge = namedtuple('XrayEdge', ('energy', 'fyield', 'jump_ratio'))
XrayLine = namedtuple('XrayLine', ('energy', 'intensity', 'initial_level',
'final_level'))
ElementData = namedtuple('ElementData', ('Z', 'symbol', 'mass', 'density'))
__version__ = '1.4'
def make_engine(dbname):
"create engine for sqlite connection"
return create_engine('sqlite:///%s' % (dbname),
poolclass=SingletonThreadPool)
def isxrayDB(dbname):
"""whether a file is a valid XrayDB database
Args:
dbname (string): name of XrayDB file
Returns:
bool: is file a valid XrayDB
Notes:
1. must be a sqlite db file, with tables named 'elements',
'photoabsorption', 'scattering', 'xray_levels', 'Coster_Kronig',
'Chantler', 'Waasmaier', and 'KeskiRahkonen_Krause'
"""
_tables = ('Chantler', 'Waasmaier', 'Coster_Kronig',
'KeskiRahkonen_Krause', 'xray_levels',
'elements', 'photoabsorption', 'scattering')
result = False
try:
engine = make_engine(dbname)
meta = MetaData(engine)
meta.reflect()
result = all([t in meta.tables for t in _tables])
except:
pass
return result
class XrayDB():
"""
Database of Atomic and X-ray Data
This XrayDB object gives methods to access the Atomic and
X-ray data in th SQLite3 database xraydb.sqlite.
Much of the data in this database comes from the compilation
of Elam, Ravel, and Sieber, with additional data from Chantler,
and other sources. See the documention and bibliography for
a complete listing.
"""
def __init__(self, dbname='xraydb.sqlite', read_only=True):
"connect to an existing database"
if not os.path.exists(dbname):
parent, _ = os.path.split(__file__)
dbname = os.path.join(parent, dbname)
if not os.path.exists(dbname):
raise IOError("Database '%s' not found!" % dbname)
if not isxrayDB(dbname):
raise ValueError("'%s' is not a valid X-ray Database file!" % dbname)
self.dbname = dbname
self.engine = make_engine(dbname)
self.conn = self.engine.connect()
kwargs = {}
if read_only:
kwargs = {'autoflush': True, 'autocommit': False}
def readonly_flush(*args, **kwargs):
return
self.session = sessionmaker(bind=self.engine, **kwargs)()
self.session.flush = readonly_flush
else:
self.session = sessionmaker(bind=self.engine, **kwargs)()
self.metadata = MetaData(self.engine)
self.metadata.reflect()
self.tables = self.metadata.tables
elems = self.tables['elements'].select().execute()
self.atomic_symbols = [e.element for e in elems.fetchall()]
def close(self):
"close session"
self.session.flush()
self.session.close()
def query(self, *args, **kws):
"generic query"
return self.session.query(*args, **kws)
def get_version(self, long=False, with_history=False):
"""
return sqlite3 database and python library version numbers
Parameters:
long (bool): show timestamp and notes of latest version [False]
with_history (bool): show complete version history [False]
Returns:
string: version information
"""
out = []
rows = self.tables['Version'].select().execute().fetchall()
if not with_history:
rows = rows[-1:]
if long or with_history:
for row in rows:
out.append("XrayDB Version: %s [%s] '%s'" % (row.tag,
row.date,
row.notes))
out.append("Python Version: %s" % __version__)
out = "\n".join(out)
else:
out = "XrayDB Version: %s, Python Version: %s" % (rows[0].tag,
__version__)
return out
def f0_ions(self, element=None):
"""
return list of ion names supported for the .f0() function.
Parameters:
            element (string, int, or None): atomic number, symbol, or ionic symbol
of scattering element.
Returns:
list: if element is None, all 211 ions are returned.
if element is not None, the ions for that element are returned
Example:
>>> xdb = XrayDB()
>>> xdb.f0_ions('Fe')
['Fe', 'Fe2+', 'Fe3+']
Notes:
Z values from 1 to 98 (and symbols 'H' to 'Cf') are supported.
References:
Waasmaier and Kirfel
"""
wtab = self.tables['Waasmaier']
rows = self.query(wtab)
if element is not None:
elem = self.symbol(element)
rows = rows.filter(wtab.c.element == elem)
return [str(r.ion) for r in rows.all()]
def f0(self, ion, q):
"""
return f0(q) -- elastic X-ray scattering factor from Waasmaier and Kirfel
Parameters:
ion (string, int, or None): atomic number, symbol or ionic symbol
of scattering element.
q (float, list, ndarray): value(s) of q for scattering factors
Returns:
ndarray: elastic scattering factors
Example:
>>> xdb = XrayDB()
>>> xdb.f0('Fe', range(10))
array([ 25.994603 , 6.55945765, 3.21048827, 1.65112769,
1.21133507, 1.0035555 , 0.81012185, 0.61900285,
0.43883403, 0.27673021])
Notes:
q = sin(theta) / lambda, where theta = incident angle,
and lambda = X-ray wavelength
Z values from 1 to 98 (and symbols 'H' to 'Cf') are supported.
The list of ionic symbols can be read with the function .f0_ions()
References:
Waasmaier and Kirfel
"""
wtab = self.tables['Waasmaier']
if isinstance(ion, int):
row = self.query(wtab).filter(wtab.c.atomic_number == ion).all()[0]
elif ion not in self.f0_ions():
raise ValueError('No ion {:s} from Waasmaier table'.format(repr(ion)))
else:
row = self.query(wtab).filter(wtab.c.ion == ion.title()).all()[0]
q = as_ndarray(q)
f0 = row.offset
for s, e in zip(json.loads(row.scale), json.loads(row.exponents)):
f0 += s * np.exp(-e*q*q)
return f0
def _from_chantler(self, element, energy, column='f1', smoothing=0):
"""
return energy-dependent data from Chantler table
Parameters:
element (string or int): atomic number or symbol.
eneregy (float or ndarray):
columns: f1, f2, mu_photo, mu_incoh, mu_total
Notes:
this function is meant for internal use.
"""
ctab = self.tables['Chantler']
elem = self.symbol(element)
row = self.query(ctab).filter(ctab.c.element == elem).one()
energy = as_ndarray(energy)
emin, emax = min(energy), max(energy)
te = np.array(json.loads(row.energy))
nemin = max(0, -3 + max(np.where(te <= emin)[0]))
nemax = min(len(te), 3 + max(np.where(te <= emax)[0]))
te = te[nemin:nemax+1]
if column == 'mu':
column = 'mu_total'
ty = np.array(json.loads(getattr(row, column)))[nemin:nemax+1]
if column == 'f1':
out = UnivariateSpline(te, ty, s=smoothing)(energy)
else:
out = np.exp(np.interp(np.log(energy),
np.log(te),
np.log(ty)))
if isinstance(out, np.ndarray) and len(out) == 1:
out = out[0]
return out
def chantler_energies(self, element, emin=0, emax=1.e9):
"""
return array of energies (in eV) at which data is
tabulated in the Chantler tables for a particular element.
Parameters:
element (string or int): atomic number or symbol
emin (float): minimum energy (in eV) [0]
emax (float): maximum energy (in eV) [1.e9]
Returns:
ndarray: energies
References:
Chantler
Notes:
returns 2 energies below emin and above emax to better
enable interpolation
"""
ctab = self.tables['Chantler']
elem = self.symbol(element)
row = self.query(ctab).filter(ctab.c.element == elem).one()
te = np.array(json.loads(row.energy))
if emin <= min(te):
nemin = 0
else:
nemin = max(0, -1 + max(np.where(te <= emin)[0]))
if emax > max(te):
nemax = len(te)
else:
nemax = min(len(te), 2 + max(np.where(te <= emax)[0]))
return te[nemin:nemax+1]
def f1_chantler(self, element, energy, **kws):
"""
returns f1 -- real part of anomalous X-ray scattering factor
for selected input energy (or energies) in eV.
Parameters:
element (string or int): atomic number or symbol
energy (float or ndarray): energies (in eV).
Returns:
ndarray: real part of anomalous scattering factor
References:
Chantler
"""
return self._from_chantler(element, energy, column='f1', **kws)
def f2_chantler(self, element, energy, **kws):
"""
returns f2 -- imaginary part of anomalous X-ray scattering factor
for selected input energy (or energies) in eV.
Parameters:
element (string or int): atomic number or symbol
energy (float or ndarray): energies (in eV).
Returns:
ndarray: imaginary part of anomalous scattering factor
References:
Chantler
"""
return self._from_chantler(element, energy, column='f2', **kws)
def mu_chantler(self, element, energy, incoh=False, photo=False):
"""
returns X-ray mass attenuation coefficient, mu/rho in cm^2/gr
for selected input energy (or energies) in eV.
default is to return total attenuation coefficient.
Parameters:
element (string or int): atomic number or symbol
energy (float or ndarray): energies (in eV).
photo (bool): return only the photo-electric contribution [False]
incoh (bool): return only the incoherent contribution [False]
Returns:
ndarray: mass attenuation coefficient in cm^2/gr
References:
Chantler
"""
col = 'mu_total'
if photo:
col = 'mu_photo'
elif incoh:
col = 'mu_incoh'
return self._from_chantler(element, energy, column=col)
def _elem_data(self, element):
"return data from elements table: internal use"
etab = self.tables['elements']
row = self.query(etab)
if isinstance(element, int):
row = row.filter(etab.c.atomic_number == element).one()
else:
elem = element.title()
if not elem in self.atomic_symbols:
raise ValueError("unknown element '%s'" % repr(elem))
row = row.filter(etab.c.element == elem).one()
return ElementData(int(row.atomic_number),
row.element.title(),
row.molar_mass, row.density)
def atomic_number(self, element):
"""
return element's atomic number
Parameters:
element (string or int): atomic number or symbol
Returns:
integer: atomic number
"""
return self._elem_data(element).Z
def symbol(self, element):
"""
return element symbol
Parameters:
element (string or int): atomic number or symbol
Returns:
string: element symbol
"""
return self._elem_data(element).symbol
def molar_mass(self, element):
"""
return molar mass of element
Parameters:
element (string or int): atomic number or symbol
Returns:
float: molar mass of element in amu
"""
return self._elem_data(element).mass
def density(self, element):
"""
return density of pure element
Parameters:
element (string or int): atomic number or symbol
Returns:
float: density of element in gr/cm^3
"""
return self._elem_data(element).density
def xray_edges(self, element):
"""
returns dictionary of X-ray absorption edge energy (in eV),
fluorescence yield, and jump ratio for an element.
Parameters:
element (string or int): atomic number or symbol
Returns:
dictionary: keys of edge (iupac symbol), and values of
XrayEdge namedtuple of (energy, fyield, edge_jump))
References:
Elam, Ravel, and Sieber.
"""
elem = self.symbol(element)
ltab = self.tables['xray_levels']
out = {}
for r in self.query(ltab).filter(ltab.c.element == elem).all():
out[str(r.iupac_symbol)] = XrayEdge(r.absorption_edge,
r.fluorescence_yield,
r.jump_ratio)
return out
def xray_edge(self, element, edge):
"""
returns XrayEdge for an element and edge
Parameters:
element (string or int): atomic number or symbol
edge (string): X-ray edge
Returns:
XrayEdge: namedtuple of (energy, fyield, edge_jump))
Example:
>>> xdb = XrayDB()
>>> xdb.xray_edge('Co', 'K')
            XrayEdge(energy=7709.0, fyield=0.381903, jump_ratio=7.796)
References:
Elam, Ravel, and Sieber.
"""
return self.xray_edges(element).get(edge.title(), None)
def xray_lines(self, element, initial_level=None, excitation_energy=None):
"""
returns dictionary of X-ray emission lines of an element, with
Parameters:
initial_level (string or list/tuple of string): initial level(s) to
limit output.
            excitation_energy (float): energy of excitation; limits output to
                those lines excited by X-rays of this energy (in eV).
Returns:
dictionary: keys of lines (Siegbahn symbol), values of Xray Lines
Notes:
            if both excitation_energy and initial_level are given, excitation_energy
will limit output
Example:
>>> xdb = XrayDB()
>>> for key, val in xdb.xray_lines('Ga', 'K').items():
>>> print(key, val)
'Ka3', XrayLine(energy=9068.0, intensity=0.000326203,
initial_level=u'K', final_level=u'L1')
'Ka2', XrayLine(energy=9223.8, intensity=0.294438,
initial_level=u'K', final_level=u'L2')
'Ka1', XrayLine(energy=9250.6, intensity=0.57501,
initial_level=u'K', final_level=u'L3')
'Kb3', XrayLine(energy=10263.5, intensity=0.0441511,
initial_level=u'K', final_level=u'M2')
'Kb1', XrayLine(energy=10267.0, intensity=0.0852337,
initial_level=u'K', final_level=u'M3')
'Kb5', XrayLine(energy=10348.3, intensity=0.000841354,
initial_level=u'K', final_level=u'M4,5')
References:
Elam, Ravel, and Sieber.
"""
elem = self.symbol(element)
ttab = self.tables['xray_transitions']
row = self.query(ttab).filter(ttab.c.element == elem)
if excitation_energy is not None:
initial_level = []
for ilevel, dat in self.xray_edges(elem).items():
if dat[0] < excitation_energy:
initial_level.append(ilevel.title())
if initial_level is not None:
if isinstance(initial_level, (list, tuple)):
row = row.filter(ttab.c.initial_level.in_(initial_level))
else:
row = row.filter(ttab.c.initial_level == initial_level.title())
out = {}
for r in row.all():
out[str(r.siegbahn_symbol)] = XrayLine(r.emission_energy, r.intensity,
r.initial_level, r.final_level)
return out
def xray_line_strengths(self, element, excitation_energy=None):
"""
return the absolute line strength in cm^2/gr for all available lines
Parameters:
element (string or int): Atomic symbol or number for element
excitation_energy (float): incident energy, in eV
Returns:
dictionary: elemental line with fluorescence cross section in cm2/gr.
References:
Elam, Ravel, and Sieber.
"""
out = {}
lines = self.xray_lines(element, excitation_energy=excitation_energy)
for label, eline in lines.items():
edge = self.xray_edge(element, eline.initial_level)
if edge is None and ',' in eline.initial_level:
ilevel, _ = eline.initial_level.split(',')
edge = self.xray_edge(element, ilevel)
if edge is not None:
mu = self.mu_elam(element, [edge.energy*(0.999),
edge.energy*(1.001)], kind='photo')
out[label] = (mu[1]-mu[0]) * eline.intensity * edge.fyield
return out
def ck_probability(self, element, initial, final, total=True):
"""
return Coster-Kronig transition probability for an element and
initial/final levels
Parameters:
element (string or int): Atomic symbol or number for element
initial (string): initial level
final (string): final level
total (bool): whether to return total or partial probability
Returns:
float: transition probability
Example:
>>> xdb = XrayDB()
>>> xdb.ck_probability('Cu', 'L1', 'L3', total=True)
0.681
References:
Elam, Ravel, and Sieber.
"""
elem = self.symbol(element)
ctab = self.tables['Coster_Kronig']
row = self.query(ctab).filter(ctab.c.element == elem)
row = row.filter(ctab.c.initial_level == initial.title())
row = row.filter(ctab.c.final_level == final.title()).all()
out = 0.0
if len(row) > 0:
row = row[0]
out = row.transition_probability
if total:
out = row.total_transition_probability
return out
def corehole_width(self, element, edge=None, use_keski=False):
"""
returns core hole width for an element and edge
Parameters:
element (string, integer): atomic number or symbol for element
edge (string or None): edge for hole, return all if None
use_keski (bool) : force use of KeskiRahkonen and Krause table for all data.
Returns:
float: corehole width in eV.
Notes:
Uses Krause and Oliver where data is available (K, L lines Z > 10)
Uses Keski-Rahkonen and Krause otherwise
References:
Krause and Oliver, 1979
Keski-Rahkonen and Krause, 1974
"""
version_qy = self.tables['Version'].select().order_by('date')
version_id = version_qy.execute().fetchall()[-1].id
ctab = self.tables['corelevel_widths']
if version_id < 4 or use_keski:
ctab = self.tables['KeskiRahkonen_Krause']
rows = self.query(ctab).filter(ctab.c.element == self.symbol(element))
if edge is not None:
rows = rows.filter(ctab.c.edge == edge.title())
result = rows.all()
if len(result) == 1:
result = result[0].width
else:
result = [(r.edge, r.width) for r in result]
return result
def cross_section_elam(self, element, energies, kind='photo'):
"""
returns Elam Cross Section values for an element and energies
Parameters:
element (string or int): atomic number or symbol for element
energies (float or ndarray): energies (in eV) to calculate cross-sections
kind (string): one of 'photo', 'coh', and 'incoh' for photo-absorption,
coherent scattering, and incoherent scattering cross sections,
respectively. Default is 'photo'.
Returns:
ndarray of scattering data
References:
Elam, Ravel, and Sieber.
"""
elem = self.symbol(element)
kind = kind.lower()
if kind not in ('coh', 'incoh', 'photo'):
raise ValueError('unknown cross section kind=%s' % kind)
stab = self.tables['scattering']
if kind == 'photo':
            stab = self.tables['photoabsorption']
row = self.query(stab).filter(stab.c.element == elem).all()[0]
tab_lne = np.array(json.loads(row.log_energy))
if kind.startswith('coh'):
tab_val = np.array(json.loads(row.log_coherent_scatter))
tab_spl = np.array(json.loads(row.log_coherent_scatter_spline))
elif kind.startswith('incoh'):
tab_val = np.array(json.loads(row.log_incoherent_scatter))
tab_spl = np.array(json.loads(row.log_incoherent_scatter_spline))
else:
tab_val = np.array(json.loads(row.log_photoabsorption))
tab_spl = np.array(json.loads(row.log_photoabsorption_spline))
en = 1.0*as_ndarray(energies)
emin_tab = 10*int(0.102*np.exp(tab_lne[0]))
en[np.where(en < emin_tab)] = emin_tab
out = np.exp(elam_spline(tab_lne, tab_val, tab_spl, np.log(en)))
if len(out) == 1:
return out[0]
return out
def mu_elam(self, element, energies, kind='total'):
"""
returns attenuation cross section for an element at energies (in eV)
Parameters:
element (string or int): atomic number or symbol for element
energies (float or ndarray): energies (in eV) to calculate cross-sections
kind (string): one of 'photo' or 'total' for photo-electric or
total attenuation, respectively. Default is 'total'.
Returns:
ndarray of scattering values in units of cm^2/gr
References:
Elam, Ravel, and Sieber.
"""
calc = self.cross_section_elam
kind = kind.lower()
if kind.startswith('tot'):
xsec = calc(element, energies, kind='photo')
xsec += calc(element, energies, kind='coh')
xsec += calc(element, energies, kind='incoh')
elif kind.startswith('photo'):
xsec = calc(element, energies, kind='photo')
elif kind.lower().startswith('coh'):
xsec = calc(element, energies, kind='coh')
elif kind.lower().startswith('incoh'):
xsec = calc(element, energies, kind='incoh')
else:
raise ValueError('unknown cross section kind=%s' % kind)
return xsec
def coherent_cross_section_elam(self, element, energies):
"""returns coherenet scattering crossrxr section for an element
at energies (in eV)
returns values in units of cm^2 / gr
arguments
---------
element: atomic number, atomic symbol for element
energies: energies in eV to calculate cross-sections
Data from Elam, Ravel, and Sieber.
"""
return self.cross_section_elam(element, energies, kind='coh')
def incoherent_cross_section_elam(self, element, energies):
"""returns incoherenet scattering cross section for an element
at energies (in eV)
returns values in units of cm^2 / gr
arguments
---------
element: atomic number, atomic symbol for element
energies: energies in eV to calculate cross-sections
Data from Elam, Ravel, and Sieber.
"""
return self.cross_section_elam(element, energies, kind='incoh')
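# --- Illustrative usage sketch (not part of the library) ---
# The attenuation methods above return mu/rho in cm^2/gr, so the transmitted
# fraction of a beam through a foil follows from the Beer-Lambert law:
#     T = exp(-(mu/rho) * rho * t),  with rho in gr/cm^3 and t in cm.
# The element, energy and thickness below are arbitrary example values.
if __name__ == '__main__':
    xdb = XrayDB()
    energy = 10000.0                    # eV
    thickness_cm = 10.0e-4              # a 10 micron foil
    mu_rho = xdb.mu_elam('Fe', energy)  # total attenuation, cm^2/gr
    rho = xdb.density('Fe')             # gr/cm^3
    transmission = np.exp(-mu_rho * rho * thickness_cm)
    print("Fe, %.0f eV, 10 um foil: transmission = %.4f" % (energy, transmission))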
| [
"[email protected]"
] | |
1dcaa9207f2ccf6e23d755d436896b1aef624ac1 | a170461845f5b240daf2090810b4be706191f837 | /pyqt/DemoFullCode-PythonQt/chap12QtChart/Demo12_2ChartConfig/myDialogPen.py | 4cdbf17519ebcd8973fd4577bfea498efc83ca6b | [] | no_license | longhuarst/QTDemo | ec3873f85434c61cd2a8af7e568570d62c2e6da8 | 34f87f4b2337a140122b7c38937ab4fcf5f10575 | refs/heads/master | 2022-04-25T10:59:54.434587 | 2020-04-26T16:55:29 | 2020-04-26T16:55:29 | 259,048,398 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | # -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QApplication, QDialog,QColorDialog
from PyQt5.QtCore import pyqtSlot,Qt
##from PyQt5.QtWidgets import
from PyQt5.QtGui import QPen, QPalette,QColor
from ui_QWDialogPen import Ui_QWDialogPen
class QmyDialogPen(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.ui=Ui_QWDialogPen()
        self.ui.setupUi(self)  # build the UI
        self.__pen=QPen()
        ## set up the items of the "line style" ComboBox
self.ui.comboPenStyle.clear()
self.ui.comboPenStyle.addItem("NoPen",0)
self.ui.comboPenStyle.addItem("SolidLine",1)
self.ui.comboPenStyle.addItem("DashLine",2)
self.ui.comboPenStyle.addItem("DotLine",3)
self.ui.comboPenStyle.addItem("DashDotLine",4)
self.ui.comboPenStyle.addItem("DashDotDotLine",5)
self.ui.comboPenStyle.addItem("CustomDashLine",6)
self.ui.comboPenStyle.setCurrentIndex(1)
    ##================= custom interface functions ====================
    def setPen(self,pen): ## set the pen to edit
        self.__pen=pen
        self.ui.spinWidth.setValue(pen.width())  # line width
        i=int(pen.style())  # convert the enum value to an int
self.ui.comboPenStyle.setCurrentIndex(i)
color=pen.color() #QColor
## self.ui.btnColor.setAutoFillBackground(True)
qss="background-color: rgb(%d, %d, %d)"%(
color.red(),color.green(),color.blue())
        self.ui.btnColor.setStyleSheet(qss)  # set the button background color via style sheet
    def getPen(self): ## return the configured pen
        index=self.ui.comboPenStyle.currentIndex()
        self.__pen.setStyle(Qt.PenStyle(index))  # line style
        self.__pen.setWidth(self.ui.spinWidth.value())  # line width
        color=self.ui.btnColor.palette().color(QPalette.Button)
        self.__pen.setColor(color)  # color
return self.__pen
    @staticmethod ## static helper function
    def staticGetPen(iniPen):
        # takes no self parameter and must not share a name with an
        # instance method, so it cannot be called getPen()
        Dlg=QmyDialogPen()  # create a dialog
        Dlg.setPen(iniPen)  # set the initial QPen
pen=iniPen
ok=False
        ret=Dlg.exec()  # show the dialog modally
        if ret==QDialog.Accepted:
            pen=Dlg.getPen()  # get the pen
            ok=True
        return pen, ok  # return the configured QPen and the OK flag
## ========== slots auto-connected by connectSlotsByName() ============
    @pyqtSlot() ## choose a color
def on_btnColor_clicked(self):
color=QColorDialog.getColor()
        if color.isValid():  # set the QPushButton background color via style sheet
qss="background-color: rgb(%d, %d, %d);"%(
color.red(),color.green(),color.blue())
self.ui.btnColor.setStyleSheet(qss)
## ============ standalone test program ================================
if __name__ == "__main__":
app = QApplication(sys.argv)
iniPen=QPen(Qt.blue)
    pen,ok=QmyDialogPen.staticGetPen(iniPen)  # test calling the static helper
sys.exit(app.exec_())
| [
"[email protected]"
] | |
449a4e9073d7775f05349340826f0d6e53ce9997 | 19da1a56f137a08772c347cf974be54e9c23c053 | /lib/adafruit_motor/servo.py | 0c46abd369009f496e2dd3f194a68ec1901f43f5 | [] | no_license | mk53202/mk53202-timeclock-pyportal | d94f45a9d186190a4bc6130077baa6743a816ef3 | 230a858d429f8197c00cab3e67dcfd3b295ffbe0 | refs/heads/master | 2021-02-04T05:38:25.533292 | 2020-02-27T22:45:56 | 2020-02-27T22:45:56 | 243,626,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,061 | py | # The MIT License (MIT)
#
# Copyright (c) 2017 Scott Shawcroft for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_motor.servo`
====================================================
Servos are motor based actuators that incorporate a feedback loop into the design. These feedback
loops enable pulse width modulated control to determine position or rotational speed.
* Author(s): Scott Shawcroft
"""
__version__ = "2.0.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Motor.git"
# We disable the too few public methods check because this is a private base class for the two types
# of servos.
class _BaseServo: # pylint: disable-msg=too-few-public-methods
"""Shared base class that handles pulse output based on a value between 0 and 1.0
:param ~pulseio.PWMOut pwm_out: PWM output object.
:param int min_pulse: The minimum pulse length of the servo in microseconds.
:param int max_pulse: The maximum pulse length of the servo in microseconds."""
def __init__(self, pwm_out, *, min_pulse=750, max_pulse=2250):
self._pwm_out = pwm_out
self.set_pulse_width_range(min_pulse, max_pulse)
def set_pulse_width_range(self, min_pulse=750, max_pulse=2250):
"""Change min and max pulse widths."""
self._min_duty = int((min_pulse * self._pwm_out.frequency) / 1000000 * 0xffff)
max_duty = (max_pulse * self._pwm_out.frequency) / 1000000 * 0xffff
self._duty_range = int(max_duty - self._min_duty)
@property
def fraction(self):
"""Pulse width expressed as fraction between 0.0 (`min_pulse`) and 1.0 (`max_pulse`).
For conventional servos, corresponds to the servo position as a fraction
        of the actuation range. Is None when servo is disabled (pulsewidth of 0ms).
"""
if self._pwm_out.duty_cycle == 0: # Special case for disabled servos
return None
return (self._pwm_out.duty_cycle - self._min_duty) / self._duty_range
@fraction.setter
def fraction(self, value):
if value is None:
self._pwm_out.duty_cycle = 0 # disable the motor
return
if not 0.0 <= value <= 1.0:
raise ValueError("Must be 0.0 to 1.0")
duty_cycle = self._min_duty + int(value * self._duty_range)
self._pwm_out.duty_cycle = duty_cycle
class Servo(_BaseServo):
"""Control the position of a servo.
:param ~pulseio.PWMOut pwm_out: PWM output object.
:param int actuation_range: The physical range of motion of the servo in degrees, \
for the given ``min_pulse`` and ``max_pulse`` values.
:param int min_pulse: The minimum pulse width of the servo in microseconds.
:param int max_pulse: The maximum pulse width of the servo in microseconds.
``actuation_range`` is an exposed property and can be changed at any time:
.. code-block:: python
servo = Servo(pwm)
servo.actuation_range = 135
The specified pulse width range of a servo has historically been 1000-2000us,
for a 90 degree range of motion. But nearly all modern servos have a 170-180
degree range, and the pulse widths can go well out of the range to achieve this
extended motion. The default values here of ``750`` and ``2250`` typically give
135 degrees of motion. You can set ``actuation_range`` to correspond to the
actual range of motion you observe with your given ``min_pulse`` and ``max_pulse``
values.
.. warning:: You can extend the pulse width above and below these limits to
get a wider range of movement. But if you go too low or too high,
the servo mechanism may hit the end stops, buzz, and draw extra current as it stalls.
Test carefully to find the safe minimum and maximum.
"""
def __init__(self, pwm_out, *, actuation_range=180, min_pulse=750, max_pulse=2250):
super().__init__(pwm_out, min_pulse=min_pulse, max_pulse=max_pulse)
self.actuation_range = actuation_range
"""The physical range of motion of the servo in degrees."""
self._pwm = pwm_out
@property
def angle(self):
"""The servo angle in degrees. Must be in the range ``0`` to ``actuation_range``.
Is None when servo is disabled."""
if self.fraction is None: # special case for disabled servos
return None
return self.actuation_range * self.fraction
@angle.setter
def angle(self, new_angle):
if new_angle is None: # disable the servo by sending 0 signal
self.fraction = None
return
if new_angle < 0 or new_angle > self.actuation_range:
raise ValueError("Angle out of range")
self.fraction = new_angle / self.actuation_range
class ContinuousServo(_BaseServo):
"""Control a continuous rotation servo.
:param int min_pulse: The minimum pulse width of the servo in microseconds.
:param int max_pulse: The maximum pulse width of the servo in microseconds."""
@property
def throttle(self):
"""How much power is being delivered to the motor. Values range from ``-1.0`` (full
throttle reverse) to ``1.0`` (full throttle forwards.) ``0`` will stop the motor from
spinning."""
return self.fraction * 2 - 1
@throttle.setter
def throttle(self, value):
        if value is None:
            # reject None before the numeric comparison (None is not orderable)
            raise ValueError("Continuous servos cannot spin freely")
        if value > 1.0 or value < -1.0:
            raise ValueError("Throttle must be between -1.0 and 1.0")
self.fraction = (value + 1) / 2
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.throttle = 0
def deinit(self):
"""Stop using the servo."""
self.throttle = 0
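# --- Illustrative usage sketch (not part of the library) ---
# Typical CircuitPython usage is to create a 50 Hz PWMOut on a free pin and
# wrap it in Servo (or ContinuousServo). Kept as comments because it needs
# real hardware; the pin name below is board specific and only an example.
#
#   import time
#   import board
#   import pulseio
#   from adafruit_motor import servo
#
#   pwm = pulseio.PWMOut(board.A2, duty_cycle=0, frequency=50)
#   my_servo = servo.Servo(pwm)
#   for angle in (0, 90, 180):   # sweep through a few positions
#       my_servo.angle = angle
#       time.sleep(1)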
| [
"[email protected]"
] | |
f0202a4f34472c4c3be1f395aaae592e9ea9f454 | 7d9f92fba6af53bd385e0b4173134241c9998fff | /items/admin.py | 418f8f0f830fcebdebd5feddc8bd7ec707691ed5 | [] | no_license | ljarufe/intifil | 856f77c6ece7f444fd331a3eff3c35260201f78f | d478a8a1309d526a2508ca7b559e16de03aaa384 | refs/heads/master | 2021-01-02T09:09:13.613026 | 2013-10-21T17:00:03 | 2013-10-21T17:00:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | # -*- coding: utf-8 -*-
from django.contrib import admin
from modeltranslation.admin import TranslationAdmin
from common.admin import BasePermissionAdmin
from items.models import Category, Photo, HomePhoto, Item, Video, SubItem
class CategoryAdmin(BasePermissionAdmin, TranslationAdmin):
"""
Category model admin
"""
list_display = ("name", "slug",)
class PhotoInLine(admin.TabularInline):
"""
Photo inline model admin
"""
model = Photo
exclude = ("name",)
class VideoInLine(admin.TabularInline):
"""
Video inline model admin
"""
model = Video
exclude = ("name",)
class HomePhotoAdmin(admin.ModelAdmin):
"""
Home photo model admin
"""
list_display = ("get_item", "get_shape_display",)
class ItemAdmin(TranslationAdmin):
"""
Item model admin
"""
list_display = ("name", "category", "order",)
list_display_links = ("name", "category")
list_editable = ('order', )
list_filter = ("category",)
exclude = ('order',)
def save_model(self, request, obj, form, change):
"""
Guarda un nuevo item de la página de inicio con el orden por defecto
al final de la lista
"""
if not change:
if form.cleaned_data["home_photo"]:
obj.order = Item.get_default_order()
obj.save()
class SubItemAdmin(TranslationAdmin):
"""
Subitem model admin
"""
list_display = ("name", "item", "order",)
list_display_links = ("name", "item")
list_editable = ('order', )
list_filter = ("item",)
inlines = [PhotoInLine, VideoInLine,]
class PhotoVideoAdmin(TranslationAdmin):
"""
Photo and video model admin
"""
list_display = ("name", "subitem", "order",)
list_display_links = ("name", "subitem")
list_editable = ('order', )
list_filter = ("subitem",)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Photo, PhotoVideoAdmin)
admin.site.register(HomePhoto, HomePhotoAdmin)
admin.site.register(Item, ItemAdmin)
admin.site.register(SubItem, SubItemAdmin)
admin.site.register(Video, PhotoVideoAdmin)
| [
"[email protected]"
] | |
e07ad01c23c45836b064759f00be7e07f68f04e8 | f04a36fdaa415c6a47d3727e783b2dce11e3dd43 | /blog/views.py | 8ae814785c3273121fdfa345ef1043693a0d0a73 | [
"BSD-3-Clause"
] | permissive | hellprise/cook_blog | e9486452cc53a1300fce5ea9ea54dbe5c0408bf0 | d55734af1625256f940e55d267beb38d911bfda4 | refs/heads/main | 2023-06-25T21:43:20.284389 | 2021-07-28T14:36:45 | 2021-07-28T14:36:45 | 390,378,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | from django.shortcuts import render
from django.views.generic import ListView, DetailView
from blog.models import Post
class PostListView(ListView):
model = Post
def get_queryset(self):
return Post.objects.filter(category__slug=self.kwargs.get('slug')).select_related('category')
class PostDetailView(DetailView):
model = Post
context_object_name = 'post'
slug_url_kwarg = 'post_slug'
def home(request):
return render(request, 'base.html')
| [
"[email protected]"
] | |
89b5ffc396c40540df2fb3de8ea43fa4e9444552 | 0a74f7afa97a0d31219fdf68b85d0733ef04caf3 | /python/pyspark/errors/error_classes.py | e87d37c63e77b262c0d228f0bff9b092a034d35e | [
"CC0-1.0",
"MIT",
"Python-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"BSD-2-Clause",
"EPL-2.0",
"CDDL-1.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"EPL-1.0",
"Classpath-exception-2.0",
"GCC-exception-3.1",
"CC-BY-SA-3.0",
"LGPL-2.1-only",
"LicenseRef-scancode-unicode",
"CPL-1.0",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-only",
"CC-PDDC",
"NAIST-2003",
"LicenseRef-scancode-other-copyleft"
] | permissive | viirya/spark-1 | 37ad4643b3b31c2a3dfc3d5d55eb2aefa112d5d0 | 37aa62f629e652ed70505620473530cd9611018e | refs/heads/master | 2023-08-31T20:14:55.834184 | 2023-07-11T16:08:21 | 2023-07-11T16:08:21 | 21,467,907 | 1 | 3 | Apache-2.0 | 2023-07-12T01:21:50 | 2014-07-03T15:40:08 | Scala | UTF-8 | Python | false | false | 22,184 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
ERROR_CLASSES_JSON = """
{
"APPLICATION_NAME_NOT_SET" : {
"message" : [
"An application name must be set in your configuration."
]
},
"ARGUMENT_REQUIRED": {
"message": [
"Argument `<arg_name>` is required when <condition>."
]
},
"ATTRIBUTE_NOT_CALLABLE" : {
"message" : [
"Attribute `<attr_name>` in provided object `<obj_name>` is not callable."
]
},
"ATTRIBUTE_NOT_SUPPORTED" : {
"message" : [
"Attribute `<attr_name>` is not supported."
]
},
"AXIS_LENGTH_MISMATCH" : {
"message" : [
"Length mismatch: Expected axis has <expected_length> element, new values have <actual_length> elements."
]
},
"BROADCAST_VARIABLE_NOT_LOADED": {
"message": [
"Broadcast variable `<variable>` not loaded."
]
},
"CALL_BEFORE_INITIALIZE": {
"message": [
"Not supported to call `<func_name>` before initialize <object>."
]
},
"CANNOT_ACCEPT_OBJECT_IN_TYPE": {
"message": [
"`<data_type>` can not accept object `<obj_name>` in type `<obj_type>`."
]
},
"CANNOT_ACCESS_TO_DUNDER": {
"message": [
"Dunder(double underscore) attribute is for internal use only."
]
},
"CANNOT_APPLY_IN_FOR_COLUMN": {
"message": [
"Cannot apply 'in' operator against a column: please use 'contains' in a string column or 'array_contains' function for an array column."
]
},
"CANNOT_BE_EMPTY": {
"message": [
"At least one <item> must be specified."
]
},
"CANNOT_BE_NONE": {
"message": [
"Argument `<arg_name>` can not be None."
]
},
"CANNOT_CONVERT_COLUMN_INTO_BOOL": {
"message": [
"Cannot convert column into bool: please use '&' for 'and', '|' for 'or', '~' for 'not' when building DataFrame boolean expressions."
]
},
"CANNOT_CONVERT_TYPE": {
"message": [
"Cannot convert <from_type> into <to_type>."
]
},
"CANNOT_DETERMINE_TYPE": {
"message": [
"Some of types cannot be determined after inferring."
]
},
"CANNOT_GET_BATCH_ID": {
"message": [
"Could not get batch id from <obj_name>."
]
},
"CANNOT_INFER_ARRAY_TYPE": {
"message": [
"Can not infer Array Type from an list with None as the first element."
]
},
"CANNOT_INFER_EMPTY_SCHEMA": {
"message": [
"Can not infer schema from empty dataset."
]
},
"CANNOT_INFER_SCHEMA_FOR_TYPE": {
"message": [
"Can not infer schema for type: `<data_type>`."
]
},
"CANNOT_INFER_TYPE_FOR_FIELD": {
"message": [
"Unable to infer the type of the field `<field_name>`."
]
},
"CANNOT_MERGE_TYPE": {
"message": [
"Can not merge type `<data_type1>` and `<data_type2>`."
]
},
"CANNOT_OPEN_SOCKET": {
"message": [
"Can not open socket: <errors>."
]
},
"CANNOT_PARSE_DATATYPE": {
"message": [
"Unable to parse datatype. <msg>."
]
},
"CANNOT_PROVIDE_METADATA": {
"message": [
"metadata can only be provided for a single column."
]
},
"CANNOT_SET_TOGETHER": {
"message": [
"<arg_list> should not be set together."
]
},
"CANNOT_SPECIFY_RETURN_TYPE_FOR_UDF": {
"message": [
"returnType can not be specified when `<arg_name>` is a user-defined function, but got <return_type>."
]
},
"COLUMN_IN_LIST": {
"message": [
"`<func_name>` does not allow a Column in a list."
]
},
"CONTEXT_ONLY_VALID_ON_DRIVER" : {
"message" : [
"It appears that you are attempting to reference SparkContext from a broadcast variable, action, or transformation. SparkContext can only be used on the driver, not in code that it run on workers. For more information, see SPARK-5063."
]
},
"CONTEXT_UNAVAILABLE_FOR_REMOTE_CLIENT" : {
"message" : [
"Remote client cannot create a SparkContext. Create SparkSession instead."
]
},
"DIFFERENT_ROWS" : {
"message" : [
"<error_msg>"
]
},
"DIFFERENT_SCHEMA" : {
"message" : [
"Schemas do not match:",
"df schema: <df_schema>",
"expected schema: <expected_schema>"
]
},
"DISALLOWED_TYPE_FOR_CONTAINER" : {
"message" : [
"Argument `<arg_name>`(type: <arg_type>) should only contain a type in [<allowed_types>], got <return_type>"
]
},
"DUPLICATED_FIELD_NAME_IN_ARROW_STRUCT" : {
"message" : [
"Duplicated field names in Arrow Struct are not allowed, got <field_names>"
]
},
"EXCEED_RETRY" : {
"message" : [
"Retries exceeded but no exception caught."
]
},
"HIGHER_ORDER_FUNCTION_SHOULD_RETURN_COLUMN" : {
"message" : [
"Function `<func_name>` should return Column, got <return_type>."
]
},
"INCORRECT_CONF_FOR_PROFILE" : {
"message" : [
"`spark.python.profile` or `spark.python.profile.memory` configuration",
" must be set to `true` to enable Python profile."
]
},
"INVALID_BROADCAST_OPERATION": {
"message": [
"Broadcast can only be <operation> in driver."
]
},
"INVALID_CALL_ON_UNRESOLVED_OBJECT": {
"message": [
"Invalid call to `<func_name>` on unresolved object."
]
},
"INVALID_CONNECT_URL" : {
"message" : [
"Invalid URL for Spark Connect: <detail>"
]
},
"INVALID_ITEM_FOR_CONTAINER": {
"message": [
"All items in `<arg_name>` should be in <allowed_types>, got <item_type>."
]
},
"INVALID_NDARRAY_DIMENSION": {
"message": [
"NumPy array input should be of <dimensions> dimensions."
]
},
"INVALID_PANDAS_UDF" : {
"message" : [
"Invalid function: <detail>"
]
},
"INVALID_PANDAS_UDF_TYPE" : {
"message" : [
"`<arg_name>` should be one the values from PandasUDFType, got <arg_type>"
]
},
"INVALID_RETURN_TYPE_FOR_PANDAS_UDF": {
"message": [
"Pandas UDF should return StructType for <eval_type>, got <return_type>."
]
},
"INVALID_TIMEOUT_TIMESTAMP" : {
"message" : [
"Timeout timestamp (<timestamp>) cannot be earlier than the current watermark (<watermark>)."
]
},
"INVALID_TYPE" : {
"message" : [
"Argument `<arg_name>` should not be a <data_type>."
]
},
"INVALID_TYPENAME_CALL" : {
"message" : [
"StructField does not have typeName. Use typeName on its type explicitly instead."
]
},
"INVALID_UDF_EVAL_TYPE" : {
"message" : [
"Eval type for UDF must be <eval_type>."
]
},
"INVALID_WHEN_USAGE": {
"message": [
"when() can only be applied on a Column previously generated by when() function, and cannot be applied once otherwise() is applied."
]
},
"INVALID_WINDOW_BOUND_TYPE" : {
"message" : [
"Invalid window bound type: <window_bound_type>."
]
},
"JAVA_GATEWAY_EXITED" : {
"message" : [
"Java gateway process exited before sending its port number."
]
},
"JVM_ATTRIBUTE_NOT_SUPPORTED" : {
"message" : [
"Attribute `<attr_name>` is not supported in Spark Connect as it depends on the JVM. If you need to use this attribute, do not use Spark Connect when creating your session."
]
},
"KEY_VALUE_PAIR_REQUIRED" : {
"message" : [
"Key-value pair or a list of pairs is required."
]
},
"LENGTH_SHOULD_BE_THE_SAME" : {
"message" : [
"<arg1> and <arg2> should be of the same length, got <arg1_length> and <arg2_length>."
]
},
"MASTER_URL_NOT_SET" : {
"message" : [
"A master URL must be set in your configuration."
]
},
"MISSING_LIBRARY_FOR_PROFILER" : {
"message" : [
"Install the 'memory_profiler' library in the cluster to enable memory profiling."
]
},
"MISSING_VALID_PLAN" : {
"message" : [
"Argument to <operator> does not contain a valid plan."
]
},
"MIXED_TYPE_REPLACEMENT" : {
"message" : [
"Mixed type replacements are not supported."
]
},
"NEGATIVE_VALUE" : {
"message" : [
"Value for `<arg_name>` must be greater than or equal to 0, got '<arg_value>'."
]
},
"NOT_BOOL" : {
"message" : [
"Argument `<arg_name>` should be a bool, got <arg_type>."
]
},
"NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_LIST_OR_STR_OR_TUPLE" : {
"message" : [
"Argument `<arg_name>` should be a bool, dict, float, int, str or tuple, got <arg_type>."
]
},
"NOT_BOOL_OR_DICT_OR_FLOAT_OR_INT_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a bool, dict, float, int or str, got <arg_type>."
]
},
"NOT_BOOL_OR_FLOAT_OR_INT" : {
"message" : [
"Argument `<arg_name>` should be a bool, float or str, got <arg_type>."
]
},
"NOT_BOOL_OR_FLOAT_OR_INT_OR_LIST_OR_NONE_OR_STR_OR_TUPLE" : {
"message" : [
"Argument `<arg_name>` should be a bool, float, int, list, None, str or tuple, got <arg_type>."
]
},
"NOT_BOOL_OR_FLOAT_OR_INT_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a bool, float, int or str, got <arg_type>."
]
},
"NOT_BOOL_OR_LIST" : {
"message" : [
"Argument `<arg_name>` should be a bool or list, got <arg_type>."
]
},
"NOT_BOOL_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a bool or str, got <arg_type>."
]
},
"NOT_CALLABLE" : {
"message" : [
"Argument `<arg_name>` should be a callable, got <arg_type>."
]
},
"NOT_COLUMN" : {
"message" : [
"Argument `<arg_name>` should be a Column, got <arg_type>."
]
},
"NOT_COLUMN_OR_DATATYPE_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a Column, str or DataType, but got <arg_type>."
]
},
"NOT_COLUMN_OR_FLOAT_OR_INT_OR_LIST_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a column, float, integer, list or string, got <arg_type>."
]
},
"NOT_COLUMN_OR_INT" : {
"message" : [
"Argument `<arg_name>` should be a Column or int, got <arg_type>."
]
},
"NOT_COLUMN_OR_INT_OR_LIST_OR_STR_OR_TUPLE" : {
"message" : [
"Argument `<arg_name>` should be a Column, int, list, str or tuple, got <arg_type>."
]
},
"NOT_COLUMN_OR_INT_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a Column, int or str, got <arg_type>."
]
},
"NOT_COLUMN_OR_LIST_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a Column, list or str, got <arg_type>."
]
},
"NOT_COLUMN_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a Column or str, got <arg_type>."
]
},
"NOT_DATAFRAME" : {
"message" : [
"Argument `<arg_name>` should be a DataFrame, got <arg_type>."
]
},
"NOT_DATATYPE_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a DataType or str, got <arg_type>."
]
},
"NOT_DICT" : {
"message" : [
"Argument `<arg_name>` should be a dict, got <arg_type>."
]
},
"NOT_EXPRESSION" : {
"message" : [
"Argument `<arg_name>` should be a Expression, got <arg_type>."
]
},
"NOT_FLOAT_OR_INT" : {
"message" : [
"Argument `<arg_name>` should be a float or int, got <arg_type>."
]
},
"NOT_FLOAT_OR_INT_OR_LIST_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a float, int, list or str, got <arg_type>."
]
},
"NOT_IMPLEMENTED" : {
"message" : [
"<feature> is not implemented."
]
},
"NOT_INSTANCE_OF" : {
"message" : [
"<value> is not an instance of type <data_type>."
]
},
"NOT_INT" : {
"message" : [
"Argument `<arg_name>` should be an int, got <arg_type>."
]
},
"NOT_INT_OR_SLICE_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be an int, slice or str, got <arg_type>."
]
},
"NOT_IN_BARRIER_STAGE" : {
"message" : [
"It is not in a barrier stage."
]
},
"NOT_ITERABLE" : {
"message" : [
"<objectName> is not iterable."
]
},
"NOT_LIST" : {
"message" : [
"Argument `<arg_name>` should be a list, got <arg_type>."
]
},
"NOT_LIST_OF_COLUMN" : {
"message" : [
"Argument `<arg_name>` should be a list[Column]."
]
},
"NOT_LIST_OF_COLUMN_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a list[Column]."
]
},
"NOT_LIST_OF_FLOAT_OR_INT" : {
"message" : [
"Argument `<arg_name>` should be a list[float, int], got <arg_type>."
]
},
"NOT_LIST_OF_STR" : {
"message" : [
"Argument `<arg_name>` should be a list[str], got <arg_type>."
]
},
"NOT_LIST_OR_NONE_OR_STRUCT" : {
"message" : [
"Argument `<arg_name>` should be a list, None or StructType, got <arg_type>."
]
},
"NOT_LIST_OR_STR_OR_TUPLE" : {
"message" : [
"Argument `<arg_name>` should be a list, str or tuple, got <arg_type>."
]
},
"NOT_LIST_OR_TUPLE" : {
"message" : [
"Argument `<arg_name>` should be a list or tuple, got <arg_type>."
]
},
"NOT_NUMERIC_COLUMNS" : {
"message" : [
"Numeric aggregation function can only be applied on numeric columns, got <invalid_columns>."
]
},
"NOT_OBSERVATION_OR_STR" : {
"message" : [
"Argument `<arg_name>` should be a Observation or str, got <arg_type>."
]
},
"NOT_SAME_TYPE" : {
"message" : [
"Argument `<arg_name1>` and `<arg_name2>` should be the same type, got <arg_type1> and <arg_type2>."
]
},
"NOT_STR" : {
"message" : [
"Argument `<arg_name>` should be a str, got <arg_type>."
]
},
"NOT_STR_OR_LIST_OF_RDD" : {
"message" : [
"Argument `<arg_name>` should be a str or list[RDD], got <arg_type>."
]
},
"NOT_STR_OR_STRUCT" : {
"message" : [
"Argument `<arg_name>` should be a str or structType, got <arg_type>."
]
},
"NOT_WINDOWSPEC" : {
"message" : [
"Argument `<arg_name>` should be a WindowSpec, got <arg_type>."
]
},
"NO_ACTIVE_SESSION" : {
"message" : [
"No active Spark session found. Please create a new Spark session before running the code."
]
},
"ONLY_ALLOWED_FOR_SINGLE_COLUMN" : {
"message" : [
"Argument `<arg_name>` can only be provided for a single column."
]
},
"ONLY_ALLOW_SINGLE_TRIGGER" : {
"message" : [
"Only a single trigger is allowed."
]
},
"PIPE_FUNCTION_EXITED" : {
"message" : [
"Pipe function `<func_name>` exited with error code <error_code>."
]
},
"PYTHON_HASH_SEED_NOT_SET" : {
"message" : [
"Randomness of hash of string should be disabled via PYTHONHASHSEED."
]
},
"PYTHON_VERSION_MISMATCH" : {
"message" : [
"Python in worker has different version <worker_version> than that in driver <driver_version>, PySpark cannot run with different minor versions.",
"Please check environment variables PYSPARK_PYTHON and PYSPARK_DRIVER_PYTHON are correctly set."
]
},
"RDD_TRANSFORM_ONLY_VALID_ON_DRIVER" : {
"message" : [
"It appears that you are attempting to broadcast an RDD or reference an RDD from an ",
"action or transformation. RDD transformations and actions can only be invoked by the ",
"driver, not inside of other transformations; for example, ",
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values ",
"transformation and count action cannot be performed inside of the rdd1.map ",
"transformation. For more information, see SPARK-5063."
]
},
"RESULT_COLUMNS_MISMATCH_FOR_PANDAS_UDF" : {
"message" : [
"Column names of the returned pandas.DataFrame do not match specified schema.<missing><extra>"
]
},
"RESULT_LENGTH_MISMATCH_FOR_PANDAS_UDF" : {
"message" : [
"Number of columns of the returned pandas.DataFrame doesn't match specified schema. Expected: <expected> Actual: <actual>"
]
},
"RESULT_LENGTH_MISMATCH_FOR_SCALAR_ITER_PANDAS_UDF" : {
"message" : [
"The length of output in Scalar iterator pandas UDF should be the same with the input's; however, the length of output was <output_length> and the length of input was <input_length>."
]
},
"SCHEMA_MISMATCH_FOR_PANDAS_UDF" : {
"message" : [
"Result vector from pandas_udf was not the required length: expected <expected>, got <actual>."
]
},
"SESSION_ALREADY_EXIST" : {
"message" : [
"Cannot start a remote Spark session because there is a regular Spark session already running."
]
},
"SESSION_NOT_SAME" : {
"message" : [
"Both Datasets must belong to the same SparkSession."
]
},
"SESSION_OR_CONTEXT_EXISTS" : {
"message" : [
"There should not be an existing Spark Session or Spark Context."
]
},
"SHOULD_NOT_DATAFRAME": {
"message": [
"Argument `<arg_name>` should not be a DataFrame."
]
},
"SLICE_WITH_STEP" : {
"message" : [
"Slice with step is not supported."
]
},
"STATE_NOT_EXISTS" : {
"message" : [
"State is either not defined or has already been removed."
]
},
"STOP_ITERATION_OCCURRED" : {
"message" : [
"Caught StopIteration thrown from user's code; failing the task: <exc>"
]
},
"STOP_ITERATION_OCCURRED_FROM_SCALAR_ITER_PANDAS_UDF" : {
"message" : [
"pandas iterator UDF should exhaust the input iterator."
]
},
"TOO_MANY_VALUES" : {
"message" : [
"Expected <expected> values for `<item>`, got <actual>."
]
},
"UNEXPECTED_RESPONSE_FROM_SERVER" : {
"message" : [
"Unexpected response from iterator server."
]
},
"UNEXPECTED_TUPLE_WITH_STRUCT" : {
"message" : [
"Unexpected tuple <tuple> with StructType."
]
},
"UNKNOWN_EXPLAIN_MODE" : {
"message" : [
"Unknown explain mode: '<explain_mode>'. Accepted explain modes are 'simple', 'extended', 'codegen', 'cost', 'formatted'."
]
},
"UNKNOWN_INTERRUPT_TYPE" : {
"message" : [
"Unknown interrupt type: '<interrupt_type>'. Accepted interrupt types are 'all'."
]
},
"UNKNOWN_RESPONSE" : {
"message" : [
"Unknown response: <response>."
]
},
"UNSUPPORTED_DATA_TYPE" : {
"message" : [
"Unsupported DataType `<data_type>`."
]
},
"UNSUPPORTED_DATA_TYPE_FOR_ARROW" : {
"message" : [
"Single data type <data_type> is not supported with Arrow."
]
},
"UNSUPPORTED_DATA_TYPE_FOR_ARROW_CONVERSION" : {
"message" : [
"<data_type> is not supported in conversion to Arrow."
]
},
"UNSUPPORTED_DATA_TYPE_FOR_ARROW_VERSION" : {
"message" : [
"<data_type> is only supported with pyarrow 2.0.0 and above."
]
},
"UNSUPPORTED_DATA_TYPE_FOR_IGNORE_ROW_ORDER" : {
"message" : [
"Cannot ignore row order because undefined sorting for data type."
]
},
"UNSUPPORTED_JOIN_TYPE" : {
"message" : [
"Unsupported join type: <join_type>. Supported join types include: \\"inner\\", \\"outer\\", \\"full\\", \\"fullouter\\", \\"full_outer\\", \\"leftouter\\", \\"left\\", \\"left_outer\\", \\"rightouter\\", \\"right\\", \\"right_outer\\", \\"leftsemi\\", \\"left_semi\\", \\"semi\\", \\"leftanti\\", \\"left_anti\\", \\"anti\\", \\"cross\\"."
]
},
"UNSUPPORTED_LITERAL" : {
"message" : [
"Unsupported Literal '<literal>'."
]
},
"UNSUPPORTED_NUMPY_ARRAY_SCALAR" : {
"message" : [
"The type of array scalar '<dtype>' is not supported."
]
},
"UNSUPPORTED_OPERATION" : {
"message" : [
"<operation> is not supported."
]
},
"UNSUPPORTED_PARAM_TYPE_FOR_HIGHER_ORDER_FUNCTION" : {
"message" : [
"Function `<func_name>` should use only POSITIONAL or POSITIONAL OR KEYWORD arguments."
]
},
"UNSUPPORTED_SIGNATURE" : {
"message" : [
"Unsupported signature: <signature>."
]
},
"UNSUPPORTED_WITH_ARROW_OPTIMIZATION" : {
"message" : [
"<feature> is not supported with Arrow optimization enabled in Python UDFs. Disable 'spark.sql.execution.pythonUDF.arrow.enabled' to workaround.."
]
},
"VALUE_NOT_ACCESSIBLE": {
"message": [
"Value `<value>` cannot be accessed inside tasks."
]
},
"VALUE_NOT_ANY_OR_ALL" : {
"message" : [
"Value for `<arg_name>` must be 'any' or 'all', got '<arg_value>'."
]
},
"VALUE_NOT_BETWEEN" : {
"message" : [
"Value for `<arg_name>` must be between <min> and <max>."
]
},
"VALUE_NOT_NON_EMPTY_STR" : {
"message" : [
"Value for `<arg_name>` must be a non empty string, got '<arg_value>'."
]
},
"VALUE_NOT_PEARSON" : {
"message" : [
"Value for `<arg_name>` only supports the 'pearson', got '<arg_value>'."
]
},
"VALUE_NOT_POSITIVE" : {
"message" : [
"Value for `<arg_name>` must be positive, got '<arg_value>'."
]
},
"VALUE_NOT_TRUE" : {
"message" : [
"Value for `<arg_name>` must be True, got '<arg_value>'."
]
},
"VALUE_OUT_OF_BOUND" : {
"message" : [
"Value for `<arg_name>` must be greater than <lower_bound> or less than <upper_bound>, got <actual>"
]
},
"WRONG_NUM_ARGS_FOR_HIGHER_ORDER_FUNCTION" : {
"message" : [
"Function `<func_name>` should take between 1 and 3 arguments, but provided function takes <num_args>."
]
},
"WRONG_NUM_COLUMNS" : {
"message" : [
"Function `<func_name>` should take at least <num_cols> columns."
]
}
}
"""
ERROR_CLASSES_MAP = json.loads(ERROR_CLASSES_JSON)
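# Illustrative sketch (not part of the original module): each template above is a
# list of message fragments containing `<placeholder>` tokens. Assuming plain
# string substitution is sufficient, an error text could be rendered roughly as
# below; `render_error` is a hypothetical helper, not a PySpark API.
def render_error(error_class, **params):
    # Join the message fragments, then substitute each <key> placeholder.
    template = " ".join(ERROR_CLASSES_MAP[error_class]["message"])
    for key, value in params.items():
        template = template.replace("<%s>" % key, str(value))
    return template


# Example: render_error("NOT_STR", arg_name="path", arg_type="int")
# gives "Argument `path` should be a str, got int."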
| [
"[email protected]"
] | |
95a4b7f4ef92f184eefee95bceee085fc44064e8 | ecd2c20608e1f4a1646c87767762bd72db618d65 | /photo_blog/settings.py | a119b668239d31d500b4fa6a3be1f70c0a501c4a | [] | no_license | RianGirard/photo_blog | 129858ee32cbc2ff0521c8219b72b9d83c015726 | e461fa62abe027965b7143cce544d25634d5bf9c | refs/heads/master | 2023-06-20T14:36:38.040663 | 2021-07-21T01:02:13 | 2021-07-21T01:02:13 | 383,640,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,867 | py | """
Django settings for photo_blog project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from decouple import config
config.encoding = 'cp1251'
SECRET_KEY = config('SECRET_KEY')
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MEDIA_URL = '/media/' # for image upload
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # ditto
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'blog',
'profiles',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites', # required for django-allauth
'allauth', # ditto
'allauth.account', # ditto
'allauth.socialaccount', # ditto
# 'allauth.socialaccount.providers.github', # ditto
'sorl.thumbnail', # required for sorl.thumbnail
'crispy_forms',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'photo_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'photo_blog/templates')], # added this in: os.path.join(BASE_DIR, '[mysite]/templates')
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'photo_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = ( # added this
os.path.join(BASE_DIR, 'photo_blog/static'),
)
# following are parameters for django-allauth:
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SITE_ID = 1
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/'
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_LOGIN_ON_PASSWORD_RESET = True
ACCOUNT_LOGOUT_REDIRECT = '/'
ACCOUNT_PRESERVE_USERNAME_CASING = False
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = True
ACCOUNT_SIGNUP_REDIRECT_URL = '/'
ACCOUNT_USERNAME_MIN_LENGTH = 2
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend"
)
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # for PROD
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # for DEV | [
"[email protected]"
] | |
5e0d645e8d8db30e316d5aab006e9160adad1df9 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_impromptus.py | fee549bf75880518cd29b8bb36287ecde035b251 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
# class header
class _IMPROMPTUS():
    def __init__(self,):
        self.name = "IMPROMPTUS"
        self.definitions = ['impromptu']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['impromptu']
| [
"[email protected]"
] | |
4e5edd8fae1f0b8969b8d01ebb9cdc696f1cb1e4 | 0abc546a1442cae56ddcdc43f85497b37fc89036 | /scripts/graph_check_transitivity.py | b1793d5f4b9923cd0e824952d489b64036bc0a11 | [] | no_license | yangjl/cgat | 01a535531f381ace0afb9ed8dc3a0fcff6290446 | 01758b19aa1b0883f0e648f495b570f1b6159be4 | refs/heads/master | 2021-01-18T03:55:14.250603 | 2014-02-24T10:32:45 | 2014-02-24T10:32:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,395 | py | '''
graph_check_transitivity.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
Reads an edge list from stdin and checks whether the graph is transitive:
for every two edges A-B and B-C (treated as undirected) it tests whether
the edge A-C also exists.
Usage
-----
Example::
python graph_check_transitivity.py --help
Type::
python graph_check_transitivity.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import string
import re
import getopt
import time
import optparse
import math
import tempfile
""" program $Id: graph_check_transitivity.py 2782 2009-09-10 11:40:29Z andreas $
python graph_check_transitivity < graph.in
check whether all edges in a graph are transitive, i.e.,
for every two edges A->B and B->C check whether A->C exists.
Edges are taken to be undirected.
"""
import CGAT.Experiment as E
import CGAT.Histogram as Histogram
def main( argv = None ):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
    if argv is None: argv = sys.argv
parser = E.OptionParser( version = "%prog version: $Id: graph_check_transitivity.py 2782 2009-09-10 11:40:29Z andreas $")
parser.add_option("--filename-missing", dest="filename_missing", type="string",
help="missing entries.")
parser.add_option("--filename-found", dest="filename_found", type="string",
help="found entries.")
parser.add_option("--report-step1", dest="report_step1", type="int",
help="report interval for input.")
parser.add_option("--report-step2", dest="report_step2", type="int",
help="report interval for processing.")
parser.add_option("--use-subsets", dest="subsets", action="store_true",
help="do subset calculation. Third field contains a redundancy code.")
parser.set_defaults(
filename_missing = None,
filename_found = None,
report_step1 = 100000,
report_step2 = 10000,
subsets = False,
)
(options, args) = E.Start( parser )
# retrieve data
vals = {}
niterations = 0
ninput = 0
for line in sys.stdin:
if line[0] == "#": continue
niterations += 1
if options.loglevel >= 1 and (niterations % options.report_step1 == 0):
options.stdlog.write( "# input: %i\n" % (niterations))
options.stdlog.flush()
v1, v2, w = line[:-1].split("\t")[:3]
if v1 == v2: continue
if v1 not in vals: vals[v1] = []
if v2 not in vals: vals[v2] = []
if not options.subsets:
w = ninput
vals[v1].append( (v2, w) )
vals[v2].append( (v1, w) )
ninput += 1
## make everything unique
for key, v1 in vals.items():
vals[key] = tuple(set(v1))
keys = vals.keys()
keys.sort()
niterations = 0
nkeys = len(keys)
missing = []
ntotal = 0
nfound = 0
counted = {}
nremoved = 0
if options.filename_found:
outfile_found = open(options.filename_found, "w")
for v1 in keys:
niterations += 1
if options.loglevel >= 1 and (niterations % options.report_step2 == 0):
options.stdlog.write( "# loop: %i\n" % (niterations))
options.stdlog.flush()
for v2, c2 in vals[v1]:
## only to half-symmetric test
for v3, c3 in vals[v2]:
if (c2, c3) in counted:
nremoved += 1
# print "v1=", v1, "v2=", v2, "v3=", v3, "c2=", c2, "c3=", c3, "removed"
continue
## do not do self-comparisons
if v1 == v3: continue
if c2 == c3: continue
counted[(c2,c3)] = True
ntotal += 1
if v3 in map(lambda x: x[0], vals[v1]) or v1 in map(lambda x: x[0], vals[v3]):
nfound += 1
if options.filename_found:
outfile_found.write( "\t".join( (v1, v2, v3) ) + "\n" )
# print "v1=", v1, "v2=", v2, "v3=", v3, "c2=", c2, "c3=", c3, "found"
else:
missing.append( (v1, v2, v3) )
# print "v1=", v1, "v2=", v2, "v3=", v3, "c2=", c2, "c3=", c3, "missing"
nmissing = len(missing)
options.stdout.write( "number of egdes\t%i\n" % ninput)
options.stdout.write( "number of vertices\t%i\n" % nkeys)
options.stdout.write( "number of removed triplets\t%i\n" % nremoved)
options.stdout.write( "number of tested triplets\t%i\t%6.4f\n" % (ntotal, float(ntotal) / float(ntotal)))
options.stdout.write( "number of realized triplets\t%i\t%6.4f\n" % (nfound, float(nfound) / float(ntotal)))
options.stdout.write( "number of incomplete triplets\t%i\t%6.4f\n" % (nmissing, float(nmissing) / float(ntotal)))
if options.filename_missing:
outfile = open(options.filename_missing, "w")
for v1, v2, v3 in missing:
outfile.write( "\t".join( (v1, v2, v3) ) + "\n")
outfile.close()
if options.filename_found:
outfile_found.close()
E.Stop()
if __name__ == "__main__":
sys.exit( main( sys.argv) )
| [
"[email protected]"
] | |
b695dc1cd6ac27aeb81909e86ad63a50c0fac5c4 | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/lib/surface/compute/instance_groups/describe.py | 8a88e0e197d87deb862c3ee4c7fd71f847b772b4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 1,985 | py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing instance groups."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import instance_groups_utils
class Describe(base_classes.MultiScopeDescriber):
"""Describe an instance group."""
SCOPES = (base_classes.ScopeType.regional_scope,
base_classes.ScopeType.zonal_scope)
@property
def global_service(self):
return None
@property
def regional_service(self):
return self.compute.regionInstanceGroups
@property
def zonal_service(self):
return self.compute.instanceGroups
@property
def global_resource_type(self):
return None
@property
def regional_resource_type(self):
return 'regionInstanceGroups'
@property
def zonal_resource_type(self):
return 'instanceGroups'
@staticmethod
def Args(parser):
base_classes.MultiScopeDescriber.AddScopeArgs(
parser, 'instanceGroups', Describe.SCOPES)
def ComputeDynamicProperties(self, args, items):
return instance_groups_utils.ComputeInstanceGroupManagerMembership(
compute=self.compute,
project=self.project,
http=self.http,
batch_url=self.batch_url,
items=items,
filter_mode=instance_groups_utils.InstanceGroupFilteringMode.ALL_GROUPS)
Describe.detailed_help = base_classes.GetMultiScopeDescriberHelp(
'instance group', Describe.SCOPES)
| [
"[email protected]"
] | |
f691ee2989bae7c40b5a2f82be950381023c2c20 | ed51f4726d1eec4b7fec03a1ebaa32d983f1008d | /gardens/apps.py | ffc10b571d2b501f8f6b438e5e9dbaf6f81e5928 | [] | no_license | kaczuchg711/OrdiTree | ececbbb13fa48364441ebdde7f52980b2e1175fe | 2b87535b8a60b9aca83674e5975f39f3f832c58a | refs/heads/master | 2021-05-18T03:36:50.498916 | 2020-06-13T12:42:44 | 2020-06-13T12:42:44 | 251,085,715 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class GardensConfig(AppConfig):
name = 'gardens'
| [
"[email protected]"
] | |
e36d5216b192e842d632a87650507221796a33e3 | bcee50b3cbaf7a8000dffb7326cf467ae432b626 | /basic/15650/nm_2_dfs.py | cfe1b263b0584c44a10c3b12c47bba7fd97e0bce | [] | no_license | entrekid/algorithms | 53e5e563f6350b76047d8163ecd6e623dbe6e8d1 | 64377821718b3e44faf6a05be4d3ebf99b674489 | refs/heads/master | 2022-04-06T21:49:42.081981 | 2020-03-03T14:58:52 | 2020-03-03T14:58:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | n, m = map(int, input().split())
check_list = [False] * n
num_list = [elem + 1 for elem in range(n)]
result_list = []
# Print every ascending combination of m numbers chosen from 1..n, one per line.
def nm_dfs2(num):
    if num == m:
        print(*result_list)
        return
    for iter in range(n):
        if check_list[iter]:
continue
check_list[iter] = True
result_list.append(num_list[iter])
nm_dfs2(num + 1)
result_list.pop()
for j in range(iter + 1, n):
check_list[j] = False
nm_dfs2(0) | [
"[email protected]"
] | |
005e6a8d7f20ae9bcc7a387f6cf8b691bc2da6d2 | aaa3ab0c89f558a33ddcad9bcc5a687049dbc599 | /backend/src/websocket/socket.py | c7efe44db002bc33abccdeaebe9cf23e1008b529 | [] | no_license | vetordev/Hypersup | 5d059282971bf45f54f8be49071984371f98aabe | 961ac24209a3772fef5016ca851f82bc2fc40bd1 | refs/heads/master | 2021-02-16T18:40:06.197712 | 2020-03-18T22:20:13 | 2020-03-18T22:20:13 | 245,034,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from flask import request
class Socket:
def __init__(self, socket, app):
self.socket = socket
self.app = app
def run(self):
@self.socket.on('connect')
def connect():
            print('New Connection; Id: {id}'.format(id=request.sid))
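# Minimal wiring sketch (an assumption -- not shown in this file; it presumes that
# the `socket` argument is a Flask-SocketIO instance, which is what `request.sid`
# and the `.on('connect')` decorator suggest):
#
#   from flask import Flask
#   from flask_socketio import SocketIO
#
#   app = Flask(__name__)
#   socketio = SocketIO(app)
#   Socket(socketio, app).run()
#   socketio.run(app)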
| [
"[email protected]"
] | |
1d9c3616c035da8730928b2c6d124ebe273b931d | afd2087e80478010d9df66e78280f75e1ff17d45 | /torch/distributed/checkpoint/state_dict_saver.py | a99cd129aeb637da7d11cb88ad101de0a72d8c56 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | pytorch/pytorch | 7521ac50c47d18b916ae47a6592c4646c2cb69b5 | a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4 | refs/heads/main | 2023-08-03T05:05:02.822937 | 2023-08-03T00:40:33 | 2023-08-03T04:14:52 | 65,600,975 | 77,092 | 24,610 | NOASSERTION | 2023-09-14T21:58:39 | 2016-08-13T05:26:41 | Python | UTF-8 | Python | false | false | 4,458 | py | from typing import Optional
import torch
import torch.distributed as dist
from .planner import SavePlanner
from .default_planner import DefaultSavePlanner
from .storage import (
StorageWriter,
)
from .metadata import Metadata, STATE_DICT_TYPE
from .utils import _DistWrapper
__all__ = ["save_state_dict"]
def save_state_dict(
state_dict: STATE_DICT_TYPE,
storage_writer: StorageWriter,
process_group: Optional[dist.ProcessGroup] = None,
coordinator_rank: int = 0,
no_dist: bool = False,
planner: Optional[SavePlanner] = None,
) -> Metadata:
"""
Saves a distributed model in SPMD style.
This function is different from ``torch.save()`` as it handles
``ShardedTensor`` by having each rank only save their local shards.
.. warning::
        There are no guarantees of Backwards Compatibility across PyTorch versions
for saved state_dicts.
.. warning::
If using the `process_group` argument, make sure that only its ranks
call `save_state_dict` and that all data in state_dict belong to it.
.. note::
When saving checkpoint for FSDP's `ShardingStrategy.HYBRID_SHARD`, only one of
the shard_group should be calling `save_state_dict` and the corresponding process
group needs to be passed in.
.. note::
This function can be used to save a state_dict without having a process group
initialized by passing ``no_dist=True``.
Args:
state_dict (Dict[str, Any]): The state_dict to save.
storage_writer (StorageWriter):
Instance of StorageWrite use to perform writes.
process_group (ProcessGroup):
ProcessGroup to be used for cross-rank synchronization.
coordinator_rank (int): Rank to use to coordinate the checkpoint.
rank0 is used by default.
no_dist (bool): If ``True``, distributed checkpoint will not save
in SPMD style. (Default: ``False``)
Returns:
Metadata: Metadata object for the saved checkpoint.
Example:
>>> # xdoctest: +SKIP
>>> my_model = MyModule()
>>> model_state_dict = my_model.state_dict()
>>> fs_storage_writer = torch.distributed.checkpoint.FileSystemWriter("/checkpoint/1")
>>> torch.distributed.checkpoint.save_state_dict(
>>> state_dict=model_state_dict,
>>> storage_writer=fs_storage_writer,
>>> )
.. note::
save_state_dict uses collectives to coordinate writes across ranks.
For NCCL-based process groups, internal tensor representations of
objects must be moved to the GPU device before communication takes place.
In this case, the device used is given by ``torch.cuda.current_device()``
and it is the user's responsibility to ensure that this is set so that
each rank has an individual GPU, via ``torch.cuda.set_device()``.
"""
torch._C._log_api_usage_once("torch.distributed.checkpoint.save_state_dict")
distW = _DistWrapper(process_group, not no_dist, coordinator_rank)
if planner is None:
planner = DefaultSavePlanner()
assert planner is not None
global_metatadata = None
def local_step():
assert planner is not None
planner.set_up_planner(state_dict, distW.is_coordinator)
storage_writer.set_up_storage_writer(distW.is_coordinator)
local_plan = planner.create_local_plan()
local_plan = storage_writer.prepare_local_plan(local_plan)
return local_plan
def global_step(all_local_plans):
nonlocal global_metatadata
assert planner is not None
all_local_plans, global_metatadata = planner.create_global_plan(
all_local_plans
)
all_local_plans = storage_writer.prepare_global_plan(all_local_plans)
return all_local_plans
central_plan = distW.reduce_scatter("plan", local_step, global_step)
def write_data():
assert planner is not None
final_local_plan = planner.finish_plan(central_plan)
all_writes = storage_writer.write_data(final_local_plan, planner)
all_writes.wait()
return all_writes.value()
def finish_checkpoint(all_results):
assert global_metatadata is not None
storage_writer.finish(metadata=global_metatadata, results=all_results)
return global_metatadata
return distW.all_reduce("write", write_data, finish_checkpoint)
| [
"[email protected]"
] | |
82a0e0d28994984b8a494fad02e967299d94d678 | eb817a5a5fd66d00906d2ac2574e2ef749780877 | /defining_classes/demos_metaclasses.py | b2fd1f30b2205a42c5e9f106569b3de0e8110ce2 | [
"MIT"
] | permissive | Minkov/python-oop-2021-02 | 5afcc356f59196fdfcfd217b455b8621176f578b | bd387dde165f4338eed66c4bc0b4b516ee085340 | refs/heads/main | 2023-04-01T08:07:39.096457 | 2021-04-05T18:24:40 | 2021-04-05T18:24:40 | 341,306,261 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | class Singleton(type):
__instances = {}
def __new__(cls, *args, **kwargs):
return super().__new__(cls, *args, **kwargs)
def __call__(cls, *args, **kwargs):
if cls not in cls.__instances:
cls.__instances[cls] = super().__call__(*args, **kwargs)
return cls.__instances[cls]
class PersonFactory(metaclass=Singleton):
pass
p = PersonFactory()
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
print(PersonFactory() == PersonFactory())
| [
"[email protected]"
] | |
e1059ae6e9b86f602d1bc6205a6ed704ffdc4962 | 5845ee6d82d9f691e846360fa267b9cca6829d99 | /supervised_learning/0x0F-word_embeddings/0-bag_of_words.py | 637623c05195091bb4a31ba366e5d15fe022ab76 | [] | no_license | jlassi1/holbertonschool-machine_learning | 6e8c11ebaf2fd57e101bd0b20b7d83358cc15374 | d45e18bcbe1898a1585e4b7b61f3a7af9f00e787 | refs/heads/main | 2023-07-02T20:25:52.216926 | 2021-08-11T14:19:49 | 2021-08-11T14:19:49 | 317,224,593 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!/usr/bin/env python3
""" 0. Bag Of Words """
from sklearn.feature_extraction.text import CountVectorizer
def bag_of_words(sentences, vocab=None):
"""function that creates a bag of words embedding matrix"""
vectorizer = CountVectorizer(vocabulary=vocab)
X = vectorizer.fit_transform(sentences)
features = vectorizer.get_feature_names()
embeddings = X.toarray()
return embeddings, features
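# Usage sketch (illustrative; the sentences and the expected output are assumptions):
#   sentences = ["the cat sat", "the dog sat on the cat"]
#   embeddings, features = bag_of_words(sentences)
#   # features   -> ['cat', 'dog', 'on', 'sat', 'the']  (sorted vocabulary)
#   # embeddings -> array of shape (2, 5) holding per-sentence word counts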
| [
"[email protected]"
] | |
ee864bf4f45435d16fd37093d8533828dfc9fe61 | ad469d0ca144c485fc0cdcfb2ebfdd0bddf86271 | /src/models/base.py | 54694b4039a9f44b73fa58b3fa5fc83c93fa823d | [] | no_license | ngxbac/Kaggle-Google-Landmark-2019 | 3e8a29e83e835b29262df439b9af12ca27cee768 | 274864e2778acde9007c096607c113c268882343 | refs/heads/master | 2020-05-31T04:37:32.003023 | 2019-06-04T00:41:51 | 2019-06-04T00:41:51 | 190,102,248 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | import torch
import torch.nn as nn
import torchvision.models as models
class Net(nn.Module):
def __init__(self, num_classes=100, norm=True, scale=True):
super(Net,self).__init__()
self.extractor = Extractor()
self.embedding = Embedding()
self.classifier = Classifier(num_classes)
self.s = nn.Parameter(torch.FloatTensor([10]))
self.norm = norm
self.scale = scale
def forward(self, x):
x = self.extractor(x)
x = self.embedding(x)
if self.norm:
x = self.l2_norm(x)
if self.scale:
x = self.s * x
x = self.classifier(x)
return x
def extract(self, x):
x = self.extractor(x)
x = self.embedding(x)
x = self.l2_norm(x)
return x
def l2_norm(self,input):
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-10)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
return output
def weight_norm(self):
w = self.classifier.fc.weight.data
norm = w.norm(p=2, dim=1, keepdim=True)
self.classifier.fc.weight.data = w.div(norm.expand_as(w))
class Extractor(nn.Module):
def __init__(self):
super(Extractor,self).__init__()
basenet = models.resnet50(pretrained=True)
self.extractor = nn.Sequential(*list(basenet.children())[:-1])
for param in self.extractor.parameters():
param.requires_grad = False
def forward(self, x):
x = self.extractor(x)
x = x.view(x.size(0), -1)
return x
class Embedding(nn.Module):
def __init__(self):
super(Embedding,self).__init__()
self.fc = nn.Linear(2048, 2048)
def forward(self, x):
x = self.fc(x)
return x
class Classifier(nn.Module):
def __init__(self, num_classes):
super(Classifier,self).__init__()
self.fc = nn.Linear(2048, num_classes, bias=False)
def forward(self, x):
x = self.fc(x)
return x | [
"[email protected]"
] | |
e0496f50c98467811842743bdcac4c7f1dc14c9e | c424ffe3c31422e72810b4865f482d505d145e87 | /fliermailses/models.py | 7eaea73f99fb1b029fe3303c6f16d0ab41e0e949 | [
"BSD-2-Clause"
] | permissive | hdknr/fliermail-ses | d49724b7f1eb648a806e4301738db96a50e098ca | 91366535b1a0890b4766c09d70aee1ec5387f7f0 | refs/heads/master | 2020-06-19T04:57:02.261919 | 2018-03-15T05:18:16 | 2018-03-15T05:18:16 | 94,177,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from . import defs, methods, querysets
class Service(defs.Service, methods.Service):
class Meta:
verbose_name = _('SES Service')
verbose_name_plural = _('SES Service')
def __str__(self):
return self.name
class Source(defs.Source, methods.Source):
service = models.ForeignKey(
Service, verbose_name=_('Service'), help_text=_('Service Help'),
on_delete=models.SET_NULL,
null=True, blank=True, default=None, )
class Meta:
verbose_name = _('SES Source')
verbose_name_plural = _('SES Source')
def __str__(self):
return "ses:{0}".format(self.address)
class Topic(defs.Topic):
source = models.ForeignKey(
Source, null=True, blank=True, default=None,
on_delete=models.SET_NULL, )
class Meta:
verbose_name = _('SNS Topic')
verbose_name_plural = _('SNS Topic')
unique_together = (('source', 'topic', ), )
def __str__(self):
return u"{0} {1}".format(
self.source.__str__(),
self.get_topic_display())
class Notification(defs.Notification, methods.Notification):
topic = models.ForeignKey(
Topic, null=True, blank=True, default=None,
on_delete=models.SET_NULL, )
class Meta:
verbose_name = _('Notification')
verbose_name_plural = _('Notification')
objects = querysets.NotificationQuerySet.as_manager()
class Certificate(defs.Certificate, methods.Certificate):
service = models.ForeignKey(
Service, on_delete=models.CASCADE, )
class Meta:
verbose_name = _('SES Certificate')
verbose_name_plural = _('SES Certificate')
| [
"[email protected]"
] | |
df2ffa0accf83f4363cc11f2b219eb6f5a74b0c3 | dd834845a2ab346dafd04f3beb4ba0916b64dc51 | /test_case/task/test_200smart_sanity_clear_001.py | fc61417bcb137b08429c8f21631cfea146deaf4b | [] | no_license | Lewescaiyong/auto_test_framework | ae51726b705fbf125c30fce447c7c75510597047 | 2d3490393737b3e5f086cb6623369b988ffce67f | refs/heads/master | 2020-11-25T09:18:29.209261 | 2020-02-10T13:48:12 | 2020-02-10T13:48:12 | 228,590,729 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,443 | py | #!/usr/bin/env python
from lib.exceptions.check_exception import CheckException
from lib.base.script.integration_test.case_mw import CaseMW
class Test200SmartSanityClear001(CaseMW):
"""Clear OB
No.: test_200smart_sanity_clear_001
Preconditions:
    1. Open Micro/WIN;
2. Set up connection with PLC;
3. Download a project which has OB,DB,SDB;
Step actions:
1. Clear program block;
2. Compare;
Expected results:
1. Clear successful;
2. The OB is different;
Priority: H
Author: Cai, Yong
ChangeInfo: Cai, Yong 2019-09-20 create
"""
def prepare(self):
"""the preparation before executing the test steps
Args:
Example:
Return:
Author: Cai, Yong
IsInterface: False
ChangeInfo: Cai, Yong 2019-09-20 create
"""
super(Test200SmartSanityClear001, self).prepare()
self.logger.info('Preconditions:')
        self.logger.info('1. Open Micro/WIN;')
self.logger.info('2. Set up connection with PLC;')
self.logger.info('3. Download a project which has OB,DB,SDB;')
self.MicroWIN.test_prepare('ob_db_sdb_01.smart', False)
def process(self):
"""execute the test steps
Args:
Example:
Return:
Author: Cai, Yong
IsInterface: False
ChangeInfo: Cai, Yong 2019-09-20 create
"""
super(Test200SmartSanityClear001, self).process()
self.logger.info('Step actions:')
self.logger.info('1. Clear program block;')
result1 = self.PLC['1'].plc_clear('ob')
self.logger.info('2. Compare;')
result2 = self.MicroWIN.compare_with_plc()
self.logger.info('Expected results:')
self.logger.info('1. Clear successful;')
if result1['code'] != 0:
raise CheckException('1. Clear OB failed;')
self.logger.info('2. The OB is different;')
if not ((not result2['ob']) and result2['db'] and result2['sdb']):
self.logger.info('Compare result: %s' % result2)
raise CheckException('Compare failed;')
def cleanup(self):
"""clean up after performing the test steps
Args:
Example:
Return:
Author: Cai, Yong
IsInterface: False
ChangeInfo: Cai, Yong 2019-09-20 create
"""
super(Test200SmartSanityClear001, self).cleanup()
| [
"[email protected]"
] | |
a89d9222bee0ded8bd36c1c69d2dacb9bfb28e01 | 7a6a2076cffbbd47316818b37ddf22a932002065 | /python/702 - Search in a Sorted Array of Unknown Size/main.py | f23ffb8bc239c9335e262a01b41c66efce7866a5 | [] | no_license | or0986113303/LeetCodeLearn | 6bd0aa16c8c80581e1c85032aca0f7a055f5e234 | 96fdc45d15b4150cefe12361b236de6aae3bdc6a | refs/heads/develop | 2023-06-14T01:30:41.103572 | 2021-07-01T08:59:08 | 2021-07-01T08:59:08 | 291,066,699 | 0 | 0 | null | 2020-08-31T02:44:26 | 2020-08-28T14:25:53 | Python | UTF-8 | Python | false | false | 1,577 | py | # """
# This is ArrayReader's API interface.
# You should not implement it, or speculate about its implementation
# """
#class ArrayReader(object):
# def get(self, index):
# """
# :type index: int
# :rtype int
# """
class Solution(object):
def fibosearch(self, source, target):
fibo1 = 1
fibo2 = 0
fibosum = fibo1 + fibo2
offset = -1
capacity = 0
resulttmp = float('-inf')
while resulttmp < target:
fibo2 = fibo1
fibo1 = fibosum
fibosum = fibo1 + fibo2
resulttmp = source.get(fibosum)
capacity = fibosum + 1
print(capacity)
while fibosum > 1:
operatorindex = min(fibo2 + offset, capacity - 1)
if source.get(operatorindex) == target:
return operatorindex
elif source.get(operatorindex) > target:
fibosum = fibo1
fibo1 = fibo2
fibo2 = fibosum - fibo1
else :
fibo2 = fibo1
fibo1 = fibosum
fibosum = fibo1 + fibo2
offset = operatorindex
return -1
def search(self, reader, target):
"""
:type reader: ArrayReader
:type target: int
:rtype: int
"""
if reader is None:
return -1
elif reader.get(0) == target:
return 0
result = self.fibosearch(reader, target)
print(result)
return result
| [
"[email protected]"
] | |
81bf3c105d1a1393058d90b3633bcebdd5ae4fbf | 9b1446b26e81a79c303f9799fb6a91785c7adb03 | /.history/Code/histogram_20200120113537.py | 4f52929b9fac6bf129f57f7e695e94974d77475a | [] | no_license | SamirIngley/CS1.2-Tweet-Gen | 017ea15b1113881a156ff24682828bc654eb6c81 | bcd95fa63e05849cbf8e36230d8e31032b99daaa | refs/heads/master | 2020-12-14T20:19:57.733290 | 2020-08-04T23:19:23 | 2020-08-04T23:19:23 | 234,856,234 | 0 | 0 | null | 2020-06-05T21:13:04 | 2020-01-19T07:05:55 | Python | UTF-8 | Python | false | false | 696 | py |
def list_histo(source):
    ''' Take a text string, split it into words, and count how many times each
    word appears by comparing it against the rest of the words. A "used" list
    ensures each distinct word is counted only once. Return a list of
    [word, count] pairs.
    '''
histo = []
used = []
text = source.split()
print(text)
for word in text:
counter = 0
if word in used:
continue
used.append(word)
for word2 in text:
if word == word2:
counter += 1
instance = [word, counter]
histo.append(instance)
print(histo)
return histo
if __name__ == '__main__':
source = 'one fish two fish red fish blue fish'
list_histo(source)
| [
"[email protected]"
] | |
e872d8089a62b5d92696f6668390f4ab68945df9 | 6547d657706c041f2a87b0680936dd3d473ad328 | /httprunner/cli.py | f60004271687446d2bcfb3af3c86d5de03b91a41 | [
"Apache-2.0"
] | permissive | lixiaofeng1993/httprunner | 62c01f6b5adb8e3eded564947ac196938e3c88fb | 15c5d89605dc2d54fc624c3468be85eebcc8446e | refs/heads/master | 2020-07-26T09:18:35.310008 | 2019-10-21T16:03:50 | 2019-10-21T16:03:50 | 208,601,514 | 1 | 0 | Apache-2.0 | 2019-09-15T13:54:13 | 2019-09-15T13:54:13 | null | UTF-8 | Python | false | false | 6,813 | py | # encoding: utf-8
def main_hrun():
""" API test: parse command line options and run commands.
"""
import sys
import argparse
from httprunner.logger import color_print
from httprunner import __description__, __version__
from httprunner.api import HttpRunner
from httprunner.compat import is_py2
from httprunner.validator import validate_json_file
from httprunner.utils import (create_scaffold, get_python2_retire_msg,
prettify_json_file)
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument(
'-V', '--version', dest='version', action='store_true',
help="show version")
parser.add_argument(
'testcase_paths', nargs='*',
help="testcase file path")
parser.add_argument(
'--log-level', default='INFO',
help="Specify logging level, default is INFO.")
parser.add_argument(
'--log-file',
help="Write logs to specified file path.")
parser.add_argument(
'--dot-env-path',
help="Specify .env file path, which is useful for keeping sensitive data.")
parser.add_argument(
'--report-template',
help="specify report template path.")
parser.add_argument(
'--report-dir',
help="specify report save directory.")
parser.add_argument(
'--failfast', action='store_true', default=False,
help="Stop the test run on the first error or failure.")
parser.add_argument(
'--save-tests', action='store_true', default=False,
help="Save loaded tests and parsed tests to JSON file.")
parser.add_argument(
'--startproject',
help="Specify new project name.")
parser.add_argument(
'--validate', nargs='*',
help="Validate JSON testcase format.")
parser.add_argument(
'--prettify', nargs='*',
help="Prettify JSON testcase format.")
args = parser.parse_args()
if is_py2:
color_print(get_python2_retire_msg(), "YELLOW")
if args.version:
color_print("{}".format(__version__), "GREEN")
exit(0)
if args.validate:
validate_json_file(args.validate)
exit(0)
if args.prettify:
prettify_json_file(args.prettify)
exit(0)
project_name = args.startproject
if project_name:
create_scaffold(project_name)
exit(0)
runner = HttpRunner(
failfast=args.failfast,
save_tests=args.save_tests,
report_template=args.report_template,
report_dir=args.report_dir,
log_level=args.log_level,
log_file=args.log_file
)
try:
for path in args.testcase_paths:
runner.run(path, dot_env_path=args.dot_env_path)
except Exception:
color_print("!!!!!!!!!! exception stage: {} !!!!!!!!!!".format(runner.exception_stage), "YELLOW")
raise
if runner.summary and runner.summary["success"]:
sys.exit(0)
else:
sys.exit(1)
def main_locust():
""" Performance test with locust: parse command line options and run commands.
"""
try:
# monkey patch ssl at beginning to avoid RecursionError when running locust.
from gevent import monkey; monkey.patch_ssl()
import multiprocessing
import sys
from httprunner import logger
from httprunner import locusts
except ImportError:
msg = "Locust is not installed, install first and try again.\n"
msg += "install command: pip install locustio"
print(msg)
exit(1)
sys.argv[0] = 'locust'
if len(sys.argv) == 1:
sys.argv.extend(["-h"])
if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
locusts.start_locust_main()
sys.exit(0)
# set logging level
if "-L" in sys.argv:
loglevel_index = sys.argv.index('-L') + 1
elif "--loglevel" in sys.argv:
loglevel_index = sys.argv.index('--loglevel') + 1
else:
loglevel_index = None
if loglevel_index and loglevel_index < len(sys.argv):
loglevel = sys.argv[loglevel_index]
else:
# default
loglevel = "WARNING"
logger.setup_logger(loglevel)
# get testcase file path
try:
if "-f" in sys.argv:
testcase_index = sys.argv.index('-f') + 1
elif "--locustfile" in sys.argv:
testcase_index = sys.argv.index('--locustfile') + 1
else:
testcase_index = None
assert testcase_index and testcase_index < len(sys.argv)
except AssertionError:
print("Testcase file is not specified, exit.")
sys.exit(1)
testcase_file_path = sys.argv[testcase_index]
sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)
if "--processes" in sys.argv:
""" locusts -f locustfile.py --processes 4
"""
if "--no-web" in sys.argv:
logger.log_error("conflict parameter args: --processes & --no-web. \nexit.")
sys.exit(1)
processes_index = sys.argv.index('--processes')
processes_count_index = processes_index + 1
if processes_count_index >= len(sys.argv):
""" do not specify processes count explicitly
locusts -f locustfile.py --processes
"""
processes_count = multiprocessing.cpu_count()
logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
else:
try:
""" locusts -f locustfile.py --processes 4 """
processes_count = int(sys.argv[processes_count_index])
sys.argv.pop(processes_count_index)
except ValueError:
""" locusts -f locustfile.py --processes -P 8888 """
processes_count = multiprocessing.cpu_count()
logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
sys.argv.pop(processes_index)
locusts.run_locusts_with_processes(sys.argv, processes_count)
else:
locusts.start_locust_main()
if __name__ == "__main__":
""" debugging mode
"""
import sys
import os
    if len(sys.argv) <= 1:
exit(0)
sys.path.insert(0, os.getcwd())
cmd = sys.argv.pop(1)
if cmd in ["hrun", "httprunner", "ate"]:
main_hrun()
elif cmd in ["locust", "locusts"]:
main_locust()
else:
from httprunner.logger import color_print
color_print("Miss debugging type.", "RED")
example = "\n".join([
"e.g.",
"python -m httprunner.cli hrun /path/to/testcase_file",
"python -m httprunner.cli locusts -f /path/to/testcase_file"
])
color_print(example, "yellow")
| [
"[email protected]"
] | |
79f998c1ae08f5eac4dccac29ea00bf209c906d0 | 60044c76b631e622edb28f3a74971ce06211fac5 | /Python-for-Everybody/Python-Data-Structures/list.py | fa31bc357f500aa7cefac067eb8f807c1c0089d0 | [] | no_license | NestorMonroy/Courses-coursera | 8d45a858c79567d74f013ac27ac33d47e43abb96 | 98ac1aa5bb0cd9da5cea5be02995d5b65c779201 | refs/heads/master | 2023-08-14T13:36:07.348994 | 2021-09-22T06:13:57 | 2021-09-22T06:13:57 | 327,753,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | """
Lists are mutable
Strings are "immutable" - we cannot change the contents of a string -
we must make a new string to make any change
Lists are "mutable" - we can change an element of a list using the index
operator
"""
fruit = 'Banana'
# fruit[0] = 'b'   # TypeError - strings do not support item assignment
x = fruit.lower()
print(x)
lotto = [2, 15, 26, 41, 63 ]
print(lotto)
lotto[2]= 28
print(lotto)
# How long is a list
greet = 'Hello Boke'
print(len(greet))
x = [1, 4, 'joe', 99]
print(len(x))
# using the range function
"""
The range function returns a sequence of numbers that range from
zero to one less than the parameter value (a list in Python 2,
a range object in Python 3)
We can construct an index loop using for and an integer iterator
"""
print(range(4))
friends = ['joel', 'david', 'jon']
print(len(friends))
print(range(len(friends)))
# A tale of two loops
friends = ['joel', 'david', 'jon']
for friend in friends:
print('Happy new year: ', friend)
for i in range(len(friends)):
friend = friends[i]
print('Happy new year: ', friend)
print(len(friends))
print(range(len(friends))) | [
"[email protected]"
] |