Dataset schema (field name: type and range; nullable fields marked ⌀):
- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3 to 616
- content_id: string, length 40
- detected_licenses: sequence, length 0 to 112
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 777 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, ⌀
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀
- gha_language: string, 149 classes
- src_encoding: string, 26 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 3 to 10.2M
- extension: string, 188 classes
- content: string, length 3 to 10.2M
- authors: sequence, length 1
- author_id: string, length 1 to 132
25e7256a2278380559a276d1b3e444401d66c3f7 | 14bca3c05f5d8de455c16ec19ac7782653da97b2 | /lib/kubernetes/client/models/v1beta1_custom_resource_subresource_scale.py | ed95df565c96d745a54209552e5348bb1e35de08 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hovu96/splunk_as_a_service_app | 167f50012c8993879afbeb88a1f2ba962cdf12ea | 9da46cd4f45603c5c4f63ddce5b607fa25ca89de | refs/heads/master | 2020-06-19T08:35:21.103208 | 2020-06-16T19:07:00 | 2020-06-16T19:07:00 | 196,641,210 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,791 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1CustomResourceSubresourceScale(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'label_selector_path': 'str',
'spec_replicas_path': 'str',
'status_replicas_path': 'str'
}
attribute_map = {
'label_selector_path': 'labelSelectorPath',
'spec_replicas_path': 'specReplicasPath',
'status_replicas_path': 'statusReplicasPath'
}
def __init__(self, label_selector_path=None, spec_replicas_path=None, status_replicas_path=None):
"""
V1beta1CustomResourceSubresourceScale - a model defined in Swagger
"""
self._label_selector_path = None
self._spec_replicas_path = None
self._status_replicas_path = None
self.discriminator = None
if label_selector_path is not None:
self.label_selector_path = label_selector_path
self.spec_replicas_path = spec_replicas_path
self.status_replicas_path = status_replicas_path
@property
def label_selector_path(self):
"""
Gets the label_selector_path of this V1beta1CustomResourceSubresourceScale.
LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. Must be set to work with HPA. If there is no value under the given path in the CustomResource, the status label selector value in the /scale subresource will default to the empty string.
:return: The label_selector_path of this V1beta1CustomResourceSubresourceScale.
:rtype: str
"""
return self._label_selector_path
@label_selector_path.setter
def label_selector_path(self, label_selector_path):
"""
Sets the label_selector_path of this V1beta1CustomResourceSubresourceScale.
LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. Must be set to work with HPA. If there is no value under the given path in the CustomResource, the status label selector value in the /scale subresource will default to the empty string.
:param label_selector_path: The label_selector_path of this V1beta1CustomResourceSubresourceScale.
:type: str
"""
self._label_selector_path = label_selector_path
@property
def spec_replicas_path(self):
"""
Gets the spec_replicas_path of this V1beta1CustomResourceSubresourceScale.
SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .spec. If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET.
:return: The spec_replicas_path of this V1beta1CustomResourceSubresourceScale.
:rtype: str
"""
return self._spec_replicas_path
@spec_replicas_path.setter
def spec_replicas_path(self, spec_replicas_path):
"""
Sets the spec_replicas_path of this V1beta1CustomResourceSubresourceScale.
SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .spec. If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET.
:param spec_replicas_path: The spec_replicas_path of this V1beta1CustomResourceSubresourceScale.
:type: str
"""
if spec_replicas_path is None:
raise ValueError("Invalid value for `spec_replicas_path`, must not be `None`")
self._spec_replicas_path = spec_replicas_path
@property
def status_replicas_path(self):
"""
Gets the status_replicas_path of this V1beta1CustomResourceSubresourceScale.
StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource will default to 0.
:return: The status_replicas_path of this V1beta1CustomResourceSubresourceScale.
:rtype: str
"""
return self._status_replicas_path
@status_replicas_path.setter
def status_replicas_path(self, status_replicas_path):
"""
Sets the status_replicas_path of this V1beta1CustomResourceSubresourceScale.
StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource will default to 0.
:param status_replicas_path: The status_replicas_path of this V1beta1CustomResourceSubresourceScale.
:type: str
"""
if status_replicas_path is None:
raise ValueError("Invalid value for `status_replicas_path`, must not be `None`")
self._status_replicas_path = status_replicas_path
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1CustomResourceSubresourceScale):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
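# Usage sketch (an illustration, not part of the generated file; the JSON
# paths below are example values): build the model and serialize it.
if __name__ == "__main__":
    example = V1beta1CustomResourceSubresourceScale(
        label_selector_path=".status.labelSelector",
        spec_replicas_path=".spec.replicas",
        status_replicas_path=".status.replicas")
    print(example.to_str())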
| [
"[email protected]"
] | |
71335d29712b442c9ce5d95ca32fd48dad3c6e99 | 0567fcd808397a7024b5009cc290de1c414eff06 | /src/1470.shuffle-the-array.py | 7f9045ee0e7612652fc26cad85239700fb747fbe | [] | no_license | tientheshy/leetcode-solutions | d3897035a7fd453b9f47647e95f0f92a03bff4f3 | 218a8a97e3926788bb6320dda889bd379083570a | refs/heads/master | 2023-08-23T17:06:52.538337 | 2021-10-03T01:47:50 | 2021-10-03T01:47:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | #
# @lc app=leetcode id=1470 lang=python3
#
# [1470] Shuffle the Array
#
# @lc code=start
from typing import List
class Solution:
# 80 ms, 50.80%. Time: O(N). Space: O(N). Could be better using Bit manipulation to make it O(1) Space
# More here: https://leetcode.com/problems/shuffle-the-array/discuss/675956/In-Place-O(n)-Time-O(1)-Space-With-Explanation-and-Analysis
def shuffle(self, nums: List[int], n: int) -> List[int]:
ans = []
for i in range(n):
ans.append(nums[i])
ans.append(nums[i + n])
return ans
# @lc code=end
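# Sketch of the O(1)-extra-space idea referenced above (an assumption based
# on the linked discussion, not this submission): every value is at most
# 1000, i.e. below 2**10, so each slot can temporarily hold two packed
# numbers.
def shuffle_in_place(nums, n):
    for i in range(n):
        nums[i] |= nums[i + n] << 10      # pack y_i into the upper bits of x_i
    for i in range(n - 1, -1, -1):        # unpack from the back
        nums[2 * i + 1] = nums[i] >> 10   # y_i
        nums[2 * i] = nums[i] & 0x3FF     # x_i (lower 10 bits)
    return nums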
| [
"[email protected]"
] | |
9aa148058532f77347a3c08339fbfbf260700c69 | f1cb404ea95f4527ffeaf6a7fe8db9a2a1990f12 | /scikits/cuda/misc.py | a09c4889cf06370693f6287df2d88d83901be156 | [
"BSD-3-Clause"
] | permissive | sequoiar/scikits.cuda | c0821502b7dc90d818669e20f2fa1858f1a78e82 | 79b62904a726d45066351c38b4274b1ecc985e47 | refs/heads/master | 2020-12-25T07:39:47.594383 | 2011-03-30T14:39:20 | 2011-03-30T14:39:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,786 | py | #!/usr/bin/env python
"""
General PyCUDA utility functions.
"""
import string
from string import Template
import atexit
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule
import numpy as np
import cuda
import cublas
import cula
isdoubletype = lambda x : True if x == np.float64 or \
x == np.complex128 else False
isdoubletype.__doc__ = """
Check whether a type has double precision.
Parameters
----------
t : numpy float type
Type to test.
Returns
-------
result : bool
Result.
"""
iscomplextype = lambda x : True if x == np.complex64 or \
x == np.complex128 else False
iscomplextype.__doc__ = """
Check whether a type is complex.
Parameters
----------
t : numpy float type
Type to test.
Returns
-------
result : bool
Result.
"""
def init_device(n=0):
"""
Initialize a GPU device.
Initialize a specified GPU device rather than the default device
found by `pycuda.autoinit`.
Parameters
----------
n : int
Device number.
Returns
-------
dev : pycuda.driver.Device
Initialized device.
"""
drv.init()
dev = drv.Device(n)
return dev
def init_context(dev):
"""
Create a context that will be cleaned up properly.
Create a context on the specified device and register its pop()
method with atexit.
Parameters
----------
dev : pycuda.driver.Device
GPU device.
Returns
-------
ctx : pycuda.driver.Context
Created context.
"""
ctx = dev.make_context()
atexit.register(ctx.pop)
return ctx
def done_context(ctx):
"""
Detach from a context cleanly.
Detach from a context and remove its pop() from atexit.
Parameters
----------
ctx : pycuda.driver.Context
Context from which to detach.
"""
for i in xrange(len(atexit._exithandlers)):
if atexit._exithandlers[i][0] == ctx.pop:
del atexit._exithandlers[i]
break
ctx.detach()
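# Typical lifecycle of the two helpers above (a usage sketch; assumes a
# CUDA-capable device 0 is present):
#
#     dev = init_device(0)
#     ctx = init_context(dev)
#     # ... allocate GPU arrays, launch kernels ...
#     done_context(ctx)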
def init():
"""
Initialize libraries used by scikits.cuda.
Notes
-----
This function does not initialize PyCUDA; it uses whatever device
and context were initialized in the current host thread.
"""
# CUBLAS uses whatever device is being used by the host thread:
cublas.cublasInit()
# culaSelectDevice() need not (and, in fact, cannot) be called
# here because the host thread has already been bound to a GPU
# device:
cula.culaInitialize()
def get_compute_capability(dev):
"""
Get the compute capability of the specified device.
Retrieve the compute capability of the specified CUDA device and
return it as a floating point value.
Parameters
----------
d : pycuda.driver.Device
Device object to examine.
Returns
-------
c : float
Compute capability.
"""
return np.float(string.join([str(i) for i in
dev.compute_capability()], '.'))
def get_current_device():
"""
Get the device in use by the current context.
Returns
-------
d : pycuda.driver.Device
Device in use by current context.
"""
return drv.Device(cuda.cudaGetDevice())
def get_dev_attrs(dev):
"""
Get select CUDA device attributes.
Retrieve select attributes of the specified CUDA device that
relate to maximum thread block and grid sizes.
Parameters
----------
d : pycuda.driver.Device
Device object to examine.
Returns
-------
attrs : list
List containing [MAX_THREADS_PER_BLOCK,
(MAX_BLOCK_DIM_X, MAX_BLOCK_DIM_Y, MAX_BLOCK_DIM_Z),
(MAX_GRID_DIM_X, MAX_GRID_DIM_Y)]
"""
attrs = dev.get_attributes()
return [attrs[drv.device_attribute.MAX_THREADS_PER_BLOCK],
(attrs[drv.device_attribute.MAX_BLOCK_DIM_X],
attrs[drv.device_attribute.MAX_BLOCK_DIM_Y],
attrs[drv.device_attribute.MAX_BLOCK_DIM_Z]),
(attrs[drv.device_attribute.MAX_GRID_DIM_X],
attrs[drv.device_attribute.MAX_GRID_DIM_Y])]
def select_block_grid_sizes(dev, data_shape, threads_per_block=None):
"""
Determine CUDA block and grid dimensions given device constraints.
Determine the CUDA block and grid dimensions allowed by a GPU
device that are sufficient for processing every element of an
array in a separate thread.
Parameters
----------
d : pycuda.driver.Device
Device object to be used.
data_shape : tuple
Shape of input data array. Must be of length 2.
threads_per_block : int, optional
Number of threads to execute in each block. If this is None,
the maximum number of threads per block allowed by device `d`
is used.
Returns
-------
block_dim : tuple
X, Y, and Z dimensions of minimal required thread block.
grid_dim : tuple
X and Y dimensions of minimal required block grid.
Notes
-----
Using the scheme in this function, all of the threads in the grid can be enumerated
as `i = blockIdx.y*max_threads_per_block*max_blocks_per_grid+
blockIdx.x*max_threads_per_block+threadIdx.x`.
For 2D shapes, the subscripts of the element `data[a, b]` where `data.shape == (A, B)`
can be computed as
`a = i/B`
`b = mod(i,B)`.
For 3D shapes, the subscripts of the element `data[a, b, c]` where
`data.shape == (A, B, C)` can be computed as
`a = i/(B*C)`
`b = mod(i, B*C)/C`
`c = mod(mod(i, B*C), C)`.
For 4D shapes, the subscripts of the element `data[a, b, c, d]`
where `data.shape == (A, B, C, D)` can be computed as
`a = i/(B*C*D)`
`b = mod(i, B*C*D)/(C*D)`
`c = mod(mod(i, B*C*D)%(C*D))/D`
`d = mod(mod(mod(i, B*C*D)%(C*D)), D)`
It is advisable that the number of threads per block be a multiple
of the warp size to fully utilize a device's computing resources.
"""
# Sanity checks:
if np.isscalar(data_shape):
data_shape = (data_shape,)
# Number of elements to process; we need to cast the result of
# np.prod to a Python int to prevent PyCUDA's kernel execution
    # framework from getting confused when it is passed a numpy integer:
N = int(np.prod(data_shape))
# Get device constraints:
max_threads_per_block, max_block_dim, max_grid_dim = get_dev_attrs(dev)
    if threads_per_block is not None:
max_threads_per_block = threads_per_block
# Assume that the maximum number of threads per block is no larger
# than the maximum X and Y dimension of a thread block:
assert max_threads_per_block <= max_block_dim[0]
assert max_threads_per_block <= max_block_dim[1]
# Assume that the maximum X and Y dimensions of a grid are the
# same:
max_blocks_per_grid_dim = max(max_grid_dim)
assert max_blocks_per_grid_dim == max_grid_dim[0]
assert max_blocks_per_grid_dim == max_grid_dim[1]
# Actual number of thread blocks needed:
blocks_needed = N/max_threads_per_block+1
if blocks_needed*max_threads_per_block < max_threads_per_block*max_blocks_per_grid_dim:
grid_x = blocks_needed
grid_y = 1
elif blocks_needed*max_threads_per_block < max_threads_per_block*max_blocks_per_grid_dim**2:
grid_x = max_blocks_per_grid_dim
grid_y = blocks_needed/max_blocks_per_grid_dim+1
else:
raise ValueError('array size too large')
return (max_threads_per_block, 1, 1), (grid_x, grid_y)
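# Worked example of the indexing scheme in the Notes above (illustrative
# numbers): with data_shape == (A, B) == (100, 256), the flat thread index
# i = 7000 maps to a = i/B = 27 and b = mod(i, B) = 88, i.e. that thread
# processes data[27, 88].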
def ones(shape, dtype, allocator=drv.mem_alloc):
"""
Return an array of the given shape and dtype filled with ones.
Parameters
----------
shape : tuple
Array shape.
dtype : data-type
Data type for the array.
allocator : callable
Returns an object that represents the memory allocated for
the requested array.
Returns
-------
out : pycuda.gpuarray.GPUArray
Array of ones with the given shape and dtype.
"""
out = gpuarray.GPUArray(shape, dtype, allocator)
out.fill(1)
return out
def ones_like(other):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
other : pycuda.gpuarray.GPUArray
Array whose shape and dtype are to be used to allocate a new array.
Returns
-------
out : pycuda.gpuarray.GPUArray
Array of ones with the shape and dtype of `other`.
"""
out = gpuarray.GPUArray(other.shape, other.dtype,
other.allocator)
out.fill(1)
return out
def inf(shape, dtype, allocator=drv.mem_alloc):
"""
Return an array of the given shape and dtype filled with infs.
Parameters
----------
shape : tuple
Array shape.
dtype : data-type
Data type for the array.
allocator : callable
Returns an object that represents the memory allocated for
the requested array.
Returns
-------
out : pycuda.gpuarray.GPUArray
Array of infs with the given shape and dtype.
"""
out = gpuarray.GPUArray(shape, dtype, allocator)
out.fill(np.inf)
return out
maxabs_mod_template = Template("""
#include <pycuda/pycuda-complex.hpp>
#if ${use_double}
#define REAL_TYPE double
#if ${use_complex}
#define TYPE pycuda::complex<double>
#else
#define TYPE double
#endif
#else
#define REAL_TYPE float
#if ${use_complex}
#define TYPE pycuda::complex<float>
#else
#define TYPE float
#endif
#endif
// This kernel is only meant to be run in one thread;
// N must contain the length of x:
__global__ void maxabs(TYPE *x, REAL_TYPE *m, unsigned int N) {
unsigned int idx = threadIdx.x;
REAL_TYPE result, temp;
if (idx == 0) {
result = abs(x[0]);
for (unsigned int i = 1; i < N; i++) {
temp = abs(x[i]);
if (temp > result)
result = temp;
}
m[0] = result;
}
}
""")
def maxabs(x_gpu):
"""
Get maximum absolute value.
Find maximum absolute value in the specified array.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input array.
Returns
-------
m_gpu : pycuda.gpuarray.GPUArray
Length 1 array containing the maximum absolute value in
`x_gpu`.
Notes
-----
This function could be made faster by computing the absolute
values of the input array in parallel.
Examples
--------
>>> import pycuda.autoinit
>>> import pycuda.gpuarray as gpuarray
>>> import misc
>>> x_gpu = gpuarray.to_gpu(np.array([-1, 2, -3], np.float32))
>>> m_gpu = misc.maxabs(x_gpu)
>>> np.allclose(m_gpu.get(), 3.0)
True
"""
use_double = int(x_gpu.dtype in [np.float64, np.complex128])
use_complex = int(x_gpu.dtype in [np.complex64, np.complex128])
real_type = np.float64 if use_double else np.float32
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir = None
maxabs_mod = \
SourceModule(maxabs_mod_template.substitute(use_double=use_double,
use_complex=use_complex),
cache_dir=cache_dir)
maxabs = maxabs_mod.get_function("maxabs")
m_gpu = gpuarray.empty(1, real_type)
maxabs(x_gpu, m_gpu, np.uint32(x_gpu.size),
block=(1, 1, 1), grid=(1, 1))
return m_gpu
cumsum_template = Template("""
#include <pycuda/pycuda-complex.hpp>
#if ${use_double}
#define REAL_TYPE double
#if ${use_complex}
#define TYPE pycuda::complex<double>
#else
#define TYPE double
#endif
#else
#define REAL_TYPE float
#if ${use_complex}
#define TYPE pycuda::complex<float>
#else
#define TYPE float
#endif
#endif
// This kernel should only be invoked on a single thread:
__global__ void cumsum(TYPE *x, TYPE *c, unsigned int N) {
unsigned int idx = threadIdx.x;
if (idx == 0) {
TYPE sum_curr = 0;
for (unsigned i = 0; i < N; i++) {
sum_curr += x[i];
c[i] = sum_curr;
}
}
}
""")
def cumsum(x_gpu):
"""
Cumulative sum.
Return the cumulative sum of the elements in the specified array.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input array.
Returns
-------
c_gpu : pycuda.gpuarray.GPUArray
Output array containing cumulative sum of `x_gpu`.
Notes
-----
This function could be made faster by using a parallel prefix sum.
Examples
--------
>>> import pycuda.autoinit
>>> import pycuda.gpuarray as gpuarray
>>> import misc
>>> x_gpu = gpuarray.to_gpu(np.random.rand(5).astype(np.float32))
>>> c_gpu = misc.cumsum(x_gpu)
>>> np.allclose(c_gpu.get(), np.cumsum(x_gpu.get()))
True
"""
use_double = int(x_gpu.dtype in [np.float64, np.complex128])
use_complex = int(x_gpu.dtype in [np.complex64, np.complex128])
cumsum_mod = \
SourceModule(cumsum_template.substitute(use_double=use_double,
use_complex=use_complex))
cumsum = cumsum_mod.get_function("cumsum")
c_gpu = gpuarray.empty_like(x_gpu)
cumsum(x_gpu, c_gpu, np.uint32(x_gpu.size),
block=(1, 1, 1), grid=(1, 1))
return c_gpu
diff_mod_template = Template("""
#include <pycuda/pycuda-complex.hpp>
#if ${use_double}
#define REAL_TYPE double
#if ${use_complex}
#define TYPE pycuda::complex<double>
#else
#define TYPE double
#endif
#else
#define REAL_TYPE float
#if ${use_complex}
#define TYPE pycuda::complex<float>
#else
#define TYPE float
#endif
#endif
__global__ void diff(TYPE *x, TYPE *y, unsigned int N) {
unsigned int idx = blockIdx.y*blockDim.x*gridDim.x+
blockIdx.x*blockDim.x+threadIdx.x;
if (idx < N-1) {
y[idx] = x[idx+1]-x[idx];
}
}
""")
def diff(x_gpu):
"""
Calculate the discrete difference.
Calculates the first order difference between the successive
entries of a vector.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input vector.
Returns
-------
y_gpu : pycuda.gpuarray.GPUArray
Discrete difference.
Examples
--------
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import misc
>>> x = np.asarray(np.random.rand(5), np.float32)
>>> x_gpu = gpuarray.to_gpu(x)
>>> y_gpu = misc.diff(x_gpu)
>>> np.allclose(np.diff(x), y_gpu.get())
True
"""
if len(x_gpu.shape) > 1:
raise ValueError('input must be 1D vector')
use_double = int(x_gpu.dtype in [np.float64, np.complex128])
use_complex = int(x_gpu.dtype in [np.complex64, np.complex128])
# Get block/grid sizes:
dev = get_current_device()
block_dim, grid_dim = select_block_grid_sizes(dev, x_gpu.shape)
# Set this to False when debugging to make sure the compiled kernel is
# not cached:
cache_dir=None
diff_mod = \
SourceModule(diff_mod_template.substitute(use_double=use_double,
use_complex=use_complex),
cache_dir=cache_dir)
diff = diff_mod.get_function("diff")
N = x_gpu.size
y_gpu = gpuarray.empty((N-1,), x_gpu.dtype)
diff(x_gpu, y_gpu, np.uint32(N),
block=block_dim,
grid=grid_dim)
return y_gpu
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"[email protected]"
] | |
d0a9e939c12148512e62d5cb50faeaf448f9dc4a | 489d0c9b861e22dbb781c87c6e509bd2d04e783d | /codes/Feature Extraction/FeatureExtraction(3 features).py | d53977ab65b673c802b375f3f72da2e1d21612b3 | [] | no_license | sameesayeed007/Prediction-of-Epileptic-Seizures-using-SVM-and-DSP | 24af3755b4c2bd4646d05df52f4f306ed2902ab0 | cdd972551f9cea1a90cc957f33ac656a09e48c9f | refs/heads/master | 2022-06-19T00:10:38.822712 | 2020-05-10T16:59:23 | 2020-05-10T16:59:23 | 262,826,669 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,733 | py | import numpy as np
import scipy as sp
import scipy.fftpack
import pandas as pd
from scipy.fftpack import fft, fftfreq, fftshift
import statistics
import scipy.fftpack
thesis_final_files = ['chb01_21Final.csv','chb01_26Final.csv']
iterator = 0
while iterator < len(thesis_final_files):
file_name = thesis_final_files[iterator]
data = pd.read_csv(file_name)
a=[]
d=0
for i in range(23):
Channel = data.iloc[:,i]
num_of_iterations = int((len(Channel)-1)/2560)
# making a list of lists with 10 seconds data
#a=[]
p=0
q= 2560
b=[]
for i in range(num_of_iterations):
c=[]
for j in range(2560):
c.append(Channel[p])
p+=1
b.append(c)
a.append(b)
print(d)
d=d+1
print('**1**')
def angle(a):
#no of points
n=2560
#Time period is 10s
Lx=10
x=np.linspace(0,Lx,n)
#Creating all the necessary frequencies
freqs=fftfreq(n)
#mask array to be used for power spectra
#ignoring half the values, as they are complex conjugates of the other
mask=freqs>0
#FFT values
fft_values=fft(a)
#true theoretical fft values
fft_theo = 2.0*np.abs(fft_values/n)
#FFT shift
fftshift_values = fftshift(fft_values)
#Calculating the angle
out_angle = np.angle(fftshift_values, deg = True)
#print ("output angle in degrees : ", out_angle)
out_angle2=statistics.mean(abs(out_angle))
#print("Mean angle: ")
return out_angle2
#Calculates the energy
def energy(a):
#no of points
n=2560
#Time period is 10s
Lx=10
x=np.linspace(0,Lx,n)
#Creating all the necessary frequencies
freqs=fftfreq(n)
#mask array to be used for power spectra
#ignoring half the values, as they are complex conjugates of the other
mask=freqs>0
#FFT values
fft_values=fft(a)
#true theoretical fft values
fft_theo = 2.0*np.abs(fft_values/n)
#FFT shift
fftshift_values = fftshift(fft_values)
ps = 2.0*(np.abs(fft_values/n)**2)
#Calculating the mean of power spectrum-energy
ps_mean = statistics.mean(ps)
return ps_mean
    #Calculates the amplitude
def amplitude(a):
#no of points
n=2560
#Time period is 10s
Lx=10
x=np.linspace(0,Lx,n)
#Creating all the necessary frequencies
freqs=fftfreq(n)
#mask array to be used for power spectra
#ignoring half the values, as they are complex conjugates of the other
mask=freqs>0
#FFT values
fft_values=fft(a)
#true theoretical fft values
fft_theo = 2.0*np.abs(fft_values/n)
#FFT shift
fftshift_values = fftshift(fft_values)
amplitudes = 2 / n * np.abs(fft_values)
amplitudes_mean = statistics.mean(amplitudes)
return amplitudes_mean
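    #The three functions above repeat the same FFT boilerplate; a single-pass
    #alternative (a sketch, not used by the loops below) could compute all
    #three features at once:
    def all_features(segment, n=2560):
        fft_values = fft(segment)
        shifted = fftshift(fft_values)
        mean_angle = statistics.mean(abs(np.angle(shifted, deg=True)))
        mean_energy = statistics.mean(2.0*(np.abs(fft_values/n)**2))
        mean_amplitude = statistics.mean(2/n*np.abs(fft_values))
        return [mean_angle, mean_energy, mean_amplitude]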
    Channel = []  # feature lists, one per EEG channel (23 channels)
for m in range(23):
tenseconds=[]
for n in range(540):
features=[]
angle_value=angle(a[m][n])
features.append(angle_value)
energy_value=energy(a[m][n])
features.append(energy_value)
amplitude_value=amplitude(a[m][n])
features.append(amplitude_value)
tenseconds.append(features)
Channel.append(tenseconds)
print('**2**')
w=1
x=[]
df1 = pd.DataFrame()
ind=[]
for j in range(540):
ind.append(w)
w=w+1
df1['index']=ind
C="c"
F='f'
for i in range(23):
for f in range(3):
g=[]
name="C"+str(i+1)+"F"+str(f+1)
for j in range(540):
r=Channel[i][j][f]
g.append(r)
df1[name]=g
cvalue=[]
for i in range(360):
cvalue.append(0)
for j in range(180):
cvalue.append(1)
df1['class']=cvalue
saved_feature_file_name = file_name[0:8] + 'S.csv'
df1.to_csv(saved_feature_file_name,index=False)
print('**3**')
iterator += 1
print('***********************************************')
| [
"[email protected]"
] | |
a65d1a542c88c350b4f72b6ab7ca1593bca262a2 | 07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8 | /lib/python3.6/site-packages/tensorflow/contrib/labeled_tensor/python/ops/sugar.py | a80c8809882161ab99456117a0e3bd46222439f1 | [] | no_license | cronos91/ML-exercise | 39c5cd7f94bb90c57450f9a85d40c2f014900ea4 | 3b7afeeb6a7c87384049a9b87cac1fe4c294e415 | refs/heads/master | 2021-05-09T22:02:55.131977 | 2017-12-14T13:50:44 | 2017-12-14T13:50:44 | 118,736,043 | 0 | 0 | null | 2018-01-24T08:30:23 | 2018-01-24T08:30:22 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:bd2f92e1bb5733974c243de95a2275942d5c2567b858a4babde5074896e254c0
size 4821
| [
"[email protected]"
] | |
310d466c5d33bc5941084acae385c83ac0b33b25 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/rna-transcription/fc224d30443944ccb7072df9ef1a4e3b.py | e0bfc8d46549b897dc275dd321beb33d587074e7 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 222 | py | __author__ = 'shandr'
def to_rna(dna):
rna_list = []
dna_rna_map = {'G':'C','C':'G','T':'A','A':'U'}
for letter in dna:
rna_list.append(dna_rna_map[letter])
rna = ''.join(rna_list)
return rna
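def to_rna_translate(dna):
    # Equivalent one-pass alternative (a sketch; assumes Python 3, where
    # str.maketrans is available).
    return dna.translate(str.maketrans('GCTA', 'CGAU'))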
| [
"[email protected]"
] | |
258f78e6084bfc82db551b44bb0c9ecd4317def7 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/01_netCDF_extraction/merra902Combine/514-tideGauge.py | 93ddf87ea463c8e4498d9c7309742f95dda401cf | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 514
y = 515
for tg in range(x, y):
os.chdir(dir_in)
tg_name = tg_list_name[tg]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#check for empty folders
if len(os.listdir()) == 0:
continue
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
#sort based on date as merra files are scrambled
pred.sort_values(by = 'date', inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
        tg_name = str(tg) + "_" + tg_name
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
| [
"[email protected]"
] | |
fe80c401762a5612c00a9b27ab5506e10ff205c4 | 9979e352e8d823dec905395c0a6cc2488643ee01 | /setup.py | 3ba4688cada1e343051c62eb477533290e1adab1 | [
"MIT"
] | permissive | ixc/django-polymorphic-auth | 41f3cf1c99938e307c994937f4fcee7fb697eeea | 690c5e78846b328ca1b60bd0e099fe622d40892d | refs/heads/master | 2021-08-12T05:02:09.894776 | 2021-08-10T08:43:09 | 2021-08-10T08:43:09 | 34,054,086 | 6 | 5 | null | 2017-03-27T11:21:00 | 2015-04-16T12:19:40 | Python | UTF-8 | Python | false | false | 1,542 | py | from __future__ import print_function
import setuptools
import sys
# Convert README.md to reStructuredText.
if {'bdist_wheel', 'sdist'}.intersection(sys.argv):
try:
import pypandoc
except ImportError:
print('WARNING: You should install `pypandoc` to convert `README.md` '
'to reStructuredText to use as long description.',
file=sys.stderr)
else:
print('Converting `README.md` to reStructuredText to use as long '
'description.')
long_description = pypandoc.convert('README.md', 'rst')
setuptools.setup(
name='django-polymorphic-auth',
use_scm_version={'version_scheme': 'post-release'},
author='Interaction Consortium',
author_email='[email protected]',
url='https://github.com/ixc/django-polymorphic-auth',
description='Polymorphic user model with plugins for common options, plus '
'abstract and mixin classes to create your own.',
long_description=locals().get('long_description', ''),
license='MIT',
packages=setuptools.find_packages(),
include_package_data=True,
install_requires=[
'Django',
'django-polymorphic',
],
extras_require={
'dev': [
'ipdb',
'ipython',
],
'test': [
'coverage',
'django-dynamic-fixture',
'django-nose',
'django-webtest',
'nose-progressive',
'WebTest',
],
},
setup_requires=['setuptools_scm'],
)
| [
"[email protected]"
] | |
9e2341741d513855fc4d31aeaaa513f7e4afcc37 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/fabric/overallhealth1mo.py | ed96ae65a68fe255b301a63c23220117647ca8df | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 11,443 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class OverallHealth1mo(Mo):
"""
A class that represents the most current statistics for overall fabric health in a 1 month sampling interval. This class updates every day.
"""
meta = StatsClassMeta("cobra.model.fabric.OverallHealth1mo", "overall fabric health")
counter = CounterMeta("health", CounterCategory.GAUGE, "score", "health score")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "healthLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "healthMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "healthMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "healthAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "healthSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "healthTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "healthThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "healthTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "healthTr"
meta._counters.append(counter)
meta.moClassName = "fabricOverallHealth1mo"
meta.rnFormat = "CDfabricOverallHealth1mo"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current overall fabric health stats in 1 month"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.fabric.Topology")
meta.parentClasses.add("cobra.model.fabric.Pod")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.superClasses.add("cobra.model.fabric.OverallHealth")
meta.rnPrefixes = [
('CDfabricOverallHealth1mo', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "healthAvg", "healthAvg", 9287, PropCategory.IMPLICIT_AVG)
prop.label = "health score average value"
prop.isOper = True
prop.isStats = True
meta.props.add("healthAvg", prop)
prop = PropMeta("str", "healthLast", "healthLast", 9284, PropCategory.IMPLICIT_LASTREADING)
prop.label = "health score current value"
prop.isOper = True
prop.isStats = True
meta.props.add("healthLast", prop)
prop = PropMeta("str", "healthMax", "healthMax", 9286, PropCategory.IMPLICIT_MAX)
prop.label = "health score maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("healthMax", prop)
prop = PropMeta("str", "healthMin", "healthMin", 9285, PropCategory.IMPLICIT_MIN)
prop.label = "health score minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("healthMin", prop)
prop = PropMeta("str", "healthSpct", "healthSpct", 9288, PropCategory.IMPLICIT_SUSPECT)
prop.label = "health score suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("healthSpct", prop)
prop = PropMeta("str", "healthThr", "healthThr", 9290, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "health score thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("healthThr", prop)
prop = PropMeta("str", "healthTr", "healthTr", 9292, PropCategory.IMPLICIT_TREND)
prop.label = "health score trend"
prop.isOper = True
prop.isStats = True
meta.props.add("healthTr", prop)
prop = PropMeta("str", "healthTrBase", "healthTrBase", 9291, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "health score trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("healthTrBase", prop)
prop = PropMeta("str", "healthTtl", "healthTtl", 9289, PropCategory.IMPLICIT_TOTAL)
prop.label = "health score total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("healthTtl", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
047af3c638c0799e259eb7a0f2cd21a6b047142e | 694d57c3e512ce916269411b51adef23532420cd | /leetcode_review/292nim_game.py | 6340cc3f2e071470d640c21b078b53bb3bfc6b4a | [] | no_license | clovery410/mycode | 5541c3a99962d7949832a0859f18819f118edfba | e12025e754547d18d5bb50a9dbe5e725fd03fd9c | refs/heads/master | 2021-05-16T02:46:47.996748 | 2017-05-10T23:43:50 | 2017-05-10T23:43:50 | 39,235,141 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | class Solution(object):
    # solution 1: memoized recursion. Correct for small n, but it recurses
    # too deeply (and does too much work) for large inputs.
    def canWinNim(self, n, cache={}):
if n <= 0:
return False
if n <= 3:
return True
if n in cache:
return cache[n]
res = False
for i in xrange(1, 4):
if not self.canWinNim(n - i):
res = True
break
cache[n] = res
return res
    # solution 2: use the math trick. If you are left facing a multiple of 4
    # stones you will always lose, so just check whether the number is a
    # multiple of 4.
def canWinNim2(self, n):
if n % 4 == 0:
return False
return True
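# Agreement check for the two methods above (hypothetical usage, not part of
# the original submission):
if __name__ == '__main__':
    s = Solution()
    assert all(s.canWinNim(n) == s.canWinNim2(n) for n in xrange(1, 30))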
| [
"[email protected]"
] | |
c53bc07c90f655c8c17449734bdd6286c1ea8898 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /ehyZvt6AJF4rKFfXT_19.py | 4d40bc67e85b8aa828032f74be858e3060b838e7 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | """
Someone has attempted to censor my strings by replacing every vowel with a
`*`, `l*k* th*s`. Luckily, I've been able to find the vowels that were
removed.
Given a censored string and a string of the censored vowels, return the
original uncensored string.
### Example
uncensor("Wh*r* d*d my v*w*ls g*?", "eeioeo") ➞ "Where did my vowels go?"
uncensor("abcd", "") ➞ "abcd"
uncensor("*PP*RC*S*", "UEAE") ➞ "UPPERCASE"
### Notes
* The vowels are given in the correct order.
* The number of vowels will match the number of `*` characters in the censored string.
"""
def uncensor(txt, vowels):
for n in range(0,txt.count('*')):
txt = txt.replace('*', vowels[n], 1)
return txt
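def uncensor_iter(txt, vowels):
    # Single-pass alternative (a sketch, not the original solution): walk the
    # string once, drawing the next vowel whenever a '*' is seen.
    it = iter(vowels)
    return ''.join(next(it) if ch == '*' else ch for ch in txt)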
| [
"[email protected]"
] | |
59a12bfbbecf0027560e12271513c1ee8de92ada | 56b36ddf920b5f43e922cb84e8f420f1ad91a889 | /Leetcode/Leetcode-Longest Common Prefix.py | 9820848a27af5a14772e38c073f2718a3bc6f238 | [] | no_license | chithien0909/Competitive-Programming | 9ede2072e85d696ccf143118b17638bef9fdc07c | 1262024a99b34547a3556c54427b86b243594e3c | refs/heads/master | 2022-07-23T16:47:16.566430 | 2020-05-12T08:44:30 | 2020-05-12T08:44:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
s = ""
l = len(strs)
if strs == []:
return ""
if l == 1:
return strs[0]
if strs[0] == "": return ""
for j in range(0, min(len(strs[0]), len(strs[1]))):
if strs[0][j] == strs[1][j]:
s += strs[0][j]
else:
break
for i in range(2, l):
            if strs[i].startswith(s): continue
            s = s[:-1]
            while not strs[i].startswith(s):
                if s == "": return ""
                s = s[:-1]
return s | [
"[email protected]"
] | |
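A more direct variant of the scan above (an alternative sketch, not the submitted code): the common prefix of all the strings equals the common prefix of the lexicographically smallest and largest ones.
def longest_common_prefix(strs):
    if not strs:
        return ""
    lo, hi = min(strs), max(strs)
    for i, ch in enumerate(lo):
        if ch != hi[i]:
            return lo[:i]
    return lo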
32c08699894dee750fa8a1522197162dd670a0ab | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/ActivityDiscountVoucher.py | a78120116e8b61444048e09435bd6c059aa4c491 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,004 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ActivityDiscountVoucher(object):
def __init__(self):
self._ceiling_amount = None
self._discount = None
self._floor_amount = None
self._goods_name = None
self._origin_amount = None
@property
def ceiling_amount(self):
return self._ceiling_amount
@ceiling_amount.setter
def ceiling_amount(self, value):
self._ceiling_amount = value
@property
def discount(self):
return self._discount
@discount.setter
def discount(self, value):
self._discount = value
@property
def floor_amount(self):
return self._floor_amount
@floor_amount.setter
def floor_amount(self, value):
self._floor_amount = value
@property
def goods_name(self):
return self._goods_name
@goods_name.setter
def goods_name(self, value):
self._goods_name = value
@property
def origin_amount(self):
return self._origin_amount
@origin_amount.setter
def origin_amount(self, value):
self._origin_amount = value
def to_alipay_dict(self):
params = dict()
if self.ceiling_amount:
if hasattr(self.ceiling_amount, 'to_alipay_dict'):
params['ceiling_amount'] = self.ceiling_amount.to_alipay_dict()
else:
params['ceiling_amount'] = self.ceiling_amount
if self.discount:
if hasattr(self.discount, 'to_alipay_dict'):
params['discount'] = self.discount.to_alipay_dict()
else:
params['discount'] = self.discount
if self.floor_amount:
if hasattr(self.floor_amount, 'to_alipay_dict'):
params['floor_amount'] = self.floor_amount.to_alipay_dict()
else:
params['floor_amount'] = self.floor_amount
if self.goods_name:
if hasattr(self.goods_name, 'to_alipay_dict'):
params['goods_name'] = self.goods_name.to_alipay_dict()
else:
params['goods_name'] = self.goods_name
if self.origin_amount:
if hasattr(self.origin_amount, 'to_alipay_dict'):
params['origin_amount'] = self.origin_amount.to_alipay_dict()
else:
params['origin_amount'] = self.origin_amount
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ActivityDiscountVoucher()
if 'ceiling_amount' in d:
o.ceiling_amount = d['ceiling_amount']
if 'discount' in d:
o.discount = d['discount']
if 'floor_amount' in d:
o.floor_amount = d['floor_amount']
if 'goods_name' in d:
o.goods_name = d['goods_name']
if 'origin_amount' in d:
o.origin_amount = d['origin_amount']
return o
| [
"[email protected]"
] | |
4b99b6d1673da8a70f3aa73976aa21a849dabad7 | b637e53b36ad083575b161eaa8371f0cc11981a2 | /apps/provincia/views.py | 55a1aa707bfbd27a24ced0b9c850be99985ce008 | [] | no_license | cienciometrico2017/cienciometrico2018v2.0 | d7d014f858296aa262649696a4d3bfceb0b9afec | 22e8800c921e8c4890c4f52c9826532364a99a68 | refs/heads/master | 2020-03-20T22:04:26.710351 | 2018-07-26T04:28:26 | 2018-07-26T04:28:26 | 137,777,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,289 | py | from django.shortcuts import render
from django.core.urlresolvers import reverse_lazy
from apps.provincia.form import ProvinciaForm
from apps.provincia.models import provincia
from django.views.generic import ListView, CreateView,UpdateView,DeleteView
from apps.Investigador.models import Investigador
from apps.roles.models import Rol
from apps.pais.models import pais
from apps.zona.models import zona
# Create your views here.
class ProvinciaList(ListView):
model = provincia
template_name = 'provincia/provincia_listar.html'
paginate_by = 6
def get_context_data(self, **kwargs):
context = super(ProvinciaList, self).get_context_data(**kwargs)
usuario = self.request.user.id
perfil = Investigador.objects.get(user_id=usuario)
roles = perfil.roles.all()
privi = []
privilegios = []
privilegio= []
for r in roles:
privi.append(r.id)
for p in privi:
roles5 = Rol.objects.get(pk=p)
priv = roles5.privilegios.all()
for pr in priv:
privilegios.append(pr.codename)
for i in privilegios:
if i not in privilegio:
privilegio.append(i)
context['usuario'] = privilegio
return context
class ProvinciaCreate(CreateView):
model = provincia
form_class = ProvinciaForm
template_name = 'provincia/provincia_crear.html'
success_url = reverse_lazy('provincia:provincia_listar')
def get_context_data(self, **kwargs):
context = super(ProvinciaCreate, self).get_context_data(**kwargs)
Pais = pais.objects.all()
Zona = zona.objects.all()
usuario = self.request.user.id
perfil = Investigador.objects.get(user_id=usuario)
roles = perfil.roles.all()
privi = []
privilegios = []
privilegio= []
for r in roles:
privi.append(r.id)
for p in privi:
roles5 = Rol.objects.get(pk=p)
priv = roles5.privilegios.all()
for pr in priv:
privilegios.append(pr.codename)
for i in privilegios:
if i not in privilegio:
privilegio.append(i)
context['usuario'] = privilegio
context['Pais'] = Pais
context['Zona'] = Zona
return context
class ProvinciaUpdate(UpdateView):
model = provincia
form_class = ProvinciaForm
template_name = 'provincia/provincia_update.html'
success_url = reverse_lazy('provincia:provincia_listar')
def get_context_data(self, **kwargs):
context = super(ProvinciaUpdate, self).get_context_data(**kwargs)
Pais = pais.objects.all()
Zona = zona.objects.all()
usuario = self.request.user.id
perfil = Investigador.objects.get(user_id=usuario)
roles = perfil.roles.all()
privi = []
privilegios = []
privilegio= []
for r in roles:
privi.append(r.id)
for p in privi:
roles5 = Rol.objects.get(pk=p)
priv = roles5.privilegios.all()
for pr in priv:
privilegios.append(pr.codename)
for i in privilegios:
if i not in privilegio:
privilegio.append(i)
context['usuario'] = privilegio
context['Pais'] = Pais
context['Zona'] = Zona
return context
class ProvinciaDelete(DeleteView):
model = provincia
template_name = 'provincia/provincia_delete.html'
success_url = reverse_lazy('provincia:provincia_listar')
def get_context_data(self, **kwargs):
context = super(ProvinciaDelete, self).get_context_data(**kwargs)
usuario = self.request.user.id
perfil = Investigador.objects.get(user_id=usuario)
roles = perfil.roles.all()
privi = []
privilegios = []
privilegio= []
for r in roles:
privi.append(r.id)
for p in privi:
roles5 = Rol.objects.get(pk=p)
priv = roles5.privilegios.all()
for pr in priv:
privilegios.append(pr.codename)
for i in privilegios:
if i not in privilegio:
privilegio.append(i)
context['usuario'] = privilegio
return context
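# The four views above duplicate the privilege-gathering loop; a shared mixin
# (a refactoring sketch, not part of the original app) would collapse it into
# one place:
class PrivilegiosContextMixin(object):
    def get_privilegios(self):
        perfil = Investigador.objects.get(user_id=self.request.user.id)
        privilegios = []
        for rol in perfil.roles.all():
            for priv in rol.privilegios.all():
                if priv.codename not in privilegios:
                    privilegios.append(priv.codename)
        return privilegios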
| [
"[email protected]"
] | |
7f15ab1953b900545b0e54e272e970176f2d68b9 | 652a173173380629e92e8b4f85b5ded0fdf2e4bf | /venv/bin/sqlformat | 528a9371f9fe88d746bd7dced7c4272e82e4113c | [] | no_license | Jethet/udemycourse-producthunt-project | d28908162a64880ae761a0160905fe32e8157f12 | 8d1564efa2335817ad0d05c649447e290a7786e8 | refs/heads/master | 2023-04-04T05:42:00.962786 | 2021-04-21T15:03:53 | 2021-04-21T15:03:53 | 197,566,411 | 0 | 0 | null | 2021-04-20T18:23:50 | 2019-07-18T10:32:51 | Python | UTF-8 | Python | false | false | 271 | #!/Users/henriettehettinga/GitHub/producthunt_project/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
75cf96bd5078bb078561c71d9d602acf4f5bcce3 | 9af2312a1ea1abe1a9641b7ee578eb93828f8131 | /TinySpider/01-bs4_test.py | 8b19f2c2c105a2febc2a9d94afaaef28babf9d33 | [] | no_license | Huangyan0804/Python | 593df64fffe44822d38b3cab6f5ee7999802b8a9 | 6adcc342b658afcf805004b868ac0976b0fabed6 | refs/heads/master | 2020-06-04T13:27:56.235753 | 2020-02-20T15:13:31 | 2020-02-20T15:13:31 | 145,232,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py |
def work(x, a, b):
    # Doubles a for x steps while carrying the previous a in b;
    # work(n, 1, 0) evaluates to 3 * 2**(n - 1) for n >= 1, and to 1 for n == 0.
if x == 0:
return a + b
else:
return work(x - 1, a * 2, a)
t = int(input())
for i in range(t):
n = int(input())
    ans = work(n, 1, 0)
print(ans)
| [
"[email protected]"
] | |
6343453a9fd07a76c848dbceb689b069e62f8cd2 | aaa204ad7f134b526593c785eaa739bff9fc4d2a | /tests/system/providers/airbyte/example_airbyte_trigger_job.py | c65df48a42ac826d5e6952344641a704118f592b | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | cfei18/incubator-airflow | 913b40efa3d9f1fdfc5e299ce2693492c9a92dd4 | ffb2078eb5546420864229cdc6ee361f89cab7bd | refs/heads/master | 2022-09-28T14:44:04.250367 | 2022-09-19T16:50:23 | 2022-09-19T16:50:23 | 88,665,367 | 0 | 1 | Apache-2.0 | 2021-02-05T16:29:42 | 2017-04-18T20:00:03 | Python | UTF-8 | Python | false | false | 2,393 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example DAG demonstrating the usage of the AirbyteTriggerSyncOperator."""
from __future__ import annotations
import os
from datetime import datetime, timedelta
from airflow import DAG
from airflow.providers.airbyte.operators.airbyte import AirbyteTriggerSyncOperator
from airflow.providers.airbyte.sensors.airbyte import AirbyteJobSensor
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "example_airbyte_operator"
CONN_ID = '15bc3800-82e4-48c3-a32d-620661273f28'
with DAG(
dag_id=DAG_ID,
schedule=None,
start_date=datetime(2021, 1, 1),
dagrun_timeout=timedelta(minutes=60),
tags=['example'],
catchup=False,
) as dag:
# [START howto_operator_airbyte_synchronous]
sync_source_destination = AirbyteTriggerSyncOperator(
task_id='airbyte_sync_source_dest_example',
connection_id=CONN_ID,
)
# [END howto_operator_airbyte_synchronous]
# [START howto_operator_airbyte_asynchronous]
async_source_destination = AirbyteTriggerSyncOperator(
task_id='airbyte_async_source_dest_example',
connection_id=CONN_ID,
asynchronous=True,
)
airbyte_sensor = AirbyteJobSensor(
task_id='airbyte_sensor_source_dest_example',
airbyte_job_id=async_source_destination.output,
)
# [END howto_operator_airbyte_asynchronous]
# Task dependency created via `XComArgs`:
# async_source_destination >> airbyte_sensor
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| [
"[email protected]"
] | |
9255ed7704219f5c9d9b65a22bcc4967c6e1f444 | 196f7e3238f961fb5eba7a794f0b0c75d7c30ba1 | /Python编程从入门到实践3.6/c14/test14/ship.py | 5b286547c1a1424e828a4a64df2276be4b6bc83b | [] | no_license | Liaoyingjie/Pythonlearn | d0b1b95110017af7e063813660e52c61a6333575 | 8bca069f38a60719acac5aa39bd347f90ab0bfb1 | refs/heads/master | 2020-04-08T07:35:07.357487 | 2018-04-12T16:44:43 | 2018-04-12T16:44:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,525 | py | import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
    # ai_settings is passed in so the settings object controls the ship's speed
    def __init__(self, ai_settings, screen):
        """Initialize the ship and set its starting position"""
        super(Ship, self).__init__()
        self.screen = screen
        self.ai_settings = ai_settings
        # Load the ship image and get its bounding rect
        self.image = pygame.image.load('../images/ship.bmp')
        self.rect = self.image.get_rect()
        self.screen_rect = self.screen.get_rect()
        # Start each new ship at the bottom center of the screen; if these
        # two lines are commented out, the ship sits at the top-left
        # corner, the (0, 0) origin.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom
        # pygame.image.load returns the ship's surface, stored in image;
        # image.get_rect returns the element's rect object, which can be
        # positioned through attributes such as bottom and center.
        # In pygame the origin is the screen's top-left corner and the
        # coordinates grow toward the bottom-right: on a 1200x800 screen
        # the bottom-right corner is (1200, 800).
        # rect.centerx can only store integers, so keep an exact float
        # copy of the ship's horizontal position in self.center.
        self.center = float(self.rect.centerx)
        # Movement flags, one each for moving left and right.
        self.moving_right = False
        self.moving_left = False
    def update(self):
        """Adjust the ship's position according to the movement flags:
        holding the key keeps the ship moving (not one step per press),
        and the bounds checks stop it at the screen edges."""
        # Update the ship's center value, not the rect.
        if self.moving_right and self.rect.right < self.screen_rect.right:
            self.center += self.ai_settings.ship_speed_factor
        if self.moving_left and self.rect.left > 0:
            self.center -= self.ai_settings.ship_speed_factor
        # Update the rect object from self.center.
        self.rect.centerx = self.center
    def blitme(self):
        """Draw the ship at its current location"""
        self.screen.blit(self.image, self.rect)
    def center_ship(self):
        # Center the ship horizontally on the screen
        self.center = self.screen_rect.centerx | [
"[email protected]"
] | |
291e75936bd0c64c644451a8afeba47c13a350cb | 6de40caa30577bdf7cc8d788781fd2622588cf1d | /w4/examples/shelter-demo/shelter/urls.py | 7b7d8917340a2492154f386c1929a45e63b629cc | [] | no_license | momentum-cohort-2019-02/kb | ca42d4ff61a4cf08efb89bf0502788bd0eb7b648 | ad8318712349600f6ab13c2a0a92a65b3ae04677 | refs/heads/master | 2020-04-21T13:05:52.246012 | 2019-04-17T14:17:39 | 2019-04-17T14:17:42 | 169,587,008 | 4 | 13 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | """shelter URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls.static import static
from django.conf import settings
from core import views as core_views
urlpatterns = [
path('admin/', admin.site.urls),
path('', core_views.index_view, name="index"),
] + static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
0ed744a294fa836def27d2717433779dc3f9dd53 | 0ad2458c85ce545b1d3a4b75dabe2c94ed8c2518 | /pipeline/0x02-databases/32-update_topics.py | f0deec04f31ea17769b5ba4aaa8d63a85baf3b7f | [] | no_license | felipeserna/holbertonschool-machine_learning | fc82eda9ee4cb8765ad0ffb5fa923407b200480d | 161e33b23d398d7d01ad0d7740b78dda3f27e787 | refs/heads/master | 2023-07-06T20:26:12.892875 | 2021-08-17T17:03:30 | 2021-08-17T17:03:30 | 317,288,341 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | #!/usr/bin/env python3
"""
Changes all topics of a school document based on the name
"""
def update_topics(mongo_collection, name, topics):
"""
Return: Nothing
"""
new_topics = {"$set": {"topics": topics}}
mongo_collection.update_many({"name": name}, new_topics)
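
# Minimal usage sketch (assumes a running MongoDB instance reachable via
# pymongo; the database/collection names below are illustrative only):
#   from pymongo import MongoClient
#   school_collection = MongoClient('mongodb://127.0.0.1:27017').my_db.school
#   update_topics(school_collection, "Holberton school", ["Sys admin", "AI"])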
| [
"[email protected]"
] | |
cf1c2910de8bd2db6d9e04af294d65f29c8da0d7 | 58d3a6720cd6ecc420db58de002f776f717ae77f | /Array/SplitAndAppend.py | a2d6da570b537d9edcc9c166b8d9888f4c379d5b | [] | no_license | Jaydeep-07/Python-Practice | 786a4763caf0c7e9b1cf7ac6773c0ba3ccbc0d12 | 1aec7648fa47c7cd325d3f0f466ba440a3ec1808 | refs/heads/master | 2020-12-29T15:53:55.954878 | 2020-05-09T09:34:10 | 2020-05-09T09:34:10 | 238,659,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | # Python program to split array and move first
# part to end.
def splitArr(arr, n, k):
for i in range(0, k):
x = arr[0]
for j in range(0, n-1):
arr[j] = arr[j + 1]
arr[n-1] = x
# main
arr = [12, 10, 5, 6, 52, 36]
n = len(arr)
position = 2
splitArr(arr, n, position)
for i in range(0, n):
    print(arr[i], end=' ')
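
# Note: the same left rotation can be written with slicing, which is O(n)
# rather than the O(n * k) element-shifting above:
#   rotated = arr[position:] + arr[:position]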
| [
"[email protected]"
] | |
b23610d4ed2d962655cbb3f9d94c100c44194741 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /2t6NvMe27HtSmqC4F_16.py | 72db51f2e725cbf6c80b069f070630cf0775452d | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | """
Write three functions:
1. boolean_and
2. boolean_or
3. boolean_xor
These functions should evaluate a list of `True` and `False` values, starting
from the leftmost element and evaluating pairwise.
### Examples
boolean_and([True, True, False, True]) ➞ False
# [True, True, False, True] => [True, False, True] => [False, True] => False
boolean_or([True, True, False, False]) ➞ True
# [True, True, False, True] => [True, False, False] => [True, False] => True
boolean_xor([True, True, False, False]) ➞ False
# [True, True, False, False] => [False, False, False] => [False, False] => False
### Notes
* `XOR` is the same as `OR`, except that it excludes `[True, True]`.
* Each time you evaluate an element at 0 and at 1, you collapse it into the single result.
"""
def boolean_and(lst):
    # AND-collapse is False as soon as any element is False.
    return False not in lst
def boolean_or(lst):
    # OR-collapse is True as soon as any element is True.
    return True in lst
def boolean_xor(lst):
    # XOR-collapse from the left: the running result flips whenever the
    # next element is True, i.e. ret = ret != x.
    ret = lst[0]
    for x in lst[1:]:
        ret = ret != x
    return ret
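
# Examples from the prompt above:
#   boolean_and([True, True, False, True])   -> False
#   boolean_or([True, True, False, False])   -> True
#   boolean_xor([True, True, False, False])  -> False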
| [
"[email protected]"
] | |
c94460f1f5fd13eedaf09959bd0a26cad54b92a0 | b3a77d07f852bc865524120d9b3c40c691aa62b5 | /slguerbetal/spiders/slguerbetal.py | 99c0aa2792feb87f8a55d47a41a5df80777e0bb2 | [] | no_license | daniel-kanchev/slguerbetal | 0a6eb2efb5701a1f86556b75ae833dc90b4cc653 | 2984ce5983c92b809cdb173bae14ef0b179078a8 | refs/heads/main | 2023-03-11T19:32:12.199529 | 2021-03-01T09:35:41 | 2021-03-01T09:35:41 | 343,361,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst
from datetime import datetime
from slguerbetal.items import Article
class SlguerbetalSpider(scrapy.Spider):
name = 'slguerbetal'
start_urls = ['https://www.slguerbetal.ch/de/']
def parse(self, response):
articles = response.xpath('//article')
for article in articles:
item = ItemLoader(Article())
item.default_output_processor = TakeFirst()
title = article.xpath('./h2//text()').get()
content = article.xpath('./div[@class="long-text"]//text()').getall()
content = [text for text in content if text.strip()]
content = "\n".join(content).strip()
item.add_value('title', title)
item.add_value('content', content)
yield item.load_item()
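
# Run from the project root with the standard Scrapy CLI:
#   scrapy crawl slguerbetal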
| [
"[email protected]"
] | |
5b8ae701bdf1cbe048386291f6f3f6a2a33c0e76 | 3d54d60973f88e0ed1f6be1a03ca6c5fbd0d3244 | /examples/dfp/v201403/label_service/get_all_labels.py | 047731e06eb7e60616f2b8b70a2fb40be4e3721e | [
"Apache-2.0"
] | permissive | zyqkenmy/googleads-python-lib | 52a4f9ef9eef0da9410c9c90322186bb7a8e408f | fb7d3c2c7c42cc1fc27a3d2bf97382e25f6a05c2 | refs/heads/master | 2020-02-26T13:55:31.186467 | 2014-03-13T20:51:33 | 2014-03-13T20:51:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all labels.
To create labels, run create_labels.py. This feature is only available to DFP
premium solution networks."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate classes from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
label_service = client.GetService('LabelService', version='v201403')
# Create statement to get all labels
statement = dfp.FilterStatement()
# Get labels by statement.
while True:
response = label_service.getLabelsByStatement(statement.ToStatement())
if 'results' in response:
# Display results.
for label in response['results']:
print ('Label with id \'%s\' and name \'%s\' was found.'
% (label['id'], label['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
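
# Note on pagination: the loop in main() fetches results one page at a
# time, advancing statement.offset by dfp.SUGGESTED_PAGE_LIMIT until a
# response arrives without a 'results' key.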
| [
"[email protected]"
] | |
1c7b100b3994b57253d8a6156e045b977b28db37 | 25160eef36f911322c3b24c3e587d66c54cdf6a8 | /iconset.py | 934495bad32bac2b01f3fb5a8aa2e33f889fcb48 | [] | no_license | asmikush111/september | 7c8dd200cded9616e10f1e93b6aebe82b07e2196 | 8f058d10f5781b420d1a78df10c30a5eea01bedd | refs/heads/master | 2020-09-05T15:28:56.310270 | 2019-11-05T09:56:09 | 2019-11-05T09:56:09 | 220,144,047 | 1 | 0 | null | 2019-11-07T03:31:06 | 2019-11-07T03:31:06 | null | UTF-8 | Python | false | false | 167 | py | from tkinter import *
root = Tk()
root.title("My Notepad")  # set the window title
root.wm_iconbitmap("notepad.ico")  # set the window icon
mainloop() | [
"[email protected]"
] | |
d6370ed3821e8c3d3368fa8e7d799b6439b64044 | 65113128c2bd0bfe05db1c75776cee7a22ea98fd | /deadcode/firstmodel.py | 583cbfe07d4db227a5311b3463e3df2471a52c7d | [] | no_license | taygrave/salad_tool | 32a539f2fba3e9478d1277d5ec5d763e2defc4ce | fa01b3ac74ea41a21e7256552f3a1e4c1c6eebb0 | refs/heads/master | 2021-01-13T14:04:46.272323 | 2015-02-18T06:16:46 | 2015-02-18T06:16:46 | 30,062,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,385 | py | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
from sqlalchemy.orm import sessionmaker, scoped_session, relationship, backref
import sqlite3
import csv
Base = declarative_base()
ENGINE = create_engine("sqlite:///saladtool.db", echo=True)
Session = sessionmaker(bind=ENGINE)
session = Session()
#process the file and build the necessary dictionaries
#searching: I live in X state, the time frame is X, I want X# of X types of food.
CONN = sqlite3.connect("saladtool.db")
CURSOR = CONN.cursor()
#Use this for one time adds to db of all foods once scraped new data from web: http://www.sustainabletable.org/seasonalguide/seasonalfoodguide.php
def add_to_db(sfile):
"""Adds new data file to database in the Master table"""
#sfile = "db.txt" for current data
#making connection with SQL database
query = """INSERT INTO Master (name, type, season, state) VALUES (?,?,?,?)"""
#data file must be text with four columns, for name, type, season, and state
for line in sfile:
my_list = line.strip().split(",")
vname, vtype, vseason, vstate = my_list
CURSOR.execute(query, (vname, vtype, vseason, vstate))
CONN.commit()
print "Successfully added %s to Master table in saladtool.db" %sfile
#Already used for a one-time add to db for list of states
def states_to_db():
"""Adds new data file to database in the States table"""
#making connection with SQL database
query = """INSERT INTO States (abbrv, state) VALUES (?,?)"""
with open("states.csv", 'rb') as src_file:
reader = csv.reader(src_file)
for line in reader:
state, abbrv = line
CURSOR.execute(query, (abbrv, state))
CONN.commit()
print "Successfully added states to States table in saladtool.db"
# Q: Did I really have to create a class? Can't I return these values in a
# better way? I was getting an error when I just returned the result of the
# following function without making a whole class.
class State(object):
"""A wrapper that corresponds to rows in the States table"""
def __init__(self, abbrv, state):
self.abbrv = abbrv
self.state = state
def __repr__(self):
return "<State: %s, %s>" %(self.abbrv, self.state)
class Food(Base):
| [
"[email protected]"
] | |
afa158c4929e3a0b8fa19183e2b7492a385cb1dd | 25491d1b0b69911885b2ad00e9c1ef880f946ae1 | /env/render.py | 211bbb52bc24adfd441c4fd98670392d65e62329 | [
"MIT"
] | permissive | hzheng40/distributed_es | 106fe87ad2918f77c8e1803117bd2208835b520f | 5f447eb3fd1159c0754dfe14e92640df75a9cde7 | refs/heads/master | 2022-06-05T22:23:43.395821 | 2020-05-03T19:38:12 | 2020-05-03T19:38:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,084 | py | import time
from argparse import Namespace
from pathlib import Path
from typing import Optional
import click
from badger_utils.sacred import SacredReader
from utils.eval import simulate, build_policy_env
from utils.sacred_local import get_sacred_storage
@click.command()
@click.argument('exp_id', type=int)
@click.option('--gen', default=None, help='Generation to be deserialized (the last one by default)')
@click.option('--sleep', default=0.001, help='Sleep time between the time steps')
@click.option('--num_episodes', default=None, type=int, help='Override num_episodes parameter?')
@click.option('--max_ep_length', default=None, type=int, help='Override the max_episode_length?')
def render(exp_id: int,
gen: int,
sleep: float,
num_episodes: Optional[int],
max_ep_length: Optional[int]):
"""Download a given config and policy from the sacred, run the inference"""
# parse arguments, init the reader
reader = SacredReader(exp_id, get_sacred_storage(), data_dir=Path.cwd())
# obtain the config
config = Namespace(**reader.config)
num_episodes = num_episodes if num_episodes is not None else config.num_episodes
max_ep_length = max_ep_length if max_ep_length is not None else config.max_ep_length
env_seed = config.env_seed if config.env_seed is not None else -1
policy, env = build_policy_env(config, env_seed)
# deserialize the model parameters
if gen is None:
gen = reader.find_last_epoch()
print(f'Deserialization from the epoch: {gen}')
time.sleep(2)
policy.load(reader=reader, epoch=gen)
fitness, num_steps_used = simulate(env=env,
policy=policy,
num_episodes=num_episodes,
max_ep_length=max_ep_length,
render=True,
sleep_render=sleep)
print(f'\n\n Done, fitness is: {fitness}, num_steps: {num_steps_used}\n\n')
if __name__ == '__main__':
render()
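
# Example invocation (a sketch; the experiment id and generation below are
# placeholders, not real runs):
#   python render.py 123 --gen 50 --sleep 0.01 --num_episodes 1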
| [
"[email protected]"
] | |
b9b442f95e158d2656877533a5b3a77333c6ed88 | 48bab55feaa39d8de6f05adc4dda42eab49906c6 | /06 Condições (Parte 2)/ex041.py | 01666f2bf170e31ea93347c9866da9dd9e9ae133 | [] | no_license | bmsrangel/Python_CeV | 1850e7aa23892a78ba88e566862c75772fa5dcb9 | f08e28b2f0dd444a2963e97f3ac2a024a6c5cd6f | refs/heads/master | 2022-12-21T01:10:13.950249 | 2018-11-23T21:08:12 | 2018-11-23T21:08:12 | 150,914,095 | 0 | 1 | null | 2022-12-18T15:04:56 | 2018-09-30T00:32:08 | Python | UTF-8 | Python | false | false | 396 | py | from datetime import date
ano = int(input('Enter your birth year: '))
atual = date.today().year
idade = atual - ano
print('The athlete is {} years old'.format(idade))
if idade <= 9:
    print('Category MIRIM')
elif idade <= 14:
    print('Category INFANTIL')
elif idade <= 19:
    print('Category JÚNIOR')
elif idade <= 25:
    print('Category SÊNIOR')
else:
    print('Category MASTER') | [
"[email protected]"
] | |
da5d4a1feffa55183c27b747c21bfbf5719f647c | 39a1d46fdf2acb22759774a027a09aa9d10103ba | /model-optimizer/extensions/front/HSigmoid_fusion.py | a898308d3a18042615ded67d8b969533938ff36e | [
"Apache-2.0"
] | permissive | mashoujiang/openvino | 32c9c325ffe44f93a15e87305affd6099d40f3bc | bc3642538190a622265560be6d88096a18d8a842 | refs/heads/master | 2023-07-28T19:39:36.803623 | 2021-07-16T15:55:05 | 2021-07-16T15:55:05 | 355,786,209 | 1 | 3 | Apache-2.0 | 2021-06-30T01:32:47 | 2021-04-08T06:22:16 | C++ | UTF-8 | Python | false | false | 7,183 | py | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from extensions.front.AttributedClampNormalizer import AttributedClampNormalizer
from extensions.ops.activation_ops import HSigmoid
from mo.front.common.replacement import FrontReplacementSubgraph
from mo.front.subgraph_matcher import SubgraphMatch
from mo.graph.graph import Graph, rename_nodes
from mo.middle.pattern_match import check_value
from mo.utils.graph import Node
def replace_with_hsigmoid(graph: Graph, first_node: Node, last_node: Node):
# determine the input port of first and last nodes which gets the 'input' node output
add_input_port_idx = int(first_node.in_port(0).get_connection().get_source().node.soft_get('op') == 'Const')
last_node_name = last_node.soft_get('name', last_node.id)
hsigmoid = HSigmoid(graph, {}).create_node()
hsigmoid.in_port(0).connect(first_node.in_port(add_input_port_idx).get_source())
last_node.out_port(0).get_connection().set_source(hsigmoid.out_port(0))
rename_nodes([(last_node, last_node_name + '/TBR'), (hsigmoid, last_node_name)])
class HSigmoidWithClamp(FrontReplacementSubgraph):
"""
The transformation looks for the pattern with ReLU6 (Clamp) defining the HSigmoid function:
HSigmoid(x) = Relu6(x + 3.0) / 6.0.
"""
enabled = True
def run_after(self):
return [AttributedClampNormalizer]
def pattern(self):
return dict(
nodes=[
('input', dict()),
('add', dict(op='Add')),
('const_0', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 0.0, atol=1e-6)))),
('const_3', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 3.0, atol=1e-6)))),
('const_6', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))),
('const_1_6',
dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 1.0 / 6.0, atol=1e-6)))),
('clamp', dict(op='Clamp')),
('mul_2', dict(op='Mul')),
],
edges=[
('input', 'add', {}),
('const_3', 'add', {}),
('add', 'clamp', {'in': 0}),
('const_0', 'clamp', {'in': 1}),
('const_6', 'clamp', {'in': 2}),
('clamp', 'mul_2', {}),
('const_1_6', 'mul_2', {}),
])
def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):
replace_with_hsigmoid(graph, match['add'], match['mul_2'])
class HSigmoidWithMinMax(FrontReplacementSubgraph):
"""
The transformation looks for the pattern with Min/Max defining the HSigmoid function:
HSigmoid(x) = Min(Max(x + 3.0, 0), 6.0) / 6.0.
"""
enabled = True
def run_after(self):
return [AttributedClampNormalizer]
def pattern(self):
return dict(
nodes=[
('input', dict()),
('add', dict(op='Add')),
('const_0', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 0.0, atol=1e-6)))),
('const_3', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 3.0, atol=1e-6)))),
('const_6', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))),
('const_1_6',
dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 1.0 / 6.0, atol=1e-6)))),
('max', dict(op='Maximum')),
('min', dict(op='Minimum')),
('mul_2', dict(op='Mul')),
],
edges=[
('input', 'add', {'out': 0}),
('const_3', 'add', {}),
('add', 'max', {}),
('const_0', 'max', {}),
('max', 'min', {}),
('const_6', 'min', {}),
('min', 'mul_2', {}),
('const_1_6', 'mul_2', {}),
])
def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):
replace_with_hsigmoid(graph, match['add'], match['mul_2'])
class HSigmoidWithReluDiv(FrontReplacementSubgraph):
"""
The transformation looks for the pattern with Relu/Div defining the HSigmoid function:
HSigmoid(x) = Min(Relu(x + 3.0), 6.0) / 6.0
"""
enabled = True
def run_after(self):
return [AttributedClampNormalizer]
def pattern(self):
return dict(
nodes=[
('input', dict()),
('add_const',
dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 3.0, atol=1e-6)))),
('add', dict(op='Add')),
('relu', dict(op='ReLU')),
('min_const',
dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))),
('min', dict(op='Minimum')),
('div_const',
dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))),
('div', dict(op='Div')),
],
edges=[
('input', 'add', {'out': 0}),
('add_const', 'add', {}),
('add', 'relu', {}),
('relu', 'min', {}),
('min_const', 'min', {}),
('min', 'div', {}),
('div_const', 'div', {}),
])
def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):
replace_with_hsigmoid(graph, match['add'], match['div'])
class HSigmoidWithReluMul(FrontReplacementSubgraph):
"""
The transformation looks for the pattern with Relu/Mul defining the HSigmoid function:
HSigmoid(x) = Min(Relu(x + 3.0), 6.0) * 1.0/6.0
"""
enabled = True
def run_after(self):
return [AttributedClampNormalizer]
def pattern(self):
return dict(
nodes=[
('input', dict()),
('add_const',
dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 3.0, atol=1e-6)))),
('add', dict(op='Add')),
('relu', dict(op='ReLU')),
('min_const',
dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))),
('min', dict(op='Minimum')),
('mul_const',
dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 1.0 / 6.0, atol=1e-6)))),
('mul', dict(op='Mul')),
],
edges=[
('input', 'add', {'out': 0}),
('add_const', 'add', {}),
('add', 'relu', {}),
('relu', 'min', {}),
('min_const', 'min', {}),
('min', 'mul', {}),
('mul_const', 'mul', {}),
])
def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):
replace_with_hsigmoid(graph, match['add'], match['mul'])
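
# Numerical sanity check (a sketch, not part of the transformation code):
# all of the patterns above match the same function,
# HSigmoid(x) = min(max(x + 3, 0), 6) / 6, e.g.:
#   x = np.linspace(-5.0, 5.0, 11)
#   ref = np.minimum(np.maximum(x + 3.0, 0.0), 6.0) / 6.0
#   via_mul = np.minimum(np.maximum(x + 3.0, 0.0), 6.0) * (1.0 / 6.0)
#   assert np.allclose(ref, via_mul)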
| [
"[email protected]"
] | |
942be96b750114306413fc70537498dc6516768b | eb0711915d6bba2f765f052736e33ac9a9a397a6 | /HE0435/write_file/write_glee/cre_HE.py | 8a049dec0fc04d6215094467d842b6c3b5c71ad7 | [] | no_license | dartoon/GL_HostGalaxy | cd2166f273ae7e0397a7d2d39f760ab59e86f014 | 7469f1c1e640d176a75cc6e9497920e494ad656a | refs/heads/master | 2016-08-11T13:27:17.545360 | 2016-04-07T19:04:57 | 2016-04-07T19:04:57 | 46,524,027 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | import numpy as np
file1 = open('../../pylens/HE1104.txt','r')
para = np.loadtxt(file1)
file1.close()
#for i in range(0,len(para)):
# print para[i]
t=np.empty(10)
for i in range(0,len(para)):
t[0]=para[i,0]/6*0.13
t[1]=para[i,1]/6*0.13
t[2]=para[i,2]/6*0.13
t[3]=para[i,3]
t[4]=para[i,4]/180*np.pi
t[5]=para[i,5]
t[6]=para[i,16]/6*0.13
t[7]=para[i,17]
t[8]=para[i,18]/180*np.pi
t[9]=para[i,19]/2
inn = open('HE').read()
out = open('HE{0}'.format(i+1), 'w')
replacements = {'d0_':str(t[0]), 'd1_':str(t[1]), 'd2_':str(t[2]), 'd3_':str(t[3]), 'd4_':str(t[4]), 'd5_':str(t[5]), 'd6_':str(t[6]), 'd7_':str(t[7]), 'd8_':str(t[8]), 'd9_':str(t[9])}
    for j in replacements.keys():
        inn = inn.replace(j, replacements[j])
    out.write(inn)
    out.close()
| [
"[email protected]"
] | |
f511d276f53a6a99d60aa772674abcbd95dfbdd2 | 656341483ae8abe8792942d26556fdd4ff5ca7a9 | /ThriftAPI/gen_py_tmp/ShareSite/ncTShareSite-remote | 0e4871e944bf763b963ec05cac4e9d552778344a | [] | no_license | GWenPeng/Apitest_framework | b57ded9be4ec896d4ba8e02e9135bc7c73d90034 | ab922c82c2454a3397ddbf4cd0771067734e1111 | refs/heads/master | 2022-11-26T05:54:47.168062 | 2020-08-06T01:45:12 | 2020-08-06T01:45:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,156 | #!/usr/bin/env python
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
import sys
import pprint
if sys.version_info[0] > 2:
from urllib.parse import urlparse
else:
from urlparse import urlparse
from thrift.transport import TTransport, TSocket, TSSLSocket, THttpClient
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from ShareSite import ncTShareSite
from ShareSite.ttypes import *
if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print('')
print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] [-novalidate] [-ca_certs certs] [-keyfile keyfile] [-certfile certfile] function [arg1 [arg2...]]')
print('')
print('Functions:')
print(' void SetMultSiteStatus(bool status)')
print(' bool GetMultSiteStatus()')
print(' ncTSiteInfo GetLocalSiteInfo()')
print(' void AddSite(ncTAddSiteParam paramInfo)')
print(' void DeleteSite(string siteID)')
print(' void EditSite(ncTEditSiteParam paramInfo)')
print(' GetSiteInfo()')
print(' ncTSiteInfo NodifySiteAddBegin()')
print(' void NodifySiteAdd(string masterIp)')
print(' void NodifySiteDelete()')
print(' ncTSiteInfo GetLocalSiteInfoByRemote()')
print(' void UpdateHeartByMaster(string siteId)')
print(' void SyncSlaveToMaster(string data)')
print(' void SyncMasterToSlave(string data)')
print(' void UpdateSiteIp(string ip)')
print(' ncTSiteInfo GetSiteInfoById(string siteid)')
print(' void CheckSign(string expired, string sign, string site_id, bool flag)')
print(' void RestartServer(string server_name)')
print(' void UpdateEVFSSiteInfo()')
print(' void CreateCrossDomainXml()')
print(' void UpdateSiteMasterDbIp(string ip)')
print(' void SyncOSSInfo(string data)')
print(' void UpdateSiteVirusStatus(bool Status)')
print(' void UpdateAllSiteVirusStatus(bool Status)')
print(' bool GetSiteVirusStatus()')
print('')
sys.exit(0)
pp = pprint.PrettyPrinter(indent=2)
host = 'localhost'
port = 9090
uri = ''
framed = False
ssl = False
validate = True
ca_certs = None
keyfile = None
certfile = None
http = False
argi = 1
if sys.argv[argi] == '-h':
parts = sys.argv[argi + 1].split(':')
host = parts[0]
if len(parts) > 1:
port = int(parts[1])
argi += 2
if sys.argv[argi] == '-u':
url = urlparse(sys.argv[argi + 1])
parts = url[1].split(':')
host = parts[0]
if len(parts) > 1:
port = int(parts[1])
else:
port = 80
uri = url[2]
if url[4]:
uri += '?%s' % url[4]
http = True
argi += 2
if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
framed = True
argi += 1
if sys.argv[argi] == '-s' or sys.argv[argi] == '-ssl':
ssl = True
argi += 1
if sys.argv[argi] == '-novalidate':
validate = False
argi += 1
if sys.argv[argi] == '-ca_certs':
ca_certs = sys.argv[argi+1]
argi += 2
if sys.argv[argi] == '-keyfile':
keyfile = sys.argv[argi+1]
argi += 2
if sys.argv[argi] == '-certfile':
certfile = sys.argv[argi+1]
argi += 2
cmd = sys.argv[argi]
args = sys.argv[argi + 1:]
if http:
transport = THttpClient.THttpClient(host, port, uri)
else:
if ssl:
socket = TSSLSocket.TSSLSocket(host, port, validate=validate, ca_certs=ca_certs, keyfile=keyfile, certfile=certfile)
else:
socket = TSocket.TSocket(host, port)
if framed:
transport = TTransport.TFramedTransport(socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol(transport)
client = ncTShareSite.Client(protocol)
transport.open()
if cmd == 'SetMultSiteStatus':
if len(args) != 1:
print('SetMultSiteStatus requires 1 args')
sys.exit(1)
pp.pprint(client.SetMultSiteStatus(eval(args[0]),))
elif cmd == 'GetMultSiteStatus':
if len(args) != 0:
print('GetMultSiteStatus requires 0 args')
sys.exit(1)
pp.pprint(client.GetMultSiteStatus())
elif cmd == 'GetLocalSiteInfo':
if len(args) != 0:
print('GetLocalSiteInfo requires 0 args')
sys.exit(1)
pp.pprint(client.GetLocalSiteInfo())
elif cmd == 'AddSite':
if len(args) != 1:
print('AddSite requires 1 args')
sys.exit(1)
pp.pprint(client.AddSite(eval(args[0]),))
elif cmd == 'DeleteSite':
if len(args) != 1:
print('DeleteSite requires 1 args')
sys.exit(1)
pp.pprint(client.DeleteSite(args[0],))
elif cmd == 'EditSite':
if len(args) != 1:
print('EditSite requires 1 args')
sys.exit(1)
pp.pprint(client.EditSite(eval(args[0]),))
elif cmd == 'GetSiteInfo':
if len(args) != 0:
print('GetSiteInfo requires 0 args')
sys.exit(1)
pp.pprint(client.GetSiteInfo())
elif cmd == 'NodifySiteAddBegin':
if len(args) != 0:
print('NodifySiteAddBegin requires 0 args')
sys.exit(1)
pp.pprint(client.NodifySiteAddBegin())
elif cmd == 'NodifySiteAdd':
if len(args) != 1:
print('NodifySiteAdd requires 1 args')
sys.exit(1)
pp.pprint(client.NodifySiteAdd(args[0],))
elif cmd == 'NodifySiteDelete':
if len(args) != 0:
print('NodifySiteDelete requires 0 args')
sys.exit(1)
pp.pprint(client.NodifySiteDelete())
elif cmd == 'GetLocalSiteInfoByRemote':
if len(args) != 0:
print('GetLocalSiteInfoByRemote requires 0 args')
sys.exit(1)
pp.pprint(client.GetLocalSiteInfoByRemote())
elif cmd == 'UpdateHeartByMaster':
if len(args) != 1:
print('UpdateHeartByMaster requires 1 args')
sys.exit(1)
pp.pprint(client.UpdateHeartByMaster(args[0],))
elif cmd == 'SyncSlaveToMaster':
if len(args) != 1:
print('SyncSlaveToMaster requires 1 args')
sys.exit(1)
pp.pprint(client.SyncSlaveToMaster(args[0],))
elif cmd == 'SyncMasterToSlave':
if len(args) != 1:
print('SyncMasterToSlave requires 1 args')
sys.exit(1)
pp.pprint(client.SyncMasterToSlave(args[0],))
elif cmd == 'UpdateSiteIp':
if len(args) != 1:
print('UpdateSiteIp requires 1 args')
sys.exit(1)
pp.pprint(client.UpdateSiteIp(args[0],))
elif cmd == 'GetSiteInfoById':
if len(args) != 1:
print('GetSiteInfoById requires 1 args')
sys.exit(1)
pp.pprint(client.GetSiteInfoById(args[0],))
elif cmd == 'CheckSign':
if len(args) != 4:
print('CheckSign requires 4 args')
sys.exit(1)
pp.pprint(client.CheckSign(args[0], args[1], args[2], eval(args[3]),))
elif cmd == 'RestartServer':
if len(args) != 1:
print('RestartServer requires 1 args')
sys.exit(1)
pp.pprint(client.RestartServer(args[0],))
elif cmd == 'UpdateEVFSSiteInfo':
if len(args) != 0:
print('UpdateEVFSSiteInfo requires 0 args')
sys.exit(1)
pp.pprint(client.UpdateEVFSSiteInfo())
elif cmd == 'CreateCrossDomainXml':
if len(args) != 0:
print('CreateCrossDomainXml requires 0 args')
sys.exit(1)
pp.pprint(client.CreateCrossDomainXml())
elif cmd == 'UpdateSiteMasterDbIp':
if len(args) != 1:
print('UpdateSiteMasterDbIp requires 1 args')
sys.exit(1)
pp.pprint(client.UpdateSiteMasterDbIp(args[0],))
elif cmd == 'SyncOSSInfo':
if len(args) != 1:
print('SyncOSSInfo requires 1 args')
sys.exit(1)
pp.pprint(client.SyncOSSInfo(args[0],))
elif cmd == 'UpdateSiteVirusStatus':
if len(args) != 1:
print('UpdateSiteVirusStatus requires 1 args')
sys.exit(1)
pp.pprint(client.UpdateSiteVirusStatus(eval(args[0]),))
elif cmd == 'UpdateAllSiteVirusStatus':
if len(args) != 1:
print('UpdateAllSiteVirusStatus requires 1 args')
sys.exit(1)
pp.pprint(client.UpdateAllSiteVirusStatus(eval(args[0]),))
elif cmd == 'GetSiteVirusStatus':
if len(args) != 0:
print('GetSiteVirusStatus requires 0 args')
sys.exit(1)
pp.pprint(client.GetSiteVirusStatus())
else:
print('Unrecognized method %s' % cmd)
sys.exit(1)
transport.close()
| [
"[email protected]"
] | ||
1ed68d4f010fc1efda84c9c1ab660821e4bb0cf4 | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/ashvin/icml2020/hand/pen/gaussian2/mpo1.py | 7ac527427c0596b9005378a26a524b4c5ff45d13 | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,737 | py | """
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
if __name__ == "__main__":
variant = dict(
num_epochs=1001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, ],
max_log_std=0,
min_log_std=-4,
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=True,
alpha=0,
compute_bc=True,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=True,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
awr_use_mle_for_vf=False,
awr_sample_actions=False,
awr_min_q=False,
),
num_exps_per_instance=1,
region='us-west-2',
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
dict(
path="demos/icml2020/hand/pen2.npy",
obs_dict=True,
is_demo=True,
),
dict(
path="demos/icml2020/hand/pen_bc5.npy",
obs_dict=False,
is_demo=False,
train_split=0.9,
),
],
),
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
'env': ["pen-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [50, 100, ],
'trainer_kwargs.use_automatic_entropy_tuning': [False],
# 'policy_kwargs.max_log_std': [0, ],
'policy_kwargs.min_log_std': [-6, ],
'trainer_kwargs.reparam_weight': [0],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [0.0, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, False],
'trainer_kwargs.awr_sample_actions': [True, False],
'trainer_kwargs.awr_min_q': [True, False],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
trainer_kwargs = variant["trainer_kwargs"]
if not (trainer_kwargs["reparam_weight"] == 0 and trainer_kwargs["awr_weight"] == 0 and trainer_kwargs["bc_weight"] == 0):
variants.append(variant)
run_variants(experiment, variants, run_id=0)
| [
"[email protected]"
] | |
955f09efc2087f336d4fb414c067ca7dfe26d279 | 386a5b505d77c9798aaab78495d0f00c349cf660 | /Prognos Project/Working/Latiket Jaronde Git/DJango examples/templatedemo2/testapp/urls.py | 0d1437daf76c61d1a5266b9babab9224eeb06d79 | [] | no_license | namratarane20/MachineLearning | 2da2c87217618d124fd53f607c20641ba44fb0b7 | b561cc74733b655507242cbbf13ea09a2416b9e2 | refs/heads/master | 2023-01-20T18:54:15.662179 | 2020-03-09T14:12:44 | 2020-03-09T14:12:44 | 237,597,461 | 0 | 0 | null | 2023-01-05T12:37:12 | 2020-02-01T10:22:20 | Python | UTF-8 | Python | false | false | 106 | py | from django.urls import path
from . import views
urlpatterns = [
path('greeting/', views.greeting)
]
| [
"[email protected]"
] | |
fdff8ee5169cf72046ed0e39b6a24863d1045a9b | 49ba5356bdc5df7dd9803b56fe507c5164a90716 | /integer-break/solution.py | 856adbbae8712f49e4d57219672d2b09cd793fc3 | [] | no_license | uxlsl/leetcode_practice | d80ad481c9d8ee71cce0f3c66e98446ced149635 | d8ed762d1005975f0de4f07760c9671195621c88 | refs/heads/master | 2021-04-25T18:12:28.136504 | 2020-03-11T07:54:15 | 2020-03-11T07:54:15 | 121,472,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | class Solution:
def integerBreak(self, n):
"""
:type n: int
:rtype: int
"""
        # record[i] holds the best value obtainable from i when i may be
        # used whole inside a larger split (record[1] = 1).
        record = {1: 1}
        for i in range(2, n + 1):
            record[i] = 1
            for j in range(1, (i + 1) // 2 + 1):
                # Split i into j and (i - j); each half may stay whole
                # (j, i - j) or be broken further (record[j], record[i - j]).
                record[i] = max(record[i], record[j] * record[i - j],
                                j * record[i - j], j * (i - j))
        return record[n]
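
# Example: Solution().integerBreak(10) returns 36, since 10 = 3 + 3 + 4 and
# 3 * 3 * 4 = 36.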
| [
"[email protected]"
] | |
74111a48767d4c6fd5bf8a2c3164384a0f6dcd16 | b3b443f0bc49bbb10c26b51fe89e6860d4ca3d3a | /venv/Lib/site-packages/more_itertools/more.py | f07f8c1c0c01ab9f2cf36264f72167daf34872ca | [
"MIT"
] | permissive | naveens33/ctreport-selenium | 6b3a1cc93a6741a1d493c2452c1cf56c6d85c052 | 9553b5c4b8deb52e46cf0fb3e1ea7092028cf090 | refs/heads/master | 2022-12-23T04:55:12.226339 | 2020-08-29T19:22:00 | 2020-08-29T19:22:00 | 228,779,087 | 2 | 2 | MIT | 2022-12-18T22:53:51 | 2019-12-18T07:03:39 | Python | UTF-8 | Python | false | false | 83,966 | py | import warnings
from collections import Counter, defaultdict, deque
from collections.abc import Sequence
from functools import partial, wraps
from heapq import merge
from itertools import (
chain,
compress,
count,
cycle,
dropwhile,
groupby,
islice,
repeat,
starmap,
takewhile,
tee,
zip_longest,
)
from operator import itemgetter, lt, gt, sub
from sys import maxsize
from time import monotonic
from .recipes import consume, flatten, powerset, take, unique_everseen
__all__ = [
'adjacent',
'always_iterable',
'always_reversible',
'bucket',
'chunked',
'circular_shifts',
'collapse',
'collate',
'consecutive_groups',
'consumer',
'count_cycle',
'difference',
'distinct_combinations',
'distinct_permutations',
'distribute',
'divide',
'exactly_n',
'filter_except',
'first',
'groupby_transform',
'ilen',
'interleave_longest',
'interleave',
'intersperse',
'islice_extended',
'iterate',
'ichunked',
'last',
'locate',
'lstrip',
'make_decorator',
'map_except',
'map_reduce',
'numeric_range',
'one',
'only',
'padded',
'partitions',
'set_partitions',
'peekable',
'repeat_last',
'replace',
'rlocate',
'rstrip',
'run_length',
'seekable',
'SequenceView',
'side_effect',
'sliced',
'sort_together',
'split_at',
'split_after',
'split_before',
'split_when',
'split_into',
'spy',
'stagger',
'strip',
'substrings',
'substrings_indexes',
'time_limited',
'unique_to_each',
'unzip',
'windowed',
'with_iter',
'zip_offset',
]
_marker = object()
def chunked(iterable, n):
"""Break *iterable* into lists of length *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6], 3))
[[1, 2, 3], [4, 5, 6]]
If the length of *iterable* is not evenly divisible by *n*, the last
returned list will be shorter:
>>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
[[1, 2, 3], [4, 5, 6], [7, 8]]
To use a fill-in value instead, see the :func:`grouper` recipe.
:func:`chunked` is useful for splitting up a computation on a large number
of keys into batches, to be pickled and sent off to worker processes. One
example is operations on rows in MySQL, which does not implement
server-side cursors properly and would otherwise load the entire dataset
into RAM on the client.
"""
return iter(partial(take, n, iter(iterable)), [])
def first(iterable, default=_marker):
"""Return the first item of *iterable*, or *default* if *iterable* is
empty.
>>> first([0, 1, 2, 3])
0
>>> first([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
:func:`first` is useful when you have a generator of expensive-to-retrieve
values and want any arbitrary one. It is marginally shorter than
``next(iter(iterable), default)``.
"""
try:
return next(iter(iterable))
except StopIteration:
# I'm on the edge about raising ValueError instead of StopIteration. At
# the moment, ValueError wins, because the caller could conceivably
# want to do something different with flow control when I raise the
# exception, and it's weird to explicitly catch StopIteration.
if default is _marker:
raise ValueError(
'first() was called on an empty iterable, and no '
'default value was provided.'
)
return default
def last(iterable, default=_marker):
"""Return the last item of *iterable*, or *default* if *iterable* is
empty.
>>> last([0, 1, 2, 3])
3
>>> last([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
"""
try:
try:
# Try to access the last item directly
return iterable[-1]
except (TypeError, AttributeError, KeyError):
# If not slice-able, iterate entirely using length-1 deque
return deque(iterable, maxlen=1)[0]
except IndexError: # If the iterable was empty
if default is _marker:
raise ValueError(
'last() was called on an empty iterable, and no '
'default value was provided.'
)
return default
class peekable:
"""Wrap an iterator to allow lookahead and prepending elements.
Call :meth:`peek` on the result to get the value that will be returned
by :func:`next`. This won't advance the iterator:
>>> p = peekable(['a', 'b'])
>>> p.peek()
'a'
>>> next(p)
'a'
Pass :meth:`peek` a default value to return that instead of raising
``StopIteration`` when the iterator is exhausted.
>>> p = peekable([])
>>> p.peek('hi')
'hi'
peekables also offer a :meth:`prepend` method, which "inserts" items
at the head of the iterable:
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> p.peek()
11
>>> list(p)
[11, 12, 1, 2, 3]
peekables can be indexed. Index 0 is the item that will be returned by
:func:`next`, index 1 is the item after that, and so on:
The values up to the given index will be cached.
>>> p = peekable(['a', 'b', 'c', 'd'])
>>> p[0]
'a'
>>> p[1]
'b'
>>> next(p)
'a'
Negative indexes are supported, but be aware that they will cache the
remaining items in the source iterator, which may require significant
storage.
To check whether a peekable is exhausted, check its truth value:
>>> p = peekable(['a', 'b'])
>>> if p: # peekable has items
... list(p)
['a', 'b']
    >>> if not p: # peekable is exhausted
... list(p)
[]
"""
def __init__(self, iterable):
self._it = iter(iterable)
self._cache = deque()
def __iter__(self):
return self
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
return True
def peek(self, default=_marker):
"""Return the item that will be next returned from ``next()``.
Return ``default`` if there are no items left. If ``default`` is not
provided, raise ``StopIteration``.
"""
if not self._cache:
try:
self._cache.append(next(self._it))
except StopIteration:
if default is _marker:
raise
return default
return self._cache[0]
def prepend(self, *items):
"""Stack up items to be the next ones returned from ``next()`` or
``self.peek()``. The items will be returned in
first in, first out order::
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> list(p)
[11, 12, 1, 2, 3]
It is possible, by prepending items, to "resurrect" a peekable that
previously raised ``StopIteration``.
>>> p = peekable([])
>>> next(p)
Traceback (most recent call last):
...
StopIteration
>>> p.prepend(1)
>>> next(p)
1
>>> next(p)
Traceback (most recent call last):
...
StopIteration
"""
self._cache.extendleft(reversed(items))
def __next__(self):
if self._cache:
return self._cache.popleft()
return next(self._it)
def _get_slice(self, index):
# Normalize the slice's arguments
step = 1 if (index.step is None) else index.step
if step > 0:
start = 0 if (index.start is None) else index.start
stop = maxsize if (index.stop is None) else index.stop
elif step < 0:
start = -1 if (index.start is None) else index.start
stop = (-maxsize - 1) if (index.stop is None) else index.stop
else:
raise ValueError('slice step cannot be zero')
# If either the start or stop index is negative, we'll need to cache
# the rest of the iterable in order to slice from the right side.
if (start < 0) or (stop < 0):
self._cache.extend(self._it)
# Otherwise we'll need to find the rightmost index and cache to that
# point.
else:
n = min(max(start, stop) + 1, maxsize)
cache_len = len(self._cache)
if n >= cache_len:
self._cache.extend(islice(self._it, n - cache_len))
return list(self._cache)[index]
def __getitem__(self, index):
if isinstance(index, slice):
return self._get_slice(index)
cache_len = len(self._cache)
if index < 0:
self._cache.extend(self._it)
elif index >= cache_len:
self._cache.extend(islice(self._it, index + 1 - cache_len))
return self._cache[index]
def collate(*iterables, **kwargs):
"""Return a sorted merge of the items from each of several already-sorted
*iterables*.
>>> list(collate('ACDZ', 'AZ', 'JKL'))
['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
Works lazily, keeping only the next value from each iterable in memory. Use
    :func:`collate` to, for example, perform an n-way mergesort of items that
don't fit in memory.
If a *key* function is specified, the iterables will be sorted according
to its result:
>>> key = lambda s: int(s) # Sort by numeric value, not by string
>>> list(collate(['1', '10'], ['2', '11'], key=key))
['1', '2', '10', '11']
If the *iterables* are sorted in descending order, set *reverse* to
``True``:
>>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
[5, 4, 3, 2, 1, 0]
If the elements of the passed-in iterables are out of order, you might get
unexpected results.
On Python 3.5+, this function is an alias for :func:`heapq.merge`.
"""
warnings.warn(
"collate is no longer part of more_itertools, use heapq.merge",
DeprecationWarning,
)
return merge(*iterables, **kwargs)
def consumer(func):
"""Decorator that automatically advances a PEP-342-style "reverse iterator"
to its first yield point so you don't have to call ``next()`` on it
manually.
>>> @consumer
... def tally():
... i = 0
... while True:
... print('Thing number %s is %s.' % (i, (yield)))
... i += 1
...
>>> t = tally()
>>> t.send('red')
Thing number 0 is red.
>>> t.send('fish')
Thing number 1 is fish.
Without the decorator, you would have to call ``next(t)`` before
``t.send()`` could be used.
"""
@wraps(func)
def wrapper(*args, **kwargs):
gen = func(*args, **kwargs)
next(gen)
return gen
return wrapper
def ilen(iterable):
"""Return the number of items in *iterable*.
>>> ilen(x for x in range(1000000) if x % 3 == 0)
333334
This consumes the iterable, so handle with care.
"""
# This approach was selected because benchmarks showed it's likely the
# fastest of the known implementations at the time of writing.
# See GitHub tracker: #236, #230.
counter = count()
deque(zip(iterable, counter), maxlen=0)
return next(counter)
def iterate(func, start):
"""Return ``start``, ``func(start)``, ``func(func(start))``, ...
>>> from itertools import islice
>>> list(islice(iterate(lambda x: 2*x, 1), 10))
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
"""
while True:
yield start
start = func(start)
def with_iter(context_manager):
"""Wrap an iterable in a ``with`` statement, so it closes once exhausted.
For example, this will close the file when the iterator is exhausted::
upper_lines = (line.upper() for line in with_iter(open('foo')))
Any context manager which returns an iterable is a candidate for
``with_iter``.
"""
with context_manager as iterable:
yield from iterable
def one(iterable, too_short=None, too_long=None):
"""Return the first item from *iterable*, which is expected to contain only
that item. Raise an exception if *iterable* is empty or has more than one
item.
:func:`one` is useful for ensuring that an iterable contains only one item.
For example, it can be used to retrieve the result of a database query
that is expected to return a single row.
If *iterable* is empty, ``ValueError`` will be raised. You may specify a
different exception with the *too_short* keyword:
>>> it = []
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
    ValueError: too few items in iterable (expected 1)
>>> too_short = IndexError('too few items')
>>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
IndexError: too few items
Similarly, if *iterable* contains more than one item, ``ValueError`` will
be raised. You may specify a different exception with the *too_long*
keyword:
>>> it = ['too', 'many']
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 'too',
'many', and perhaps more.
>>> too_long = RuntimeError
>>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
Note that :func:`one` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check iterable
contents less destructively.
"""
it = iter(iterable)
try:
first_value = next(it)
except StopIteration:
raise too_short or ValueError('too few items in iterable (expected 1)')
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
def distinct_permutations(iterable):
"""Yield successive distinct permutations of the elements in *iterable*.
>>> sorted(distinct_permutations([1, 0, 1]))
[(0, 1, 1), (1, 0, 1), (1, 1, 0)]
Equivalent to ``set(permutations(iterable))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
Duplicate permutations arise when there are duplicated elements in the
input iterable. The number of items returned is
    `n! / (x_1! * x_2! * ... * x_k!)`, where `n` is the total number of
    items input and each `x_i` is the count of one of the `k` distinct items
    in the input sequence.
"""
def make_new_permutations(pool, e):
"""Internal helper function.
The output permutations are built up by adding element *e* to the
current *permutations* at every possible position.
The key idea is to keep repeated elements (reverse) ordered:
if e1 == e2 and e1 is before e2 in the iterable, then all permutations
with e1 before e2 are ignored.
"""
for perm in pool:
for j in range(len(perm)):
yield perm[:j] + (e,) + perm[j:]
if perm[j] == e:
break
else:
yield perm + (e,)
permutations = [()]
for e in iterable:
permutations = make_new_permutations(permutations, e)
return (tuple(t) for t in permutations)
def intersperse(e, iterable, n=1):
"""Intersperse filler element *e* among the items in *iterable*, leaving
*n* items between each filler element.
>>> list(intersperse('!', [1, 2, 3, 4, 5]))
[1, '!', 2, '!', 3, '!', 4, '!', 5]
>>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
[1, 2, None, 3, 4, None, 5]
"""
if n == 0:
raise ValueError('n must be > 0')
elif n == 1:
# interleave(repeat(e), iterable) -> e, x_0, e, e, x_1, e, x_2...
# islice(..., 1, None) -> x_0, e, e, x_1, e, x_2...
return islice(interleave(repeat(e), iterable), 1, None)
else:
# interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
# islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
# flatten(...) -> x_0, x_1, e, x_2, x_3...
filler = repeat([e])
chunks = chunked(iterable, n)
return flatten(islice(interleave(filler, chunks), 1, None))
def unique_to_each(*iterables):
"""Return the elements from each of the input iterables that aren't in the
other input iterables.
For example, suppose you have a set of packages, each with a set of
dependencies::
{'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
If you remove one package, which dependencies can also be removed?
If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
``pkg_2``, and ``D`` is only needed for ``pkg_3``::
>>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
[['A'], ['C'], ['D']]
If there are duplicates in one input iterable that aren't in the others
they will be duplicated in the output. Input order is preserved::
>>> unique_to_each("mississippi", "missouri")
[['p', 'p'], ['o', 'u', 'r']]
It is assumed that the elements of each iterable are hashable.
"""
pool = [list(it) for it in iterables]
counts = Counter(chain.from_iterable(map(set, pool)))
uniques = {element for element in counts if counts[element] == 1}
return [list(filter(uniques.__contains__, it)) for it in pool]
def windowed(seq, n, fillvalue=None, step=1):
"""Return a sliding window of width *n* over the given iterable.
>>> all_windows = windowed([1, 2, 3, 4, 5], 3)
>>> list(all_windows)
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
When the window is larger than the iterable, *fillvalue* is used in place
of missing values::
>>> list(windowed([1, 2, 3], 4))
[(1, 2, 3, None)]
Each window will advance in increments of *step*:
>>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
[(1, 2, 3), (3, 4, 5), (5, 6, '!')]
To slide into the iterable's items, use :func:`chain` to add filler items
to the left:
>>> iterable = [1, 2, 3, 4]
>>> n = 3
>>> padding = [None] * (n - 1)
>>> list(windowed(chain(padding, iterable), 3))
[(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
"""
if n < 0:
raise ValueError('n must be >= 0')
if n == 0:
yield tuple()
return
if step < 1:
raise ValueError('step must be >= 1')
it = iter(seq)
window = deque([], n)
append = window.append
# Initial deque fill
for _ in range(n):
append(next(it, fillvalue))
yield tuple(window)
# Appending new items to the right causes old items to fall off the left
i = 0
for item in it:
append(item)
i = (i + 1) % step
if i % step == 0:
yield tuple(window)
# If there are items from the iterable in the window, pad with the given
# value and emit them.
if (i % step) and (step - i < n):
for _ in range(step - i):
append(fillvalue)
yield tuple(window)
def substrings(iterable):
"""Yield all of the substrings of *iterable*.
>>> [''.join(s) for s in substrings('more')]
['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
Note that non-string iterables can also be subdivided.
>>> list(substrings([0, 1, 2]))
[(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
"""
# The length-1 substrings
seq = []
for item in iter(iterable):
seq.append(item)
yield (item,)
seq = tuple(seq)
item_count = len(seq)
# And the rest
for n in range(2, item_count + 1):
for i in range(item_count - n + 1):
yield seq[i : i + n]
def substrings_indexes(seq, reverse=False):
"""Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order.
"""
r = range(1, len(seq) + 1)
if reverse:
r = reversed(r)
return (
(seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
)
class bucket:
"""Wrap *iterable* and return an object that buckets it iterable into
child iterables based on a *key* function.
>>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
>>> s = bucket(iterable, key=lambda x: x[0])
>>> a_iterable = s['a']
>>> next(a_iterable)
'a1'
>>> next(a_iterable)
'a2'
>>> list(s['b'])
['b1', 'b2', 'b3']
The original iterable will be advanced and its items will be cached until
they are used by the child iterables. This may require significant storage.
By default, attempting to select a bucket to which no items belong will
exhaust the iterable and cache all values.
If you specify a *validator* function, selected buckets will instead be
checked against it.
>>> from itertools import count
>>> it = count(1, 2) # Infinite sequence of odd numbers
>>> key = lambda x: x % 10 # Bucket by last digit
>>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
>>> s = bucket(it, key=key, validator=validator)
>>> 2 in s
False
>>> list(s[2])
[]
"""
def __init__(self, iterable, key, validator=None):
self._it = iter(iterable)
self._key = key
self._cache = defaultdict(deque)
self._validator = validator or (lambda x: True)
def __contains__(self, value):
if not self._validator(value):
return False
try:
item = next(self[value])
except StopIteration:
return False
else:
self._cache[value].appendleft(item)
return True
def _get_values(self, value):
"""
Helper to yield items from the parent iterator that match *value*.
Items that don't match are stored in the local cache as they
are encountered.
"""
while True:
# If we've cached some items that match the target value, emit
# the first one and evict it from the cache.
if self._cache[value]:
yield self._cache[value].popleft()
# Otherwise we need to advance the parent iterator to search for
# a matching item, caching the rest.
else:
while True:
try:
item = next(self._it)
except StopIteration:
return
item_value = self._key(item)
if item_value == value:
yield item
break
elif self._validator(item_value):
self._cache[item_value].append(item)
def __getitem__(self, value):
if not self._validator(value):
return iter(())
return self._get_values(value)
def spy(iterable, n=1):
"""Return a 2-tuple with a list containing the first *n* elements of
*iterable*, and an iterator with the same items as *iterable*.
This allows you to "look ahead" at the items in the iterable without
advancing it.
There is one item in the list by default:
>>> iterable = 'abcdefg'
>>> head, iterable = spy(iterable)
>>> head
['a']
>>> list(iterable)
['a', 'b', 'c', 'd', 'e', 'f', 'g']
You may use unpacking to retrieve items instead of lists:
>>> (head,), iterable = spy('abcdefg')
>>> head
'a'
>>> (first, second), iterable = spy('abcdefg', 2)
>>> first
'a'
>>> second
'b'
The number of items requested can be larger than the number of items in
the iterable:
>>> iterable = [1, 2, 3, 4, 5]
>>> head, iterable = spy(iterable, 10)
>>> head
[1, 2, 3, 4, 5]
>>> list(iterable)
[1, 2, 3, 4, 5]
"""
it = iter(iterable)
head = take(n, it)
return head, chain(head, it)
def interleave(*iterables):
"""Return a new iterable yielding from each iterable in turn,
until the shortest is exhausted.
>>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7]
For a version that doesn't terminate after the shortest iterable is
exhausted, see :func:`interleave_longest`.
"""
return chain.from_iterable(zip(*iterables))
def interleave_longest(*iterables):
"""Return a new iterable yielding from each iterable in turn,
skipping any that are exhausted.
>>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7, 3, 8]
This function produces the same output as :func:`roundrobin`, but may
perform better for some inputs (in particular when the number of iterables
is large).
"""
i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
return (x for x in i if x is not _marker)
def collapse(iterable, base_type=None, levels=None):
"""Flatten an iterable with multiple levels of nesting (e.g., a list of
lists of tuples) into non-iterable types.
>>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
>>> list(collapse(iterable))
[1, 2, 3, 4, 5, 6]
Binary and text strings are not considered iterable and
will not be collapsed.
To avoid collapsing other types, specify *base_type*:
>>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
>>> list(collapse(iterable, base_type=tuple))
['ab', ('cd', 'ef'), 'gh', 'ij']
Specify *levels* to stop flattening after a certain level:
>>> iterable = [('a', ['b']), ('c', ['d'])]
>>> list(collapse(iterable)) # Fully flattened
['a', 'b', 'c', 'd']
>>> list(collapse(iterable, levels=1)) # Only one level flattened
['a', ['b'], 'c', ['d']]
"""
def walk(node, level):
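# A node is treated as atomic (yielded as-is) when the level limit has
# been exceeded, when it is a string/bytes (iterating a str yields more
# strs, which would recurse forever), or when it matches *base_type*.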
if (
((levels is not None) and (level > levels))
or isinstance(node, (str, bytes))
or ((base_type is not None) and isinstance(node, base_type))
):
yield node
return
try:
tree = iter(node)
except TypeError:
yield node
return
else:
for child in tree:
yield from walk(child, level + 1)
yield from walk(iterable, 0)
def side_effect(func, iterable, chunk_size=None, before=None, after=None):
"""Invoke *func* on each item in *iterable* (or on each *chunk_size* group
of items) before yielding the item.
`func` must be a function that takes a single argument. Its return value
will be discarded.
*before* and *after* are optional functions that take no arguments. They
will be executed before iteration starts and after it ends, respectively.
`side_effect` can be used for logging, updating progress bars, or anything
that is not functionally "pure."
Emitting a status message:
>>> from more_itertools import consume
>>> func = lambda item: print('Received {}'.format(item))
>>> consume(side_effect(func, range(2)))
Received 0
Received 1
Operating on chunks of items:
>>> pair_sums = []
>>> func = lambda chunk: pair_sums.append(sum(chunk))
>>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
[0, 1, 2, 3, 4, 5]
>>> list(pair_sums)
[1, 5, 9]
Writing to a file-like object:
>>> from io import StringIO
>>> from more_itertools import consume
>>> f = StringIO()
>>> func = lambda x: print(x, file=f)
>>> before = lambda: print(u'HEADER', file=f)
>>> after = f.close
>>> it = [u'a', u'b', u'c']
>>> consume(side_effect(func, it, before=before, after=after))
>>> f.closed
True
"""
try:
if before is not None:
before()
if chunk_size is None:
for item in iterable:
func(item)
yield item
else:
for chunk in chunked(iterable, chunk_size):
func(chunk)
yield from chunk
finally:
if after is not None:
after()
def sliced(seq, n):
"""Yield slices of length *n* from the sequence *seq*.
>>> list(sliced((1, 2, 3, 4, 5, 6), 3))
[(1, 2, 3), (4, 5, 6)]
If the length of the sequence is not divisible by the requested slice
length, the last slice will be shorter.
>>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
[(1, 2, 3), (4, 5, 6), (7, 8)]
This function will only work for iterables that support slicing.
For non-sliceable iterables, see :func:`chunked`.
"""
return takewhile(bool, (seq[i : i + n] for i in count(0, n)))
def split_at(iterable, pred):
"""Yield lists of items from *iterable*, where each list is delimited by
an item where callable *pred* returns ``True``. The lists do not include
the delimiting items.
>>> list(split_at('abcdcba', lambda x: x == 'b'))
[['a'], ['c', 'd', 'c'], ['a']]
>>> list(split_at(range(10), lambda n: n % 2 == 1))
[[0], [2], [4], [6], [8], []]
"""
buf = []
for item in iterable:
if pred(item):
yield buf
buf = []
else:
buf.append(item)
yield buf
def split_before(iterable, pred):
"""Yield lists of items from *iterable*, where each list ends just before
an item for which callable *pred* returns ``True``:
>>> list(split_before('OneTwo', lambda s: s.isupper()))
[['O', 'n', 'e'], ['T', 'w', 'o']]
>>> list(split_before(range(10), lambda n: n % 3 == 0))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
"""
buf = []
for item in iterable:
if pred(item) and buf:
yield buf
buf = []
buf.append(item)
yield buf
def split_after(iterable, pred):
"""Yield lists of items from *iterable*, where each list ends with an
item where callable *pred* returns ``True``:
>>> list(split_after('one1two2', lambda s: s.isdigit()))
[['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
>>> list(split_after(range(10), lambda n: n % 3 == 0))
[[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
"""
buf = []
for item in iterable:
buf.append(item)
if pred(item) and buf:
yield buf
buf = []
if buf:
yield buf
def split_when(iterable, pred):
"""Split *iterable* into pieces based on the output of *pred*.
*pred* should be a function that takes successive pairs of items and
returns ``True`` if the iterable should be split in between them.
For example, to find runs of increasing numbers, split the iterable when
element ``i`` is larger than element ``i + 1``:
>>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
[[1, 2, 3, 3], [2, 5], [2, 4], [2]]
"""
it = iter(iterable)
try:
cur_item = next(it)
except StopIteration:
return
buf = [cur_item]
for next_item in it:
if pred(cur_item, next_item):
yield buf
buf = []
buf.append(next_item)
cur_item = next_item
yield buf
def split_into(iterable, sizes):
"""Yield a list of sequential items from *iterable* of length 'n' for each
integer 'n' in *sizes*.
>>> list(split_into([1,2,3,4,5,6], [1,2,3]))
[[1], [2, 3], [4, 5, 6]]
If the sum of *sizes* is smaller than the length of *iterable*, then the
remaining items of *iterable* will not be returned.
>>> list(split_into([1,2,3,4,5,6], [2,3]))
[[1, 2], [3, 4, 5]]
If the sum of *sizes* is larger than the length of *iterable*, fewer items
will be returned in the iteration that overruns *iterable* and further
lists will be empty:
>>> list(split_into([1,2,3,4], [1,2,3,4]))
[[1], [2, 3], [4], []]
When a ``None`` object is encountered in *sizes*, the returned list will
contain items up to the end of *iterable* the same way that itertools.islice
does:
>>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
[[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
:func:`split_into` can be useful for grouping a series of items where the
sizes of the groups are not uniform. An example would be where in a row
from a table, multiple columns represent elements of the same feature
(e.g., a point represented by x, y, z), but the format is not the same for
all columns.
"""
# convert the iterable argument into an iterator so its contents can
# be consumed by islice in case it is a generator
it = iter(iterable)
for size in sizes:
if size is None:
yield list(it)
return
else:
yield list(islice(it, size))
def padded(iterable, fillvalue=None, n=None, next_multiple=False):
"""Yield the elements from *iterable*, followed by *fillvalue*, such that
at least *n* items are emitted.
>>> list(padded([1, 2, 3], '?', 5))
[1, 2, 3, '?', '?']
If *next_multiple* is ``True``, *fillvalue* will be emitted until the
number of items emitted is a multiple of *n*::
>>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
[1, 2, 3, 4, None, None]
If *n* is ``None``, *fillvalue* will be emitted indefinitely.
"""
it = iter(iterable)
if n is None:
yield from chain(it, repeat(fillvalue))
elif n < 1:
raise ValueError('n must be at least 1')
else:
item_count = 0
for item in it:
yield item
item_count += 1
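# Pad up to the next multiple of n, or up to n total items; range() of a
# negative number is empty, so no padding occurs when enough items came out.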
remaining = (n - item_count) % n if next_multiple else n - item_count
for _ in range(remaining):
yield fillvalue
def repeat_last(iterable, default=None):
"""After the *iterable* is exhausted, keep yielding its last element.
>>> list(islice(repeat_last(range(3)), 5))
[0, 1, 2, 2, 2]
If the iterable is empty, yield *default* forever::
>>> list(islice(repeat_last(range(0), 42), 5))
[42, 42, 42, 42, 42]
"""
item = _marker
for item in iterable:
yield item
final = default if item is _marker else item
yield from repeat(final)
def distribute(n, iterable):
"""Distribute the items from *iterable* among *n* smaller iterables.
>>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 3, 5]
>>> list(group_2)
[2, 4, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 4, 7], [2, 5], [3, 6]]
If the length of *iterable* is smaller than *n*, then the last returned
iterables will be empty:
>>> children = distribute(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function uses :func:`itertools.tee` and may require significant
storage. If you need the order of items in the smaller iterables to match the
original iterable, see :func:`divide`.
"""
if n < 1:
raise ValueError('n must be at least 1')
children = tee(iterable, n)
return [islice(it, index, None, n) for index, it in enumerate(children)]
def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
"""Yield tuples whose elements are offset from *iterable*.
The amount by which the `i`-th item in each tuple is offset is given by
the `i`-th item in *offsets*.
>>> list(stagger([0, 1, 2, 3]))
[(None, 0, 1), (0, 1, 2), (1, 2, 3)]
>>> list(stagger(range(8), offsets=(0, 2, 4)))
[(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
By default, the sequence will end when the final element of a tuple is the
last item in the iterable. To continue until the first element of a tuple
is the last item in the iterable, set *longest* to ``True``::
>>> list(stagger([0, 1, 2, 3], longest=True))
[(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
children = tee(iterable, len(offsets))
return zip_offset(
*children, offsets=offsets, longest=longest, fillvalue=fillvalue
)
def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
"""``zip`` the input *iterables* together, but offset the `i`-th iterable
by the `i`-th item in *offsets*.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
This can be used as a lightweight alternative to SciPy or pandas to analyze
data sets in which some series have a lead or lag relationship.
By default, the sequence will end when the shortest iterable is exhausted.
To continue until the longest iterable is exhausted, set *longest* to
``True``.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
if len(iterables) != len(offsets):
raise ValueError("Number of iterables and offsets didn't match")
staggered = []
for it, n in zip(iterables, offsets):
if n < 0:
staggered.append(chain(repeat(fillvalue, -n), it))
elif n > 0:
staggered.append(islice(it, n, None))
else:
staggered.append(it)
if longest:
return zip_longest(*staggered, fillvalue=fillvalue)
return zip(*staggered)
def sort_together(iterables, key_list=(0,), reverse=False):
"""Return the input iterables sorted together, with *key_list* as the
priority for sorting. All iterables are trimmed to the length of the
shortest one.
This can be used like the sorting function in a spreadsheet. If each
iterable represents a column of data, the key list determines which
columns are used for sorting.
By default, all iterables are sorted using the ``0``-th iterable::
>>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
>>> sort_together(iterables)
[(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
Set a different key list to sort according to another iterable.
Specifying multiple keys dictates how ties are broken::
>>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
>>> sort_together(iterables, key_list=(1, 2))
[(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
Set *reverse* to ``True`` to sort in descending order.
>>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
[(3, 2, 1), ('a', 'b', 'c')]
"""
return list(
zip(
*sorted(
zip(*iterables), key=itemgetter(*key_list), reverse=reverse
)
)
)
def unzip(iterable):
"""The inverse of :func:`zip`, this function disaggregates the elements
of the zipped *iterable*.
The ``i``-th iterable contains the ``i``-th element from each element
of the zipped iterable. The first element is used to determine the
length of the remaining elements.
>>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> letters, numbers = unzip(iterable)
>>> list(letters)
['a', 'b', 'c', 'd']
>>> list(numbers)
[1, 2, 3, 4]
This is similar to using ``zip(*iterable)``, but it avoids reading
*iterable* into memory. Note, however, that this function uses
:func:`itertools.tee` and thus may require significant storage.
"""
head, iterable = spy(iter(iterable))
if not head:
# empty iterable, e.g. zip([], [], [])
return ()
# spy returns a one-length iterable as head
head = head[0]
iterables = tee(iterable, len(head))
def itemgetter(i):
def getter(obj):
try:
return obj[i]
except IndexError:
# basically if we have an iterable like
# iter([(1, 2, 3), (4, 5), (6,)])
# the second unzipped iterable would fail at the third tuple
# since it would try to access tup[1]
# same with the third unzipped iterable and the second tuple
# to support these "improperly zipped" iterables,
# we create a custom itemgetter
# which just stops the unzipped iterables
# at first length mismatch
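# Raising StopIteration from a plain function called by map() simply
# ends the map iterator (PEP 479 only applies inside generators).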
raise StopIteration
return getter
return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
def divide(n, iterable):
"""Divide the elements from *iterable* into *n* parts, maintaining
order.
>>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 2, 3]
>>> list(group_2)
[4, 5, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 2, 3], [4, 5], [6, 7]]
If the length of the iterable is smaller than n, then the last returned
iterables will be empty:
>>> children = divide(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function will exhaust the iterable before returning and may require
significant storage. If order is not important, see :func:`distribute`,
which does not first pull the iterable into memory.
"""
if n < 1:
raise ValueError('n must be at least 1')
seq = tuple(iterable)
q, r = divmod(len(seq), n)
ret = []
for i in range(n):
start = (i * q) + (i if i < r else r)
stop = ((i + 1) * q) + (i + 1 if i + 1 < r else r)
ret.append(iter(seq[start:stop]))
return ret
def always_iterable(obj, base_type=(str, bytes)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,))
def adjacent(predicate, iterable, distance=1):
"""Return an iterable over `(bool, item)` tuples where the `item` is
drawn from *iterable* and the `bool` indicates whether
that item satisfies the *predicate* or is adjacent to an item that does.
For example, to find whether items are adjacent to a ``3``::
>>> list(adjacent(lambda x: x == 3, range(6)))
[(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
Set *distance* to change what counts as adjacent. For example, to find
whether items are two places away from a ``3``:
>>> list(adjacent(lambda x: x == 3, range(6), distance=2))
[(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
This is useful for contextualizing the results of a search function.
For example, a code comparison tool might want to identify lines that
have changed, but also surrounding lines to give the viewer of the diff
context.
The predicate function will only be called once for each item in the
iterable.
See also :func:`groupby_transform`, which can be used with this function
to group ranges of items with the same `bool` value.
"""
# Allow distance=0 mainly for testing that it reproduces results with map()
if distance < 0:
raise ValueError('distance must be at least 0')
i1, i2 = tee(iterable)
padding = [False] * distance
selected = chain(padding, map(predicate, i1), padding)
adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
return zip(adjacent_to_selected, i2)
def groupby_transform(iterable, keyfunc=None, valuefunc=None):
"""An extension of :func:`itertools.groupby` that transforms the values of
*iterable* after grouping them.
*keyfunc* is a function used to compute a grouping key for each item.
*valuefunc* is a function for transforming the items after grouping.
>>> iterable = 'AaaABbBCcA'
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: x.lower()
>>> grouper = groupby_transform(iterable, keyfunc, valuefunc)
>>> [(k, ''.join(g)) for k, g in grouper]
[('A', 'aaaa'), ('B', 'bbb'), ('C', 'cc'), ('A', 'a')]
*keyfunc* and *valuefunc* default to identity functions if they are not
specified.
:func:`groupby_transform` is useful when grouping elements of an iterable
using a separate iterable as the key. To do this, :func:`zip` the iterables
and pass a *keyfunc* that extracts the first element and a *valuefunc*
that extracts the second element::
>>> from operator import itemgetter
>>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
>>> values = 'abcdefghi'
>>> iterable = zip(keys, values)
>>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
>>> [(k, ''.join(g)) for k, g in grouper]
[(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
Note that the order of items in the iterable is significant.
Only adjacent items are grouped together, so if you don't want any
duplicate groups, you should sort the iterable by the key function.
"""
res = groupby(iterable, keyfunc)
return ((k, map(valuefunc, g)) for k, g in res) if valuefunc else res
def numeric_range(*args):
"""An extension of the built-in ``range()`` function whose arguments can
be any orderable numeric type.
With only *stop* specified, *start* defaults to ``0`` and *step*
defaults to ``1``. The output items will match the type of *stop*:
>>> list(numeric_range(3.5))
[0.0, 1.0, 2.0, 3.0]
With only *start* and *stop* specified, *step* defaults to ``1``. The
output items will match the type of *start*:
>>> from decimal import Decimal
>>> start = Decimal('2.1')
>>> stop = Decimal('5.1')
>>> list(numeric_range(start, stop))
[Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
With *start*, *stop*, and *step* specified the output items will match
the type of ``start + step``:
>>> from fractions import Fraction
>>> start = Fraction(1, 2) # Start at 1/2
>>> stop = Fraction(5, 2) # End at 5/2
>>> step = Fraction(1, 2) # Count by 1/2
>>> list(numeric_range(start, stop, step))
[Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
>>> list(numeric_range(3, -1, -1.0))
[3.0, 2.0, 1.0, 0.0]
Be aware of the limitations of floating point numbers; the representation
of the yielded numbers may be surprising.
``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
is a ``datetime.timedelta`` object:
>>> import datetime
>>> start = datetime.datetime(2019, 1, 1)
>>> stop = datetime.datetime(2019, 1, 3)
>>> step = datetime.timedelta(days=1)
>>> items = numeric_range(start, stop, step)
>>> next(items)
datetime.datetime(2019, 1, 1, 0, 0)
>>> next(items)
datetime.datetime(2019, 1, 2, 0, 0)
"""
argc = len(args)
if argc == 1:
stop, = args
start = type(stop)(0)
step = 1
elif argc == 2:
start, stop = args
step = 1
elif argc == 3:
start, stop, step = args
else:
err_msg = 'numeric_range takes at most 3 arguments, got {}'
raise TypeError(err_msg.format(argc))
values = (start + (step * n) for n in count())
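# Computing start + step * n (rather than repeatedly adding step) avoids
# accumulating floating-point error over many iterations.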
zero = type(step)(0)
if step > zero:
return takewhile(partial(gt, stop), values)
elif step < zero:
return takewhile(partial(lt, stop), values)
else:
raise ValueError('numeric_range arg 3 must not be zero')
def count_cycle(iterable, n=None):
"""Cycle through the items from *iterable* up to *n* times, yielding
the number of completed cycles along with each item. If *n* is omitted the
process repeats indefinitely.
>>> list(count_cycle('AB', 3))
[(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
"""
iterable = tuple(iterable)
if not iterable:
return iter(())
counter = count() if n is None else range(n)
return ((i, item) for i in counter for item in iterable)
def locate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(locate([0, 1, 1, 0, 1, 0, 0]))
[1, 2, 4]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item.
>>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
[1, 3]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(locate(iterable, pred=pred, window_size=3))
[1, 5, 9]
Use with :func:`seekable` to find indexes and then retrieve the associated
items:
>>> from itertools import count
>>> from more_itertools import seekable
>>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
>>> it = seekable(source)
>>> pred = lambda x: x > 100
>>> indexes = locate(it, pred=pred)
>>> i = next(indexes)
>>> it.seek(i)
>>> next(it)
106
"""
if window_size is None:
return compress(count(), map(pred, iterable))
if window_size < 1:
raise ValueError('window size must be at least 1')
it = windowed(iterable, window_size, fillvalue=_marker)
return compress(count(), starmap(pred, it))
def lstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the beginning
for which *pred* returns ``True``.
For example, to remove a set of items from the start of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(lstrip(iterable, pred))
[1, 2, None, 3, False, None]
This function is analogous to :func:`str.lstrip`, and is essentially
a wrapper for :func:`itertools.dropwhile`.
"""
return dropwhile(pred, iterable)
def rstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the end
for which *pred* returns ``True``.
For example, to remove a set of items from the end of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(rstrip(iterable, pred))
[None, False, None, 1, 2, None, 3]
This function is analogous to :func:`str.rstrip`.
"""
cache = []
cache_append = cache.append
cache_clear = cache.clear
for x in iterable:
if pred(x):
cache_append(x)
else:
yield from cache
cache_clear()
yield x
def strip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the
beginning and end for which *pred* returns ``True``.
For example, to remove a set of items from both ends of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(strip(iterable, pred))
[1, 2, None, 3]
This function is analogous to :func:`str.strip`.
"""
return rstrip(lstrip(iterable, pred), pred)
def islice_extended(iterable, *args):
"""An extension of :func:`itertools.islice` that supports negative values
for *stop*, *start*, and *step*.
>>> iterable = iter('abcdefgh')
>>> list(islice_extended(iterable, -4, -1))
['e', 'f', 'g']
Slices with negative values require some caching of *iterable*, but this
function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator:
>>> from itertools import count
>>> list(islice_extended(count(), 110, 99, -2))
[110, 108, 106, 104, 102, 100]
"""
s = slice(*args)
start = s.start
stop = s.stop
if s.step == 0:
raise ValueError('step argument must be a non-zero integer or None.')
step = s.step or 1
it = iter(iterable)
if step > 0:
start = 0 if (start is None) else start
if start < 0:
# Consume all but the last -start items
cache = deque(enumerate(it, 1), maxlen=-start)
len_iter = cache[-1][0] if cache else 0
# Adjust start to be positive
i = max(len_iter + start, 0)
# Adjust stop to be positive
if stop is None:
j = len_iter
elif stop >= 0:
j = min(stop, len_iter)
else:
j = max(len_iter + stop, 0)
# Slice the cache
n = j - i
if n <= 0:
return
for index, item in islice(cache, 0, n, step):
yield item
elif (stop is not None) and (stop < 0):
# Advance to the start position
next(islice(it, start, start), None)
# When stop is negative, we have to carry -stop items while
# iterating
cache = deque(islice(it, -stop), maxlen=-stop)
for index, item in enumerate(it):
cached_item = cache.popleft()
if index % step == 0:
yield cached_item
cache.append(item)
else:
# When both start and stop are positive we have the normal case
yield from islice(it, start, stop, step)
else:
start = -1 if (start is None) else start
if (stop is not None) and (stop < 0):
# Consume all but the last items
n = -stop - 1
cache = deque(enumerate(it, 1), maxlen=n)
len_iter = cache[-1][0] if cache else 0
# If start and stop are both negative they are comparable and
# we can just slice. Otherwise we can adjust start to be negative
# and then slice.
if start < 0:
i, j = start, stop
else:
i, j = min(start - len_iter, -1), None
for index, item in list(cache)[i:j:step]:
yield item
else:
# Advance to the stop position
if stop is not None:
m = stop + 1
next(islice(it, m, m), None)
# stop is positive, so if start is negative they are not comparable
# and we need the rest of the items.
if start < 0:
i = start
n = None
# stop is None and start is positive, so we just need items up to
# the start index.
elif stop is None:
i = None
n = start + 1
# Both stop and start are positive, so they are comparable.
else:
i = None
n = start - stop
if n <= 0:
return
cache = list(islice(it, n))
yield from cache[i::step]
def always_reversible(iterable):
"""An extension of :func:`reversed` that supports all iterables, not
just those which implement the ``Reversible`` or ``Sequence`` protocols.
>>> print(*always_reversible(x for x in range(3)))
2 1 0
If the iterable is already reversible, this function returns the
result of :func:`reversed()`. If the iterable is not reversible,
this function will cache the remaining items in the iterable and
yield them in reverse order, which may require significant storage.
"""
try:
return reversed(iterable)
except TypeError:
return reversed(list(iterable))
def consecutive_groups(iterable, ordering=lambda x: x):
"""Yield groups of consecutive items using :func:`itertools.groupby`.
The *ordering* function determines whether two items are adjacent by
returning their position.
By default, the ordering function is the identity function. This is
suitable for finding runs of numbers:
>>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
>>> for group in consecutive_groups(iterable):
... print(list(group))
[1]
[10, 11, 12]
[20]
[30, 31, 32, 33]
[40]
For finding runs of adjacent letters, try using the :meth:`index` method
of a string of letters:
>>> from string import ascii_lowercase
>>> iterable = 'abcdfgilmnop'
>>> ordering = ascii_lowercase.index
>>> for group in consecutive_groups(iterable, ordering):
... print(list(group))
['a', 'b', 'c', 'd']
['f', 'g']
['i']
['l', 'm', 'n', 'o', 'p']
Each group of consecutive items is an iterator that shares its source with
*iterable*. When an output group is advanced, the previous group is
no longer available unless its elements are copied (e.g., into a ``list``).
>>> iterable = [1, 2, 11, 12, 21, 22]
>>> saved_groups = []
>>> for group in consecutive_groups(iterable):
... saved_groups.append(list(group)) # Copy group elements
>>> saved_groups
[[1, 2], [11, 12], [21, 22]]
"""
for k, g in groupby(
enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
):
yield map(itemgetter(1), g)
def difference(iterable, func=sub, *, initial=None):
"""By default, compute the first difference of *iterable* using
:func:`operator.sub`.
>>> iterable = [0, 1, 3, 6, 10]
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
This is the opposite of :func:`itertools.accumulate`'s default behavior:
>>> from itertools import accumulate
>>> iterable = [0, 1, 2, 3, 4]
>>> list(accumulate(iterable))
[0, 1, 3, 6, 10]
>>> list(difference(accumulate(iterable)))
[0, 1, 2, 3, 4]
By default *func* is :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120] # Factorial sequence
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
Since Python 3.8, :func:`itertools.accumulate` can be supplied with an
*initial* keyword argument. If :func:`difference` is called with *initial*
set to something other than ``None``, it will skip the first element when
computing successive differences.
>>> iterable = [100, 101, 103, 106] # accumulate([1, 2, 3], initial=100)
>>> list(difference(iterable, initial=100))
[1, 2, 3]
"""
a, b = tee(iterable)
try:
first = [next(b)]
except StopIteration:
return iter([])
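# When *initial* was used with accumulate, the first element of *iterable*
# corresponds to the initial value, so drop it rather than echoing it.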
if initial is not None:
first = []
return chain(first, starmap(func, zip(b, a)))
class SequenceView(Sequence):
"""Return a read-only view of the sequence object *target*.
:class:`SequenceView` objects are analogous to Python's built-in
"dictionary view" types. They provide a dynamic view of a sequence's items,
meaning that when the sequence updates, so does the view.
>>> seq = ['0', '1', '2']
>>> view = SequenceView(seq)
>>> view
SequenceView(['0', '1', '2'])
>>> seq.append('3')
>>> view
SequenceView(['0', '1', '2', '3'])
Sequence views support indexing, slicing, and length queries. They act
like the underlying sequence, except they don't allow assignment:
>>> view[1]
'1'
>>> view[1:-1]
['1', '2']
>>> len(view)
4
Sequence views are useful as an alternative to copying, as they don't
require (much) extra storage.
"""
def __init__(self, target):
if not isinstance(target, Sequence):
raise TypeError
self._target = target
def __getitem__(self, index):
return self._target[index]
def __len__(self):
return len(self._target)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, repr(self._target))
class seekable:
"""Wrap an iterator to allow for seeking backward and forward. This
progressively caches the items in the source iterable so they can be
re-visited.
Call :meth:`seek` with an index to seek to that position in the source
iterable.
To "reset" an iterator, seek to ``0``:
>>> from itertools import count
>>> it = seekable((str(n) for n in count()))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> it.seek(0)
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> next(it)
'3'
You can also seek forward:
>>> it = seekable((str(n) for n in range(20)))
>>> it.seek(10)
>>> next(it)
'10'
>>> it.seek(20) # Seeking past the end of the source isn't a problem
>>> list(it)
[]
>>> it.seek(0) # Resetting works even after hitting the end
>>> next(it), next(it), next(it)
('0', '1', '2')
The cache grows as the source iterable progresses, so beware of wrapping
very large or infinite iterables.
You may view the contents of the cache with the :meth:`elements` method.
That returns a :class:`SequenceView`, a view that updates automatically:
>>> it = seekable((str(n) for n in range(10)))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> elements = it.elements()
>>> elements
SequenceView(['0', '1', '2'])
>>> next(it)
'3'
>>> elements
SequenceView(['0', '1', '2', '3'])
"""
def __init__(self, iterable):
self._source = iter(iterable)
self._cache = []
self._index = None
def __iter__(self):
return self
def __next__(self):
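# While seeking, serve items from the cache; once the cache is exhausted,
# fall through to the underlying source and keep caching new items.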
if self._index is not None:
try:
item = self._cache[self._index]
except IndexError:
self._index = None
else:
self._index += 1
return item
item = next(self._source)
self._cache.append(item)
return item
def elements(self):
return SequenceView(self._cache)
def seek(self, index):
self._index = index
remainder = index - len(self._cache)
if remainder > 0:
consume(self, remainder)
class run_length:
"""
:func:`run_length.encode` compresses an iterable with run-length encoding.
It yields groups of repeated items with the count of how many times they
were repeated:
>>> uncompressed = 'abbcccdddd'
>>> list(run_length.encode(uncompressed))
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
:func:`run_length.decode` decompresses an iterable that was previously
compressed with run-length encoding. It yields the items of the
decompressed iterable:
>>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> list(run_length.decode(compressed))
['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
"""
@staticmethod
def encode(iterable):
return ((k, ilen(g)) for k, g in groupby(iterable))
@staticmethod
def decode(iterable):
return chain.from_iterable(repeat(k, n) for k, n in iterable)
def exactly_n(iterable, n, predicate=bool):
"""Return ``True`` if exactly ``n`` items in the iterable are ``True``
according to the *predicate* function.
>>> exactly_n([True, True, False], 2)
True
>>> exactly_n([True, True, False], 1)
False
>>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
True
The iterable will be advanced until ``n + 1`` truthy items are encountered,
so avoid calling it on infinite iterables.
"""
return len(take(n + 1, filter(predicate, iterable))) == n
def circular_shifts(iterable):
"""Return a list of circular shifts of *iterable*.
>>> circular_shifts(range(4))
[(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
"""
lst = list(iterable)
return take(len(lst), windowed(cycle(lst), len(lst)))
def make_decorator(wrapping_func, result_index=0):
"""Return a decorator version of *wrapping_func*, which is a function that
modifies an iterable. *result_index* is the position in that function's
signature where the iterable goes.
This lets you use itertools on the "production end," i.e. at function
definition. This can augment what the function returns without changing the
function's code.
For example, to produce a decorator version of :func:`chunked`:
>>> from more_itertools import chunked
>>> chunker = make_decorator(chunked, result_index=0)
>>> @chunker(3)
... def iter_range(n):
... return iter(range(n))
...
>>> list(iter_range(9))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
To only allow truthy items to be returned:
>>> truth_serum = make_decorator(filter, result_index=1)
>>> @truth_serum(bool)
... def boolean_test():
... return [0, 1, '', ' ', False, True]
...
>>> list(boolean_test())
[1, ' ', True]
The :func:`peekable` and :func:`seekable` wrappers make for practical
decorators:
>>> from more_itertools import peekable
>>> peekable_function = make_decorator(peekable)
>>> @peekable_function()
... def str_range(*args):
... return (str(x) for x in range(*args))
...
>>> it = str_range(1, 20, 2)
>>> next(it), next(it), next(it)
('1', '3', '5')
>>> it.peek()
'7'
>>> next(it)
'7'
"""
# See https://sites.google.com/site/bbayles/index/decorator_factory for
# notes on how this works.
def decorator(*wrapping_args, **wrapping_kwargs):
def outer_wrapper(f):
def inner_wrapper(*args, **kwargs):
result = f(*args, **kwargs)
wrapping_args_ = list(wrapping_args)
wrapping_args_.insert(result_index, result)
return wrapping_func(*wrapping_args_, **wrapping_kwargs)
return inner_wrapper
return outer_wrapper
return decorator
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
"""Return a dictionary that maps the items in *iterable* to categories
defined by *keyfunc*, transforms them with *valuefunc*, and
then summarizes them by category with *reducefunc*.
*valuefunc* defaults to the identity function if it is unspecified.
If *reducefunc* is unspecified, no summarization takes place:
>>> keyfunc = lambda x: x.upper()
>>> result = map_reduce('abbccc', keyfunc)
>>> sorted(result.items())
[('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
Specifying *valuefunc* transforms the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> result = map_reduce('abbccc', keyfunc, valuefunc)
>>> sorted(result.items())
[('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
Specifying *reducefunc* summarizes the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> reducefunc = sum
>>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
>>> sorted(result.items())
[('A', 1), ('B', 2), ('C', 3)]
You may want to filter the input iterable before applying the map/reduce
procedure:
>>> all_items = range(30)
>>> items = [x for x in all_items if 10 <= x <= 20] # Filter
>>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
>>> categories = map_reduce(items, keyfunc=keyfunc)
>>> sorted(categories.items())
[(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
>>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
>>> sorted(summaries.items())
[(0, 90), (1, 75)]
Note that all items in the iterable are gathered into a list before the
summarization step, which may require significant storage.
The returned object is a :obj:`collections.defaultdict` with the
``default_factory`` set to ``None``, such that it behaves like a normal
dictionary.
"""
valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
ret = defaultdict(list)
for item in iterable:
key = keyfunc(item)
value = valuefunc(item)
ret[key].append(value)
if reducefunc is not None:
for key, value_list in ret.items():
ret[key] = reducefunc(value_list)
ret.default_factory = None
return ret
def rlocate(iterable, pred=bool, window_size=None):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``, starting from the right and moving left.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
[4, 2, 1]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item:
>>> iterable = iter('abcb')
>>> pred = lambda x: x == 'b'
>>> list(rlocate(iterable, pred))
[3, 1]
If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> pred = lambda *args: args == (1, 2, 3)
>>> list(rlocate(iterable, pred=pred, window_size=3))
[9, 5, 1]
Beware, this function won't return anything for infinite iterables.
If *iterable* is reversible, ``rlocate`` will reverse it and search from
the right. Otherwise, it will search from the left and return the results
in reverse order.
See :func:`locate` for other example applications.
"""
if window_size is None:
try:
len_iter = len(iterable)
return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
except TypeError:
pass
return reversed(list(locate(iterable, pred, window_size)))
def replace(iterable, pred, substitutes, count=None, window_size=1):
"""Yield the items from *iterable*, replacing the items for which *pred*
returns ``True`` with the items from the iterable *substitutes*.
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
>>> pred = lambda x: x == 0
>>> substitutes = (2, 3)
>>> list(replace(iterable, pred, substitutes))
[1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
If *count* is given, the number of replacements will be limited:
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
>>> pred = lambda x: x == 0
>>> substitutes = [None]
>>> list(replace(iterable, pred, substitutes, count=2))
[1, 1, None, 1, 1, None, 1, 1, 0]
Use *window_size* to control the number of items passed as arguments to
*pred*. This allows for locating and replacing subsequences.
>>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
>>> window_size = 3
>>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
>>> substitutes = [3, 4] # Splice in these items
>>> list(replace(iterable, pred, substitutes, window_size=window_size))
[3, 4, 5, 3, 4, 5]
"""
if window_size < 1:
raise ValueError('window_size must be at least 1')
# Save the substitutes iterable, since it's used more than once
substitutes = tuple(substitutes)
# Add padding such that the number of windows matches the length of the
# iterable
it = chain(iterable, [_marker] * (window_size - 1))
windows = windowed(it, window_size)
n = 0
for w in windows:
# If the current window matches our predicate (and we haven't hit
# our maximum number of replacements), splice in the substitutes
# and then consume the following windows that overlap with this one.
# For example, if the iterable is (0, 1, 2, 3, 4...)
# and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
# If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
if pred(*w):
if (count is None) or (n < count):
n += 1
yield from substitutes
consume(windows, window_size - 1)
continue
# If there was no match (or we've reached the replacement limit),
# yield the first item from the window.
if w and (w[0] is not _marker):
yield w[0]
def partitions(iterable):
"""Yield all possible order-perserving partitions of *iterable*.
>>> iterable = 'abc'
>>> for part in partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['a', 'b', 'c']
This is unrelated to :func:`partition`.
"""
sequence = list(iterable)
n = len(sequence)
for i in powerset(range(1, n)):
yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
def set_partitions(iterable, k=None):
"""
Yield the set partitions of *iterable* into *k* parts. Set partitions are
not order-preserving.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable, 2):
... print([''.join(p) for p in part])
['a', 'bc']
['ab', 'c']
['b', 'ac']
If *k* is not given, every set partition is generated.
>>> iterable = 'abc'
>>> for part in set_partitions(iterable):
... print([''.join(p) for p in part])
['abc']
['a', 'bc']
['ab', 'c']
['b', 'ac']
['a', 'b', 'c']
"""
L = list(iterable)
n = len(L)
if k is not None:
if k < 1:
raise ValueError(
"Can't partition in a negative or zero number of groups"
)
elif k > n:
return
def set_partitions_helper(L, k):
n = len(L)
if k == 1:
yield [L]
elif n == k:
yield [[s] for s in L]
else:
e, *M = L
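# Recurrence: the first element e either forms its own block next to a
# (k - 1)-partition of the rest, or joins one of the blocks of a
# k-partition of the rest.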
for p in set_partitions_helper(M, k - 1):
yield [[e], *p]
for p in set_partitions_helper(M, k):
for i in range(len(p)):
yield p[:i] + [[e] + p[i]] + p[i + 1 :]
if k is None:
for k in range(1, n + 1):
yield from set_partitions_helper(L, k)
else:
yield from set_partitions_helper(L, k)
def time_limited(limit_seconds, iterable):
"""
Yield items from *iterable* until *limit_seconds* have passed.
>>> from time import sleep
>>> def generator():
... yield 1
... yield 2
... sleep(0.2)
... yield 3
>>> iterable = generator()
>>> list(time_limited(0.1, iterable))
[1, 2]
Note that the time is checked before each item is yielded, and iteration
stops if the time elapsed is greater than *limit_seconds*. If your time
limit is 1 second, but it takes 2 seconds to generate the first item from
the iterable, the function will run for 2 seconds and not yield anything.
"""
if limit_seconds < 0:
raise ValueError('limit_seconds must be non-negative')
start_time = monotonic()
for item in iterable:
if monotonic() - start_time > limit_seconds:
break
yield item
def only(iterable, default=None, too_long=None):
"""If *iterable* has only one item, return it.
If it has zero items, return *default*.
If it has more than one item, raise the exception given by *too_long*,
which is ``ValueError`` by default.
>>> only([], default='missing')
'missing'
>>> only([1])
1
>>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 1, 2,
and perhaps more.
>>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError
Note that :func:`only` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check
iterable contents less destructively.
"""
it = iter(iterable)
first_value = next(it, default)
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
def ichunked(iterable, n):
"""Break *iterable* into sub-iterables with *n* elements each.
:func:`ichunked` is like :func:`chunked`, but it yields iterables
instead of lists.
If the sub-iterables are read in order, the elements of *iterable*
won't be stored in memory.
If they are read out of order, :func:`itertools.tee` is used to cache
elements as necessary.
>>> from itertools import count
>>> all_chunks = ichunked(count(), 4)
>>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
>>> list(c_2) # c_1's elements have been cached; c_3's haven't been
[4, 5, 6, 7]
>>> list(c_1)
[0, 1, 2, 3]
>>> list(c_3)
[8, 9, 10, 11]
"""
source = iter(iterable)
while True:
# Check to see whether we're at the end of the source iterable
item = next(source, _marker)
if item is _marker:
return
# Clone the source and yield an n-length slice
source, it = tee(chain([item], source))
yield islice(it, n)
# Advance the source iterable
consume(source, n)
def distinct_combinations(iterable, r):
"""Yield the distinct combinations of *r* items taken from *iterable*.
>>> list(distinct_combinations([0, 0, 1], 2))
[(0, 0), (0, 1)]
Equivalent to ``set(combinations(iterable, r))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
"""
if r < 0:
raise ValueError('r must be non-negative')
elif r == 0:
yield ()
else:
pool = tuple(iterable)
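# unique_everseen keyed on the item value tries each distinct prefix once;
# recursing on the pool past index i keeps the combinations ordered.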
for i, prefix in unique_everseen(enumerate(pool), key=itemgetter(1)):
for suffix in distinct_combinations(pool[i + 1 :], r - 1):
yield (prefix,) + suffix
def filter_except(validator, iterable, *exceptions):
"""Yield the items from *iterable* for which the *validator* function does
not raise one of the specified *exceptions*.
*validator* is called for each item in *iterable*.
It should be a function that accepts one argument and raises an exception
if that item is not valid.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(filter_except(int, iterable, ValueError, TypeError))
['1', '2', '4']
If an exception other than one given by *exceptions* is raised by
*validator*, it is raised like normal.
"""
exceptions = tuple(exceptions)
for item in iterable:
try:
validator(item)
except exceptions:
pass
else:
yield item
def map_except(function, iterable, *exceptions):
"""Transform each item from *iterable* with *function* and yield the
result, unless *function* raises one of the specified *exceptions*.
*function* is called to transform each item in *iterable*.
It should accept one argument.
>>> iterable = ['1', '2', 'three', '4', None]
>>> list(map_except(int, iterable, ValueError, TypeError))
[1, 2, 4]
If an exception other than one given by *exceptions* is raised by
*function*, it is raised like normal.
"""
exceptions = tuple(exceptions)
for item in iterable:
try:
yield function(item)
except exceptions:
pass
| [
"[email protected]"
] | |
409eab8e92a731bec8c3fbce3195ffe60d53c1e0 | 9625c5665611a5a1e92fa8fbe230ede1154d5a49 | /apps/messenger/migrations/0001_initial.py | c4ef6f5702b59c2470d2ec769a6bf7fa81872b28 | [] | no_license | Alfredynho/Sistema-Venta-de-Motos | 94a6ffcc45409faaea44f89389f89b6b1bfe0905 | 136b6d7c7cbcf4b5432212ae588d47a27fdcb348 | refs/heads/master | 2021-05-15T00:46:55.811827 | 2017-09-10T17:58:58 | 2017-09-10T17:58:58 | 103,049,391 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-18 03:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MessengerInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('messenger_id', models.CharField(blank=True, max_length=255, null=True, verbose_name='Messenger ID')),
('nombre', models.CharField(blank=True, max_length=150, null=True, verbose_name='Nombre')),
('apellido', models.CharField(blank=True, max_length=150, null=True, verbose_name='Apellido')),
('foto_perfil', models.CharField(blank=True, max_length=150, null=True, verbose_name='Foto de Perfil')),
('lugar', models.CharField(blank=True, max_length=150, null=True, verbose_name='Lugar')),
('zona_horaria', models.CharField(blank=True, max_length=150, null=True, verbose_name='Zona Horaria')),
('genero', models.CharField(blank=True, max_length=150, null=True, verbose_name='Género')),
],
options={
'verbose_name_plural': 'Usuarios',
'verbose_name': 'Usuario',
},
),
]
| [
"[email protected]"
] | |
339af08ee79097195e0eb7a1db67191f4741fa12 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_ringers.py | 3df2b9591b997bead80abf3adb13ac12e6619cac | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.nouns._ringer import _RINGER
# class header
class _RINGERS(_RINGER):
def __init__(self,):
_RINGER.__init__(self)
self.name = "RINGERS"
self.specie = 'nouns'
self.basic = "ringer"
self.jsondata = {}
| [
"[email protected]"
] | |
bafac5b9571935c5109690accb7731b96dd87dab | b449adf6024f393937df5253ed5d955236942370 | /src/model.py | 1d953f67a8d3e0255d1e517c5f686393e6cb474f | [] | no_license | futianfan/HINT | b8b6654483e2a760d2d6ce148e9b8e07dfd20c3c | 8a593f720747a3e9a1343d3f3fb5cf9ae54c7ab7 | refs/heads/main | 2023-01-21T10:54:25.910820 | 2020-11-28T15:19:25 | 2020-11-28T15:19:25 | 316,760,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,629 | py | '''
'''
from sklearn.metrics import roc_auc_score, f1_score, average_precision_score, precision_score, recall_score, accuracy_score
import matplotlib.pyplot as plt
from copy import deepcopy
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from module import Highway, GCN
class Interaction(nn.Sequential):
def __init__(self, molecule_encoder, disease_encoder, protocol_encoder,
global_embed_size,
highway_num_layer,
prefix_name,
epoch = 20,
lr = 3e-4,
weight_decay = 0,
):
super(Interaction, self).__init__()
self.molecule_encoder = molecule_encoder
self.disease_encoder = disease_encoder
self.protocol_encoder = protocol_encoder
self.global_embed_size = global_embed_size
self.highway_num_layer = highway_num_layer
self.feature_dim = self.molecule_encoder.embedding_size + self.disease_encoder.embedding_size + self.protocol_encoder.embedding_size
self.epoch = epoch
self.lr = lr
self.weight_decay = weight_decay
self.save_name = prefix_name + '_interaction'
self.f = F.relu
self.loss = nn.BCEWithLogitsLoss()
##### NN
self.encoder2interaction_fc = nn.Linear(self.feature_dim, self.global_embed_size)
self.encoder2interaction_highway = Highway(self.global_embed_size, self.highway_num_layer)
self.pred_nn = nn.Linear(self.global_embed_size, 1)
def feed_lst_of_module(self, input_feature, lst_of_module):
x = input_feature
for single_module in lst_of_module:
x = self.f(single_module(x))
return x
def forward_get_three_encoders(self, smiles_lst2, icdcode_lst3, criteria_lst):
molecule_embed = self.molecule_encoder.forward_smiles_lst_lst(smiles_lst2)
icd_embed = self.disease_encoder.forward_code_lst3(icdcode_lst3)
protocol_embed = self.protocol_encoder.forward(criteria_lst)
return molecule_embed, icd_embed, protocol_embed
def forward_encoder_2_interaction(self, molecule_embed, icd_embed, protocol_embed):
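# Concatenate the three modality embeddings and project them through a
# linear layer plus a highway network to get the joint interaction embedding.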
encoder_embedding = torch.cat([molecule_embed, icd_embed, protocol_embed], 1)
interaction_embedding = self.feed_lst_of_module(encoder_embedding, [self.encoder2interaction_fc, self.encoder2interaction_highway])
return interaction_embedding
def forward(self, smiles_lst2, icdcode_lst3, criteria_lst):
molecule_embed, icd_embed, protocol_embed = self.forward_get_three_encoders(smiles_lst2, icdcode_lst3, criteria_lst)
interaction_embedding = self.forward_encoder_2_interaction(molecule_embed, icd_embed, protocol_embed)
output = self.pred_nn(interaction_embedding)
return output ### 32, 1
def evaluation(self, predict_all, label_all, threshold = 0.5):
import pickle, os
from sklearn.metrics import roc_curve, precision_recall_curve
with open("predict_label.txt", 'w') as fout:
for i,j in zip(predict_all, label_all):
fout.write(str(i)[:4] + '\t' + str(j)[:4]+'\n')
auc_score = roc_auc_score(label_all, predict_all)
figure_folder = "figure"
#### ROC-curve
fpr, tpr, thresholds = roc_curve(label_all, predict_all, pos_label=1)
roc_figure = plt.figure()  # renamed to avoid shadowing sklearn's roc_curve imported above
plt.plot(fpr,tpr,'-',label=self.save_name + ' ROC Curve ')
plt.legend(fontsize = 15)
#plt.savefig(os.path.join(figure_folder,name+"_roc_curve.png"))
#### PR-curve
pr_figure = plt.figure()  # start a fresh figure so the PR curve isn't drawn over the ROC curve
precision, recall, thresholds = precision_recall_curve(label_all, predict_all)
plt.plot(recall,precision, label = self.save_name + ' PR Curve')
plt.legend(fontsize = 15)
plt.savefig(os.path.join(figure_folder,self.save_name + "_pr_curve.png"))
label_all = [int(i) for i in label_all]
float2binary = lambda x:0 if x<threshold else 1
predict_all = list(map(float2binary, predict_all))
f1score = f1_score(label_all, predict_all)
prauc_score = average_precision_score(label_all, predict_all)
# print(predict_all)
precision = precision_score(label_all, predict_all)
recall = recall_score(label_all, predict_all)
accuracy = accuracy_score(label_all, predict_all)
predict_1_ratio = sum(predict_all) / len(predict_all)
label_1_ratio = sum(label_all) / len(label_all)
return auc_score, f1score, prauc_score, precision, recall, accuracy, predict_1_ratio, label_1_ratio
def testloader_to_lst(self, dataloader):
nctid_lst, label_lst, smiles_lst2, icdcode_lst3, criteria_lst = [], [], [], [], []
for nctid, label, smiles, icdcode, criteria in dataloader:
nctid_lst.extend(nctid)
label_lst.extend([i.item() for i in label])
smiles_lst2.extend(smiles)
icdcode_lst3.extend(icdcode)
criteria_lst.extend(criteria)
length = len(nctid_lst)
assert length == len(smiles_lst2) and length == len(icdcode_lst3)
return nctid_lst, label_lst, smiles_lst2, icdcode_lst3, criteria_lst, length
def generate_predict(self, dataloader):
whole_loss = 0
label_all, predict_all = [], []
for nctid_lst, label_vec, smiles_lst2, icdcode_lst3, criteria_lst in dataloader:
output = self.forward(smiles_lst2, icdcode_lst3, criteria_lst).view(-1)
loss = self.loss(output, label_vec.float())
whole_loss += loss.item()
predict_all.extend([i.item() for i in torch.sigmoid(output)])
label_all.extend([i.item() for i in label_vec])
return whole_loss, predict_all, label_all
def bootstrap_test(self, dataloader, sample_num = 20):
# if validloader is not None:
# best_threshold = self.select_threshold_for_binary(validloader)
self.eval()
best_threshold = 0.5
whole_loss, predict_all, label_all = self.generate_predict(dataloader)
def bootstrap(length, sample_num):
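            # draw sample_num index lists, each sampled with replacement (one bootstrap resample per list)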
idx = [i for i in range(length)]
from random import choices
bootstrap_idx = [choices(idx, k = length) for i in range(sample_num)]
return bootstrap_idx
results_lst = []
bootstrap_idx_lst = bootstrap(len(predict_all), sample_num = sample_num)
for bootstrap_idx in bootstrap_idx_lst:
bootstrap_label = [label_all[idx] for idx in bootstrap_idx]
bootstrap_predict = [predict_all[idx] for idx in bootstrap_idx]
results = self.evaluation(bootstrap_predict, bootstrap_label, threshold = best_threshold)
results_lst.append(results)
auc = [results[0] for results in results_lst]
f1score = [results[1] for results in results_lst]
prauc_score = [results[2] for results in results_lst]
print("prauc_score", np.mean(prauc_score), np.std(prauc_score))
print("f1score", np.mean(f1score), np.std(f1score))
print("auc", np.mean(auc), np.std(auc))
def test(self, dataloader, return_loss = True, validloader=None):
# if validloader is not None:
# best_threshold = self.select_threshold_for_binary(validloader)
self.eval()
best_threshold = 0.5
whole_loss, predict_all, label_all = self.generate_predict(dataloader)
from utils import plot_hist
plt.clf()
prefix_name = "./figure/" + self.save_name
plot_hist(prefix_name, predict_all, label_all)
self.train()
if return_loss:
return whole_loss
else:
print_num = 5
auc_score, f1score, prauc_score, precision, recall, accuracy, \
predict_1_ratio, label_1_ratio = self.evaluation(predict_all, label_all, threshold = best_threshold)
print("ROC AUC: " + str(auc_score)[:print_num] + "\nF1: " + str(f1score)[:print_num] \
+ "\nPR-AUC: " + str(prauc_score)[:print_num] \
+ "\nPrecision: " + str(precision)[:print_num] \
+ "\nrecall: "+str(recall)[:print_num] + "\naccuracy: "+str(accuracy)[:print_num] \
+ "\npredict 1 ratio: " + str(predict_1_ratio)[:print_num] \
+ "\nlabel 1 ratio: " + str(label_1_ratio)[:print_num])
return auc_score, f1score, prauc_score, precision, recall, accuracy, predict_1_ratio, label_1_ratio
def learn(self, train_loader, valid_loader, test_loader):
opt = torch.optim.Adam(self.parameters(), lr = self.lr, weight_decay = self.weight_decay)
train_loss_record = []
valid_loss = self.test(valid_loader, return_loss=True)
valid_loss_record = [valid_loss]
best_valid_loss = valid_loss
best_model = deepcopy(self)
for ep in range(self.epoch):
for nctid_lst, label_vec, smiles_lst2, icdcode_lst3, criteria_lst in train_loader:
output = self.forward(smiles_lst2, icdcode_lst3, criteria_lst).view(-1) #### 32, 1 -> 32, || label_vec 32,
loss = self.loss(output, label_vec.float())
train_loss_record.append(loss.item())
opt.zero_grad()
loss.backward()
opt.step()
valid_loss = self.test(valid_loader, return_loss=True)
valid_loss_record.append(valid_loss)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
best_model = deepcopy(self)
self.plot_learning_curve(train_loss_record, valid_loss_record)
        self.load_state_dict(best_model.state_dict())  # rebinding `self` would be a no-op for callers; load the best weights instead
auc_score, f1score, prauc_score, precision, recall, accuracy, predict_1_ratio, label_1_ratio = self.test(test_loader, return_loss = False, validloader = valid_loader)
def plot_learning_curve(self, train_loss_record, valid_loss_record):
plt.plot(train_loss_record)
plt.savefig("./figure/" + self.save_name + '_train_loss.jpg')
plt.clf()
plt.plot(valid_loss_record)
plt.savefig("./figure/" + self.save_name + '_valid_loss.jpg')
plt.clf()
def select_threshold_for_binary(self, validloader):
_, prediction, label_all = self.generate_predict(validloader)
        best_f1 = 0
        best_threshold = 0.5  # fallback so the return value is always defined
        for threshold in prediction:
            float2binary = lambda x: 0 if x < threshold else 1
            predict_all = list(map(float2binary, prediction))
            f1score = f1_score(label_all, predict_all)  # F1, matching the best_f1 selection below
if f1score > best_f1:
best_f1 = f1score
best_threshold = threshold
return best_threshold
class HINT_nograph(Interaction):
def __init__(self, molecule_encoder, disease_encoder, protocol_encoder,
global_embed_size,
highway_num_layer,
prefix_name,
epoch = 20,
lr = 3e-4,
weight_decay = 0, ):
super(HINT_nograph, self).__init__(molecule_encoder = molecule_encoder,
disease_encoder = disease_encoder,
protocol_encoder = protocol_encoder,
global_embed_size = global_embed_size,
prefix_name = prefix_name,
highway_num_layer = highway_num_layer,
epoch = epoch,
lr = lr,
weight_decay = weight_decay,
)
self.save_name = prefix_name + '_HINT_nograph'
''' ### interaction model
self.molecule_encoder = molecule_encoder
self.disease_encoder = disease_encoder
self.protocol_encoder = protocol_encoder
self.global_embed_size = global_embed_size
self.highway_num_layer = highway_num_layer
self.feature_dim = self.molecule_encoder.embedding_size + self.disease_encoder.embedding_size + self.protocol_encoder.embedding_size
self.epoch = epoch
self.lr = lr
self.weight_decay = weight_decay
self.save_name = save_name
self.f = F.relu
self.loss = nn.BCEWithLogitsLoss()
##### NN
self.encoder2interaction_fc = nn.Linear(self.feature_dim, self.global_embed_size)
self.encoder2interaction_highway = Highway(self.global_embed_size, self.highway_num_layer)
self.pred_nn = nn.Linear(self.global_embed_size, 1)
'''
#### risk of disease
self.risk_disease_fc = nn.Linear(self.disease_encoder.embedding_size, self.global_embed_size)
self.risk_disease_higway = Highway(self.global_embed_size, self.highway_num_layer)
#### augment interaction
self.augment_interaction_fc = nn.Linear(self.global_embed_size*2, self.global_embed_size)
self.augment_interaction_highway = Highway(self.global_embed_size, self.highway_num_layer)
#### ADMET
        # nn.ModuleList registers these sub-modules so their parameters appear in self.parameters()
        self.admet_model = nn.ModuleList()
        for i in range(5):
            admet_fc = nn.Linear(self.molecule_encoder.embedding_size, self.global_embed_size)
            admet_highway = Highway(self.global_embed_size, self.highway_num_layer)
            self.admet_model.append(nn.ModuleList([admet_fc, admet_highway]))
#### PK
self.pk_fc = nn.Linear(self.global_embed_size*5, self.global_embed_size)
self.pk_highway = Highway(self.global_embed_size, self.highway_num_layer)
#### trial node
self.trial_fc = nn.Linear(self.global_embed_size*2, self.global_embed_size)
self.trial_highway = Highway(self.global_embed_size, self.highway_num_layer)
## self.pred_nn = nn.Linear(self.global_embed_size, 1)
def forward(self, smiles_lst2, icdcode_lst3, criteria_lst, if_gnn = False):
### encoder for molecule, disease and protocol
molecule_embed, icd_embed, protocol_embed = self.forward_get_three_encoders(smiles_lst2, icdcode_lst3, criteria_lst)
### interaction
interaction_embedding = self.forward_encoder_2_interaction(molecule_embed, icd_embed, protocol_embed)
### risk of disease
risk_of_disease_embedding = self.feed_lst_of_module(input_feature = icd_embed,
lst_of_module = [self.risk_disease_fc, self.risk_disease_higway])
### augment interaction
augment_interaction_input = torch.cat([interaction_embedding, risk_of_disease_embedding], 1)
augment_interaction_embedding = self.feed_lst_of_module(input_feature = augment_interaction_input,
lst_of_module = [self.augment_interaction_fc, self.augment_interaction_highway])
### admet
admet_embedding_lst = []
for idx in range(5):
admet_embedding = self.feed_lst_of_module(input_feature = molecule_embed,
lst_of_module = self.admet_model[idx])
admet_embedding_lst.append(admet_embedding)
### pk
pk_input = torch.cat(admet_embedding_lst, 1)
pk_embedding = self.feed_lst_of_module(input_feature = pk_input,
lst_of_module = [self.pk_fc, self.pk_highway])
### trial
trial_input = torch.cat([pk_embedding, augment_interaction_embedding], 1)
trial_embedding = self.feed_lst_of_module(input_feature = trial_input,
lst_of_module = [self.trial_fc, self.trial_highway])
output = self.pred_nn(trial_embedding)
if if_gnn == False:
return output
else:
embedding_lst = [molecule_embed, icd_embed, protocol_embed, interaction_embedding, risk_of_disease_embedding, \
augment_interaction_embedding] + admet_embedding_lst + [pk_embedding, trial_embedding]
return embedding_lst
class HINT(HINT_nograph):
def __init__(self, molecule_encoder, disease_encoder, protocol_encoder,
global_embed_size,
highway_num_layer,
prefix_name,
gnn_hidden_size,
epoch = 20,
lr = 3e-4,
weight_decay = 0,):
super(HINT, self).__init__(molecule_encoder = molecule_encoder,
disease_encoder = disease_encoder,
protocol_encoder = protocol_encoder,
prefix_name = prefix_name,
global_embed_size = global_embed_size,
highway_num_layer = highway_num_layer,
epoch = epoch,
lr = lr,
weight_decay = weight_decay)
self.save_name = prefix_name + '_HINT'
self.gnn_hidden_size = gnn_hidden_size
#### GNN
self.adj = self.generate_adj()
self.gnn = GCN(
nfeat = self.global_embed_size,
nhid = self.gnn_hidden_size,
nclass = 1,
dropout = 0.6,
init = 'uniform')
### gnn's attention
self.node_size = self.adj.shape[0]
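        # caveat: modules stored in this nested plain list are not registered with the
        # parent nn.Module, so their parameters are invisible to self.parameters()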
self.graph_attention_model_mat = [[self.gnn_attention() \
if self.adj[i,j]==1 else None \
for j in range(self.node_size)] \
for i in range(self.node_size)]
def generate_adj(self):
##### consistent with HINT_nograph.forward
lst = ["molecule", "disease", "criteria", 'INTERACTION', 'risk_disease', 'augment_interaction', 'A', 'D', 'M', 'E', 'T', 'PK', "final"]
edge_lst = [("disease", "molecule"), ("disease", "criteria"), ("molecule", "criteria"),
("disease", "INTERACTION"), ("molecule", "INTERACTION"), ("criteria", "INTERACTION"),
("disease", "risk_disease"), ('risk_disease', 'augment_interaction'), ('INTERACTION', 'augment_interaction'),
("molecule", "A"), ("molecule", "D"), ("molecule", "M"), ("molecule", "E"), ("molecule", "T"),
('A', 'PK'), ('D', 'PK'), ('M', 'PK'), ('E', 'PK'), ('T', 'PK'),
('augment_interaction', 'final'), ('PK', 'final')]
        # weighted self-loops (node count on the diagonal); off-diagonal edges are set to 1 below
        adj = torch.eye(len(lst)) * len(lst)
num2str = {k:v for k,v in enumerate(lst)}
str2num = {v:k for k,v in enumerate(lst)}
for i,j in edge_lst:
n1,n2 = str2num[i], str2num[j]
adj[n1,n2] = 1
adj[n2,n1] = 1
return adj
def generate_attention_matrx(self, node_feature_mat):
attention_mat = torch.zeros(self.node_size, self.node_size)
for i in range(self.node_size):
for j in range(self.node_size):
if self.adj[i,j]!=1:
continue
feature = torch.cat([node_feature_mat[i].view(1,-1), node_feature_mat[j].view(1,-1)], 1)
attention_model = self.graph_attention_model_mat[i][j]
attention_mat[i,j] = torch.sigmoid(self.feed_lst_of_module(input_feature=feature, lst_of_module=attention_model))
return attention_mat
##### self.global_embed_size*2 -> 1
def gnn_attention(self):
highway_nn = Highway(size = self.global_embed_size*2, num_layers = self.highway_num_layer)
highway_fc = nn.Linear(self.global_embed_size*2, 1)
return [highway_nn, highway_fc]
def forward(self, smiles_lst2, icdcode_lst3, criteria_lst, return_attention_matrix = False):
embedding_lst = HINT_nograph.forward(self, smiles_lst2, icdcode_lst3, criteria_lst, if_gnn = True)
### length is 13, each is 32,50
batch_size = embedding_lst[0].shape[0]
output_lst = []
if return_attention_matrix:
attention_mat_lst = []
for i in range(batch_size):
node_feature_lst = [embedding[i].view(1,-1) for embedding in embedding_lst]
node_feature_mat = torch.cat(node_feature_lst, 0) ### 13, 50
attention_mat = self.generate_attention_matrx(node_feature_mat)
output = self.gnn(node_feature_mat, self.adj * attention_mat)
output = output[-1].view(1,-1)
output_lst.append(output)
if return_attention_matrix:
attention_mat_lst.append(attention_mat)
output_mat = torch.cat(output_lst, 0)
if not return_attention_matrix:
return output_mat
else:
return output_mat, attention_mat_lst
def interpret(self, complete_dataloader):
from graph_visualize_interpret import data2graph
from utils import replace_strange_symbol
for nctid_lst, status_lst, why_stop_lst, label_vec, phase_lst, \
diseases_lst, icdcode_lst3, drugs_lst, smiles_lst2, criteria_lst in complete_dataloader:
output, attention_mat_lst = self.forward(smiles_lst2, icdcode_lst3, criteria_lst, return_attention_matrix=True)
output = output.view(-1)
batch_size = len(nctid_lst)
for i in range(batch_size):
name = '__'.join([nctid_lst[i], status_lst[i], why_stop_lst[i], \
str(label_vec[i].item()), str(torch.sigmoid(output[i]).item())[:5], \
phase_lst[i], diseases_lst[i], drugs_lst[i]])
                if len(name) > 250:
                    name = name[:250]  # keep the name within the 255-char filename limit (".png" is appended)
name = replace_strange_symbol(name)
name = name.replace('__', '_')
                name = name.replace('  ', ' ')  # collapse double spaces
name = 'interpret_result/' + name + '.png'
print(name)
data2graph(attention_matrix = attention_mat_lst[i], adj = self.adj, save_name = name)
### generate attention matrix
class Only_Molecule(Interaction):
def __init__(self, molecule_encoder, disease_encoder, protocol_encoder,
global_embed_size,
highway_num_layer,
prefix_name,
epoch = 20,
lr = 3e-4,
weight_decay = 0):
super(Only_Molecule, self).__init__(molecule_encoder=molecule_encoder,
disease_encoder=disease_encoder,
protocol_encoder=protocol_encoder,
global_embed_size = global_embed_size,
highway_num_layer = highway_num_layer,
prefix_name = prefix_name,
epoch = epoch,
lr = lr,
weight_decay = weight_decay,)
self.molecule2out = nn.Linear(self.global_embed_size,1)
def forward(self, smiles_lst2, icdcode_lst3, criteria_lst):
molecule_embed = self.molecule_encoder.forward_smiles_lst_lst(smiles_lst2)
return self.molecule2out(molecule_embed)
class Only_Disease(Only_Molecule):
def __init__(self, molecule_encoder, disease_encoder, protocol_encoder,
global_embed_size,
highway_num_layer,
prefix_name,
epoch = 20,
lr = 3e-4,
weight_decay = 0):
super(Only_Disease, self).__init__(molecule_encoder = molecule_encoder,
disease_encoder=disease_encoder,
protocol_encoder=protocol_encoder,
global_embed_size = global_embed_size,
highway_num_layer = highway_num_layer,
prefix_name = prefix_name,
epoch = epoch,
lr = lr,
weight_decay = weight_decay,)
self.disease2out = self.molecule2out
def forward(self, smiles_lst2, icdcode_lst3, criteria_lst):
icd_embed = self.disease_encoder.forward_code_lst3(icdcode_lst3)
return self.disease2out(icd_embed)
| [
"[email protected]"
] | |
6a99650619af2f11b218bb57ff7630a44898107d | 2b4e133329a5ca1ee205b026a46606b027d3f205 | /Customer/urls.py | b6181d44da1f0dcdfe16696339889e7b31696a9b | [] | no_license | wadeeat786486962/bladerscenter.github.io- | 06884c5ad3e7b874ce761e21ab5c00c9ab74fcfc | 410d11feb6bc1885e614069a7bc5007521cf982d | refs/heads/main | 2023-06-16T04:24:23.697174 | 2021-07-11T18:05:14 | 2021-07-11T18:05:14 | 384,936,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from django.urls import path
from Customer.middlewares.auth import customerPanel_middleware
from Customer import views
urlpatterns = [
path('', customerPanel_middleware(views.customer_panel), name='customerpanel'),
path('updateprofile/', customerPanel_middleware(views.profile_update), name='updateprofile'),
path('wish_list/<int:id>/', views.wish_list, name='wish_list'),
path('comment/<int:id>/', views.comment, name='comment'),
path('wished_product/', views.wished_product, name='wished_product'),
path('delete_comment/<int:id>/', views.delete_comment, name='delete_comment'),
path('delete_wish_pro/<int:id>/', views.delete_wish_pro, name='delete_wish_pro'),
]
| [
"[email protected]"
] | |
c6cd354f3834b048d995b22a1bb692b27b4c369f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02392/s151511346.py | c222535b5db3bdb72e93f3da95df73cef146eb98 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | def max(a,b):
if a>b :
return a
else :
return b
a,b,c = map(int,raw_input().split())
if a<b<c :
print'Yes'
else :
print'No' | [
"[email protected]"
] | |
1663460b4eb0ad56f0fc55fcc2afd8b357ecfeaf | 03969015ab882f4751dc0e91beeda1212babca48 | /robot_code/nimbus_explore_latest/src/util.py | a613fc7360ddb8615a72a53e5182ee94064acb96 | [] | no_license | lnairGT/Thesis_code | f3ad57f4344691227dcd128a741eb9c0e937738e | 6f5dbfc2510272f294a0e9bb4273beceeacbff2a | refs/heads/master | 2023-03-17T21:43:56.320553 | 2020-09-26T16:05:31 | 2020-09-26T16:05:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | import glob, csv, os
import numpy as np
import cPickle as pickle
def loadScioDataset(pklFile='sciodata', csvFile='scio_allmaterials_clean', materialNames=[], objectNames=[]):
saveFilename = os.path.join('data', pklFile + '.pkl')
if os.path.isfile(saveFilename):
with open(saveFilename, 'rb') as f:
X, y_materials, y_objects, wavelengths = pickle.load(f)
else:
X = []
y_materials = []
y_objects = []
filename = os.path.join('data', csvFile + '.csv')
wavelengthCount = 331
with open(filename, 'rb') as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
if i < 10 or i == 11:
continue
if i == 10:
# Header row
wavelengths = [float(r.strip().split('_')[-1].split()[0]) + 740.0 for r in row[10:wavelengthCount+10]]
continue
obj = row[3].strip()
material = row[4].strip()
if material not in materialNames:
continue
index = materialNames.index(material)
if obj not in objectNames[index]:
continue
values = [float(v) for v in row[10:wavelengthCount+10]]
X.append(values)
y_materials.append(index)
y_objects.append(obj)
with open(saveFilename, 'wb') as f:
pickle.dump([X, y_materials, y_objects, wavelengths], f, protocol=pickle.HIGHEST_PROTOCOL)
return X, y_materials, y_objects, wavelengths
def firstDeriv(x, wavelengths):
# First derivative of measurements with respect to wavelength
x = np.copy(x)
for i, xx in enumerate(x):
dx = np.zeros(xx.shape, np.float)
dx[0:-1] = np.diff(xx)/np.diff(wavelengths)
dx[-1] = (xx[-1] - xx[-2])/(wavelengths[-1] - wavelengths[-2])
x[i] = dx
return x
| [
"[email protected]"
] | |
f98be85338873b97927faa46962a5b3097b14fb0 | 706f239f0df4586221e6a7aac001626ab531c224 | /src/client_libraries/python/dynamics/customerinsights/api/models/profile_store_state_info.py | 8363b24f80a213b76cc14db8a208077e35435112 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | Global19-atlassian-net/Dynamics365-CustomerInsights-Client-Libraries | 9681d258c649b005a2379d32b23d374695a6fca4 | 0ce81ae25e97c3b8de12b97963a8c765c0248238 | refs/heads/main | 2023-02-28T20:39:33.622885 | 2021-02-09T23:34:38 | 2021-02-09T23:34:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,739 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ProfileStoreStateInfo(Model):
"""Represents runtime profile store state.
Variables are only populated by the server, and will be ignored when
sending a request.
:param ingestion_time: Gets the latest date of ingestion.
:type ingestion_time: datetime
:param primary_info:
:type primary_info:
~dynamics.customerinsights.api.models.ProfileStoreCollectionInfo
:param secondary_info:
:type secondary_info:
~dynamics.customerinsights.api.models.ProfileStoreCollectionInfo
:ivar instance_id: Gets the Customer Insights instance id associated with
this object.
:vartype instance_id: str
"""
_validation = {
'instance_id': {'readonly': True},
}
_attribute_map = {
'ingestion_time': {'key': 'ingestionTime', 'type': 'iso-8601'},
'primary_info': {'key': 'primaryInfo', 'type': 'ProfileStoreCollectionInfo'},
'secondary_info': {'key': 'secondaryInfo', 'type': 'ProfileStoreCollectionInfo'},
'instance_id': {'key': 'instanceId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ProfileStoreStateInfo, self).__init__(**kwargs)
self.ingestion_time = kwargs.get('ingestion_time', None)
self.primary_info = kwargs.get('primary_info', None)
self.secondary_info = kwargs.get('secondary_info', None)
self.instance_id = None
| [
"[email protected]"
] | |
bba4672adc471b819e4d9197f3c29cdb216a6e34 | 9a0d178a9128b8b3f33334f10d65abc7c2d8ed6e | /main/backend/capstone_project/diary_app/serializers.py | b197945f9c1100880d3facd8c1126b98b7fcad34 | [] | no_license | kookmin-sw/capstone-2021-5 | b19917d3d9fe5d2edfd8ebea5745a2806414aff3 | 322a2cd5d79d8bfd639f60e015af5db5dd7bc4a1 | refs/heads/master | 2023-05-06T01:24:00.457598 | 2021-05-26T14:43:28 | 2021-05-26T14:43:28 | 329,216,184 | 0 | 9 | null | 2021-05-24T05:05:41 | 2021-01-13T06:35:50 | JavaScript | UTF-8 | Python | false | false | 1,370 | py | from rest_framework import serializers
from .models import Diary
from django.core.exceptions import ValidationError
import datetime
from analysis.models import Emotion
class DiarySerializer(serializers.ModelSerializer):
"""
    Serializer for Diary entries
"""
class Meta:
model = Diary
fields = "__all__"
def validate(self, data):
if self.context['request'].method != "PUT" and Diary.objects.filter(profile=self.context['request'].user, title=data['title']).exists() == True: # 만약 같은 계정의 project title이 중복되면
raise ValidationError('duplicated title')
today = datetime.date.today()
if self.context['request'].method != "PUT" and Diary.objects.filter(profile=self.context['request'].user, pubdate = today ).exists():
raise ValidationError('already written')
if self.context['request'].method == "POST" and not Emotion.objects.filter(profile=self.context['request'].user,pubdate = today).exists():
raise ValidationError('not analyzed')
data['pubdate'] = today
data['weather'] = Emotion.objects.get(profile=self.context['request'].user,pubdate = today).weather
        data['profile'] = self.context['request'].user  # always set the author to the requesting account on creation
return data
| [
"[email protected]"
] | |
4e5f0dedb9fbc134b5fe17d5f1d3d24006e690b5 | 93289539257faa129aa2d17a42148f7d73ce4e9e | /Python/2193_PinaryNumber.py | c969c707dd349552d942546aaeaa2892839f9462 | [] | no_license | Manngold/baekjoon-practice | d015dd518144a75b5cb3d4e831d6c95a3c70544f | 54f9efcb6460647c2a0f465731b582fe6de89cf3 | refs/heads/master | 2021-06-25T13:04:23.162531 | 2020-10-14T08:34:28 | 2020-10-14T08:34:28 | 148,895,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | n = int(input())
dp = [1, 1, 2]
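# dp[i] counts (i+1)-digit pinary numbers; the recurrence dp[i] = dp[i-1] + dp[i-2] is Fibonacci-like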
if n <= 3:
pass
else:
for i in range(3, n):
dp.append(dp[i - 2] + dp[i - 1])
print(dp[n-1])
| [
"[email protected]"
] | |
f17886fdaeaef31d2dbe43e3265a80c7adac1985 | 884923e1d3d3705688218838c6c669230ac308f3 | /Py/1204.py | 07359f911918a011634f6976e5195c109ae67268 | [] | no_license | gimyoni/CodeUp | 1c22fa1513706eef987b7d7d7ea965ee99c72a09 | 97728d8772ba2a19994ca68420093ffad3fd3552 | refs/heads/master | 2023-04-06T13:09:10.553671 | 2021-04-18T13:51:46 | 2021-04-18T13:51:46 | 268,708,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | a = int(input())
if a % 10 == 1:
    if a % 100 == 11:  # 11, 111, 211, ... all take "th"
        print(str(a)+"th")
    else:
        print(str(a)+"st")
elif a % 10 == 2:
    if a % 100 == 12:
        print(str(a)+"th")
    else:
        print(str(a)+"nd")
elif a % 10 == 3:
    if a % 100 == 13:
        print(str(a)+"th")
    else:
        print(str(a)+"rd")
else:
    print(str(a)+"th")
| [
"[email protected]"
] | |
b8be4bdb76e7984b8c8b1c0c457aa46965c52abe | 50402cc4388dfee3a9dbe9e121ef217759ebdba8 | /Proj/UR/GeneratePaths/WorldViz.py | 43eb54e6bed1c69598e6490130d3c8d6f0bcc8a4 | [] | no_license | dqyi11/SVNBackup | bd46a69ec55e3a4f981a9bca4c8340944d8d5886 | 9ad38e38453ef8539011cf4d9a9c0a363e668759 | refs/heads/master | 2020-03-26T12:15:01.155873 | 2015-12-10T01:11:36 | 2015-12-10T01:11:36 | 144,883,382 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,188 | py | '''
Created on Jul 30, 2015
@author: daqing_yi
'''
import pygame, sys
from pygame.locals import *
import numpy as np
from Path import *
BLUE = (0,0,255)
RED = (255,0,0)
BLACK = (0,0,0)
GREEN = (0,255,0)
class WorldViz(object):
def __init__(self, world):
self.world = world
pygame.init()
self.screen = pygame.display.set_mode((int(self.world.width),int(self.world.height)))
pygame.display.set_caption(self.world.name)
self.screen.fill((255,255,255))
self.myfont = pygame.font.SysFont("monospace", 15)
self.colors = []
for obj in self.world.objects:
color = (np.random.randint(0,255), np.random.randint(0,255), np.random.randint(0,255))
self.colors.append(color)
def update(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
return False
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
pos = pygame.mouse.get_pos()
print "LEFT " + str(pos)
self.world.init = pos
else:
pos = pygame.mouse.get_pos()
print "RIGHT " + str(pos)
self.world.goal = pos
self.screen.fill((255,255,255))
RADIUS = 10
RECT_WIDTH = 16
for i in range(len(self.world.objects)):
obj = self.world.objects[i]
if obj.type == "robot":
pygame.draw.circle(self.screen, self.colors[i], obj.center, RADIUS)
else:
pygame.draw.rect(self.screen, self.colors[i], (obj.center[0]-RECT_WIDTH/2, obj.center[1]-RECT_WIDTH/2, RECT_WIDTH, RECT_WIDTH))
label = self.myfont.render(obj.type+"("+obj.name+")", 1, (0,0,0))
self.screen.blit(label, (obj.center[0], obj.center[1]+15))
#pygame.draw.line(self.screen, GREEN, [int(obj.bounding[0]), int(obj.center.y)], [int(obj.bounding[2]),int(obj.center.y)], 2)
#pygame.draw.line(self.screen, GREEN, [int(obj.center.x), int(obj.bounding[1])], [int(obj.center.x), int(obj.bounding[3])], 2)
if self.world.init != None:
pygame.draw.circle(self.screen, BLUE, self.world.init, 10, 0)
if self.world.goal != None:
pygame.draw.circle(self.screen, RED, self.world.goal, 10, 0)
pygame.display.flip()
pygame.time.delay(100)
return True
def close(self):
pygame.quit()
def drawPath(self, path, filename, background=""):
surface = pygame.Surface((self.world.width, self.world.height))
if background == "":
surface.fill((255,255,255))
else:
#surface.fill((255,255,255))
img = pygame.image.load(background)
surface.blit( img, (0,0) )
RADIUS = 10
RECT_WIDTH = 16
for i in range(len(self.world.objects)):
obj = self.world.objects[i]
if obj.type == "robot":
pygame.draw.circle(surface, self.colors[i], obj.center, RADIUS)
else:
pygame.draw.rect(surface, self.colors[i], (obj.center[0]-RECT_WIDTH/2, obj.center[1]-RECT_WIDTH/2, RECT_WIDTH, RECT_WIDTH))
label = self.myfont.render(obj.type+"("+obj.name+")", 1, (0,0,0))
surface.blit(label, (obj.center[0], obj.center[1]+15))
pathLen = len(path.waypoints)
#print path.waypoints
for i in range(pathLen-1):
pygame.draw.line(surface, (0,0,0), path.waypoints[i], path.waypoints[i+1], 6)
if self.world.init != None:
pygame.draw.circle(surface, BLUE, self.world.init, 10, 0)
if self.world.goal != None:
pygame.draw.circle(surface, RED, self.world.goal, 10, 0)
pygame.image.save(surface, filename)
| [
"walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39"
] | walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39 |
0ed6c6a658a25b982fcb8c2eda5f65ceaa9e1362 | b13a1a96e9f1dddb3a3a44b636ca939b85962899 | /Django & REST API/testalpha/demo/migrations/0002_teacher.py | b5360168e5edd1473d8dae138f464eb7dc7ed5a3 | [] | no_license | jspw/Django-Test | f266331c73c34b83b1189811a163567b6b4cc60b | 13a6d0146c9c78f8fa03c269e4546b5bbdb146bd | refs/heads/master | 2021-03-23T17:50:21.764636 | 2020-10-18T09:21:23 | 2020-10-18T09:21:23 | 247,472,132 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # Generated by Django 3.0.5 on 2020-04-20 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('demo', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('dept', models.ManyToManyField(related_name='teacher', to='demo.Department')),
],
),
]
| [
"[email protected]"
] | |
fd1991fd30ed9da80b1585f41314ee041ef41214 | b7ee82768de54a83a99ad4ddd9e090c61935b86a | /paper/plot-subset-sites.py | 3a1d9b3df807a72e6ed633d1d9138578707b7d98 | [
"MIT"
] | permissive | brentp/somalier | 5042ea9e7773a311c12825ac4ad8ee4140db2412 | de50b1cfe1b859407b64ba3928cc4419f85c7403 | refs/heads/master | 2023-09-01T16:21:31.459237 | 2023-08-28T10:50:30 | 2023-08-28T10:50:30 | 143,888,326 | 228 | 32 | MIT | 2023-08-28T10:50:31 | 2018-08-07T14:52:04 | Nim | UTF-8 | Python | false | false | 968 | py | import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import sys
sns.set_palette("Set2")
df = pd.read_csv(sys.argv[1], sep="\t")
fig, axes = plt.subplots(1, 1, figsize=(8, 4))
try:
axes[0]
except (IndexError, TypeError):
axes = (axes,)
sns.barplot(x="n", y="fp", hue="strict", data=df, ax=axes[0])
axes[0].set_xlabel("Number of sites")
axes[0].set_ylabel("False-positive rate")
#sns.barplot(x="n", y="tp", hue="strict", data=df, ax=axes[1])
#axes[1].set_xlabel("Number of sites")
#axes[1].set_ylabel("True-positive rate")
plt.savefig("subset-sites.png")
plt.show()
"""
n tp fp fn strict
10 0.816905 0.183080 0.000014 false
20 0.859777 0.140218 0.000005 false
40 0.925964 0.074012 0.000024 false
100 0.985616 0.014384 0.000000 false
200 0.997638 0.002362 0.000000 false
400 0.999724 0.000276 0.000000 false
1000 0.999986 0.000014 0.000000 false
2000 1.000000 0.000000 0.000000 false
4000 1.000000 0.000000 0.000000 false
"""
| [
"[email protected]"
] | |
e47a79bfda7d1e46c14a365203b23e5a4e979f72 | a5570cfad2697da8f95a65e0b8a7cc0697e71a2e | /12 Python Lists.py | c252cc40019ff24e9104e7a1ad7b53030b3577d7 | [] | no_license | mukund7296/Python-Brushup | c4db1d43fe6a06742f00ec4f12affec4b96c7237 | 367ce4ddf60aeea4f33294702f7c4b9d11231efe | refs/heads/master | 2020-12-19T21:26:29.371532 | 2020-01-23T18:27:58 | 2020-01-23T18:27:58 | 235,858,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | """
Python Collections (Arrays)
There are four collection data types in the Python programming language:
List is a collection which is ordered and changeable. Allows duplicate members.
Tuple is a collection which is ordered and unchangeable. Allows duplicate members.
Set is a collection which is unordered and unindexed. No duplicate members.
Dictionary is a collection which is unordered, changeable and indexed. No duplicate members.
List
A list is a collection which is ordered and changeable. In Python lists are written with square brackets."""
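# Illustrative sketch (not part of the original lesson): the other three
# collection types described above, for quick comparison.
mytuple = ("apple", "banana", "cherry")        # ordered, unchangeable, allows duplicates
myset = {"apple", "banana", "cherry"}          # unordered, unindexed, no duplicates
mydict = {"fruit": "apple", "color": "red"}    # changeable, indexed by key
print(mytuple[0], "banana" in myset, mydict["color"])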
thislist = ["apple", "banana", "cherry", "orange", "kiwi", "melon", "mango"]
print(thislist)
# indexing into the list
print(thislist[0])
# negative indexing
print(thislist[-1])
# reverse indexing
print(thislist[::-1])
# range of indexing
print(thislist[2:5])
# replace the item at index 2 (lists are changeable)
thislist[2] = "MMango"
print(thislist)
# removing one item from list
thislist.remove("MMango")
print(thislist)
thislist.append("Kela")
thislist.append("Kela")
print(thislist)
thislist.reverse()
print(thislist)
thislist = ["apple", "banana", "cherry"]
if "apple" in thislist:
print("Yes, 'apple' is in the fruits list")
# copy of list
thislist = ["apple", "banana", "cherry"]
mylist = thislist.copy()
print(mylist)
list1 = ["a", "b" , "c"]
list2 = [1, 2, 3]
list3 = list1 + list2
print(list3)
list1 = ["a", "b" , "c"]
list2 = [1, 2, 3]
list1.extend(list2)
print(list1)
| [
"[email protected]"
] | |
601bb4a95a862f9e49e3cba3c1813a262db8c74f | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2317/60730/280771.py | 05ee5cd5914a59c30517768e788b2c0f2c7b859c | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | MOD = 10 ** 9 + 7
m = list(map(int, input().split(",")))
N = len(m)
m.sort()
ans = 0
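# after sorting, m[i] is the min and m[j] the max of exactly 2**(j-i-1) subsequences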
for i in range(N - 1):
for j in range(i + 1, N):
ans += pow(2, j - i - 1) * (m[j] - m[i])
print(ans % MOD)
| [
"[email protected]"
] | |
e03a99f67ddefa6e40bfffa5e697884a1295f0c1 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2471/60678/253055.py | cc45c769fa61efb73585348d1e372814f6241525 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,824 | py | class Stack:
def __init__(self):
self.stack = []
def pop(self):
if len(self.stack) == 0:
return None
elem = self.stack[len(self.stack) - 1]
del self.stack[len(self.stack) - 1]
return elem
def push(self, elem):
self.stack.append(elem)
def top(self):
return self.stack[len(self.stack) - 1]
times = int(input())
stringTest = ''
for loopTimes in range(0, times):
over = False
string = input()
stringTest = stringTest + ' ' + string
listTest = []
stack = Stack()
    if string == '':
        over = True
        print('not balanced')
        listTest.append('not balanced')
for i in string:
if i == '(' or i == '[' or i == '{':
stack.push(i)
        elif i == ')':
            character = stack.pop()
            if character != '(':  # None means the stack was already empty
                print('not balanced')
                listTest.append('not balanced')
                over = True
                break
        elif i == ']':
            character = stack.pop()
            if character != '[':
                print('not balanced')
                listTest.append('not balanced')
                over = True
                break
        elif i == '}':
            character = stack.pop()
            if character != '{':
                print('not balanced')
                listTest.append('not balanced')
                over = True
                break
    # after scanning the whole string, leftover openers mean it is unbalanced
    if not over and len(stack.stack) > 0:
        print('not balanced')
        listTest.append('not balanced')
        over = True
if not over:
print('balanced')
listTest.append('balanced')
if listTest[0] == 'balanced' and listTest[1] == 'not balanced' and len(listTest) == 2:
print(stringTest) | [
"[email protected]"
] | |
cca9577decf9449eb84c393e3b58c8a559685aa5 | eec1b3d81a6dee43571f753ffa379735a3d4aa41 | /webdriver_service/test/test_yintai_single/fapiao_1_liuchen.py | b88ce727956467a9292e23ef2e8f3dee0f4f0fc7 | [] | no_license | fn199544123/download-demo-beyebe | be00aacb302acd8304312c5a876a3c69fb3e73e8 | ec9892a582a7a69235d95e49541b3a2b14b51239 | refs/heads/master | 2022-12-12T18:47:34.783443 | 2019-04-25T11:32:33 | 2019-04-25T11:32:33 | 163,168,934 | 1 | 1 | null | 2022-12-08T03:08:24 | 2018-12-26T10:44:33 | Python | UTF-8 | Python | false | false | 966 | py | import requests
import json
url = "http://39.108.188.34:8893/QrcodeDetectV3"
data = {
"bill": {'ossPath': "http://byb-pic.oss-cn-shenzhen.aliyuncs.com/beyebe/data/20190222/cc47c589fcea0609b6ca1aadfaac7d6c.pdf"},
}
data = json.dumps(data)
headers = {
'User-Agent': 'User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
"Content-Type": "application/json"}
response = requests.post(url, data=data, headers=headers, timeout=(500, 500))
print(response.text)
# Two kinds of requests: the commented-out one below sends a single image, the one above sends a PDF;
# in both, `headers` and `data` are the request headers and the POST body.
# data = {
# "bill": {'ossPath': 'http://byb-pic.oss-cn-shenzhen.aliyuncs.com/beyebe/docker/test2_0c7ed229f4fb2a3dacdf8f9f22b7677e.jpg'},
# }
# data = json.dumps(data)
# headers = {"Content-Type": "application/json"}
# response = requests.post(url, data=data, headers=headers)
# print(response.text)
| [
"[email protected]"
] | |
142863b82bed966008fad305bc5092c88cecc8ce | 8dcd3ee098b4f5b80879c37a62292f42f6b2ae17 | /venv/Lib/site-packages/pythonwin/pywin/framework/editor/frame.py | ec2b8e6a3e5619a74efb04eb04ba088ea73b794e | [] | no_license | GregVargas1999/InfinityAreaInfo | 53fdfefc11c4af8f5d2b8f511f7461d11a3f7533 | 2e4a7c6a2424514ca0ec58c9153eb08dc8e09a4a | refs/heads/master | 2022-12-01T20:26:05.388878 | 2020-08-11T18:37:05 | 2020-08-11T18:37:05 | 286,821,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | # frame.py - The MDI frame window for an editor.
import afxres
import pywin.framework.window
import win32con
import win32ui
from . import ModuleBrowser
class EditorFrame(pywin.framework.window.MDIChildWnd):
def OnCreateClient(self, cp, context):
# Create the default view as specified by the template (ie, the editor view)
view = context.template.MakeView(context.doc)
# Create the browser view.
browserView = ModuleBrowser.BrowserView(context.doc)
view2 = context.template.MakeView(context.doc)
splitter = win32ui.CreateSplitter()
style = win32con.WS_CHILD | win32con.WS_VISIBLE
splitter.CreateStatic(self, 1, 2, style, win32ui.AFX_IDW_PANE_FIRST)
sub_splitter = self.sub_splitter = win32ui.CreateSplitter()
sub_splitter.CreateStatic(splitter, 2, 1, style, win32ui.AFX_IDW_PANE_FIRST + 1)
# Note we must add the default view first, so that doc.GetFirstView() returns the editor view.
sub_splitter.CreateView(view, 1, 0, (0, 0))
splitter.CreateView(browserView, 0, 0, (0, 0))
sub_splitter.CreateView(view2, 0, 0, (0, 0))
## print "First view is", context.doc.GetFirstView()
## print "Views are", view, view2, browserView
## print "Parents are", view.GetParent(), view2.GetParent(), browserView.GetParent()
## print "Splitter is", splitter
## print "sub splitter is", sub_splitter
## Old
## splitter.CreateStatic (self, 1, 2)
## splitter.CreateView(view, 0, 1, (0,0)) # size ignored.
## splitter.CreateView (browserView, 0, 0, (0, 0))
# Restrict the size of the browser splitter (and we can avoid filling
# it until it is shown)
splitter.SetColumnInfo(0, 10, 20)
# And the active view is our default view (so it gets initial focus)
self.SetActiveView(view)
def GetEditorView(self):
# In a multi-view (eg, splitter) environment, get
# an editor (ie, scintilla) view
# Look for the splitter opened the most!
if self.sub_splitter is None:
return self.GetDlgItem(win32ui.AFX_IDW_PANE_FIRST)
v1 = self.sub_splitter.GetPane(0, 0)
v2 = self.sub_splitter.GetPane(1, 0)
r1 = v1.GetWindowRect()
r2 = v2.GetWindowRect()
if r1[3] - r1[1] > r2[3] - r2[1]:
return v1
return v2
def GetBrowserView(self):
# XXX - should fix this :-)
return self.GetActiveDocument().GetAllViews()[1]
def OnClose(self):
doc = self.GetActiveDocument()
if not doc.SaveModified():
## Cancel button selected from Save dialog, do not actually close
## print 'close cancelled'
return 0
## So the 'Save' dialog doesn't come up twice
doc._obj_.SetModifiedFlag(False)
# Must force the module browser to close itself here (OnDestroy for the view itself is too late!)
self.sub_splitter = None # ensure no circles!
self.GetBrowserView().DestroyBrowser()
return self._obj_.OnClose()
| [
"[email protected]"
] | |
24b1eba5f108d0b8f858ac1fa0d5b882f5a8219a | 6c816f19d7f4a3d89abbb00eeaf43dd818ecc34f | /apps/payment/migrations/0003_auto_20210218_1806.py | 2b9fa9d8c5a50aa3596f9d6fd7f82ebc6eeb87cd | [] | no_license | reo-dev/bolt | 29ee6aa7cfc96bd50fa7a7dae07fbaafc2125e54 | d1a7859dd1ebe2f5b0e6e295047b620f5afdb92e | refs/heads/master | 2023-07-13T04:05:57.856278 | 2021-08-27T09:07:03 | 2021-08-27T09:07:03 | 382,195,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # Generated by Django 3.0.8 on 2021-02-18 09:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0002_auto_20210218_1805'),
]
operations = [
migrations.AlterField(
model_name='paylist',
name='count',
            field=models.IntegerField(default=0, verbose_name='개수'),  # '개수' is Korean for "count"
),
]
| [
"[email protected]"
] | |
fb1c3a8f95eb3aa2252bd4f7443528f9b0d935a2 | 413125277311510b40ca481b12ab82d379f5df62 | /chess (horse movement).py | 2fb0d1d957c87a1bd7b22c6b33aed2d6472c4b7d | [] | no_license | Aakashbansal837/python | 98d85ce1e88c73f0e5180b1b1af80714f3e45097 | 4de2a3d6a482fdba8809ceb81e94f201b776b00e | refs/heads/master | 2021-04-06T00:16:24.884830 | 2018-05-30T17:42:20 | 2018-05-30T17:42:20 | 124,778,551 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | for _ in range(int(input())):
n,m,q = map(int,input().split())
arr = [[0 for x in range(m)] for y in range(n)]
arr1=[]
for i in range(q):
tmp,tmp2 = map(int,input().split())
arr1.append([tmp-1,tmp2-1])
arr[tmp-1][tmp2-1] = 1
#print("array is:")
#print(*arr,sep="\n")
count = 0
for i in arr1:
#print("i:",i)
x,y = i[0],i[1]
if x-2 >= 0:
if y-1 >= 0:
if arr[x-2][y-1] == 1:
count+=1
#print("x-2:y-1")
if y+1 < m:
if arr[x-2][y+1] == 1:
count+=1
#print("x-2:y+1")
if x+2 < n:
if y-1 >= 0:
if arr[x+2][y-1] == 1:
count+=1
#print("x+2:y-1")
if y+1 < m:
if arr[x+2][y+1] == 1:
count+=1
#print("x+2:y+1")
if y-2 >= 0:
if x-1 >= 0:
if arr[x-1][y-2] == 1:
count+=1
#print("x-1:y-2")
            if x+1 < n:  # rows are bounded by n, not m
if arr[x+1][y-2] == 1:
count+=1
#print("x+1:y-2")
if y+2 < m:
if x-1 >= 0:
if arr[x-1][y+2] == 1:
count+=1
#print("x-1:y-2")
if x+1 < m:
if arr[x+1][y+2] == 1:
count+=1
#print("x+1:y+2")
print(count)
| [
"[email protected]"
] | |
f57738ea9e0d933d0ce62e21f6781693aeceb9d8 | 3e8dfa786b7f68ac4ffbc5154448dc4f479d27be | /爬虫项目实例/BaiduStocks/BaiduStocks/pipelines.py | 5cf17a6a43228d5e29fdfb995e2ed28fd3730087 | [] | no_license | SmallSir/Python_ | a5201cda762af8fe54a74f368eb140d354ce84d6 | 93367667d74bc29cfba80239334e6c3b3fa604ca | refs/heads/master | 2021-09-07T03:25:57.072044 | 2018-02-16T15:21:32 | 2018-02-16T15:21:32 | 110,565,596 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class BaidustocksPipeline(object):
def process_item(self, item, spider):
return item
class BaidustockInfoPipeline(object):
def open_spider(self,spider):
self.f = open('BaiduStockInfo.txt','a')
def close_spider(self,spider):
self.f.close()
def process_item(self,item,spider):
try:
line = str(dict(item)) + '\n'
self.f.write(line)
        except Exception:  # avoid a bare except, which would also swallow KeyboardInterrupt
pass
return item | [
"[email protected]"
] | |
37dd944c5eec22c13d855f59bd969c0e2498e3cd | 3f77dda6c8d8508902f9ae3efbd4fed8ef2ee1b2 | /scrap/migrations/0031_auto_20190517_1104.py | 08a68dc34f4a666813a5c290a2a3797a88b8bea2 | [] | no_license | priyankush-siloria/linkedinscrap | a3d83cac1ca923e7e0f3a03e6e4c5f02b7f6a0e5 | 882a4df294ce8b2b01c94299471bbe6d9826f582 | refs/heads/master | 2020-07-07T13:38:01.659725 | 2019-08-20T11:40:23 | 2019-08-20T11:40:23 | 203,363,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # Generated by Django 2.2 on 2019-05-17 11:04
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scrap', '0030_auto_20190517_0948'),
]
operations = [
migrations.AlterField(
model_name='automations',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2019, 5, 17, 11, 4, 51, 676236)),
),
]
| [
"[email protected]"
] | |
51ea87311527f3de7247bb5e04a3033641a9e2db | 52a3fab945f71f1bb2177c650d1e6fa9f9dd9716 | /packages/motifextraction/alignment/extract_errors.py | ced825aaeb300d56ce649ce061d2f8d947d1e757 | [
"MIT"
] | permissive | SimonPringleWallace/motifextraction | 402430f3b51e7854b9b1c0c8c33d522f6af877cd | a39fee2a029ae7acc1cf0c5a031913bf84c948a0 | refs/heads/master | 2023-03-28T18:36:25.811621 | 2021-04-03T17:13:48 | 2021-04-03T17:13:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | import numpy as np
from path import Path
from ppm3d import load_alignment_data
def extract_errors(cluster_number: int, results_fn: str, save_dir: Path, cluster_dir: Path, cutoff: float, target_path: Path = None):
if save_dir and not save_dir.exists():
save_dir.makedirs()
print(f"Loading {results_fn}")
if target_path is None:
data = load_alignment_data(results_fn, prepend_path=cluster_dir)
else:
data = load_alignment_data(results_fn, prepend_model_path=cluster_dir, prepend_target_path=target_path)
errors = np.zeros((len(data), 4))
errors.fill(np.nan)
failed = 0
for i, a in enumerate(data):
if a is not None and a.successful:
l2 = a.L2Norm()
assert np.isclose(l2, a.error, rtol=1e-05, atol=1e-08)
l1 = a.L1Norm()
linf = a.LinfNorm()
rcutoff = cutoff / (a.model_scale*0.5 + a.target_scale*0.5)
angular = a.angular_variation(rcutoff)
else:
l2, l1, linf, angular = np.inf, np.inf, np.inf, np.inf
failed += 1
errors[i, :] = (l2, l1, linf, angular)
print(f"Finished! {failed} alignments failed.")
np.save(f'{save_dir}/{cluster_number}_errors.npy', errors)
| [
"[email protected]"
] | |
33220aff8c0decd5a152cb8cc96b951dd3eb037b | 304926837d94f37ef33c46b8f3c71ecfac4690e8 | /5.8_Hello_Admin.py | c3d19d10a8eb6df9440d75dcc566805cac6fbd35 | [] | no_license | ver0nika4ka/PythonCrashCourse | 1015d207d9da1b0f9efaee3acc502d2757880f33 | 6bde3b716deb86d022da5cb478c0a95505fe5acc | refs/heads/master | 2021-07-12T17:24:16.478133 | 2021-06-17T03:27:24 | 2021-06-17T03:27:24 | 246,993,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | usernames = ['veronica','nastya','victor','alex','admin']
for user in usernames:
if user == 'admin':
print(f"Hello admin, would you like to see a status report?")
else:
print(f"Hello {user.title()}, thank you for logging in again.") | [
"[email protected]"
] | |
a27e20221825939f2e9ac619b03572526083145b | 7033c84a3051e51a3d38a25d52f724bc0f6aab25 | /sandbox.py | 54edb623375b4528317d30ce33aaa8fb7140050a | [] | no_license | andycasey/bitfitter | 46332d12de79d45265f77707bd9a0749f115b75a | 403624d98582a6c249ef9da347f74686c410d400 | refs/heads/master | 2021-01-10T13:47:02.687006 | 2016-02-07T17:10:13 | 2016-02-07T17:10:13 | 51,255,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | import fitbit
import auth
client = fitbit.Fitbit(auth.CONSUMER_KEY, auth.CONSUMER_SECRET, oauth2=True,
access_token=auth.ACCESS_TOKEN, refresh_token=auth.REFRESH_TOKEN)
| [
"[email protected]"
] | |
8ba56c0fd41325626de705073c99c28a7cf1166c | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/pylint/test/functional/future_unicode_literals.py | 9aaabead04f4a504f9c8e78424ee8b8e844ae420 | [] | no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-pylint-2.3.0-pmz72kdc34fnma6vo5sc2y3c5wp5sjeb/lib/python3.7/site-packages/pylint/test/functional/future_unicode_literals.py | [
"[email protected]"
] | |
255049d34adcb37d9d4241719fc8e003fe117978 | 95aa9069a0a115c1cbccaac38c6c7d193b5a2fb7 | /home/migrations/0002_load_initial_data.py | 71268fb3a0667934e255cfd24dd2754440810b82 | [] | no_license | crowdbotics-apps/sunday-app-dev-5568 | 35f9c5b4ed8560fbb4b985f6761fb22c9eff8d34 | d8ae94d463f3b6186f27ccf0fa5458c051e2955b | refs/heads/master | 2022-10-09T18:05:11.968490 | 2020-06-07T08:36:37 | 2020-06-07T08:36:37 | 270,244,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "sunday app"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">sunday app</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "sunday-app-dev-5568.botics.co"
site_params = {
"name": "sunday app",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
ac7310af9bea64230b95d4d7b496937545189fff | 86b82049d710ed27cc0a94bc776020b199d4f4ad | /setup.py | 662aa245b9c64b75c5876ef2450838748e87ecfa | [
"MIT"
] | permissive | ChristopherHaydenTodd/ctodd-python-lib-revision-control | 02b8e5682fc48e67ac22afdfbf503f0a9b3d124c | d9618003592d9b54957789b2baebfe6d225d6c94 | refs/heads/master | 2020-04-22T19:00:31.401848 | 2019-02-21T21:50:10 | 2019-02-21T21:50:10 | 170,594,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,783 | py | """
Purpose:
setup.py is executed to build the python package
"""
# Python Imports
from os import listdir
from setuptools import setup, find_packages
import re
###
# Helper Functions
###
def get_version_from_file(python_version_file="./VERSION"):
"""
Purpose:
Get python requirements from a specified requirements file.
Args:
python_requirements_file (String): Path to the requirements file (usually
it is requirements.txt in the same directory as the setup.py)
Return:
requirements (List of Strings): The python requirements necessary to run
the library
"""
version = "unknown"
with open(python_version_file) as version_file:
version = version_file.readline().strip().strip("\n")
return version
def get_requirements_from_file(python_requirements_file="./requirements.txt"):
"""
Purpose:
Get python requirements from a specified requirements file.
Args:
python_requirements_file (String): Path to the requirements file (usually
it is requirements.txt in the same directory as the setup.py)
Return:
requirements (List of Strings): The python requirements necessary to run
the library
"""
requirements = []
with open(python_requirements_file) as requirements_file:
requirement = requirements_file.readline()
while requirement:
if requirement.strip().startswith("#"):
pass
elif requirement.strip() == "":
pass
else:
requirements.append(requirement.strip())
requirement = requirements_file.readline()
return requirements
def get_requirements_from_packages(packages):
"""
Purpose:
Get python requirements for each package. will get requirements file
in each package's subdirectory
Args:
packages (String): Name of the packages
Return:
requirements (List of Strings): The python requirements necessary to run
the library
"""
requirements = []
for package in packages:
package_dir = package.replace(".", "/")
requirement_files = get_requirements_files_in_package_dir(package_dir)
for requirement_file in requirement_files:
package_requirements =\
get_requirements_from_file(python_requirements_file=requirement_file)
requirements = requirements + package_requirements
return list(set(requirements))
def get_requirements_files_in_package_dir(package_dir):
"""
Purpose:
From a package dir, find all requirements files (Assuming form requirements.txt
or requirements_x.txt)
Args:
package_dir (String): Directory of the package
Return:
requirement_files (List of Strings): Requirement Files
"""
requirements_regex = r"^requirements[_\w]*.txt$"
requirement_files = []
for requirement_file in listdir(f"./{package_dir}"):
if re.match(requirements_regex, requirement_file):
requirement_files.append(f"./{package_dir}/{requirement_file}")
return requirement_files
###
# Main Functionality
###
def main():
"""
Purpose:
Main function for packaging and setting up packages
Args:
N/A
Return:
N/A
"""
# Get Version
version = get_version_from_file()
# Get Packages
packages = find_packages()
install_packages = [package for package in packages if not package.endswith(".tests")]
test_packages = [package for package in packages if package.endswith(".tests")]
# Get Requirements and Requirments Installation Details
install_requirements = get_requirements_from_packages(install_packages)
test_requirements = get_requirements_from_packages(test_packages)
setup_requirements = ["pytest-runner", "pytest", "pytest-cov"]
# Get Dependency Links For Each Requirement (As Necessary)
dependency_links = []
setup(
name="ctodd-python-lib-revision-control",
version=version,
python_requires=">3.0,<3.7",
description=("Python utilities used for interacting with Revision Control Systems Like Git"),
url="https://github.com/ChristopherHaydenTodd/ctodd-python-lib-revision-control",
author="Christopher H. Todd",
author_email="[email protected]",
classifiers=["Programming Language :: Python"],
keywords=["python", "libraries", "Revision Control", "Git"],
packages=packages,
install_requires=install_requirements,
setup_requires=setup_requirements,
tests_require=test_requirements,
project_urls={},
)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
a9fa204ab840d2ff649a13cac7078d8e2d08155e | cc3ded8dc35ec9f52e7cee6a840fb6d3c92b3185 | /transformers_repo/examples/pplm/run_pplm_discrim_train.py | 44f6b726d822c9e33460b5b3faaee3726ca263ad | [
"MIT",
"Apache-2.0"
] | permissive | see--/natural-question-answering | 04e3a38022dcb78a20f1632749cb82bb40a3d8aa | 9d31c8dee0ff799d190a2a351f4857224788a5ca | refs/heads/master | 2023-08-17T00:10:17.594705 | 2020-03-12T21:38:56 | 2020-03-12T21:38:56 | 234,064,034 | 90 | 24 | MIT | 2023-09-06T17:32:48 | 2020-01-15T11:19:50 | Python | UTF-8 | Python | false | false | 18,712 | py | #! /usr/bin/env python3
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import json
import math
import time
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from nltk.tokenize.treebank import TreebankWordDetokenizer
from torchtext import data as torchtext_data
from torchtext import datasets
from tqdm import tqdm, trange
from pplm_classification_head import ClassificationHead
from transformers import GPT2LMHeadModel, GPT2Tokenizer
torch.manual_seed(0)
np.random.seed(0)
EPSILON = 1e-10
example_sentence = "This is incredible! I love it, this is the best chicken I have ever had."
max_length_seq = 100
class Discriminator(torch.nn.Module):
"""Transformer encoder followed by a Classification Head"""
def __init__(self, class_size, pretrained_model="gpt2-medium", cached_mode=False, device="cpu"):
super(Discriminator, self).__init__()
self.tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model)
self.encoder = GPT2LMHeadModel.from_pretrained(pretrained_model)
self.embed_size = self.encoder.transformer.config.hidden_size
self.classifier_head = ClassificationHead(class_size=class_size, embed_size=self.embed_size)
self.cached_mode = cached_mode
self.device = device
def get_classifier(self):
return self.classifier_head
def train_custom(self):
for param in self.encoder.parameters():
param.requires_grad = False
self.classifier_head.train()
def avg_representation(self, x):
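        # masked mean-pooling: zero out padding positions (token id 0) before averaging over time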
mask = x.ne(0).unsqueeze(2).repeat(1, 1, self.embed_size).float().to(self.device).detach()
hidden, _ = self.encoder.transformer(x)
masked_hidden = hidden * mask
avg_hidden = torch.sum(masked_hidden, dim=1) / (torch.sum(mask, dim=1).detach() + EPSILON)
return avg_hidden
def forward(self, x):
if self.cached_mode:
avg_hidden = x.to(self.device)
else:
avg_hidden = self.avg_representation(x.to(self.device))
logits = self.classifier_head(avg_hidden)
probs = F.log_softmax(logits, dim=-1)
return probs
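# Illustrative sketch (not part of the original script; the class size and
# sentence are made-up values): scoring one sentence with a freshly built,
# untrained two-class discriminator.
#
#     disc = Discriminator(class_size=2, pretrained_model="gpt2-medium", device="cpu")
#     tokens = torch.tensor([disc.tokenizer.encode("What a great movie!")])
#     log_probs = disc(tokens)  # shape (1, 2): one log-probability per class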
class Dataset(data.Dataset):
def __init__(self, X, y):
"""Reads source and target sequences from txt files."""
self.X = X
self.y = y
def __len__(self):
return len(self.X)
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
data = {}
data["X"] = self.X[index]
data["y"] = self.y[index]
return data
def collate_fn(data):
def pad_sequences(sequences):
lengths = [len(seq) for seq in sequences]
padded_sequences = torch.zeros(len(sequences), max(lengths)).long() # padding value = 0
for i, seq in enumerate(sequences):
end = lengths[i]
padded_sequences[i, :end] = seq[:end]
return padded_sequences, lengths
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
x_batch, _ = pad_sequences(item_info["X"])
y_batch = torch.tensor(item_info["y"], dtype=torch.long)
return x_batch, y_batch
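# For intuition (hypothetical shapes): given two sequences of lengths 3 and 5,
# collate_fn returns an x_batch of shape (2, 5) whose shorter row is
# right-padded with zeros, together with a 1-D LongTensor of the two labels.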
def cached_collate_fn(data):
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
x_batch = torch.cat(item_info["X"], 0)
y_batch = torch.tensor(item_info["y"], dtype=torch.long)
return x_batch, y_batch
def train_epoch(data_loader, discriminator, optimizer, epoch=0, log_interval=10, device="cpu"):
samples_so_far = 0
discriminator.train_custom()
for batch_idx, (input_t, target_t) in enumerate(data_loader):
input_t, target_t = input_t.to(device), target_t.to(device)
optimizer.zero_grad()
output_t = discriminator(input_t)
loss = F.nll_loss(output_t, target_t)
loss.backward(retain_graph=True)
optimizer.step()
samples_so_far += len(input_t)
if batch_idx % log_interval == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch + 1,
samples_so_far,
len(data_loader.dataset),
100 * samples_so_far / len(data_loader.dataset),
loss.item(),
)
)
def evaluate_performance(data_loader, discriminator, device="cpu"):
discriminator.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for input_t, target_t in data_loader:
input_t, target_t = input_t.to(device), target_t.to(device)
output_t = discriminator(input_t)
# sum up batch loss
test_loss += F.nll_loss(output_t, target_t, reduction="sum").item()
# get the index of the max log-probability
pred_t = output_t.argmax(dim=1, keepdim=True)
correct += pred_t.eq(target_t.view_as(pred_t)).sum().item()
test_loss /= len(data_loader.dataset)
print(
"Performance on test set: "
"Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)".format(
test_loss, correct, len(data_loader.dataset), 100.0 * correct / len(data_loader.dataset)
)
)
def predict(input_sentence, model, classes, cached=False, device="cpu"):
input_t = model.tokenizer.encode(input_sentence)
input_t = torch.tensor([input_t], dtype=torch.long, device=device)
if cached:
input_t = model.avg_representation(input_t)
log_probs = model(input_t).data.cpu().numpy().flatten().tolist()
print("Input sentence:", input_sentence)
print(
"Predictions:",
", ".join("{}: {:.4f}".format(c, math.exp(log_prob)) for c, log_prob in zip(classes, log_probs)),
)
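# Illustrative call (assumes a trained discriminator; idx2class is the label
# list built in train_discriminator below, as in the example near the end of
# this script):
#
#     predict(example_sentence, discriminator, idx2class, cached=False, device="cpu")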
def get_cached_data_loader(dataset, batch_size, discriminator, shuffle=False, device="cpu"):
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, collate_fn=collate_fn)
xs = []
ys = []
for batch_idx, (x, y) in enumerate(tqdm(data_loader, ascii=True)):
with torch.no_grad():
x = x.to(device)
avg_rep = discriminator.avg_representation(x).cpu().detach()
avg_rep_list = torch.unbind(avg_rep.unsqueeze(1))
xs += avg_rep_list
ys += y.cpu().numpy().tolist()
data_loader = torch.utils.data.DataLoader(
dataset=Dataset(xs, ys), batch_size=batch_size, shuffle=shuffle, collate_fn=cached_collate_fn
)
return data_loader
def train_discriminator(
dataset,
dataset_fp=None,
pretrained_model="gpt2-medium",
epochs=10,
batch_size=64,
log_interval=10,
save_model=False,
cached=False,
no_cuda=False,
):
device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu"
print("Preprocessing {} dataset...".format(dataset))
start = time.time()
if dataset == "SST":
idx2class = ["positive", "negative", "very positive", "very negative", "neutral"]
class2idx = {c: i for i, c in enumerate(idx2class)}
discriminator = Discriminator(
class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device
).to(device)
text = torchtext_data.Field()
label = torchtext_data.Field(sequential=False)
train_data, val_data, test_data = datasets.SST.splits(text, label, fine_grained=True, train_subtrees=True,)
x = []
y = []
for i in trange(len(train_data), ascii=True):
seq = TreebankWordDetokenizer().detokenize(vars(train_data[i])["text"])
seq = discriminator.tokenizer.encode(seq)
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
x.append(seq)
y.append(class2idx[vars(train_data[i])["label"]])
train_dataset = Dataset(x, y)
test_x = []
test_y = []
for i in trange(len(test_data), ascii=True):
seq = TreebankWordDetokenizer().detokenize(vars(test_data[i])["text"])
seq = discriminator.tokenizer.encode(seq)
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
test_x.append(seq)
test_y.append(class2idx[vars(test_data[i])["label"]])
test_dataset = Dataset(test_x, test_y)
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": class2idx,
"default_class": 2,
}
elif dataset == "clickbait":
idx2class = ["non_clickbait", "clickbait"]
class2idx = {c: i for i, c in enumerate(idx2class)}
discriminator = Discriminator(
class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device
).to(device)
with open("datasets/clickbait/clickbait_train_prefix.txt") as f:
data = []
for i, line in enumerate(f):
try:
data.append(eval(line))
except Exception:
print("Error evaluating line {}: {}".format(i, line))
continue
x = []
y = []
with open("datasets/clickbait/clickbait_train_prefix.txt") as f:
for i, line in enumerate(tqdm(f, ascii=True)):
try:
d = eval(line)
seq = discriminator.tokenizer.encode(d["text"])
if len(seq) < max_length_seq:
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
else:
print("Line {} is longer than maximum length {}".format(i, max_length_seq))
continue
x.append(seq)
y.append(d["label"])
except Exception:
print("Error evaluating / tokenizing" " line {}, skipping it".format(i))
pass
full_dataset = Dataset(x, y)
train_size = int(0.9 * len(full_dataset))
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": class2idx,
"default_class": 1,
}
elif dataset == "toxic":
idx2class = ["non_toxic", "toxic"]
class2idx = {c: i for i, c in enumerate(idx2class)}
discriminator = Discriminator(
class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device
).to(device)
x = []
y = []
with open("datasets/toxic/toxic_train.txt") as f:
for i, line in enumerate(tqdm(f, ascii=True)):
try:
d = eval(line)
seq = discriminator.tokenizer.encode(d["text"])
if len(seq) < max_length_seq:
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
else:
print("Line {} is longer than maximum length {}".format(i, max_length_seq))
continue
x.append(seq)
y.append(int(np.sum(d["label"]) > 0))
except Exception:
print("Error evaluating / tokenizing" " line {}, skipping it".format(i))
pass
full_dataset = Dataset(x, y)
train_size = int(0.9 * len(full_dataset))
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": class2idx,
"default_class": 0,
}
else: # if dataset == "generic":
# This assumes the input dataset is a TSV with the following structure:
# class \t text
if dataset_fp is None:
raise ValueError("When generic dataset is selected, " "dataset_fp needs to be specified aswell.")
classes = set()
with open(dataset_fp) as f:
csv_reader = csv.reader(f, delimiter="\t")
for row in tqdm(csv_reader, ascii=True):
if row:
classes.add(row[0])
idx2class = sorted(classes)
class2idx = {c: i for i, c in enumerate(idx2class)}
discriminator = Discriminator(
class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device
).to(device)
x = []
y = []
with open(dataset_fp) as f:
csv_reader = csv.reader(f, delimiter="\t")
for i, row in enumerate(tqdm(csv_reader, ascii=True)):
if row:
label = row[0]
text = row[1]
try:
seq = discriminator.tokenizer.encode(text)
if len(seq) < max_length_seq:
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
else:
print("Line {} is longer than maximum length {}".format(i, max_length_seq))
continue
x.append(seq)
y.append(class2idx[label])
except Exception:
print("Error tokenizing line {}, skipping it".format(i))
pass
full_dataset = Dataset(x, y)
train_size = int(0.9 * len(full_dataset))
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])
discriminator_meta = {
"class_size": len(idx2class),
"embed_size": discriminator.embed_size,
"pretrained_model": pretrained_model,
"class_vocab": class2idx,
"default_class": 0,
}
end = time.time()
print("Preprocessed {} data points".format(len(train_dataset) + len(test_dataset)))
print("Data preprocessing took: {:.3f}s".format(end - start))
if cached:
print("Building representation cache...")
start = time.time()
train_loader = get_cached_data_loader(train_dataset, batch_size, discriminator, shuffle=True, device=device)
test_loader = get_cached_data_loader(test_dataset, batch_size, discriminator, device=device)
end = time.time()
print("Building representation cache took: {:.3f}s".format(end - start))
else:
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn
)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, collate_fn=collate_fn)
if save_model:
with open("{}_classifier_head_meta.json".format(dataset), "w") as meta_file:
json.dump(discriminator_meta, meta_file)
optimizer = optim.Adam(discriminator.parameters(), lr=0.0001)
for epoch in range(epochs):
start = time.time()
print("\nEpoch", epoch + 1)
train_epoch(
discriminator=discriminator,
data_loader=train_loader,
optimizer=optimizer,
epoch=epoch,
log_interval=log_interval,
device=device,
)
evaluate_performance(data_loader=test_loader, discriminator=discriminator, device=device)
end = time.time()
print("Epoch took: {:.3f}s".format(end - start))
print("\nExample prediction")
predict(example_sentence, discriminator, idx2class, cached=cached, device=device)
if save_model:
# torch.save(discriminator.state_dict(),
# "{}_discriminator_{}.pt".format(
# args.dataset, epoch + 1
# ))
torch.save(
discriminator.get_classifier().state_dict(),
"{}_classifier_head_epoch_{}.pt".format(dataset, epoch + 1),
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train a discriminator on top of GPT-2 representations")
parser.add_argument(
"--dataset",
type=str,
default="SST",
choices=("SST", "clickbait", "toxic", "generic"),
help="dataset to train the discriminator on."
"In case of generic, the dataset is expected"
"to be a TSBV file with structure: class \\t text",
)
parser.add_argument(
"--dataset_fp",
type=str,
default="",
help="File path of the dataset to use. " "Needed only in case of generic datadset",
)
parser.add_argument(
"--pretrained_model", type=str, default="gpt2-medium", help="Pretrained model to use as encoder"
)
parser.add_argument("--epochs", type=int, default=10, metavar="N", help="Number of training epochs")
parser.add_argument(
"--batch_size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)"
)
parser.add_argument(
"--log_interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument("--save_model", action="store_true", help="whether to save the model")
parser.add_argument("--cached", action="store_true", help="whether to cache the input representations")
parser.add_argument("--no_cuda", action="store_true", help="use to turn off cuda")
args = parser.parse_args()
train_discriminator(**(vars(args)))
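# Example invocation (all of these flags are defined by the argparse
# configuration above; adjust values to your setup):
#
#     python run_pplm_discrim_train.py --dataset SST --epochs 3 --cached --save_model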
| [
"[email protected]"
] | |
0ce156178c0a4e5b5787386c7b7ef99496948793 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa2/sample/ast_coverage-51.py | bfef20fc22cc7201d85726659aa3556a23ea3605 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | count:int = 0
def foo(s: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz($TypedVar, xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"[email protected]"
] | |
3b1373563feba29c600a45c39b4c088717a565e3 | 33c1c5d0f48ad952776fe546a85350a441d6cfc2 | /ABC/058/C.py | 5e7f508ce787e956e285580187e4f2a28810dd0c | [] | no_license | hisyatokaku/Competition | 985feb14aad73fda94804bb1145e7537b057e306 | fdbf045a59eccb1b2502b018cab01810de4ea894 | refs/heads/master | 2021-06-30T18:48:48.256652 | 2020-11-16T11:55:12 | 2020-11-16T11:55:12 | 191,138,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | import math,string,itertools,fractions,heapq,collections,re,array,bisect,sys,random,time,copy,functools
from collections import deque
sys.setrecursionlimit(10**7)
inf = 10**20
mod = 10**9 + 7
DR = [1, -1, 0, 0]
DC = [0, 0, 1, -1]
def LI(): return [int(x) for x in sys.stdin.readline().split()]
def LI_(): return [int(x)-1 for x in sys.stdin.readline().split()]
def LF(): return [float(x) for x in sys.stdin.readline().split()]
def LS(): return sys.stdin.readline().split()
def I(): return int(sys.stdin.readline())
def F(): return float(sys.stdin.readline())
def S(): return input()
def main():
N = I()
    # prevdic accumulates, for every letter, the minimum number of occurrences
    # seen across all N strings; only those characters can appear in a string
    # that can be formed from every input.
    prevdic = None
for _ in range(N):
ch = S()
curdic = collections.Counter()
for c in ch:
curdic[c] += 1
if prevdic:
for k, v in prevdic.items():
prevdic[k] = min(v, curdic[k])
else:
prevdic = curdic
    # Output each surviving letter, repeated by its minimum count, in
    # lexicographic order, which yields the required answer.
    diclist = []
for k, v in prevdic.items():
diclist.append((k, v))
diclist = sorted(diclist, key=lambda x: x[0])
ans = ''
for item in diclist:
k, v = item[0], item[1]
ans += k * v
print(ans)
main()
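# Worked example (AtCoder ABC 058 C sample, stated here for illustration):
# for N=3 and the strings "cbaa", "daacc", "acacac", the per-letter minima
# are a:2 and c:1, so the program prints "aac".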
| [
"[email protected]"
] | |
7eb97a4b679b17234e30a781df737859f3a9fec7 | f89d70fc8bf370ef4e2aa54c7ee0de3b4a053624 | /troposphere/validators/certificatemanager.py | c7644aacc79b763e015b48d41199629fa523839d | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | yks0000/troposphere | a7622bff01c31f10dcb296d2ca353144e1d7f793 | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | refs/heads/main | 2022-04-28T03:51:42.770881 | 2022-04-15T15:15:01 | 2022-04-15T15:15:01 | 482,753,190 | 1 | 0 | BSD-2-Clause | 2022-04-18T07:20:42 | 2022-04-18T07:20:42 | null | UTF-8 | Python | false | false | 250 | py | # Copyright (c) 2012-2022, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import tags_or_list
def validate_tags_or_list(x):
"""
Property: Certificate.Tags
"""
return tags_or_list(x)
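# Illustrative usage (hypothetical values): the validator passes through
# either a troposphere Tags object or a plain list.
#
#     from troposphere import Tags
#     validate_tags_or_list(Tags(Environment="prod"))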
| [
"[email protected]"
] | |
10f6a7a084d195cc8020da4f2a0f4e5ac4493b89 | ebbbefbc82412d8a91c8de96a45ffebe5d625c51 | /test/inspector_protocol_parser_test/inspector_protocol_parser_test.gyp | 835117c5a7b9f7f8f21033995d3aee142372a057 | [
"BSD-3-Clause",
"SunPro",
"bzip2-1.0.6"
] | permissive | riverar/v8 | d93eb2aea104d10aee2a27a7763e13061ec60f4b | 16397d242258b97f107e742e37cc585e77b9b3d0 | refs/heads/master | 2020-12-14T18:39:32.337673 | 2016-11-13T22:04:51 | 2016-11-13T22:04:51 | 77,864,048 | 2 | 0 | null | 2017-01-02T21:50:22 | 2017-01-02T21:50:21 | null | UTF-8 | Python | false | false | 1,311 | gyp | # Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{ 'variables': {
'protocol_path': '../../third_party/WebKit/Source/platform/inspector_protocol',
},
'targets': [
{ 'target_name': 'inspector_protocol_parser_test',
'type': 'executable',
'dependencies': [
'../../src/inspector/inspector.gyp:inspector_protocol',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
],
'include_dirs+': [
'../..',
'<(protocol_path)/../..',
],
'defines': [
'V8_INSPECTOR_USE_STL',
],
'sources': [
'<(protocol_path)/ParserTest.cpp',
'RunTests.cpp',
]
},
],
'conditions': [
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'inspector_protocol_parser_test_run',
'type': 'none',
'dependencies': [
'inspector_protocol_parser_test',
],
'includes': [
'../../gypfiles/features.gypi',
'../../gypfiles/isolate.gypi',
],
'sources': [
'inspector_protocol_parser_test.isolate',
],
},
],
}],
],
}
| [
"[email protected]"
] | |
c4fc69e83f8f586a7eac0551aaba42be7e4142fa | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1713+248/sdB_pg_1713+248_coadd.py | 512545067cc439b9893d34f29e077ea79ff244a5 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[258.910292,24.789658], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_1713+248/sdB_pg_1713+248_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_1713+248/sdB_pg_1713+248_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
3d4853aadd426ddffba132f55513da77925f1a05 | 72dbf8366cf17b6a81ab37e72af667726e3f2661 | /store/migrations/0004_customer_profile_pic.py | 3c6be38b982836d8550da5a4b198ba126e8f506e | [] | no_license | Rayhun/Django_E-Commerce_website | 3aef732ffa0a41509be95ced3c33b845233903a7 | 1a5f7e31f942914256e49ba7da1f7367a799f097 | refs/heads/main | 2023-05-23T18:18:27.875328 | 2021-04-30T19:29:06 | 2021-04-30T19:29:06 | 306,414,778 | 3 | 1 | null | 2021-04-30T19:28:58 | 2020-10-22T17:41:57 | CSS | UTF-8 | Python | false | false | 412 | py | # Generated by Django 3.1.1 on 2020-10-31 15:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0003_auto_20201030_1534'),
]
operations = [
migrations.AddField(
model_name='customer',
name='profile_pic',
field=models.ImageField(blank=True, null=True, upload_to=''),
),
]
| [
"[email protected]"
] | |
0b7425ef67785d25400834e4025a57b2674c43ad | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmbackuppolicy.py | c8977d6cfac4931a9ca8ddcc2ed369004c8ee7d9 | [
"GPL-1.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 15,653 | py | #!/usr/bin/python
#
# Copyright (c) 2020 Suyeb Ansari (@suyeb786)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_vmbackuppolicy
version_added: '1.1.0'
short_description: Create or Delete Azure VM Backup Policy
description:
- Create or Delete Azure VM Backup Policy.
options:
name:
description:
- Policy Name.
required: true
type: str
resource_group:
description:
- The name of the resource group.
required: true
type: str
vault_name:
description:
- Recovery Service Vault Name.
required: true
type: str
time:
description:
- Retention times of retention policy in UTC.
required: false
default: '12:00'
type: str
weekdays:
description:
- List of days of the week.
required: false
default: ['Monday']
type: list
weeks:
description:
- List of weeks of month.
required: false
default: ['First']
type: list
months:
description:
- List of months of year of yearly retention policy.
required: false
default: ['January']
type: list
count:
description:
- Count of duration types. Retention duration is obtained by the counting the duration type Count times.
required: false
default: 1
type: int
state:
description:
- Assert the state of the protection item.
            - Use C(present) to create a backup policy.
            - Use C(absent) to delete a backup policy.
default: present
type: str
choices:
- present
- absent
extends_documentation_fragment:
- azure.azcollection.azure
- azure.azcollection.azure_tags
author:
- Suyeb Ansari (@suyeb786)
'''
EXAMPLES = '''
- name: Create VM Backup Policy
  azure_rm_vmbackuppolicy:
name: 'myBackupPolicy'
vault_name: 'myVault'
resource_group: 'myResourceGroup'
time: '18:00'
weekdays: ['Monday', 'Thursday', 'Friday']
weeks: ['First', 'Fourth']
months: ['February', 'November']
count: 4
state: present
- name: Delete VM Backup Policy
azure_rm_backvmuppolicy:
name: 'myBackupPolicy'
vault_name: 'myVault'
resource_group: 'myResourceGroup'
state: absent
'''
RETURN = '''
response:
description:
- The response about the current state of the backup policy.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: "/subscriptions/xxxxxxx/resourceGroups/resourcegroup_name/ \
providers/Microsoft.RecoveryServices/vaults/myVault/backupPolicies/myBackup"
name:
description:
- Backup Policy Name.
returned: always
type: str
sample: "myBackup"
properties:
description:
- The backup policy properties.
returned: always
type: dict
sample: {
"backupManagementType": "AzureIaasVM",
"schedulePolicy": {
"schedulePolicyType": "SimpleSchedulePolicy",
"scheduleRunFrequency": "Weekly",
"scheduleRunDays": [
"Monday",
"Wednesday",
"Thursday"
],
"scheduleRunTimes": [
"2018-01-24T10:00:00Z"
],
"scheduleWeeklyFrequency": 0
},
"retentionPolicy": {
"retentionPolicyType": "LongTermRetentionPolicy",
"weeklySchedule": {
"daysOfTheWeek": [
"Monday",
"Wednesday",
"Thursday"
],
"retentionTimes": [
"2018-01-24T10:00:00Z"
],
"retentionDuration": {
"count": 1,
"durationType": "Weeks"
}
},
"monthlySchedule": {
"retentionScheduleFormatType": "Weekly",
"retentionScheduleWeekly": {
"daysOfTheWeek": [
"Wednesday",
"Thursday"
],
"weeksOfTheMonth": [
"First",
"Third"
]
},
"retentionTimes": [
"2018-01-24T10:00:00Z"
],
"retentionDuration": {
"count": 2,
"durationType": "Months"
}
},
"yearlySchedule": {
"retentionScheduleFormatType": "Weekly",
"monthsOfYear": [
"February",
"November"
],
"retentionScheduleWeekly": {
"daysOfTheWeek": [
"Monday",
"Thursday"
],
"weeksOfTheMonth": [
"Fourth"
]
},
"retentionTimes": [
"2018-01-24T10:00:00Z"
],
"retentionDuration": {
"count": 4,
"durationType": "Years"
}
}
},
"timeZone": "Pacific Standard Time",
"protectedItemsCount": 0
}
type:
description:
- Resource type.
returned: always
type: str
sample: "Microsoft.RecoveryServices/vaults/backupPolicies"
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
import time
import json
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
class VMBackupPolicy(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
vault_name=dict(
type='str',
required=True
),
time=dict(
type='str',
default='12:00'
),
weekdays=dict(
type='list',
default=['Monday']
),
weeks=dict(
type='list',
default=['First']
),
months=dict(
type='list',
default=['January']
),
count=dict(
type='int',
default=1
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.name = None
self.time = None
self.state = None
self.vault_name = None
self.count = None
self.weekdays = None
self.weeks = None
self.months = None
self.results = dict(changed=False)
self.mgmt_client = None
self.url = None
self.status_code = [200, 201, 202, 204]
self.body = {}
self.query_parameters = {}
self.query_parameters['api-version'] = '2019-05-13'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
super(VMBackupPolicy, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True
)
def get_url(self):
return '/subscriptions/' \
+ self.subscription_id \
+ '/resourceGroups/' \
+ self.resource_group \
+ '/providers/Microsoft.RecoveryServices' \
+ '/vaults' + '/' \
+ self.vault_name + '/' \
+ "backupPolicies/" \
+ self.name
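        # Illustrative result (names are placeholders):
        #   /subscriptions/<sub>/resourceGroups/<rg>/providers/
        #   Microsoft.RecoveryServices/vaults/<vault>/backupPolicies/<policy>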
def set_schedule_run_time(self):
return time.strftime("%Y-%m-%d", time.gmtime()) + "T" + self.time + ":00Z"
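        # e.g. with self.time == '18:00' this returns a UTC timestamp such as
        # '2020-01-01T18:00:00Z' (the date part is the current UTC day).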
def get_body(self):
self.log('backup attributes {0}'.format(self.body))
self.time = self.set_schedule_run_time()
schedule_policy = dict()
schedule_policy['schedulePolicyType'] = 'SimpleSchedulePolicy'
schedule_policy['scheduleRunFrequency'] = 'Weekly'
schedule_policy['scheduleRunTimes'] = [self.time]
schedule_policy['scheduleRunDays'] = self.weekdays
weekly_schedule = dict()
weekly_schedule['daysOfTheWeek'] = ['Monday']
weekly_schedule['retentionTimes'] = [self.time]
weekly_schedule['retentionDuration'] = dict()
weekly_schedule['retentionDuration']['count'] = self.count
weekly_schedule['retentionDuration']['durationType'] = 'Weeks'
monthly_schedule = dict()
monthly_schedule['retentionScheduleFormatType'] = 'Weekly'
monthly_schedule['retentionScheduleWeekly'] = dict()
monthly_schedule['retentionScheduleWeekly']['daysOfTheWeek'] = self.weekdays
monthly_schedule['retentionScheduleWeekly']['weeksOfTheMonth'] = self.weeks
monthly_schedule['retentionTimes'] = [self.time]
monthly_schedule['retentionDuration'] = dict()
monthly_schedule['retentionDuration']['count'] = self.count
monthly_schedule['retentionDuration']['durationType'] = 'Months'
yearly_schedule = dict()
yearly_schedule['retentionScheduleFormatType'] = 'Weekly'
yearly_schedule['monthsOfYear'] = self.months
yearly_schedule['retentionScheduleWeekly'] = dict()
yearly_schedule['retentionScheduleWeekly']['daysOfTheWeek'] = self.weekdays
yearly_schedule['retentionScheduleWeekly']['weeksOfTheMonth'] = self.weeks
yearly_schedule['retentionTimes'] = [self.time]
yearly_schedule['retentionDuration'] = dict()
yearly_schedule['retentionDuration']['count'] = self.count
yearly_schedule['retentionDuration']['durationType'] = 'Years'
body = dict()
body['properties'] = dict()
body['properties']['backupManagementType'] = 'AzureIaasVM'
body['properties']['timeZone'] = 'Pacific Standard Time'
body['properties']['schedulePolicy'] = schedule_policy
body['properties']['retentionPolicy'] = dict()
body['properties']['retentionPolicy']['retentionPolicyType'] = 'LongTermRetentionPolicy'
body['properties']['retentionPolicy']['weeklySchedule'] = weekly_schedule
body['properties']['retentionPolicy']['monthlySchedule'] = monthly_schedule
body['properties']['retentionPolicy']['yearlySchedule'] = yearly_schedule
return body
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
self.url = self.get_url()
self.body = self.get_body()
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
old_response = self.get_resource()
changed = False
if self.state == 'present':
if old_response is False:
response = self.create_vm_backup_policy()
changed = True
else:
response = old_response
if self.state == 'absent':
changed = True
response = self.delete_vm_backup_policy()
self.results['response'] = response
self.results['changed'] = changed
return self.results
def create_vm_backup_policy(self):
        # self.log('Creating VM Backup Policy {0}'.format(self.name))
try:
response = self.mgmt_client.query(
self.url,
'PUT',
self.query_parameters,
self.header_parameters,
self.body,
self.status_code,
600,
30,
)
except CloudError as e:
self.log('Error in creating Backup Policy.')
self.fail('Error in creating Backup Policy {0}'.format(str(e)))
try:
response = json.loads(response.text)
except Exception:
response = {'text': response.text}
return response
def delete_vm_backup_policy(self):
# self.log('Deleting Backup Policy {0}'.format(self.))
try:
response = self.mgmt_client.query(
self.url,
'DELETE',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30,
)
except CloudError as e:
self.log('Error attempting to delete Azure Backup policy.')
self.fail('Error attempting to delete Azure Backup policy: {0}'.format(str(e)))
try:
response = json.loads(response.text)
except Exception:
response = {'text': response.text}
return response
def get_resource(self):
        # self.log('Fetch Backup Policy Details {0}'.format(self.name))
found = False
try:
response = self.mgmt_client.query(
self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30,
)
found = True
except CloudError as e:
self.log('Backup policy does not exist.')
if found is True:
response = json.loads(response.text)
return response
else:
return False
def main():
VMBackupPolicy()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
5dbd1f7c42544433d409a21de2f3f2b30ab38bb3 | 31730fbdf50dcbc36205911e3a676f0d826157b1 | /setup.py | 87aa04ba1362ee17f5c2df4173f3d322c339a45c | [
"MIT"
] | permissive | Harpuia/terminal-leetcode | 954a270784fe920eb49f1fc4b8e66b379377e608 | 39b27b3a6260195376c0cdffb7697b4aa5c60ca8 | refs/heads/master | 2021-01-22T02:04:19.470178 | 2017-05-21T21:34:26 | 2017-05-21T21:34:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
requirements = ['urwid', 'requests', 'bs4', 'lxml']
setup(
name = "terminal-leetcode",
version = "0.0.11",
author = "Liyun Xiu",
author_email = "[email protected]",
description = "A terminal based leetcode website viewer",
license = "MIT",
keywords = "leetcode terminal urwid",
url = "https://github.com/chishui/terminal-leetcode",
packages=['leetcode', 'leetcode/views'],
long_description=read('README.md'),
include_package_data=True,
install_requires=requirements,
entry_points={'console_scripts': ['leetcode=leetcode.__main__:main']},
#classifiers=[
#"Operating System :: MacOS :: MacOS X",
#"Operating System :: POSIX",
#"Natural Language :: English",
#"Programming Language :: Python :: 2.7",
#"Development Status :: 2 - Pre-Alpha",
#"Environment :: Console :: Curses",
#"Topic :: Utilities",
#"Topic :: Terminals",
#"License :: OSI Approved :: MIT License",
#],
)
| [
"[email protected]"
] | |
8308c5e060050d89710a4c75af0015ccdf6f9d54 | 9f250956e2c19e5b51053a513a6b31ef8128d674 | /myProject/account/models.py | 847e54c8c8c7bd75fd82bf7bb24b88146fd4144d | [] | no_license | murali-kotakonda/pyDjango | 85b9128949fcdf3bcc0e60c386decd9eeff723db | cf1a920f8146be600bc04455cb5769f369c7d6eb | refs/heads/master | 2023-03-24T06:34:14.143103 | 2021-03-20T12:38:40 | 2021-03-20T12:38:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py |
from django.db import models
# Create your models here.
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
middle_name = models.CharField(max_length=30, blank=True)
dob = models.DateField(null=True, blank=True)
active = models.BooleanField(default=True)
pub_date = models.DateTimeField(default=timezone.now) | [
"[email protected]"
] | |
2b2ef12926774661cb4b51bc33a1ee978667c5e7 | 536bce6ca78a9a151247b51acb8c375c9db7445f | /src/plot/plot0a.py | 576ed81aae88789ba020629c45a25f2e61024a75 | [] | no_license | clicianaldoni/aprimeronpython | 57de34313f4fd2a0c69637fefd60b0fb5861f859 | a917b62bec669765a238c4b310cc52b79c7df0c9 | refs/heads/master | 2023-01-28T18:02:31.175511 | 2023-01-23T08:14:57 | 2023-01-23T08:14:57 | 112,872,454 | 0 | 0 | null | 2017-12-02T19:55:40 | 2017-12-02T19:55:40 | null | UTF-8 | Python | false | false | 679 | py | """Plot three curves. Use Matlab-style syntax."""
from scitools.std import *
# plot two curves in the same plot:
t = linspace(0, 3, 51) # 51 points between 0 and 3
y1 = t**2*exp(-t**2)
y2 = t**4*exp(-t**2)
# pick out each 4 points and add random noise:
t3 = t[::4]
random.seed(11)
y3 = y2[::4] + random.normal(loc=0, scale=0.02, size=len(t3))
# use Matlab syntax:
plot(t, y1, 'r-')
hold('on')
plot(t, y2, 'b-')
plot(t3, y3, 'bo')
legend('t^2*exp(-t^2)', 't^4*exp(-t^2)', 'data')
title('Simple Plot Demo')
axis([0, 3, -0.05, 0.6])
xlabel('t')
ylabel('y')
show()
hardcopy('tmp0.eps') # this one can be included in latex
hardcopy('tmp0.png') # this one can be included in HTML
| [
"[email protected]"
] | |
b6976d1ebf040e74ebb2ffe37340c1a569afacca | bc444c603a80d7c656c4f20539f6035c43fff54a | /src/dirbs/api/v2/resources/__init__.py | beaa8eb8bea4490bfdda03263a24cddeddeb4211 | [
"BSD-4-Clause",
"LicenseRef-scancode-other-permissive",
"zlib-acknowledgement",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dewipuspa/DIRBS-Core | fe1af50918333474732872b61dc3ae4f8e41c14f | 702e93dcefdf0fb5787cb42c2a6bc2574e483057 | refs/heads/master | 2020-07-07T09:59:10.723477 | 2019-06-21T06:39:59 | 2019-06-21T06:39:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,104 | py | """
DIRBS REST-ful API-V2 resource package.
SPDX-License-Identifier: BSD-4-Clause-Clear
Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
- All advertising materials mentioning features or use of this software, or any deployment of this software,
or documentation accompanying any distribution of this software, must display the trademark/logo as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written permission.
SPDX-License-Identifier: ZLIB-ACKNOWLEDGEMENT
Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable
for any damages arising from the use of this software. Permission is granted to anyone to use this software for any
purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following
restrictions:
- The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/logo as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Altered source versions must be plainly marked as such, and must not be misrepresented as being the original
software.
- This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
| [
"[email protected]"
] | |
bcfeb0806979c5c3c6b5815db7dd321e6d847b41 | d994cee3810de8f5fa895807f415d8051dfd1319 | /HetMan/experiments/pair_inference/setup_comb.py | 4ab9ea7068b4cfd9486fea0b2579d392b6315969 | [] | no_license | ohsu-comp-bio/bergamot | b4e162525d1cc66975d64866cc68004ee42828e2 | 37fd50fc1ce6da83049a0cbbad5038a055fd3544 | refs/heads/master | 2023-07-24T01:15:27.695906 | 2019-09-19T01:39:13 | 2019-09-19T01:39:13 | 89,973,424 | 1 | 2 | null | 2023-07-06T21:07:53 | 2017-05-02T00:03:26 | Python | UTF-8 | Python | false | false | 4,119 | py |
import os
base_dir = os.path.dirname(__file__)
import sys
sys.path.extend([os.path.join(base_dir, '../../..')])
from HetMan.features.expression import get_expr_firehose
from HetMan.features.variants import get_variants_mc3
from HetMan.features.cohorts import VariantCohort
import synapseclient
import dill as pickle
import argparse
firehose_dir = '/home/exacloud/lustre1/CompBio/mgrzad/input-data/firehose'
def main():
"""Runs the experiment."""
parser = argparse.ArgumentParser(
description='Set up searching for sub-types to detect.'
)
# positional command line arguments
parser.add_argument('cohort', type=str, help='a TCGA cohort')
parser.add_argument('classif', type=str,
help='a classifier in HetMan.predict.classifiers')
# optional command line arguments controlling the thresholds for which
# individual mutations and how many genes' mutations are considered
parser.add_argument(
'--freq_cutoff', type=int, default=20,
help='sub-type sample frequency threshold'
)
parser.add_argument(
'--max_genes', type=int, default=10,
help='maximum number of mutated genes to consider'
)
# optional command line argument controlling verbosity
parser.add_argument('--verbose', '-v', action='store_true',
help='turns on diagnostic messages')
# parse the command line arguments, get the directory where found sub-types
# will be saved for future use
args = parser.parse_args()
out_path = os.path.join(base_dir, 'output',
args.cohort, args.classif, 'comb')
if args.verbose:
print("Looking for mutation sub-types in cohort {} with at least {} "
"samples in total.\n".format(
args.cohort, args.freq_cutoff))
# log into Synapse using locally-stored credentials
syn = synapseclient.Synapse()
syn.cache.cache_root_dir = ("/home/exacloud/lustre1/CompBio/"
"mgrzad/input-data/synapse")
syn.login()
# load the expression matrix for the given cohort from Broad Firehose,
# load the MC3 variant call set from Synapse, find the mutations for the
# samples that are in both datasets
expr_data = get_expr_firehose(args.cohort, firehose_dir)
mc3_data = get_variants_mc3(syn)
expr_mc3 = mc3_data.loc[mc3_data['Sample'].isin(expr_data.index), :]
# get the genes whose mutations appear in enough samples to pass the
# frequency threshold
gene_counts = expr_mc3.groupby(by='Gene').Sample.nunique()
common_genes = set(gene_counts.index[gene_counts >= args.freq_cutoff])
if args.verbose:
print("Found {} candidate genes with at least {} potential "
"mutated samples.".format(len(common_genes), args.freq_cutoff))
# if too many genes passed the frequency cutoff, use only the top n by
# frequency - note that ties are broken arbitrarily and so the list of
# genes chosen will differ slightly between runs
if len(common_genes) >= args.max_genes:
gene_counts = gene_counts[common_genes].sort_values(ascending=False)
common_genes = set(gene_counts[:args.max_genes].index)
if args.verbose:
print("Too many genes found, culling list to {} genes which each "
"have at least {} mutated samples.".format(
args.max_genes, min(gene_counts[common_genes])))
cdata = VariantCohort(
cohort=args.cohort, mut_genes=common_genes, mut_levels=['Gene'],
expr_source='Firehose', data_dir=firehose_dir, cv_prop=1.0, syn=syn
)
use_mtypes = cdata.train_mut.branchtypes(sub_levels=['Gene'],
min_size=args.freq_cutoff)
if args.verbose:
print("\nFound {} total sub-types!".format(len(use_mtypes)))
# save the list of found non-duplicate sub-types to file
pickle.dump(sorted(list(use_mtypes)),
open(os.path.join(out_path, 'tmp/mtype_list.p'), 'wb'))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
120fa7d83ee44ee9335c1740031e2fa791883cf0 | 3f28b697f570ded0502de70c706200005ab62525 | /env/lib/python2.7/site-packages/scipy/io/mmio.py | 6b36f6dccc7d3f6b1daf07e1dd4f4d10aac5ddcc | [
"MIT"
] | permissive | Ram-Aditya/Healthcare-Data-Analytics | 5387e41ad8e56af474e10fa2d1c9d8a2847c5ead | d1a15d2cc067410f82a9ded25f7a782ef56b4729 | refs/heads/master | 2022-12-09T12:49:59.027010 | 2019-11-23T20:10:55 | 2019-11-23T20:10:55 | 223,639,339 | 0 | 1 | MIT | 2022-11-22T00:37:48 | 2019-11-23T19:06:20 | Jupyter Notebook | UTF-8 | Python | false | false | 22,016 | py | """
Matrix Market I/O in Python.
"""
#
# Author: Pearu Peterson <[email protected]>
# Created: October, 2004
#
# References:
# http://math.nist.gov/MatrixMarket/
#
from __future__ import division, print_function, absolute_import
import os
import sys
from numpy import asarray, real, imag, conj, zeros, ndarray, concatenate, \
ones, ascontiguousarray, vstack, savetxt, fromfile, fromstring
from numpy.compat import asbytes, asstr
from scipy.lib.six import string_types
__all__ = ['mminfo','mmread','mmwrite', 'MMFile']
#-------------------------------------------------------------------------------
def mminfo(source):
"""
Queries the contents of the Matrix Market file 'filename' to
extract size and storage information.
Parameters
----------
source : file
Matrix Market filename (extension .mtx) or open file object
Returns
-------
rows,cols : int
Number of matrix rows and columns
entries : int
Number of non-zero entries of a sparse matrix
or rows*cols for a dense matrix
format : str
Either 'coordinate' or 'array'.
field : str
Either 'real', 'complex', 'pattern', or 'integer'.
symm : str
Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
"""
return MMFile.info(source)
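# Illustrative usage (the file name is hypothetical):
#
#     rows, cols, entries, fmt, field, symm = mminfo('matrix.mtx')
#     # e.g. -> (5, 5, 8, 'coordinate', 'real', 'general')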
#-------------------------------------------------------------------------------
def mmread(source):
"""
Reads the contents of a Matrix Market file 'filename' into a matrix.
Parameters
----------
source : file
Matrix Market filename (extensions .mtx, .mtz.gz)
or open file object.
Returns
-------
a:
Sparse or full matrix
"""
return MMFile().read(source)
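# Illustrative usage (hypothetical file): a coordinate-format file comes back
# as a sparse COO matrix; call .toarray() on it for a dense view.
#
#     a = mmread('matrix.mtx')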
#-------------------------------------------------------------------------------
def mmwrite(target, a, comment='', field=None, precision=None):
"""
Writes the sparse or dense array `a` to a Matrix Market formatted file.
Parameters
----------
target : file
Matrix Market filename (extension .mtx) or open file object
a : array like
Sparse or dense 2D array
comment : str, optional
comments to be prepended to the Matrix Market file
field : None or str, optional
Either 'real', 'complex', 'pattern', or 'integer'.
precision : None or int, optional
Number of digits to display for real or complex values.
"""
MMFile().write(target, a, comment, field, precision)
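# Illustrative sketch (array and path are made up):
#
#     import numpy as np
#     mmwrite('identity.mtx', np.eye(3), comment='3x3 identity', precision=8)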
################################################################################
class MMFile (object):
__slots__ = (
'_rows',
'_cols',
'_entries',
'_format',
'_field',
'_symmetry')
@property
def rows(self):
return self._rows
@property
def cols(self):
return self._cols
@property
def entries(self):
return self._entries
@property
def format(self):
return self._format
@property
def field(self):
return self._field
@property
def symmetry(self):
return self._symmetry
@property
def has_symmetry(self):
return self._symmetry in (self.SYMMETRY_SYMMETRIC,
self.SYMMETRY_SKEW_SYMMETRIC, self.SYMMETRY_HERMITIAN)
# format values
FORMAT_COORDINATE = 'coordinate'
FORMAT_ARRAY = 'array'
FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY)
@classmethod
def _validate_format(self, format):
if format not in self.FORMAT_VALUES:
raise ValueError('unknown format type %s, must be one of %s' %
(format, self.FORMAT_VALUES))
# field values
FIELD_INTEGER = 'integer'
FIELD_REAL = 'real'
FIELD_COMPLEX = 'complex'
FIELD_PATTERN = 'pattern'
FIELD_VALUES = (FIELD_INTEGER, FIELD_REAL, FIELD_COMPLEX, FIELD_PATTERN)
@classmethod
def _validate_field(self, field):
if field not in self.FIELD_VALUES:
raise ValueError('unknown field type %s, must be one of %s' %
(field, self.FIELD_VALUES))
# symmetry values
SYMMETRY_GENERAL = 'general'
SYMMETRY_SYMMETRIC = 'symmetric'
SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric'
SYMMETRY_HERMITIAN = 'hermitian'
SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC,
SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN)
@classmethod
def _validate_symmetry(self, symmetry):
if symmetry not in self.SYMMETRY_VALUES:
raise ValueError('unknown symmetry type %s, must be one of %s' %
(symmetry, self.SYMMETRY_VALUES))
DTYPES_BY_FIELD = {
FIELD_INTEGER: 'i',
FIELD_REAL: 'd',
FIELD_COMPLEX: 'D',
FIELD_PATTERN: 'd'}
#---------------------------------------------------------------------------
@staticmethod
def reader():
pass
#---------------------------------------------------------------------------
@staticmethod
def writer():
pass
#---------------------------------------------------------------------------
@classmethod
def info(self, source):
source, close_it = self._open(source)
try:
# read and validate header line
line = source.readline()
mmid, matrix, format, field, symmetry = \
[asstr(part.strip()) for part in line.split()]
if not mmid.startswith('%%MatrixMarket'):
raise ValueError('source is not in Matrix Market format')
if not matrix.lower() == 'matrix':
raise ValueError("Problem reading file header: " + line)
# http://math.nist.gov/MatrixMarket/formats.html
if format.lower() == 'array':
format = self.FORMAT_ARRAY
elif format.lower() == 'coordinate':
format = self.FORMAT_COORDINATE
# skip comments
while line.startswith(b'%'):
line = source.readline()
line = line.split()
if format == self.FORMAT_ARRAY:
if not len(line) == 2:
raise ValueError("Header line not of length 2: " + line)
rows, cols = map(int, line)
entries = rows * cols
else:
if not len(line) == 3:
raise ValueError("Header line not of length 3: " + line)
rows, cols, entries = map(int, line)
return (rows, cols, entries, format, field.lower(), symmetry.lower())
finally:
if close_it:
source.close()
#---------------------------------------------------------------------------
@staticmethod
def _open(filespec, mode='rb'):
"""
Return an open file stream for reading based on source. If source is
a file name, open it (after trying to find it with mtx and gzipped mtx
extensions). Otherwise, just return source.
"""
close_it = False
if isinstance(filespec, string_types):
close_it = True
# open for reading
if mode[0] == 'r':
# determine filename plus extension
if not os.path.isfile(filespec):
if os.path.isfile(filespec+'.mtx'):
filespec = filespec + '.mtx'
elif os.path.isfile(filespec+'.mtx.gz'):
filespec = filespec + '.mtx.gz'
elif os.path.isfile(filespec+'.mtx.bz2'):
filespec = filespec + '.mtx.bz2'
# open filename
if filespec.endswith('.gz'):
import gzip
stream = gzip.open(filespec, mode)
elif filespec.endswith('.bz2'):
import bz2
stream = bz2.BZ2File(filespec, 'rb')
else:
stream = open(filespec, mode)
# open for writing
else:
if filespec[-4:] != '.mtx':
filespec = filespec + '.mtx'
stream = open(filespec, mode)
else:
stream = filespec
return stream, close_it
#---------------------------------------------------------------------------
@staticmethod
def _get_symmetry(a):
m,n = a.shape
if m != n:
return MMFile.SYMMETRY_GENERAL
issymm = 1
isskew = 1
isherm = a.dtype.char in 'FD'
for j in range(n):
for i in range(j+1,n):
aij,aji = a[i][j],a[j][i]
if issymm and aij != aji:
issymm = 0
if isskew and aij != -aji:
isskew = 0
if isherm and aij != conj(aji):
isherm = 0
if not (issymm or isskew or isherm):
break
if issymm:
return MMFile.SYMMETRY_SYMMETRIC
if isskew:
return MMFile.SYMMETRY_SKEW_SYMMETRIC
if isherm:
return MMFile.SYMMETRY_HERMITIAN
return MMFile.SYMMETRY_GENERAL
#---------------------------------------------------------------------------
@staticmethod
def _field_template(field, precision):
return {
MMFile.FIELD_REAL: '%%.%ie\n' % precision,
MMFile.FIELD_INTEGER: '%i\n',
MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' % (precision,precision)
}.get(field, None)
#---------------------------------------------------------------------------
def __init__(self, **kwargs):
self._init_attrs(**kwargs)
#---------------------------------------------------------------------------
def read(self, source):
stream, close_it = self._open(source)
try:
self._parse_header(stream)
return self._parse_body(stream)
finally:
if close_it:
stream.close()
#---------------------------------------------------------------------------
def write(self, target, a, comment='', field=None, precision=None):
stream, close_it = self._open(target, 'wb')
try:
self._write(stream, a, comment, field, precision)
finally:
if close_it:
stream.close()
else:
stream.flush()
#---------------------------------------------------------------------------
def _init_attrs(self, **kwargs):
"""
Initialize each attributes with the corresponding keyword arg value
or a default of None
"""
attrs = self.__class__.__slots__
public_attrs = [attr[1:] for attr in attrs]
invalid_keys = set(kwargs.keys()) - set(public_attrs)
if invalid_keys:
raise ValueError('found %s invalid keyword arguments, please only use %s' %
(tuple(invalid_keys), public_attrs))
for attr in attrs:
setattr(self, attr, kwargs.get(attr[1:], None))
#---------------------------------------------------------------------------
def _parse_header(self, stream):
rows, cols, entries, format, field, symmetry = \
self.__class__.info(stream)
self._init_attrs(rows=rows, cols=cols, entries=entries, format=format,
field=field, symmetry=symmetry)
#---------------------------------------------------------------------------
def _parse_body(self, stream):
rows, cols, entries, format, field, symm = (self.rows, self.cols,
self.entries, self.format, self.field, self.symmetry)
try:
from scipy.sparse import coo_matrix
except ImportError:
coo_matrix = None
dtype = self.DTYPES_BY_FIELD.get(field, None)
has_symmetry = self.has_symmetry
is_complex = field == self.FIELD_COMPLEX
is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC
is_herm = symm == self.SYMMETRY_HERMITIAN
is_pattern = field == self.FIELD_PATTERN
if format == self.FORMAT_ARRAY:
a = zeros((rows,cols), dtype=dtype)
line = 1
i,j = 0,0
while line:
line = stream.readline()
if not line or line.startswith(b'%'):
continue
if is_complex:
aij = complex(*map(float,line.split()))
else:
aij = float(line)
a[i,j] = aij
if has_symmetry and i != j:
if is_skew:
a[j,i] = -aij
elif is_herm:
a[j,i] = conj(aij)
else:
a[j,i] = aij
if i < rows-1:
i = i + 1
else:
j = j + 1
if not has_symmetry:
i = 0
else:
i = j
if not (i in [0,j] and j == cols):
raise ValueError("Parse error, did not read all lines.")
elif format == self.FORMAT_COORDINATE and coo_matrix is None:
# Read sparse matrix to dense when coo_matrix is not available.
a = zeros((rows,cols), dtype=dtype)
line = 1
k = 0
while line:
line = stream.readline()
if not line or line.startswith(b'%'):
continue
l = line.split()
i,j = map(int,l[:2])
i,j = i-1,j-1
if is_complex:
aij = complex(*map(float,l[2:]))
else:
aij = float(l[2])
a[i,j] = aij
if has_symmetry and i != j:
if is_skew:
a[j,i] = -aij
elif is_herm:
a[j,i] = conj(aij)
else:
a[j,i] = aij
k = k + 1
            if k != entries:
                raise ValueError("Did not read all entries")
elif format == self.FORMAT_COORDINATE:
# Read sparse COOrdinate format
if entries == 0:
# empty matrix
return coo_matrix((rows, cols), dtype=dtype)
try:
if not _is_fromfile_compatible(stream):
flat_data = fromstring(stream.read(), sep=' ')
else:
# fromfile works for normal files
flat_data = fromfile(stream, sep=' ')
except Exception:
# fallback - fromfile fails for some file-like objects
flat_data = fromstring(stream.read(), sep=' ')
# TODO use iterator (e.g. xreadlines) to avoid reading
# the whole file into memory
if is_pattern:
flat_data = flat_data.reshape(-1,2)
I = ascontiguousarray(flat_data[:,0], dtype='intc')
J = ascontiguousarray(flat_data[:,1], dtype='intc')
V = ones(len(I), dtype='int8') # filler
elif is_complex:
flat_data = flat_data.reshape(-1,4)
I = ascontiguousarray(flat_data[:,0], dtype='intc')
J = ascontiguousarray(flat_data[:,1], dtype='intc')
V = ascontiguousarray(flat_data[:,2], dtype='complex')
V.imag = flat_data[:,3]
else:
flat_data = flat_data.reshape(-1,3)
I = ascontiguousarray(flat_data[:,0], dtype='intc')
J = ascontiguousarray(flat_data[:,1], dtype='intc')
V = ascontiguousarray(flat_data[:,2], dtype='float')
I -= 1 # adjust indices (base 1 -> base 0)
J -= 1
if has_symmetry:
mask = (I != J) # off diagonal mask
od_I = I[mask]
od_J = J[mask]
od_V = V[mask]
I = concatenate((I,od_J))
J = concatenate((J,od_I))
if is_skew:
od_V *= -1
elif is_herm:
od_V = od_V.conjugate()
V = concatenate((V,od_V))
a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype)
else:
raise NotImplementedError(format)
return a
    #---------------------------------------------------------------------------
    def _write(self, stream, a, comment='', field=None, precision=None):
        if isinstance(a, (list, ndarray, tuple)) or hasattr(a, '__array__'):
            rep = self.FORMAT_ARRAY
            a = asarray(a)
            if len(a.shape) != 2:
                raise ValueError('Expected 2 dimensional array')
            rows, cols = a.shape
            entries = rows * cols

            if field is not None:
                if field == self.FIELD_INTEGER:
                    a = a.astype('i')
                elif field == self.FIELD_REAL:
                    if a.dtype.char not in 'fd':
                        a = a.astype('d')
                elif field == self.FIELD_COMPLEX:
                    if a.dtype.char not in 'FD':
                        a = a.astype('D')
        else:
            from scipy.sparse import spmatrix
            if not isinstance(a, spmatrix):
                raise ValueError('unknown matrix type: %s' % type(a))
            rep = 'coordinate'
            rows, cols = a.shape
            entries = a.getnnz()

        typecode = a.dtype.char

        if precision is None:
            if typecode in 'fF':
                precision = 8
            else:
                precision = 16

        if field is None:
            kind = a.dtype.kind
            if kind == 'i':
                field = 'integer'
            elif kind == 'f':
                field = 'real'
            elif kind == 'c':
                field = 'complex'
            else:
                raise TypeError('unexpected dtype kind ' + kind)

        if rep == self.FORMAT_ARRAY:
            symm = self._get_symmetry(a)
        else:
            symm = self.SYMMETRY_GENERAL

        # validate rep, field, and symmetry
        self.__class__._validate_format(rep)
        self.__class__._validate_field(field)
        self.__class__._validate_symmetry(symm)

        # write initial header line
        stream.write(asbytes('%%%%MatrixMarket matrix %s %s %s\n' % (rep, field, symm)))

        # write comments
        for line in comment.split('\n'):
            stream.write(asbytes('%%%s\n' % (line)))

        template = self._field_template(field, precision)

        # write dense format
        if rep == self.FORMAT_ARRAY:
            # write shape spec
            stream.write(asbytes('%i %i\n' % (rows, cols)))
            if field in (self.FIELD_INTEGER, self.FIELD_REAL):
                if symm == self.SYMMETRY_GENERAL:
                    for j in range(cols):
                        for i in range(rows):
                            stream.write(asbytes(template % a[i, j]))
                else:
                    for j in range(cols):
                        for i in range(j, rows):
                            stream.write(asbytes(template % a[i, j]))
            elif field == self.FIELD_COMPLEX:
                if symm == self.SYMMETRY_GENERAL:
                    for j in range(cols):
                        for i in range(rows):
                            aij = a[i, j]
                            stream.write(asbytes(template % (real(aij), imag(aij))))
                else:
                    for j in range(cols):
                        for i in range(j, rows):
                            aij = a[i, j]
                            stream.write(asbytes(template % (real(aij), imag(aij))))
            elif field == self.FIELD_PATTERN:
                raise ValueError('pattern type inconsistent with dense format')
            else:
                raise TypeError('Unknown field type %s' % field)
        # write sparse format
        else:
            if symm != self.SYMMETRY_GENERAL:
                raise NotImplementedError('symmetric matrices not yet supported')

            coo = a.tocoo()  # convert to COOrdinate format

            # write shape spec
            stream.write(asbytes('%i %i %i\n' % (rows, cols, coo.nnz)))

            fmt = '%%.%dg' % precision

            if field == self.FIELD_PATTERN:
                IJV = vstack((coo.row, coo.col)).T
            elif field in [self.FIELD_INTEGER, self.FIELD_REAL]:
                IJV = vstack((coo.row, coo.col, coo.data)).T
            elif field == self.FIELD_COMPLEX:
                IJV = vstack((coo.row, coo.col, coo.data.real, coo.data.imag)).T
            else:
                raise TypeError('Unknown field type %s' % field)

            IJV[:, :2] += 1  # change base 0 -> base 1
            savetxt(stream, IJV, fmt=fmt)

def _is_fromfile_compatible(stream):
    """
    Check whether `stream` is compatible with numpy.fromfile.

    Passing a gzipped file to fromfile/fromstring doesn't work
    with Python3
    """
    if sys.version_info[0] < 3:
        return True

    bad_cls = []
    try:
        import gzip
        bad_cls.append(gzip.GzipFile)
    except ImportError:
        pass
    try:
        import bz2
        bad_cls.append(bz2.BZ2File)
    except ImportError:
        pass

    bad_cls = tuple(bad_cls)
    return not isinstance(stream, bad_cls)
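
# A minimal illustration (not part of the original module) of why the check
# above exists: numpy.fromfile cannot parse gzip/bz2 file objects on
# Python 3, so callers fall back to fromstring on the decompressed bytes.
# The file name below is a placeholder.
def _demo_fromfile_fallback(path='matrix.mtx.gz'):
    import gzip
    with gzip.open(path, 'rb') as stream:
        if _is_fromfile_compatible(stream):
            return fromfile(stream, sep=' ')
        return fromstring(stream.read(), sep=' ')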

#-------------------------------------------------------------------------------

if __name__ == '__main__':
    import sys
    import time
    for filename in sys.argv[1:]:
        print('Reading', filename, '...', end=' ')
        sys.stdout.flush()
        t = time.time()
        mmread(filename)
        print('took %s seconds' % (time.time() - t))
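
# A hedged round-trip sketch (not in the original module); it assumes the
# module-level mmwrite/mmread helpers defined earlier in this file, and uses
# scipy.sparse only to build a tiny test matrix. The path is a placeholder.
def _demo_roundtrip(path='/tmp/demo.mtx'):
    from scipy.sparse import coo_matrix as _coo
    m = _coo(([1.0, 2.0], ([0, 1], [1, 0])), shape=(2, 2))
    mmwrite(path, m)   # emits '%%MatrixMarket matrix coordinate real general'
    return mmread(path)  # parses the file back into a sparse COO matrix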
| [
"[email protected]"
] | |
481cc8b7ab198499fce1c0a51236d4cecf13d6bf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03495/s609850404.py | b480fa2ca615b11e243acdf596b2bb3cc9bfea4e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | N, K = map(int,input().split())
A = list(map(int, input().split()))

import collections

# count how many times each ball value occurs
cA = collections.Counter(A)
sorted_ls = sorted(list(cA.values()))
sum_ls = sum(sorted_ls)
# keep the K most frequent values; every ball carrying one of the other
# len(sorted_ls) - K values has to be rewritten
if len(sorted_ls) > K:
    print(sum(sorted_ls[:len(sorted_ls) - K]))
else:
    print(0)
| [
"[email protected]"
] | |
fc1c51f09fbcfb9579c3048674323b1c14071f24 | 3ef70fe63acaa665e2b163f30f1abd0a592231c1 | /stackoverflow/venv/lib/python3.6/site-packages/twisted/trial/test/test_keyboard.py | 71756bc7972de92fd569ca5053a3fce5ba032347 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | wistbean/learn_python3_spider | 14914b63691ac032955ba1adc29ad64976d80e15 | 40861791ec4ed3bbd14b07875af25cc740f76920 | refs/heads/master | 2023-08-16T05:42:27.208302 | 2023-03-30T17:03:58 | 2023-03-30T17:03:58 | 179,152,420 | 14,403 | 3,556 | MIT | 2022-05-20T14:08:34 | 2019-04-02T20:19:54 | Python | UTF-8 | Python | false | false | 4,036 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Tests for interrupting tests with Control-C.
"""

from __future__ import absolute_import, division

from twisted.python.compat import NativeStringIO

from twisted.trial import unittest
from twisted.trial import reporter, runner


class TrialTest(unittest.SynchronousTestCase):
    def setUp(self):
        self.output = NativeStringIO()
        self.reporter = reporter.TestResult()
        self.loader = runner.TestLoader()


class InterruptInTestTests(TrialTest):
    class InterruptedTest(unittest.TestCase):
        def test_02_raiseInterrupt(self):
            raise KeyboardInterrupt

        def test_01_doNothing(self):
            pass

        def test_03_doNothing(self):
            InterruptInTestTests.test_03_doNothing_run = True

    def setUp(self):
        super(InterruptInTestTests, self).setUp()
        self.suite = self.loader.loadClass(InterruptInTestTests.InterruptedTest)
        InterruptInTestTests.test_03_doNothing_run = None

    def test_setUpOK(self):
        self.assertEqual(3, self.suite.countTestCases())
        self.assertEqual(0, self.reporter.testsRun)
        self.assertFalse(self.reporter.shouldStop)

    def test_interruptInTest(self):
        runner.TrialSuite([self.suite]).run(self.reporter)
        self.assertTrue(self.reporter.shouldStop)
        self.assertEqual(2, self.reporter.testsRun)
        self.assertFalse(InterruptInTestTests.test_03_doNothing_run,
                         "test_03_doNothing ran.")


class InterruptInSetUpTests(TrialTest):
    testsRun = 0

    class InterruptedTest(unittest.TestCase):
        def setUp(self):
            if InterruptInSetUpTests.testsRun > 0:
                raise KeyboardInterrupt

        def test_01(self):
            InterruptInSetUpTests.testsRun += 1

        def test_02(self):
            InterruptInSetUpTests.testsRun += 1
            InterruptInSetUpTests.test_02_run = True

    def setUp(self):
        super(InterruptInSetUpTests, self).setUp()
        self.suite = self.loader.loadClass(
            InterruptInSetUpTests.InterruptedTest)
        InterruptInSetUpTests.test_02_run = False
        InterruptInSetUpTests.testsRun = 0

    def test_setUpOK(self):
        self.assertEqual(0, InterruptInSetUpTests.testsRun)
        self.assertEqual(2, self.suite.countTestCases())
        self.assertEqual(0, self.reporter.testsRun)
        self.assertFalse(self.reporter.shouldStop)

    def test_interruptInSetUp(self):
        runner.TrialSuite([self.suite]).run(self.reporter)
        self.assertTrue(self.reporter.shouldStop)
        self.assertEqual(2, self.reporter.testsRun)
        self.assertFalse(InterruptInSetUpTests.test_02_run,
                         "test_02 ran")


class InterruptInTearDownTests(TrialTest):
    testsRun = 0

    class InterruptedTest(unittest.TestCase):
        def tearDown(self):
            if InterruptInTearDownTests.testsRun > 0:
                raise KeyboardInterrupt

        def test_01(self):
            InterruptInTearDownTests.testsRun += 1

        def test_02(self):
            InterruptInTearDownTests.testsRun += 1
            InterruptInTearDownTests.test_02_run = True

    def setUp(self):
        super(InterruptInTearDownTests, self).setUp()
        self.suite = self.loader.loadClass(
            InterruptInTearDownTests.InterruptedTest)
        InterruptInTearDownTests.testsRun = 0
        InterruptInTearDownTests.test_02_run = False

    def test_setUpOK(self):
        self.assertEqual(0, InterruptInTearDownTests.testsRun)
        self.assertEqual(2, self.suite.countTestCases())
        self.assertEqual(0, self.reporter.testsRun)
        self.assertFalse(self.reporter.shouldStop)

    def test_interruptInTearDown(self):
        runner.TrialSuite([self.suite]).run(self.reporter)
        self.assertEqual(1, self.reporter.testsRun)
        self.assertTrue(self.reporter.shouldStop)
        self.assertFalse(InterruptInTearDownTests.test_02_run,
                         "test_02 ran")
| [
"[email protected]"
] | |
0f31bb30e71eb5a4692377586a4d3f448740db5f | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mariadb/operations/_private_endpoint_connections_operations.py | a9985f5e8097e13ef7a5578ee99c9245b436b671 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 30,299 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling

from .. import models as _models

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]


class PrivateEndpointConnectionsOperations(object):
    """PrivateEndpointConnectionsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.rdbms.mariadb.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def get(
        self,
        resource_group_name,  # type: str
        server_name,  # type: str
        private_endpoint_connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PrivateEndpointConnection"
        """Gets a private endpoint connection.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.rdbms.mariadb.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        server_name,  # type: str
        private_endpoint_connection_name,  # type: str
        parameters,  # type: "_models.PrivateEndpointConnection"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.PrivateEndpointConnection"]
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore

    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        server_name,  # type: str
        private_endpoint_connection_name,  # type: str
        parameters,  # type: "_models.PrivateEndpointConnection"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.PrivateEndpointConnection"]
        """Approve or reject a private endpoint connection with a given name.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param private_endpoint_connection_name:
        :type private_endpoint_connection_name: str
        :param parameters:
        :type parameters: ~azure.mgmt.rdbms.mariadb.models.PrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mariadb.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                parameters=parameters,
                cls=lambda x, y, z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
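
    # Hedged usage note (not generated code): each begin_* method in this
    # class returns an azure.core.polling.LROPoller, and a typical caller
    # blocks on .result(). The names below are placeholders:
    #
    #     poller = client.private_endpoint_connections.begin_create_or_update(
    #         resource_group_name='my-rg',
    #         server_name='my-mariadb-server',
    #         private_endpoint_connection_name='my-connection',
    #         parameters=connection_model)
    #     connection = poller.result()  # waits for the service-side operation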
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        server_name,  # type: str
        private_endpoint_connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore

    def begin_delete(
        self,
        resource_group_name,  # type: str
        server_name,  # type: str
        private_endpoint_connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a private endpoint connection with a given name.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param private_endpoint_connection_name:
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                cls=lambda x, y, z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore

    def _update_tags_initial(
        self,
        resource_group_name,  # type: str
        server_name,  # type: str
        private_endpoint_connection_name,  # type: str
        parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PrivateEndpointConnection"
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore

    def begin_update_tags(
        self,
        resource_group_name,  # type: str
        server_name,  # type: str
        private_endpoint_connection_name,  # type: str
        parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.PrivateEndpointConnection"]
        """Updates tags on private endpoint connection.

        Updates private endpoint connection with the specified tags.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param private_endpoint_connection_name:
        :type private_endpoint_connection_name: str
        :param parameters: Parameters supplied to the Update private endpoint connection Tags
         operation.
        :type parameters: ~azure.mgmt.rdbms.mariadb.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mariadb.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                parameters=parameters,
                cls=lambda x, y, z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore

    def list_by_server(
        self,
        resource_group_name,  # type: str
        server_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.PrivateEndpointConnectionListResult"]
        """Gets all private endpoint connections on a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateEndpointConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.mariadb.models.PrivateEndpointConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnectionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_server.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'serverName': self._serialize.url("server_name", server_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMariaDB/servers/{serverName}/privateEndpointConnections'}  # type: ignore
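
# A minimal usage sketch (not part of the generated module). It assumes an
# authenticated MariaDBManagementClient from azure.mgmt.rdbms.mariadb, which
# attaches this operations group as `private_endpoint_connections`; the
# resource group and server names are placeholders.
def _example_list_connections(client):
    for connection in client.private_endpoint_connections.list_by_server(
            resource_group_name='my-resource-group',
            server_name='my-mariadb-server'):
        print(connection.id)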
| [
"[email protected]"
] | |
44735efc8ba871e2f2e3ca0ced6963479ab46e19 | ea4567b4388ea97c8ca718d9e331dc796439ee44 | /exercise_learn/new_selenium_project/util/browser_driver_test.py | af46ab09ff714df86c85b89525f0d5733303ef53 | [] | no_license | Kingwolf9527/python_knowledge | ace65470ec706cae195b228b8e8d6ca8db574db8 | 1ccb3a788c172f3122a7c119d0607aa90934e59b | refs/heads/master | 2020-12-04T06:12:49.809020 | 2020-02-10T18:22:36 | 2020-02-10T18:22:44 | 231,647,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | # ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ : KingWolf
# createtime : 2019/11/12 3:21

import os
from selenium import webdriver
from util.read_config import Read_Config
from util.common_log import Common_Logs

# instantiate the logger
log_name = Common_Logs(logger='browser_driver')
logger = log_name.get_logger()


class WebdriverBrowser(object):

    def __init__(self, selection, key):
        """
        Open the browser named in the config file.
        :param selection: config section name
        :param key: config option name
        """
        self.browser = Read_Config().get_value(selection, key)
        if self.browser == 'chrome':
            # Chrome settings
            # set the path of the user-data-dir profile
            newOptions = webdriver.ChromeOptions()
            newOptions.add_argument(r"user-data-dir=F:\data_profile")
            # set the path of the Chrome driver
            driverPath = os.path.dirname(os.path.dirname(__file__)) + '/browser_driver/chromedriver.exe'
            self.driver = webdriver.Chrome(executable_path=driverPath, options=newOptions)
            logger.info('-----------------open the browser:Chrome--------------------')
        elif self.browser == 'firefox':
            # Firefox settings
            # set the path of the Firefox driver
            driverPath = os.path.dirname(os.path.dirname(__file__)) + '/browser_driver/geckodriver.exe'
            self.driver = webdriver.Firefox(executable_path=driverPath)
            logger.info('-----------------open the browser:Firefox--------------------')
        else:
            # Edge settings
            # set the path of the Edge driver
            driverPath = os.path.dirname(os.path.dirname(__file__)) + '/browser_driver/MicrosoftWebDriver.exe'
            self.driver = webdriver.Edge(executable_path=driverPath)
            logger.info('-----------------open the browser:Edge--------------------')

    def getDriver(self):
        """
        Return the driver instance.
        """
        return self.driver

    def getUrl(self, selection, key):
        """
        Open the URL read from the config file.
        :param selection: config section name
        :param key: config option name
        """
        self.registerUrl = Read_Config().get_value(selection, key)
        self.getDriver().get(self.registerUrl)
        logger.info('---------------------open the url: %s -----------------------' % self.registerUrl)
        self.getDriver().implicitly_wait(10)
        self.getDriver().maximize_window()


if __name__ == '__main__':
    dd = WebdriverBrowser('Browser', 'chrome_browser')
    dd.getUrl('Register_url', 'url')
| [
"[email protected]"
] | |
8e82a7f954f34fe2899eb5500cb51358b6154c4b | 747eeeed1056b69a8bde6364ee9bf266523f19e5 | /Project/My solutions/12.py | 83d4817a7abb86b7b2219a8886f0815ffcdfb50e | [] | no_license | LittleAndroidBunny/Python_Cheatsheet_Nohar_Batit | d18a77d455474834da99c11e763beea598947f7c | a53f5e3a635bb47012fceb50efd43ad124eff180 | refs/heads/main | 2023-06-10T10:40:54.746084 | 2021-07-03T23:25:17 | 2021-07-03T23:25:17 | 382,595,063 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,522 | py | # ######################
# ##   Nohar_Batit   ###
# ##   315572941     ###
# ######################
# ################################

import random
random.seed(1)
import pylab
import matplotlib.pyplot as plt
from scipy import stats


# Question 1
# function f1 gets a natural number and returns a tuple of its largest
# proper divisor and its smallest divisor greater than 1 (None for primes)
def f1(a):
    result = []
    if a < 0:
        return "please insert a number higher than 0"
    highest_divider = 2
    lowest_positive = 1
    if type(a / 2) is float:
        highest_divider = None
    for i in range(2, a):
        if a % i == 0:
            highest_divider = i
    for j in range(1, a + 1):
        if j != 1 and a % j == 0:
            if lowest_positive < j and lowest_positive == 1:
                lowest_positive = j
    if lowest_positive == 1:
        lowest_positive = None
    result.append(highest_divider)
    result.append(lowest_positive)
    result = tuple(result)
    return result


input_1 = int(input("Enter a positive int number:\n"))
print("Question 1")
print(f1(input_1))

# b
comp1 = "O(n)"
print("The complexity of the code is:", comp1)
# Question 2
# function f2 returns the values that appear in at least two of the three
# lists, without duplicates
list1 = [1, 3, 4, 5, 9, 9]
list2 = [1, 2, 3, 4, 5, 0]
list3 = [4, 5, 6, 6, 7, -8, 9]


def f2(l1, l2, l3):
    list_new = []
    list_all = []
    for i in l1:
        if i in l2 + l3:
            list_new.append(i)
    for j in l2:
        if j in l3:
            if j not in l1:
                list_new.append(j)
    for k in list_new:
        if k not in list_all:
            list_all.append(k)
    return list_all


print(f2(list1, list2, list3))


# Question 3
# function f3 is a recursive function that gets a natural number n >= 1 and
# finds An by the recurrence An = 2An-1 - 3An-2 (with A1 = 1 and A2 = 4)
def f3(n):
    if n == 1:
        return 1
    if n == 2:
        return 4
    if n < 1:
        return "Enter an n bigger than 0"
    else:
        return 2 * f3(n - 1) - 3 * f3(n - 2)
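
# A hedged aside (not in the original exam): the naive recursion above is
# exponential in n; an equivalent iterative version with the same base
# cases A1 = 1 and A2 = 4 runs in O(n).
def f3_iterative(n):
    if n < 1:
        return "Enter an n bigger than 0"
    a_prev, a_curr = 1, 4  # A1, A2
    if n == 1:
        return a_prev
    for _ in range(n - 2):
        a_prev, a_curr = a_curr, 2 * a_curr - 3 * a_prev
    return a_curr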
print()
print("Question 3")
while True:
    print("Please enter an integer n bigger than 0:")
    num = int(input())
    if num > 0:
        break
print(f"The An is: (by An = 2An-1 - 3An-2)\n{f3(num)}")


# Question 4
# function f4 gets a list of numbers and returns a dictionary of the numbers
# organized by their first digit (from the left); the keys are 0-9
dictonary = {}


def find(f):
    temp = abs(f)
    if temp > 10:
        return round(find(temp / 10))
    else:
        return round(temp)


def f4(list_of_numbers):
    for i in range(10):
        dictonary[i] = []
    for number in list_of_numbers:
        temp = find(number)
        for i in range(10):
            if i == temp:
                dictonary[i].append(number)
    return dictonary


types = []
print()
print("Question 4")
print(f4([12, -121, 1, 1111, 22.2, 2.2, 1234314.1, 0, 0]))
# # showing the keys are from int type
# for k in dictonary.keys():
#     types.append(type(k))
# print(types)


# Question 5
# class c5 checks that the right amount of falafel balls (between 2 and 7)
# is used and whether there is a spicy sauce or not
class c5:
    def __init__(self, Nb, s):
        self.Nb = Nb
        self.s = bool(s)
        assert (2 <= Nb <= 7), "This is not the right amount of falafel balls, min 2, max 7"
        assert (s == True) or (s == False), "s should be True or False"

    # __str__ prints the number of balls and whether there is a spicy sauce
    def __str__(self):
        if self.s:
            return f"Mana: {self.Nb} balls and has spicy sauce"
        else:
            return f"Mana: {self.Nb} balls and has no spicy sauce"

    # 5.bet
    # __add__ merges two falafel portions ("manot") after checking that the
    # merge is possible; if at least one of the portions had a sauce the mix
    # will have a sauce, and if neither had one the mix won't have a sauce
    def __add__(self, other):
        if self.Nb + other.Nb < 8:
            self.Nb = self.Nb + other.Nb
        else:
            return "Cant add the falafels cause too many balls"
        if self.s and other.s:
            self.s = other.s
            return f"Mana after merge: {self.Nb} balls and has spicy sauce"
        elif self.s and not other.s:
            other.s = self.s
            return f"Mana after merge: {self.Nb} balls and has spicy sauce"
        elif not self.s and other.s:
            self.s = other.s
            return f"Mana after merge: {self.Nb} balls and has spicy sauce"
        else:
            self.s = self.s
            return f"Mana after merge: {self.Nb} balls and has no spicy sauce"


man = c5(2, True)
man2 = c5(5, False)
print()
print("Question 5.alef")
print("1st", man)
print("2nd", man2)
print()
print("Question 5.bet")
print(man + man2)


# Question 6
# function f6 estimates, over N simulated rounds, the probability of rolling
# exactly two sixes in 10 dice throws (the counter is reset every round)
def f6(N):
    counter = 0
    prob = 0
    for i in range(N):
        for j in range(10):
            dice = random.randrange(1, 7)
            if dice == 6:
                counter += 1
        if counter == 2:
            prob += 1
        counter = 0
    return prob / N


print(f6(1000000))


# Question 7
# function f7a gets a list of tuples and returns 3 random tuples from the
# list using random.sample
def f7a(l):
    random.seed(2)
    r_list = random.sample(l, 3)
    return r_list


# print(f7a([(1,2,1,1),(2,2,2,2),(3,3,3,3),(4,4,4,4)]))

# Question 7.bet
def euclidean_dist(vec1, vec2):
    dist = 0
    for k in range(len(vec1)):
        dist += (vec1[k] - vec2[k]) ** 2
    return dist ** 0.5


# f7b assigns every point in l1 to the closest of the three centroids in l2
def f7b(l1, l2):
    first_vector = []
    sec_vector = []
    third_vector = []
    for i in l1:
        min_euc = min(euclidean_dist(i, l2[0]), euclidean_dist(i, l2[1]), euclidean_dist(i, l2[2]))
        if euclidean_dist(i, l2[0]) == min_euc:
            first_vector.append(i)
        elif euclidean_dist(i, l2[1]) == min_euc:
            sec_vector.append(i)
        else:
            third_vector.append(i)
    return [first_vector, sec_vector, third_vector]


def f7c(l):
    def compute_centroid(vecs):
        vals = pylab.array([0] * len(vecs[0]))
        for vec in vecs:  # compute mean
            vals += vec
        return tuple(vals / len(vecs))
    return [compute_centroid(l[0]), compute_centroid(l[1]), compute_centroid(l[2])]


# f7d repeats the assign/recompute steps until the centroids stop moving
def f7d(l):
    initial_centroids = f7a(l)
    clusters = f7b(l, initial_centroids)
    new_centroids = f7c(clusters)
    while True:
        initial_centroids = new_centroids
        clusters = f7b(l, new_centroids)
        new_centroids = f7c(clusters)
        if initial_centroids == new_centroids:
            break
    return clusters
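
# A hypothetical demo (not part of the submitted solutions) running the
# k-means-style loop above on a tiny 2-D point set; the exact grouping
# depends on the three seeded starting centroids picked by f7a.
def _demo_f7d():
    points = [(0, 0), (0, 1), (10, 10), (10, 11), (20, 0), (21, 0)]
    return f7d(points)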
# Question 8
# function f8 gets a list, sorts it from the highest value to the lowest
# and returns it
def f8(l):
    flag = False
    while not flag:
        flag = True
        for n in range(len(l)):
            for k in range(n, 0, -1):
                if l[n] > l[k]:
                    temp = l[k]
                    l[k] = l[n]
                    l[n] = temp
                    flag = False
        if l[0] < l[1]:
            temp = l[0]
            del l[0]
            l.append(temp)
            flag = False
    return l


comp2 = "O(n**3)"
print()
print("Question 8")
print(f8([12, 4, 5, 122, 1, 13, 0]))
print("The complexity is:", comp2)


# Question 9
# function f9 fits a linear regression to (tau, alpha), plots the data with
# the fitted line, and returns the slope
def f9(tau, alpha):
    slope, intercept, r, p, std_err = stats.linregress(tau, alpha)

    def my_func(x):
        return slope * x + intercept

    model = list(map(my_func, tau))
    plt.scatter(tau, alpha)
    plt.plot(tau, model)
    plt.show()
    return slope


print()
print("Question 9")
f9([1, 2, 3, 4, 5], [6, 7, 8, 9, 10])
| [
"[email protected]"
] | |
ef495dc49c8aacb3b2af95d4c40ebb6a453fa1ad | c6e2e537a6bf2a2e009a64eef76954dae30ae214 | /tests/test_series_replacement.py | e36974b49ae8808c00c3818d5b1560e71d388bfc | [
"Unlicense"
] | permissive | mb5/tvnamer | 46d0eb0ae8b4d8e72656d2dfc239555e91b2bfd2 | ce4f7374ff8abbbe137aa7c43ed0ba0fe0f2f755 | refs/heads/master | 2021-01-18T02:49:44.110873 | 2012-12-13T09:21:39 | 2012-12-13T09:21:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | #!/usr/bin/env python
"""Tests custom replacements on input/output files
"""
from functional_runner import run_tvnamer, verify_out_data
from nose.plugins.attrib import attr
@attr("functional")
def test_replace_input():
"""Tests replacing strings in input files
"""
out_data = run_tvnamer(
with_files = ['scruuuuuubs.s01e01.avi'],
with_config = """
{
"input_series_replacements": {
"scru*bs": "scrubs"},
"always_rename": true,
"select_first": true
}
""")
expected_files = ['Scrubs - [01x01] - My First Day.avi']
verify_out_data(out_data, expected_files)
@attr("functional")
def test_replace_output():
"""Tests replacing strings in input files
"""
out_data = run_tvnamer(
with_files = ['Scrubs.s01e01.avi'],
with_config = """
{
"output_series_replacements": {
"Scrubs": "Replacement Series Name"},
"always_rename": true,
"select_first": true
}
""")
expected_files = ['Replacement Series Name - [01x01] - My First Day.avi']
verify_out_data(out_data, expected_files)
| [
"[email protected]"
] | |
d831ff7bbac8a88c716b003523818b763f425495 | 4978ce56457ac4c64075b2d70663c74cf4dc3896 | /demoVisualizeDataFrame/__init__.py | 2569839deb53775254f19d982557de5c755e1a7b | [
"MIT"
] | permissive | listenzcc/data_visualize | 90700f4ca9f22e351363b3c91b7bd30beac136a2 | 7f0867e19e3ae88041efb24c79789b1bc4b46f40 | refs/heads/master | 2023-03-13T20:53:01.126166 | 2021-03-20T02:49:14 | 2021-03-20T02:49:14 | 315,854,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | # File: __init__.py
# Aim: demoVisualizeDataFrame package startup script

import configparser
import logging
import os
import sys

import pandas as pd


def beside(name, this=__file__):
    # Get path of [name] beside __file__
    return os.path.join(os.path.dirname(this), name)


config = configparser.ConfigParser()
config.read(beside('setting.ini'))

logger = logging.Logger('demoVisualizeDataFrame', level=logging.DEBUG)
for handler, formatter in zip([logging.StreamHandler(sys.stdout),
                               logging.FileHandler('logging.log')],
                              [logging.Formatter('%(filename)s %(levelname)s %(message)s'),
                               logging.Formatter('%(asctime)s %(name)s %(filename)s %(levelname)s %(message)s')]):
    handler.setFormatter(formatter)
    logger.addHandler(handler)
logger.setLevel(logging.DEBUG)

logger.info('info')
logger.debug('debug')
logger.warning('warning')
logger.error('error')
logger.fatal('fatal')
| [
"[email protected]"
] | |
048c2c28d81f61ade6bb91e3c6025ccdb74bd471 | 1ab99223dfef768cbead2813d039c66a627024be | /api/src/opentrons/drivers/temp_deck/__init__.py | 91a9ef7bba1f13dc509decc0dfe380a038bf462a | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | fakela/opentrons | f399b8a9444ea557072a00477d0c8176e46e433e | 7552a1cbe6d06131bd45241b027f27e11428e100 | refs/heads/master | 2022-11-21T01:32:40.084185 | 2020-06-29T18:27:00 | 2020-06-29T18:27:00 | 280,266,622 | 0 | 0 | Apache-2.0 | 2020-07-16T21:55:27 | 2020-07-16T21:55:26 | null | UTF-8 | Python | false | false | 128 | py | from opentrons.drivers.temp_deck.driver import TempDeck, SimulatingDriver
__all__ = [
    'TempDeck',
    'SimulatingDriver'
]
| [
"[email protected]"
] | |
11b05574742fa3d545e177dce442438a7c969a74 | eeb7e70b0b68decbdcb32682351e54e0be99a5b0 | /kaggle/python_files/sample360.py | 295bae3b2767e1ea485f2b260bb28b19a14fd260 | [] | no_license | SocioProphet/CodeGraph | 8bafd7f03204f20da8f54ab23b04f3844e6d24de | 215ac4d16d21d07e87964fe9a97a5bf36f4c7d64 | refs/heads/master | 2023-02-16T02:51:27.791886 | 2021-01-15T07:00:41 | 2021-01-15T07:00:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,151 | py | #!/usr/bin/env python
# coding: utf-8

# # Loading Libraries

# In[ ]:

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold
import gc
import os
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from catboost import Pool, CatBoostClassifier
import itertools
import pickle, gzip
import glob
from sklearn.preprocessing import StandardScaler

# # Extracting Features from train set

# In[ ]:

gc.enable()

train = pd.read_csv('../input/training_set.csv')
train['flux_ratio_sq'] = np.power(train['flux'] / train['flux_err'], 2.0)
train['flux_by_flux_ratio_sq'] = train['flux'] * train['flux_ratio_sq']

aggs = {
    'mjd': ['min', 'max', 'size'],
    'passband': ['min', 'max', 'mean', 'median', 'std'],
    'flux': ['min', 'max', 'mean', 'median', 'std', 'skew'],
    'flux_err': ['min', 'max', 'mean', 'median', 'std', 'skew'],
    'detected': ['mean'],
    'flux_ratio_sq': ['sum', 'skew'],
    'flux_by_flux_ratio_sq': ['sum', 'skew'],
}

agg_train = train.groupby('object_id').agg(aggs)
new_columns = [
    k + '_' + agg for k in aggs.keys() for agg in aggs[k]
]
agg_train.columns = new_columns
agg_train['mjd_diff'] = agg_train['mjd_max'] - agg_train['mjd_min']
agg_train['flux_diff'] = agg_train['flux_max'] - agg_train['flux_min']
agg_train['flux_dif2'] = (agg_train['flux_max'] - agg_train['flux_min']) / agg_train['flux_mean']
agg_train['flux_w_mean'] = agg_train['flux_by_flux_ratio_sq_sum'] / agg_train['flux_ratio_sq_sum']
agg_train['flux_dif3'] = (agg_train['flux_max'] - agg_train['flux_min']) / agg_train['flux_w_mean']

del agg_train['mjd_max'], agg_train['mjd_min']
agg_train.head()

del train
gc.collect()

# # Merging extracted features with meta data

# In[ ]:

meta_train = pd.read_csv('../input/training_set_metadata.csv')
meta_train.head()

full_train = agg_train.reset_index().merge(
    right=meta_train,
    how='outer',
    on='object_id'
)

if 'target' in full_train:
    y = full_train['target']
    del full_train['target']
classes = sorted(y.unique())

# Taken from Giba's topic : https://www.kaggle.com/titericz
# https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194
# with Kyle Boone's post https://www.kaggle.com/kyleboone
class_weight = {
    c: 1 for c in classes
}
for c in [64, 15]:
    class_weight[c] = 2

print('Unique classes : ', classes)

# In[ ]:

if 'object_id' in full_train:
    oof_df = full_train[['object_id']]
    del full_train['object_id'], full_train['distmod'], full_train['hostgal_specz']
    del full_train['ra'], full_train['decl'], full_train['gal_l'], full_train['gal_b'], full_train['ddf']

train_mean = full_train.mean(axis=0)
full_train.fillna(train_mean, inplace=True)

folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)

# # Standard Scaling the input (imp.)

# In[ ]:

full_train_new = full_train.copy()
ss = StandardScaler()
full_train_ss = ss.fit_transform(full_train_new)

# # Deep Learning Begins...

# In[ ]:

from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Dropout
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
from keras.utils import to_categorical
import tensorflow as tf
from keras import backend as K
import keras
from keras import regularizers
from collections import Counter
from sklearn.metrics import confusion_matrix

# In[ ]:

# https://www.kaggle.com/c/PLAsTiCC-2018/discussion/69795
def mywloss(y_true, y_pred):
    yc = tf.clip_by_value(y_pred, 1e-15, 1 - 1e-15)
    loss = -(tf.reduce_mean(tf.reduce_mean(y_true * tf.log(yc), axis=0) / wtable))
    return loss

# In[ ]:

def multi_weighted_logloss(y_ohe, y_p):
    """
    @author olivier https://www.kaggle.com/ogrellier
    multi logloss for PLAsTiCC challenge
    """
    classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
    class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
    # Normalize rows and limit y_preds to 1e-15, 1-1e-15
    y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
    # Transform to log
    y_p_log = np.log(y_p)
    # Get the log for ones, .values is used to drop the index of DataFrames
    # Exclude class 99 for now, since there is no class99 in the training set
    # we gave a special process for that class
    y_log_ones = np.sum(y_ohe * y_p_log, axis=0)
    # Get the number of positives for each class
    nb_pos = y_ohe.sum(axis=0).astype(float)
    # Weight average and divide by the number of positives
    class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])
    y_w = y_log_ones * class_arr / nb_pos
    loss = - np.sum(y_w) / np.sum(class_arr)
    return loss
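
# A small hedged sanity check (not from the original notebook): with one
# perfectly predicted example per class, the clipped weighted log loss above
# is numerically ~0.
def _demo_multi_weighted_logloss():
    y_true = np.eye(14)  # one one-hot row per training class
    return multi_weighted_logloss(y_true, y_true.copy())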
# # Defining simple model in keras

# In[ ]:

K.clear_session()


def build_model(dropout_rate=0.25, activation='relu'):
    start_neurons = 512
    # create model
    model = Sequential()
    model.add(Dense(start_neurons, input_dim=full_train_ss.shape[1], activation=activation))
    model.add(BatchNormalization())
    model.add(Dropout(dropout_rate))

    model.add(Dense(start_neurons // 2, activation=activation))
    model.add(BatchNormalization())
    model.add(Dropout(dropout_rate))

    model.add(Dense(start_neurons // 4, activation=activation))
    model.add(BatchNormalization())
    model.add(Dropout(dropout_rate))

    model.add(Dense(start_neurons // 8, activation=activation))
    model.add(BatchNormalization())
    model.add(Dropout(dropout_rate / 2))

    model.add(Dense(len(classes), activation='softmax'))
    return model
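
# Optional quick inspection (not in the original kernel): build the network
# once and print its layer stack; input_dim comes from full_train_ss above.
def _preview_model():
    m = build_model(dropout_rate=0.5, activation='tanh')
    m.summary()
    return m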
# In[ ]:

unique_y = np.unique(y)
class_map = dict()
for i, val in enumerate(unique_y):
    class_map[val] = i

y_map = np.zeros((y.shape[0],))
y_map = np.array([class_map[val] for val in y])
y_categorical = to_categorical(y_map)

# # Calculating the class weights

# In[ ]:

y_count = Counter(y_map)
wtable = np.zeros((len(unique_y),))
for i in range(len(unique_y)):
    wtable[i] = y_count[i] / y_map.shape[0]
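
# Hedged sanity check (not in the original kernel): wtable holds empirical
# class frequencies, which mywloss uses to re-weight the per-class loss, so
# its entries should sum to 1 over the training classes.
def _check_wtable():
    assert abs(wtable.sum() - 1.0) < 1e-9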
# In[ ]:
def plot_loss_acc(history):
plt.plot(history.history['loss'][1:])
plt.plot(history.history['val_loss'][1:])
plt.title('model loss')
plt.ylabel('val_loss')
plt.xlabel('epoch')
plt.legend(['train','Validation'], loc='upper left')
plt.show()
plt.plot(history.history['acc'][1:])
plt.plot(history.history['val_acc'][1:])
plt.title('model Accuracy')
plt.ylabel('val_acc')
plt.xlabel('epoch')
plt.legend(['train','Validation'], loc='upper left')
plt.show()
# In[ ]:
clfs = []
oof_preds = np.zeros((len(full_train_ss), len(classes)))
epochs = 600
batch_size = 100
for fold_, (trn_, val_) in enumerate(folds.split(y_map, y_map)):
checkPoint = ModelCheckpoint("./keras.model",monitor='val_loss',mode = 'min', save_best_only=True, verbose=0)
x_train, y_train = full_train_ss[trn_], y_categorical[trn_]
x_valid, y_valid = full_train_ss[val_], y_categorical[val_]
model = build_model(dropout_rate=0.5,activation='tanh')
model.compile(loss=mywloss, optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train, y_train,
validation_data=[x_valid, y_valid],
epochs=epochs,
batch_size=batch_size,shuffle=True,verbose=0,callbacks=[checkPoint])
plot_loss_acc(history)
print('Loading Best Model')
model.load_weights('./keras.model')
# # Get predicted probabilities for each class
oof_preds[val_, :] = model.predict_proba(x_valid,batch_size=batch_size)
print(multi_weighted_logloss(y_valid, model.predict_proba(x_valid,batch_size=batch_size)))
clfs.append(model)
print('MULTI WEIGHTED LOG LOSS : %.5f ' % multi_weighted_logloss(y_categorical,oof_preds))
# In[ ]:
# http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
# In[ ]:
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_map, np.argmax(oof_preds,axis=-1))
np.set_printoptions(precision=2)
# In[ ]:
sample_sub = pd.read_csv('../input/sample_submission.csv')
class_names = list(sample_sub.columns[1:-1])
del sample_sub;gc.collect()
# In[ ]:
# Plot non-normalized confusion matrix
plt.figure(figsize=(12,12))
foo = plot_confusion_matrix(cnf_matrix, classes=class_names,normalize=True,
title='Confusion matrix')
# # Test Set Predictions
# In[ ]:
meta_test = pd.read_csv('../input/test_set_metadata.csv')
import time
start = time.time()
chunks = 5000000
for i_c, df in enumerate(pd.read_csv('../input/test_set.csv', chunksize=chunks, iterator=True)):
df['flux_ratio_sq'] = np.power(df['flux'] / df['flux_err'], 2.0)
df['flux_by_flux_ratio_sq'] = df['flux'] * df['flux_ratio_sq']
# Group by object id
agg_test = df.groupby('object_id').agg(aggs)
agg_test.columns = new_columns
agg_test['mjd_diff'] = agg_test['mjd_max'] - agg_test['mjd_min']
agg_test['flux_diff'] = agg_test['flux_max'] - agg_test['flux_min']
agg_test['flux_dif2'] = (agg_test['flux_max'] - agg_test['flux_min']) / agg_test['flux_mean']
agg_test['flux_w_mean'] = agg_test['flux_by_flux_ratio_sq_sum'] / agg_test['flux_ratio_sq_sum']
agg_test['flux_dif3'] = (agg_test['flux_max'] - agg_test['flux_min']) / agg_test['flux_w_mean']
del agg_test['mjd_max'], agg_test['mjd_min']
# del df
# gc.collect()
# Merge with meta data
full_test = agg_test.reset_index().merge(
right=meta_test,
how='left',
on='object_id'
)
full_test[full_train.columns] = full_test[full_train.columns].fillna(train_mean)
full_test_ss = ss.transform(full_test[full_train.columns])
# Make predictions
preds = None
for clf in clfs:
if preds is None:
preds = clf.predict_proba(full_test_ss) / folds.n_splits
else:
preds += clf.predict_proba(full_test_ss) / folds.n_splits
# Compute preds_99 as the proba of class not being any of the others
# preds_99 = 0.1 gives 1.769
preds_99 = np.ones(preds.shape[0])
for i in range(preds.shape[1]):
preds_99 *= (1 - preds[:, i])
# Store predictions
preds_df = pd.DataFrame(preds, columns=class_names)
preds_df['object_id'] = full_test['object_id']
preds_df['class_99'] = 0.14 * preds_99 / np.mean(preds_99)
if i_c == 0:
preds_df.to_csv('predictions.csv', header=True, mode='a', index=False)
else:
preds_df.to_csv('predictions.csv', header=False, mode='a', index=False)
del agg_test, full_test, preds_df, preds
# print('done')
if (i_c + 1) % 10 == 0:
        print('%15d done in %5.1f minutes' % (chunks * (i_c + 1), (time.time() - start) / 60))
# In[ ]:
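# A chunk boundary can fall in the middle of one object's rows, so that object
# gets a partial prediction row from two consecutive chunks. Grouping by
# object_id and averaging below merges those duplicates into one row per object.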
z = pd.read_csv('predictions.csv')
print(z.groupby('object_id').size().max())
print((z.groupby('object_id').size() > 1).sum())
z = z.groupby('object_id').mean()
z.to_csv('single_predictions.csv', index=True)
# In[ ]:
z.head()
# In[ ]:
"""Users URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
urlpatterns = [
path('', include('usersapp.urls')),
]
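# A minimal sketch of what the included `usersapp/urls.py` might look like,
# assuming a function view named `index` in `usersapp/views.py` (both names
# are hypothetical, not confirmed by this project):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.index),
#     ]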
ITEM: TIMESTEP
4500
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
7.9433252788447817e-01 4.6405667472114288e+01
7.9433252788447817e-01 4.6405667472114288e+01
7.9433252788447817e-01 4.6405667472114288e+01
ITEM: ATOMS id type xs ys zs
8 1 0.129159 0.059669 0.0603151
35 1 0.0632288 0.124995 0.0640692
130 1 0.064921 0.0670279 0.123945
165 1 0.126784 0.125843 0.124487
4 1 0.00134786 0.0581556 0.0629357
3 1 0.0594568 -0.00460677 0.0572581
133 1 0.123166 0.0044247 0.122589
7 1 0.186536 -0.00123124 0.0614121
12 1 0.24413 0.063346 0.0638826
39 1 0.186169 0.127546 0.0572274
43 1 0.316503 0.127355 0.0590978
134 1 0.189842 0.0598842 0.127799
138 1 0.313744 0.0598441 0.127744
169 1 0.251183 0.120567 0.121349
10 1 0.308578 0.0658481 0.00481893
137 1 0.249323 0.00042414 0.126148
11 1 0.310825 -3.95117e-06 0.0591022
41 1 0.249622 0.131136 0.0011127
6 1 0.191913 0.0638135 -0.00226609
16 1 0.375722 0.0621824 0.0627748
47 1 0.441256 0.122467 0.0670647
142 1 0.442209 0.062994 0.127435
173 1 0.374823 0.122421 0.126462
20 1 0.506144 0.068054 0.0632277
177 1 0.503959 0.12396 0.12714
53 1 0.625201 0.129235 -0.00292208
24 1 0.622399 0.0637463 0.0591467
51 1 0.564494 0.128165 0.0566846
146 1 0.568512 0.0639075 0.124423
181 1 0.627225 0.129583 0.122279
18 1 0.56498 0.0648544 -0.0018147
19 1 0.564302 -0.000143704 0.0548712
149 1 0.628834 -0.000840041 0.121051
28 1 0.752331 0.0634961 0.0565945
55 1 0.688642 0.124331 0.0599345
59 1 0.808382 0.127956 0.0592053
150 1 0.687696 0.0596127 0.118352
154 1 0.812944 0.0653609 0.124058
185 1 0.751377 0.120774 0.125364
25 1 0.752723 0.000632403 -0.00526859
57 1 0.744901 0.125178 -0.00540449
26 1 0.811417 0.0623475 -0.00277066
22 1 0.688708 0.065802 -0.00590571
161 1 1.00051 0.124364 0.126498
129 1 1.00252 -0.000497102 0.123721
32 1 0.876094 0.0603559 0.0665776
63 1 0.934689 0.128127 0.0606916
158 1 0.936681 0.0647438 0.122761
189 1 0.873328 0.126411 0.128421
31 1 0.937564 -0.00271495 0.0592093
157 1 0.877607 0.00315911 0.122691
40 1 0.122496 0.18853 0.0631221
67 1 0.063316 0.251579 0.0613883
72 1 0.127736 0.31251 0.0608411
162 1 0.057726 0.18628 0.124783
194 1 0.0650747 0.318575 0.11719
197 1 0.124428 0.248504 0.123149
36 1 -0.00468323 0.187252 0.0569436
68 1 -0.00189591 0.316154 0.0597642
193 1 -0.00726445 0.249332 0.116615
34 1 0.0589009 0.188721 -0.000552223
69 1 0.122056 0.254206 0.00104114
44 1 0.257045 0.183962 0.0639674
71 1 0.186541 0.247044 0.0614926
75 1 0.313941 0.245417 0.0601854
76 1 0.24965 0.312119 0.0636957
166 1 0.186593 0.183021 0.120966
170 1 0.312762 0.180108 0.127547
198 1 0.189029 0.308283 0.124532
201 1 0.247659 0.241946 0.120859
202 1 0.311225 0.312044 0.125157
38 1 0.186522 0.188061 -0.00163101
48 1 0.382442 0.185468 0.0679347
79 1 0.439504 0.245742 0.0649326
80 1 0.376667 0.306695 0.0620525
174 1 0.443344 0.181516 0.127649
205 1 0.375699 0.247007 0.121875
206 1 0.435005 0.314768 0.127278
46 1 0.442403 0.185528 3.43653e-05
81 1 0.501892 0.252204 -8.4828e-05
52 1 0.503413 0.18704 0.0621523
84 1 0.506013 0.307478 0.0642593
209 1 0.500134 0.249802 0.126269
56 1 0.630406 0.191474 0.0580353
83 1 0.566577 0.249458 0.0642683
88 1 0.621582 0.312398 0.0627495
178 1 0.562194 0.189708 0.124944
210 1 0.567471 0.311897 0.127249
213 1 0.62917 0.244688 0.123325
85 1 0.626599 0.248716 0.00113432
60 1 0.748217 0.186072 0.0645964
87 1 0.691471 0.254747 0.0636609
91 1 0.809773 0.251784 0.0607009
92 1 0.751639 0.316386 0.0630132
182 1 0.688356 0.187002 0.123952
186 1 0.809756 0.190275 0.127067
214 1 0.687823 0.313135 0.129364
217 1 0.746647 0.248706 0.127462
218 1 0.815332 0.313806 0.125026
64 1 0.867605 0.186566 0.0655893
95 1 0.930998 0.250318 0.0580333
96 1 0.872282 0.314108 0.0584088
190 1 0.936878 0.186373 0.120869
221 1 0.872935 0.25032 0.125543
222 1 0.934782 0.310821 0.121326
94 1 0.938655 0.316454 -0.00235378
1027 1 0.0593636 0.501334 0.0633396
99 1 0.0610567 0.38172 0.0623664
104 1 0.128319 0.438734 0.0645588
226 1 0.0685798 0.437984 0.124481
229 1 0.128287 0.375416 0.122594
1025 1 -0.0018373 0.497516 0.00139232
1157 1 0.122633 0.504324 0.119101
101 1 0.121588 0.37444 0.00193096
1031 1 0.18864 0.501075 0.062442
1033 1 0.251598 0.50729 -0.000104967
103 1 0.189647 0.375136 0.057842
107 1 0.313516 0.37559 0.0634412
108 1 0.25666 0.443101 0.0592365
230 1 0.189626 0.438756 0.120347
233 1 0.24916 0.377688 0.122982
234 1 0.312234 0.434793 0.131274
1039 1 0.433129 0.503967 0.0605043
111 1 0.43745 0.369617 0.0630702
112 1 0.372629 0.433232 0.063768
237 1 0.374693 0.378153 0.128189
238 1 0.438858 0.437338 0.124462
241 1 0.498629 0.370437 0.124143
1037 1 0.371106 0.50389 0.00189131
109 1 0.374549 0.374079 0.00198331
1043 1 0.560827 0.502739 0.0641113
116 1 0.496594 0.439964 0.0636783
115 1 0.566482 0.374425 0.0620596
120 1 0.625374 0.435434 0.0615099
242 1 0.564345 0.435939 0.12453
245 1 0.628195 0.373562 0.123528
117 1 0.630175 0.369013 0.000596425
114 1 0.560304 0.43938 0.00339834
119 1 0.689786 0.373061 0.0590191
123 1 0.811087 0.377744 0.0610136
124 1 0.744463 0.436359 0.06328
246 1 0.685803 0.438494 0.129502
249 1 0.74961 0.379675 0.124449
250 1 0.811092 0.438435 0.125147
1051 1 0.811863 0.496046 0.0662183
122 1 0.810858 0.43576 -0.00194166
1049 1 0.7499 0.500751 0.00685747
121 1 0.748094 0.375254 -0.0028316
97 1 1.00113 0.381762 -0.000319735
225 1 1.00111 0.376178 0.126361
100 1 0.999103 0.44351 0.0673451
127 1 0.935722 0.375421 0.063144
128 1 0.873476 0.437415 0.0662656
253 1 0.874229 0.374081 0.123347
254 1 0.938942 0.436916 0.126384
1181 1 0.872777 0.498408 0.123489
1153 1 1.00169 0.50294 0.126416
125 1 0.871583 0.378593 0.00224439
131 1 0.0661122 0.00304313 0.186065
136 1 0.12495 0.0624388 0.186228
163 1 0.0640648 0.128357 0.182119
258 1 0.0626981 0.0619077 0.247783
264 1 0.126369 0.0675035 0.310884
291 1 0.0660735 0.131082 0.312491
293 1 0.125047 0.128815 0.246153
260 1 0.000425628 0.0653311 0.313011
265 1 0.247674 0.00499134 0.247832
140 1 0.252307 0.0608524 0.184715
167 1 0.18698 0.12375 0.185372
171 1 0.310133 0.123316 0.187802
262 1 0.186192 0.0632289 0.247128
266 1 0.31253 0.0616505 0.248698
268 1 0.249755 0.0643218 0.317341
295 1 0.190035 0.130258 0.315309
297 1 0.245487 0.124041 0.249439
299 1 0.31653 0.125252 0.312611
269 1 0.374932 -0.000125256 0.253871
273 1 0.505477 -0.000178799 0.254357
144 1 0.377209 0.0628027 0.18647
175 1 0.446496 0.120393 0.190716
270 1 0.436477 0.0559138 0.248798
272 1 0.370864 0.0596871 0.315911
301 1 0.374205 0.122391 0.248648
303 1 0.438391 0.120895 0.310988
271 1 0.440818 -0.00135485 0.315411
276 1 0.497751 0.0603155 0.311848
148 1 0.51246 0.0617503 0.192672
305 1 0.503848 0.122449 0.255284
152 1 0.625433 0.0637309 0.186307
179 1 0.569086 0.125652 0.191324
274 1 0.573789 0.0632045 0.254197
280 1 0.628157 0.0624131 0.318731
307 1 0.567638 0.124122 0.31663
309 1 0.631322 0.124508 0.253863
279 1 0.6857 -0.0031882 0.312665
281 1 0.751638 -0.00232873 0.251622
156 1 0.750097 0.0665974 0.18677
183 1 0.682059 0.121499 0.191037
187 1 0.812542 0.130777 0.190322
278 1 0.687922 0.0613312 0.251059
282 1 0.813151 0.0640362 0.252592
284 1 0.753307 0.0610154 0.31272
311 1 0.692674 0.127067 0.315886
313 1 0.74786 0.125048 0.246894
315 1 0.81035 0.125439 0.314631
287 1 0.940758 0.0017618 0.308459
257 1 1.001 0.00333469 0.247542
132 1 1.00704 0.063259 0.183594
289 1 1.00415 0.123981 0.249764
160 1 0.873688 0.0659134 0.18521
191 1 0.941853 0.125725 0.186528
286 1 0.941486 0.0668212 0.249749
288 1 0.874542 0.0650163 0.314032
317 1 0.876779 0.124694 0.253668
319 1 0.935631 0.129568 0.312321
285 1 0.871854 -0.000236506 0.253783
168 1 0.123191 0.188879 0.186155
195 1 0.0550298 0.255334 0.176382
200 1 0.125925 0.311212 0.18768
290 1 0.0634768 0.197493 0.247888
296 1 0.12902 0.184952 0.312797
322 1 0.0632055 0.315691 0.245971
323 1 0.0603577 0.254717 0.314991
325 1 0.124839 0.256836 0.254947
328 1 0.127384 0.316214 0.313321
196 1 -0.00341254 0.314909 0.186535
324 1 -0.000127462 0.316287 0.3048
164 1 0.000108799 0.187723 0.184409
172 1 0.246811 0.183529 0.184273
199 1 0.183389 0.249565 0.186313
203 1 0.313654 0.2453 0.185767
204 1 0.245785 0.308776 0.185681
294 1 0.189653 0.192827 0.251433
298 1 0.311231 0.181688 0.245644
300 1 0.251728 0.189498 0.314382
326 1 0.188268 0.315667 0.253455
327 1 0.187192 0.252348 0.311377
329 1 0.251973 0.250684 0.249138
330 1 0.315646 0.311069 0.253852
331 1 0.312809 0.250095 0.313916
332 1 0.250251 0.311158 0.316346
176 1 0.373414 0.181182 0.190138
207 1 0.437562 0.245154 0.183413
208 1 0.378877 0.311088 0.191004
302 1 0.442431 0.186712 0.252219
304 1 0.370183 0.189132 0.314299
333 1 0.3738 0.252594 0.252518
334 1 0.440938 0.306002 0.252349
335 1 0.436528 0.245554 0.313019
336 1 0.374714 0.311237 0.314164
340 1 0.503453 0.312511 0.314857
180 1 0.505751 0.183376 0.18623
337 1 0.49914 0.247241 0.250586
308 1 0.503764 0.190083 0.311267
212 1 0.502245 0.306527 0.188494
184 1 0.625416 0.187559 0.185877
211 1 0.563796 0.24747 0.18639
216 1 0.628127 0.30778 0.189131
306 1 0.566948 0.189865 0.250897
312 1 0.627033 0.187076 0.312157
338 1 0.56235 0.31105 0.247585
339 1 0.56346 0.254043 0.314636
341 1 0.626765 0.251339 0.252584
344 1 0.629302 0.312618 0.31496
188 1 0.742933 0.183921 0.188738
215 1 0.68675 0.248652 0.187622
219 1 0.810532 0.248981 0.191527
220 1 0.749132 0.313481 0.188156
310 1 0.6849 0.185336 0.248297
314 1 0.813205 0.18842 0.257274
316 1 0.751433 0.190992 0.316711
342 1 0.688537 0.313932 0.249475
343 1 0.689508 0.251716 0.314895
345 1 0.747173 0.248634 0.250586
346 1 0.811394 0.311736 0.248324
347 1 0.810981 0.251769 0.317455
348 1 0.750568 0.312324 0.311747
292 1 1.00328 0.191946 0.314328
321 1 1.00054 0.255014 0.244559
192 1 0.87621 0.185639 0.192625
223 1 0.938601 0.252364 0.190261
224 1 0.876522 0.309589 0.186003
318 1 0.941509 0.185841 0.252207
320 1 0.873512 0.187556 0.313621
349 1 0.876629 0.248283 0.256463
350 1 0.934864 0.314554 0.249698
351 1 0.940606 0.253667 0.314425
352 1 0.876212 0.313132 0.312163
1155 1 0.0673383 0.501424 0.184209
227 1 0.0670633 0.377117 0.183463
232 1 0.127913 0.437832 0.185644
354 1 0.0630579 0.440601 0.246864
355 1 0.0650824 0.377417 0.305177
357 1 0.128747 0.379007 0.243697
360 1 0.127608 0.437006 0.308048
353 1 -0.00153821 0.379191 0.241971
1285 1 0.118873 0.503264 0.252714
231 1 0.188642 0.377583 0.184268
235 1 0.316524 0.369025 0.194998
236 1 0.246187 0.439095 0.186656
358 1 0.189333 0.440008 0.250075
359 1 0.190548 0.377198 0.315653
361 1 0.248866 0.373598 0.251422
362 1 0.317005 0.433194 0.251124
363 1 0.313157 0.374651 0.312948
364 1 0.249925 0.441075 0.315457
1295 1 0.436377 0.501551 0.314609
239 1 0.436666 0.374799 0.190552
240 1 0.37588 0.43878 0.188616
365 1 0.376863 0.371554 0.251661
366 1 0.440772 0.438211 0.245206
367 1 0.437768 0.37236 0.309307
368 1 0.376374 0.44309 0.314392
1167 1 0.443169 0.498189 0.186582
369 1 0.498104 0.371121 0.250541
372 1 0.497154 0.438591 0.309224
244 1 0.506318 0.437326 0.186444
243 1 0.562568 0.372541 0.184155
248 1 0.62033 0.433435 0.187169
370 1 0.562897 0.436715 0.251019
371 1 0.560465 0.373918 0.314404
373 1 0.625092 0.373477 0.252894
376 1 0.622741 0.440779 0.313964
1175 1 0.683735 0.499155 0.189784
247 1 0.682768 0.375205 0.190607
251 1 0.814429 0.3767 0.186743
252 1 0.746702 0.440964 0.187245
374 1 0.685268 0.439209 0.249067
375 1 0.685554 0.376873 0.309555
377 1 0.752036 0.373337 0.251594
378 1 0.811587 0.437503 0.247962
379 1 0.812743 0.374251 0.312953
380 1 0.750127 0.438968 0.31228
1183 1 0.940062 0.501929 0.184285
356 1 0.99984 0.435655 0.312508
228 1 1.00598 0.441737 0.186283
255 1 0.934773 0.374011 0.18682
256 1 0.875011 0.437803 0.186506
381 1 0.872523 0.379659 0.254995
382 1 0.940621 0.439882 0.2407
383 1 0.941254 0.377158 0.3111
384 1 0.879574 0.439147 0.313014
389 1 0.124805 0.00282677 0.379417
514 1 0.0610255 0.0639389 0.500268
386 1 0.0655315 0.0637459 0.374405
392 1 0.125888 0.0678542 0.438679
419 1 0.0625327 0.126037 0.442762
421 1 0.123961 0.123598 0.378383
417 1 0.0038197 0.125986 0.375633
549 1 0.132255 0.128453 0.499922
388 1 0.00289679 0.0590679 0.435619
385 1 0.000961249 0.00134507 0.371766
521 1 0.253277 -0.00515183 0.502872
391 1 0.188471 -0.00715985 0.440641
390 1 0.188923 0.0593743 0.378774
394 1 0.309852 0.0644534 0.377668
396 1 0.249166 0.0577624 0.438731
423 1 0.186094 0.124685 0.437315
425 1 0.248628 0.12466 0.379041
427 1 0.313428 0.12482 0.436671
553 1 0.248871 0.12509 0.498765
522 1 0.313855 0.0600222 0.504895
399 1 0.436839 -0.00179326 0.43735
398 1 0.438353 0.0583396 0.373446
400 1 0.374985 0.0610815 0.43646
429 1 0.376805 0.120469 0.375328
431 1 0.434402 0.124803 0.437159
404 1 0.499879 0.0569923 0.436163
401 1 0.500154 -5.46898e-05 0.371588
557 1 0.370723 0.122494 0.500906
561 1 0.500926 0.119652 0.494867
530 1 0.562345 0.0589693 0.499052
433 1 0.496275 0.123546 0.371264
402 1 0.558734 0.0643131 0.376523
408 1 0.629684 0.0610511 0.436533
435 1 0.564469 0.127624 0.437485
437 1 0.630563 0.123225 0.378861
533 1 0.627621 -0.000577451 0.501842
411 1 0.816238 0.000527564 0.439307
534 1 0.694242 0.0583746 0.504036
409 1 0.752705 -0.000771602 0.376572
406 1 0.692448 0.0601285 0.377965
410 1 0.815871 0.0647623 0.37593
412 1 0.755597 0.0579335 0.442084
439 1 0.689845 0.125728 0.440416
441 1 0.751733 0.12555 0.381287
443 1 0.815807 0.129376 0.439552
538 1 0.818776 0.0582356 0.503335
413 1 0.87455 0.00288286 0.375062
414 1 0.941554 0.0663945 0.372229
416 1 0.875571 0.065058 0.442239
445 1 0.877437 0.125013 0.37784
447 1 0.940983 0.124167 0.440134
545 1 1.00078 0.124303 0.499866
542 1 0.939546 0.0622577 0.50314
418 1 0.0710432 0.188814 0.37869
424 1 0.126082 0.185861 0.438573
450 1 0.0610014 0.31597 0.372523
451 1 0.0665852 0.256074 0.433992
453 1 0.124646 0.253044 0.368913
456 1 0.125023 0.318194 0.438199
452 1 0.00540564 0.318073 0.437583
581 1 0.12705 0.251497 0.497949
546 1 0.0623062 0.19122 0.499069
422 1 0.181851 0.189232 0.379738
426 1 0.31238 0.186283 0.37551
428 1 0.253235 0.183253 0.438103
454 1 0.187348 0.313781 0.374175
455 1 0.183567 0.251935 0.435096
457 1 0.249556 0.24754 0.377832
458 1 0.31242 0.313093 0.372753
459 1 0.31095 0.248707 0.435926
460 1 0.245505 0.311215 0.43534
586 1 0.307847 0.308805 0.49705
585 1 0.247902 0.249901 0.497775
550 1 0.190062 0.185991 0.496146
582 1 0.188726 0.311444 0.496756
430 1 0.439477 0.188845 0.374438
432 1 0.375421 0.190389 0.435555
461 1 0.379078 0.252754 0.375846
462 1 0.43401 0.31246 0.377066
463 1 0.442435 0.249844 0.439403
464 1 0.374372 0.309671 0.440229
468 1 0.496536 0.316065 0.440172
558 1 0.436939 0.185994 0.496617
436 1 0.499858 0.183739 0.435561
465 1 0.500511 0.24861 0.37704
434 1 0.561604 0.184941 0.374644
440 1 0.628561 0.188799 0.436562
466 1 0.561359 0.313156 0.374383
467 1 0.557812 0.248751 0.437064
469 1 0.627279 0.250173 0.373398
472 1 0.620456 0.311512 0.436429
562 1 0.562703 0.189271 0.498589
566 1 0.68669 0.189919 0.498801
601 1 0.741221 0.25221 0.498138
474 1 0.810511 0.316387 0.376634
473 1 0.753696 0.254082 0.382885
471 1 0.688875 0.251058 0.436504
470 1 0.689131 0.312854 0.376167
476 1 0.749313 0.317273 0.442517
475 1 0.80982 0.251603 0.442867
438 1 0.689634 0.189973 0.374938
442 1 0.812404 0.18676 0.378798
444 1 0.748814 0.189744 0.439926
602 1 0.815751 0.309774 0.505863
420 1 1.00513 0.187136 0.437175
449 1 1.00531 0.252705 0.377915
480 1 0.875611 0.311151 0.441468
479 1 0.938656 0.24907 0.437524
478 1 0.934797 0.312345 0.377462
477 1 0.871403 0.252738 0.373666
448 1 0.877947 0.188638 0.437711
446 1 0.938807 0.187348 0.375355
577 1 1.00332 0.253061 0.500931
484 1 0.000575128 0.439526 0.442133
1411 1 0.0651932 0.501402 0.435301
613 1 0.127434 0.379736 0.496869
485 1 0.116774 0.377885 0.372716
483 1 0.0625517 0.377451 0.438498
482 1 0.0574369 0.43914 0.375432
488 1 0.123047 0.439987 0.43752
1415 1 0.18953 0.505321 0.432525
487 1 0.184872 0.377314 0.433523
486 1 0.186142 0.44037 0.372541
489 1 0.252419 0.376068 0.378216
492 1 0.250956 0.439795 0.436588
491 1 0.313162 0.375867 0.439625
490 1 0.316264 0.436051 0.374111
618 1 0.31224 0.436616 0.497621
1545 1 0.252488 0.500458 0.496082
625 1 0.500454 0.373343 0.499387
496 1 0.37539 0.438387 0.442237
500 1 0.498665 0.437252 0.437653
494 1 0.436894 0.434886 0.373173
493 1 0.377597 0.371674 0.37421
495 1 0.435677 0.372599 0.436372
622 1 0.442876 0.435142 0.50401
626 1 0.560976 0.432114 0.500599
497 1 0.494454 0.374971 0.372967
498 1 0.561367 0.439938 0.376276
501 1 0.626865 0.37468 0.376556
504 1 0.621969 0.434157 0.441137
499 1 0.561992 0.373718 0.434526
502 1 0.688221 0.438041 0.378285
503 1 0.685387 0.373118 0.43933
1561 1 0.746486 0.499945 0.502511
505 1 0.7483 0.375906 0.380282
506 1 0.80898 0.435503 0.372516
507 1 0.812407 0.376036 0.43807
508 1 0.745844 0.435128 0.444413
633 1 0.747036 0.373465 0.503196
634 1 0.810875 0.433608 0.504255
509 1 0.877856 0.381063 0.37197
510 1 0.943182 0.443669 0.373931
511 1 0.939502 0.379842 0.435966
1409 1 0.998264 0.506658 0.378858
1439 1 0.936211 0.501751 0.442696
512 1 0.877734 0.438168 0.440128
481 1 1.00296 0.37379 0.372531
637 1 0.870342 0.374145 0.498076
520 1 0.127606 0.0635124 0.555742
547 1 0.0653979 0.127973 0.560488
642 1 0.0670786 0.0589633 0.626885
677 1 0.125782 0.125396 0.62143
515 1 0.0666609 0.00348759 0.566631
517 1 0.125985 -0.00204709 0.498784
513 1 0.00623353 0.0024703 0.500746
524 1 0.249745 0.062467 0.559072
551 1 0.189319 0.121804 0.563158
555 1 0.30681 0.126402 0.564958
646 1 0.181003 0.061921 0.621091
650 1 0.312665 0.0594688 0.62277
681 1 0.25018 0.120733 0.626334
519 1 0.189135 -0.0051528 0.558073
649 1 0.253111 -0.000564897 0.623858
518 1 0.189014 0.0612397 0.498756
528 1 0.376239 0.0603236 0.560316
559 1 0.434895 0.120636 0.559137
654 1 0.436422 0.0604693 0.627466
685 1 0.370986 0.123108 0.624511
532 1 0.494005 0.0598747 0.561989
526 1 0.436232 0.0602849 0.500791
689 1 0.497713 0.123215 0.624699
536 1 0.626195 0.0603077 0.557714
563 1 0.555611 0.117458 0.56228
658 1 0.554079 0.0559656 0.625291
693 1 0.622671 0.119709 0.625179
565 1 0.62207 0.125064 0.501884
539 1 0.811893 -0.00226332 0.564108
540 1 0.752634 0.0595382 0.561725
567 1 0.687096 0.122663 0.563415
571 1 0.816754 0.125223 0.558313
662 1 0.684833 0.058094 0.627919
666 1 0.812929 0.0651666 0.625921
697 1 0.753094 0.121492 0.624749
665 1 0.749556 -0.00124956 0.626916
569 1 0.746457 0.122806 0.50035
516 1 1.00358 0.0612021 0.56221
673 1 1.00447 0.124793 0.624122
641 1 1.00223 -0.00457042 0.62587
544 1 0.880656 0.0633772 0.564391
575 1 0.942965 0.124104 0.563247
670 1 0.939549 0.0624551 0.622523
701 1 0.876464 0.1301 0.622987
573 1 0.881077 0.132494 0.501708
669 1 0.875552 -0.000444238 0.624794
552 1 0.129592 0.18839 0.562817
579 1 0.0648169 0.25774 0.560417
584 1 0.128228 0.315594 0.563215
674 1 0.0678645 0.19188 0.626092
706 1 0.0611816 0.315521 0.626857
709 1 0.130291 0.250905 0.630816
705 1 0.0058678 0.253518 0.624138
578 1 0.0653742 0.314197 0.502012
556 1 0.251601 0.185776 0.561725
583 1 0.187501 0.250705 0.560064
587 1 0.312471 0.244846 0.567281
588 1 0.251305 0.306503 0.561845
678 1 0.191148 0.186474 0.622601
682 1 0.311565 0.188431 0.62748
710 1 0.187319 0.314134 0.629691
713 1 0.249164 0.253455 0.624161
714 1 0.314122 0.311241 0.621397
554 1 0.314073 0.18426 0.499609
590 1 0.431135 0.314912 0.5005
589 1 0.3757 0.248937 0.49889
560 1 0.377271 0.186525 0.568095
591 1 0.436422 0.2513 0.562911
592 1 0.3656 0.313558 0.556203
686 1 0.434458 0.186438 0.625553
717 1 0.37635 0.251277 0.622864
718 1 0.434454 0.308871 0.62476
596 1 0.499505 0.315548 0.564824
721 1 0.494051 0.251974 0.624916
564 1 0.495238 0.187302 0.560144
568 1 0.620929 0.182956 0.563047
595 1 0.560668 0.254434 0.563467
600 1 0.62164 0.311474 0.563446
690 1 0.55545 0.186328 0.62139
722 1 0.558395 0.313472 0.627718
725 1 0.619904 0.2489 0.624868
597 1 0.623704 0.249034 0.495377
593 1 0.501451 0.253882 0.501721
594 1 0.560518 0.311635 0.495703
572 1 0.742702 0.184424 0.564414
599 1 0.680381 0.250726 0.561509
603 1 0.802472 0.248641 0.562979
604 1 0.744887 0.314175 0.562575
694 1 0.685313 0.18327 0.623318
698 1 0.811734 0.189672 0.624018
726 1 0.682494 0.31437 0.623919
729 1 0.745269 0.25325 0.622878
730 1 0.805986 0.307995 0.624958
570 1 0.806863 0.19039 0.502693
598 1 0.679917 0.311899 0.500627
606 1 0.93842 0.311045 0.496855
580 1 1.00517 0.316192 0.560064
548 1 1.00075 0.187319 0.564706
576 1 0.874933 0.192831 0.561453
607 1 0.938585 0.255616 0.562621
608 1 0.871001 0.304644 0.568348
702 1 0.935161 0.188419 0.627807
733 1 0.876957 0.246843 0.628489
734 1 0.939923 0.313076 0.625085
605 1 0.877573 0.248014 0.502758
574 1 0.944664 0.191029 0.502134
610 1 0.0611884 0.436021 0.504828
611 1 0.0695251 0.372725 0.559374
616 1 0.129311 0.437188 0.562866
738 1 0.0634202 0.435972 0.62129
741 1 0.126319 0.375824 0.622539
737 1 0.00258037 0.375423 0.625761
612 1 -0.000688051 0.436032 0.561353
1541 1 0.121909 0.506213 0.499898
1665 1 -0.00148085 0.498996 0.624795
617 1 0.247818 0.376398 0.495872
615 1 0.194053 0.374777 0.559271
619 1 0.316264 0.378839 0.561825
620 1 0.252171 0.441105 0.560511
742 1 0.186685 0.441168 0.624501
745 1 0.249247 0.373724 0.62103
746 1 0.31325 0.439458 0.622313
614 1 0.185039 0.446237 0.4933
623 1 0.432532 0.367013 0.56347
624 1 0.374769 0.437562 0.563103
749 1 0.370806 0.373718 0.626452
750 1 0.434392 0.437262 0.624917
753 1 0.495906 0.375133 0.626728
628 1 0.50071 0.430019 0.563768
621 1 0.371269 0.377421 0.500416
627 1 0.561769 0.372348 0.563121
632 1 0.624102 0.435599 0.563004
754 1 0.557669 0.437295 0.625319
757 1 0.621969 0.374749 0.624644
629 1 0.624166 0.374252 0.498792
1689 1 0.74447 0.499732 0.623634
1559 1 0.681698 0.498748 0.561817
1563 1 0.806439 0.494708 0.563372
631 1 0.682715 0.377376 0.558369
635 1 0.806735 0.367967 0.568414
636 1 0.746894 0.434699 0.561617
758 1 0.687475 0.437717 0.6287
761 1 0.743769 0.374949 0.62254
762 1 0.810312 0.435621 0.627162
630 1 0.684513 0.436173 0.498965
609 1 1.00076 0.373698 0.498299
639 1 0.93766 0.373298 0.564117
640 1 0.87394 0.434528 0.562856
765 1 0.869398 0.370571 0.623989
766 1 0.937753 0.436434 0.623198
638 1 0.937189 0.438174 0.501809
1565 1 0.872741 0.499131 0.504732
643 1 0.065304 -0.00521636 0.688432
648 1 0.126144 0.0631524 0.683172
675 1 0.0662834 0.125736 0.685561
770 1 0.0682702 0.0632317 0.747252
776 1 0.129444 0.0658612 0.810168
803 1 0.0626218 0.12539 0.809069
805 1 0.129153 0.120007 0.746457
773 1 0.128269 0.00151312 0.749434
772 1 0.000324259 0.0633736 0.807731
644 1 0.0072644 0.0575453 0.687672
775 1 0.189944 -0.00146882 0.815515
652 1 0.252661 0.058119 0.68492
679 1 0.186755 0.125169 0.68257
683 1 0.312643 0.128017 0.68651
774 1 0.192062 0.0607435 0.748828
778 1 0.312087 0.0608235 0.74633
780 1 0.253206 0.0595164 0.812366
807 1 0.190654 0.12755 0.810646
809 1 0.248559 0.124257 0.749868
811 1 0.315737 0.126042 0.813202
781 1 0.374991 0.0006658 0.752072
783 1 0.436013 -0.00353669 0.812388
656 1 0.372625 0.0615836 0.685382
687 1 0.435673 0.124211 0.686381
782 1 0.434321 0.058678 0.747311
784 1 0.374405 0.0617417 0.813508
813 1 0.37415 0.121575 0.750572
815 1 0.436878 0.126691 0.80745
788 1 0.492776 0.0595315 0.811512
817 1 0.499784 0.123149 0.744092
660 1 0.494759 0.0625905 0.685111
785 1 0.497072 -0.00202242 0.752554
787 1 0.561221 0.000865936 0.814184
659 1 0.55639 -0.00219545 0.689732
789 1 0.622105 -0.0034455 0.750809
664 1 0.617988 0.059299 0.687444
691 1 0.558806 0.123228 0.681174
786 1 0.556884 0.0610429 0.748009
792 1 0.626017 0.0652355 0.810427
819 1 0.559187 0.120831 0.809391
821 1 0.618617 0.126545 0.74652
793 1 0.748897 6.75927e-05 0.751986
668 1 0.751076 0.0601442 0.683825
695 1 0.682991 0.125104 0.688679
699 1 0.816214 0.12734 0.688001
790 1 0.687992 0.0664982 0.745229
794 1 0.810732 0.0641156 0.745381
796 1 0.75063 0.0673362 0.8042
823 1 0.684006 0.125868 0.810278
825 1 0.749068 0.127041 0.742702
827 1 0.81072 0.127912 0.809809
795 1 0.811424 0.00322598 0.811712
663 1 0.684688 -0.00255538 0.687255
799 1 0.940248 -0.00120939 0.810939
671 1 0.939158 0.00183747 0.6859
801 1 0.99902 0.123803 0.747944
672 1 0.874883 0.0630253 0.688583
703 1 0.936416 0.120723 0.686209
798 1 0.938893 0.0594101 0.750335
800 1 0.877824 0.0628586 0.811247
829 1 0.875392 0.126588 0.752166
831 1 0.939647 0.123113 0.811148
797 1 0.874185 -0.00426886 0.754898
680 1 0.129881 0.190094 0.686656
707 1 0.0627645 0.248953 0.686316
712 1 0.121128 0.318701 0.687466
802 1 0.0616892 0.185531 0.748988
808 1 0.12097 0.1863 0.813497
834 1 0.062632 0.312671 0.752436
835 1 0.0591207 0.248247 0.816953
837 1 0.12706 0.25348 0.747158
840 1 0.127338 0.312744 0.810376
836 1 -2.32433e-06 0.313588 0.811132
804 1 0.0032396 0.187155 0.812295
684 1 0.250075 0.185863 0.687363
711 1 0.18923 0.2528 0.687182
715 1 0.308617 0.25268 0.688685
716 1 0.247945 0.315959 0.690159
806 1 0.186983 0.185121 0.750889
810 1 0.307272 0.190589 0.747268
812 1 0.253859 0.189714 0.811345
838 1 0.187322 0.314646 0.750211
839 1 0.183147 0.244837 0.812358
841 1 0.249075 0.249724 0.747855
842 1 0.310597 0.31818 0.750969
843 1 0.314759 0.251167 0.807832
844 1 0.248509 0.313526 0.812674
688 1 0.373996 0.187068 0.691683
719 1 0.43542 0.250713 0.687403
720 1 0.365426 0.314494 0.6859
814 1 0.438557 0.186496 0.744762
816 1 0.377169 0.188934 0.809416
845 1 0.371965 0.253887 0.745233
846 1 0.436485 0.311826 0.746162
847 1 0.442691 0.247885 0.813591
848 1 0.372133 0.314645 0.81138
692 1 0.49788 0.188489 0.685418
820 1 0.500359 0.187381 0.812274
852 1 0.499641 0.30927 0.807588
849 1 0.500032 0.250181 0.747889
724 1 0.494004 0.316932 0.685251
696 1 0.617007 0.185084 0.685684
723 1 0.559445 0.251453 0.684539
728 1 0.62397 0.314411 0.68493
818 1 0.561601 0.185627 0.75343
824 1 0.624906 0.191353 0.809982
850 1 0.553825 0.314488 0.742914
851 1 0.559504 0.250804 0.810079
853 1 0.624435 0.250959 0.746553
856 1 0.618709 0.31171 0.806766
700 1 0.744979 0.186997 0.684574
727 1 0.685791 0.247018 0.681133
731 1 0.809375 0.247084 0.687664
732 1 0.7474 0.311196 0.687964
822 1 0.682176 0.185371 0.74897
826 1 0.806432 0.187676 0.748427
828 1 0.749587 0.186981 0.810865
854 1 0.68677 0.313247 0.747472
855 1 0.684923 0.254177 0.809797
857 1 0.743104 0.247822 0.745484
858 1 0.806828 0.313169 0.750083
859 1 0.806833 0.25194 0.809892
860 1 0.748836 0.31297 0.810614
708 1 1.00305 0.314578 0.691633
833 1 1.0032 0.248783 0.748086
676 1 1.00126 0.186619 0.685568
704 1 0.875167 0.187689 0.69132
735 1 0.941968 0.24875 0.683883
736 1 0.876091 0.310915 0.685388
830 1 0.938404 0.18684 0.74969
832 1 0.87529 0.189408 0.808535
861 1 0.870832 0.252193 0.748621
862 1 0.940615 0.308518 0.749168
863 1 0.940448 0.249221 0.81248
864 1 0.873365 0.310531 0.811249
1667 1 0.0638851 0.504725 0.684835
739 1 0.0627989 0.376657 0.689667
744 1 0.126586 0.439843 0.685086
866 1 0.0674587 0.442164 0.745869
867 1 0.0636791 0.379623 0.810714
869 1 0.123522 0.375984 0.751074
872 1 0.121985 0.441815 0.814856
740 1 -0.000532961 0.435023 0.686755
1795 1 0.0605211 0.504585 0.813596
865 1 0.000762798 0.378754 0.750948
1797 1 0.126338 0.502436 0.751021
743 1 0.186479 0.377038 0.689734
747 1 0.308323 0.380618 0.685978
748 1 0.247969 0.442155 0.686845
870 1 0.188102 0.438953 0.755652
871 1 0.186132 0.379384 0.818186
873 1 0.246446 0.378502 0.753344
874 1 0.306697 0.439071 0.754904
875 1 0.311613 0.376706 0.818209
876 1 0.243675 0.445015 0.819301
1807 1 0.434391 0.506032 0.808838
1809 1 0.501011 0.49707 0.75505
751 1 0.432381 0.374101 0.695785
752 1 0.373802 0.439504 0.686019
877 1 0.375243 0.374572 0.752733
878 1 0.433102 0.438369 0.752616
879 1 0.439492 0.374314 0.814672
880 1 0.372604 0.436643 0.812408
881 1 0.497323 0.371943 0.751612
884 1 0.500193 0.433984 0.816494
1813 1 0.621029 0.497435 0.753396
756 1 0.497257 0.43327 0.689097
755 1 0.560879 0.376793 0.68662
760 1 0.622049 0.436936 0.688215
882 1 0.559947 0.43689 0.749553
883 1 0.559667 0.374442 0.816648
885 1 0.621899 0.377255 0.7493
888 1 0.623863 0.437931 0.812156
1687 1 0.685045 0.497011 0.685796
759 1 0.68644 0.37753 0.686881
763 1 0.812537 0.366936 0.688792
764 1 0.749195 0.435498 0.68837
886 1 0.688144 0.44053 0.752642
887 1 0.681464 0.371545 0.811996
889 1 0.744652 0.377433 0.748641
890 1 0.809895 0.435207 0.748565
891 1 0.809344 0.374433 0.812692
892 1 0.753411 0.436472 0.816461
1817 1 0.750984 0.499608 0.747903
1793 1 0.996199 0.502876 0.744189
1821 1 0.873154 0.496732 0.751032
868 1 1.00487 0.445233 0.808266
767 1 0.936974 0.371791 0.68562
768 1 0.879924 0.432158 0.685139
893 1 0.872705 0.37029 0.750078
894 1 0.937244 0.437137 0.748087
895 1 0.933887 0.377969 0.80847
896 1 0.87319 0.432481 0.811688
1823 1 0.936016 0.501093 0.810058
898 1 0.0637378 0.0643345 0.874319
904 1 0.123953 0.0632038 0.935344
931 1 0.0598118 0.123711 0.939435
933 1 0.124766 0.126044 0.868332
900 1 -0.000758142 0.0633278 0.939052
929 1 -0.00170932 0.126361 0.873343
37 1 0.123553 0.12432 0.995153
33 1 0.000569376 0.120397 0.998809
1 1 -0.00126349 0.00137057 1.00075
2 1 0.0673021 0.0618703 1.00178
902 1 0.189432 0.0663454 0.872162
906 1 0.31243 0.0640764 0.876582
908 1 0.24864 0.0627784 0.937937
935 1 0.18669 0.130529 0.932885
937 1 0.254477 0.124716 0.874368
939 1 0.311418 0.124188 0.936671
909 1 0.373231 -0.0018092 0.871904
911 1 0.433226 0.000442166 0.936231
14 1 0.43843 0.0613757 0.999586
910 1 0.432473 0.0608654 0.869716
912 1 0.373371 0.0624751 0.940524
941 1 0.378505 0.123869 0.874387
943 1 0.433485 0.123491 0.934452
945 1 0.498712 0.122622 0.870289
45 1 0.378818 0.125916 1.00245
13 1 0.373389 0.00120603 0.996898
917 1 0.624403 0.00369395 0.875207
915 1 0.568421 -0.00462799 0.936463
916 1 0.501456 0.0655248 0.936212
914 1 0.566727 0.0617427 0.875415
920 1 0.626561 0.0631541 0.93721
947 1 0.563683 0.129971 0.934645
949 1 0.625981 0.126526 0.874829
49 1 0.495492 0.122659 1.00177
21 1 0.628297 0.00212996 0.996889
921 1 0.748122 0.00506165 0.872414
918 1 0.686323 0.0589854 0.875929
922 1 0.810751 0.0647145 0.870677
924 1 0.751848 0.0659524 0.935744
951 1 0.687528 0.124017 0.933076
953 1 0.74774 0.128916 0.873254
955 1 0.817474 0.129099 0.931168
61 1 0.874646 0.123196 1.00411
926 1 0.939541 0.0675436 0.874285
928 1 0.873993 0.065854 0.935736
957 1 0.874371 0.129293 0.8714
959 1 0.935359 0.125915 0.937642
30 1 0.935692 0.065259 1.00117
927 1 0.938484 0.0055911 0.935861
925 1 0.87996 0.00132936 0.872872
930 1 0.0609588 0.184598 0.876024
936 1 0.120494 0.186494 0.933603
962 1 0.0585889 0.316768 0.873361
963 1 0.0606524 0.25082 0.933882
965 1 0.124695 0.249005 0.875779
968 1 0.120604 0.313548 0.93568
961 1 -0.00276481 0.24973 0.875471
65 1 0.000623564 0.250781 0.994904
66 1 0.0604647 0.310863 0.995097
934 1 0.185208 0.190029 0.875935
938 1 0.316514 0.186043 0.87284
940 1 0.249364 0.189323 0.939775
966 1 0.187483 0.310504 0.876438
967 1 0.190525 0.248967 0.936417
969 1 0.244488 0.245821 0.874139
970 1 0.313997 0.313032 0.877203
971 1 0.314947 0.245003 0.9358
972 1 0.254984 0.312813 0.940036
70 1 0.190926 0.315849 0.995634
74 1 0.313945 0.313797 1.00445
73 1 0.249932 0.249724 1.00546
42 1 0.318723 0.188151 0.99975
77 1 0.378112 0.248655 1.00002
942 1 0.441137 0.184424 0.874171
944 1 0.3782 0.18768 0.937663
973 1 0.377381 0.249518 0.873738
974 1 0.440256 0.31244 0.871791
975 1 0.438564 0.25164 0.935117
976 1 0.371061 0.308227 0.940197
78 1 0.440441 0.312333 1.00184
980 1 0.500404 0.313701 0.932619
977 1 0.503407 0.247704 0.874441
948 1 0.502242 0.186365 0.934359
946 1 0.563433 0.187446 0.870584
952 1 0.62341 0.189831 0.93798
978 1 0.559815 0.313863 0.873475
979 1 0.564196 0.25352 0.940597
981 1 0.62603 0.252055 0.875021
984 1 0.626815 0.30996 0.936847
50 1 0.56284 0.187669 0.997666
82 1 0.562631 0.314134 0.996757
86 1 0.691712 0.311721 1.00339
988 1 0.750063 0.310552 0.936205
987 1 0.811593 0.251398 0.934878
986 1 0.810642 0.31376 0.874147
985 1 0.749327 0.251188 0.872592
983 1 0.691802 0.250713 0.937864
982 1 0.683184 0.312571 0.872206
950 1 0.684981 0.191781 0.876865
954 1 0.812564 0.191139 0.870542
956 1 0.751021 0.185023 0.935729
89 1 0.74776 0.24549 1.00019
90 1 0.813163 0.312122 0.994912
58 1 0.811571 0.187111 0.997649
54 1 0.690671 0.189069 0.995707
964 1 0.997055 0.317489 0.93995
958 1 0.935566 0.186312 0.870697
960 1 0.876093 0.19429 0.936234
989 1 0.877013 0.251813 0.872199
932 1 0.995863 0.185016 0.935301
992 1 0.876624 0.316398 0.937547
991 1 0.938284 0.255132 0.936876
990 1 0.937122 0.314088 0.868378
62 1 0.936348 0.184314 1.00126
93 1 0.870544 0.247046 0.993818
1029 1 0.123297 0.504162 1.00267
994 1 0.0627097 0.44326 0.878625
1000 1 0.123545 0.436389 0.941032
997 1 0.122507 0.379094 0.874723
995 1 0.0605055 0.377335 0.939541
1925 1 0.1255 0.500479 0.873894
98 1 0.0610945 0.44166 0.999934
102 1 0.183797 0.437397 1.00305
1002 1 0.311394 0.440753 0.876367
1001 1 0.248157 0.370849 0.876463
1003 1 0.313021 0.377951 0.935554
999 1 0.186836 0.372318 0.937216
1004 1 0.245049 0.440068 0.936444
998 1 0.181599 0.438529 0.878438
106 1 0.313337 0.43484 0.997604
1931 1 0.310031 0.49992 0.938042
1929 1 0.2458 0.501553 0.879657
105 1 0.248151 0.376273 0.996497
1009 1 0.495792 0.376522 0.875849
1007 1 0.432944 0.368625 0.934516
1008 1 0.376543 0.435917 0.932932
1005 1 0.377383 0.373873 0.875641
1006 1 0.437074 0.436728 0.873765
110 1 0.433917 0.440024 0.999006
1041 1 0.498219 0.503434 1.00393
1939 1 0.562251 0.499154 0.938671
1937 1 0.497324 0.500207 0.876943
113 1 0.501887 0.371274 0.999994
1011 1 0.564445 0.379238 0.941777
1013 1 0.624653 0.372659 0.87801
1016 1 0.625168 0.436667 0.937248
1010 1 0.558098 0.435052 0.877095
1012 1 0.49719 0.437329 0.94122
1045 1 0.625242 0.496829 0.999438
1014 1 0.68527 0.432059 0.870552
1947 1 0.813076 0.497965 0.938264
1015 1 0.688244 0.368855 0.939425
118 1 0.684662 0.43595 0.99721
1017 1 0.749924 0.374467 0.881633
1018 1 0.816708 0.437487 0.874511
1019 1 0.81464 0.377907 0.937614
1020 1 0.746494 0.435439 0.937597
1943 1 0.688488 0.50298 0.94094
996 1 1.00017 0.438632 0.93412
1951 1 0.935331 0.503337 0.941924
1949 1 0.875543 0.502011 0.874271
1021 1 0.873864 0.375221 0.872273
1053 1 0.871479 0.500756 1.00211
1022 1 0.935977 0.438303 0.87107
1023 1 0.93281 0.377695 0.938311
1024 1 0.875141 0.440968 0.936992
126 1 0.933507 0.441519 1.00295
993 1 0.998529 0.37983 0.871672
1032 1 0.124152 0.566193 0.0618317
1059 1 0.0620459 0.626492 0.0642579
1154 1 0.0599026 0.569171 0.124672
1189 1 0.126533 0.63332 0.123931
1028 1 -0.00432021 0.567881 0.0613693
1061 1 0.12352 0.626869 0.00304659
1026 1 0.0608149 0.563232 -0.000111311
1036 1 0.250879 0.561395 0.0629162
1063 1 0.194291 0.624776 0.0636002
1067 1 0.31304 0.62646 0.0620621
1158 1 0.185166 0.567254 0.120906
1162 1 0.315109 0.566491 0.125681
1193 1 0.254628 0.621274 0.128916
1161 1 0.254803 0.496862 0.125218
1035 1 0.319976 0.499585 0.065727
1065 1 0.251512 0.626589 0.000585661
1034 1 0.316481 0.568087 -0.000848411
1040 1 0.378897 0.568851 0.0620645
1071 1 0.441548 0.630316 0.067668
1166 1 0.43828 0.563708 0.125102
1197 1 0.376004 0.627149 0.126818
1069 1 0.375946 0.626748 -0.00370854
1165 1 0.376441 0.498738 0.126261
1038 1 0.434373 0.565361 -0.00535809
1169 1 0.499812 0.501701 0.123923
1201 1 0.501107 0.625096 0.128766
1044 1 0.496551 0.566229 0.0622555
1048 1 0.624164 0.559785 0.0657092
1075 1 0.55804 0.623186 0.0655762
1170 1 0.564424 0.566427 0.12749
1205 1 0.625915 0.624246 0.122373
1173 1 0.626454 0.502618 0.127074
1073 1 0.502582 0.624015 -0.000299821
1077 1 0.621522 0.619324 0.0038788
1052 1 0.75284 0.565618 0.0694212
1079 1 0.686356 0.620403 0.0616066
1083 1 0.811475 0.627244 0.063127
1174 1 0.686966 0.565049 0.125613
1178 1 0.810582 0.565119 0.128116
1209 1 0.751384 0.629271 0.124408
1177 1 0.750081 0.500106 0.124655
1047 1 0.684888 0.497376 0.0639564
1185 1 0.998666 0.62707 0.122818
1056 1 0.869544 0.564505 0.0671461
1087 1 0.934125 0.631167 0.064078
1182 1 0.941798 0.563557 0.125725
1213 1 0.873013 0.627866 0.12696
1054 1 0.933735 0.562761 0.0014036
1055 1 0.933794 0.505454 0.0664078
1085 1 0.873929 0.627524 0.0010258
1064 1 0.129224 0.690357 0.0618398
1091 1 0.0655018 0.750572 0.0628119
1096 1 0.125337 0.810481 0.062739
1186 1 0.0595975 0.691703 0.123602
1218 1 0.0559435 0.817533 0.125738
1221 1 0.124959 0.752247 0.123959
1060 1 -3.94771e-05 0.686494 0.0623125
1092 1 0.00108154 0.812208 0.0580028
1058 1 0.0628571 0.688603 0.00140066
1097 1 0.247964 0.750981 0.00600164
1098 1 0.31007 0.810184 0.00307796
1068 1 0.247952 0.689515 0.061017
1095 1 0.187353 0.75259 0.0655683
1099 1 0.313012 0.749283 0.063075
1100 1 0.251269 0.816421 0.0654332
1190 1 0.192056 0.689535 0.123331
1194 1 0.312956 0.686285 0.131924
1222 1 0.186048 0.815631 0.128249
1225 1 0.250307 0.75164 0.127836
1226 1 0.312518 0.813203 0.126837
1066 1 0.314358 0.690455 0.000415056
1062 1 0.189883 0.684724 -0.00281823
1102 1 0.439677 0.816935 0.00263727
1072 1 0.375628 0.689644 0.0611514
1103 1 0.438391 0.749549 0.0628548
1104 1 0.373632 0.813036 0.061904
1198 1 0.435465 0.687026 0.126504
1229 1 0.378021 0.748186 0.128101
1230 1 0.437289 0.812519 0.12035
1076 1 0.500503 0.686659 0.0624052
1070 1 0.442936 0.688284 0.00116778
1105 1 0.497916 0.752739 -0.000506256
1108 1 0.506768 0.812021 0.0610971
1233 1 0.501342 0.751017 0.125937
1080 1 0.624688 0.68605 0.0611519
1107 1 0.566742 0.748954 0.0611639
1112 1 0.62747 0.812182 0.0663346
1202 1 0.56446 0.684105 0.119502
1234 1 0.56254 0.806125 0.126694
1237 1 0.625814 0.748483 0.127144
1106 1 0.564738 0.811416 0.000761974
1109 1 0.625528 0.746822 -0.00231736
1082 1 0.814198 0.687893 0.00301058
1084 1 0.749895 0.685545 0.0614004
1111 1 0.68945 0.746001 0.0627717
1115 1 0.807603 0.751259 0.0675974
1116 1 0.752293 0.808121 0.0638129
1206 1 0.686254 0.682431 0.12316
1210 1 0.812685 0.686511 0.13016
1238 1 0.689208 0.814104 0.126585
1241 1 0.748238 0.75025 0.126773
1242 1 0.809778 0.815841 0.127263
1078 1 0.684144 0.681361 0.00428127
1217 1 1.00197 0.752492 0.128116
1086 1 0.939457 0.690346 0.00188851
1118 1 0.942049 0.80962 -0.000140096
1088 1 0.875134 0.691738 0.0650759
1119 1 0.936018 0.750127 0.0640438
1120 1 0.8739 0.812699 0.0637215
1214 1 0.936286 0.692842 0.126814
1245 1 0.873779 0.752927 0.127296
1246 1 0.941404 0.815006 0.124385
1089 1 1.00115 0.749661 -0.00251104
1123 1 0.059305 0.873237 0.0601086
1128 1 0.126131 0.933094 0.0603558
1250 1 0.0647829 0.933577 0.123092
1253 1 0.125478 0.873651 0.129767
1124 1 -0.00401709 0.938272 0.0576042
1127 1 0.188261 0.873686 0.0606534
1131 1 0.314846 0.876636 0.0598868
1132 1 0.251009 0.937647 0.0629458
1254 1 0.185418 0.940046 0.128276
1257 1 0.249917 0.876497 0.123118
1258 1 0.30858 0.938282 0.12571
15 1 0.445441 1.00833 0.0612111
1133 1 0.37597 0.878734 0.00172588
1135 1 0.439874 0.879639 0.0641252
1136 1 0.379899 0.943188 0.0639321
1261 1 0.374202 0.875361 0.125672
1262 1 0.442933 0.942789 0.124921
1140 1 0.497592 0.941089 0.0561667
141 1 0.380365 0.999328 0.123812
17 1 0.504057 1.00074 -0.00189853
145 1 0.508639 1.00183 0.128587
1134 1 0.435777 0.939544 -0.00196449
1265 1 0.504214 0.875236 0.122415
1139 1 0.56593 0.87614 0.0645039
1144 1 0.630445 0.939018 0.0598597
1266 1 0.560182 0.934436 0.118698
1269 1 0.630346 0.873067 0.125623
1138 1 0.565006 0.933502 -0.00243534
1143 1 0.697786 0.870898 0.0625924
1147 1 0.807101 0.874306 0.0644894
1148 1 0.756712 0.941416 0.0609201
1270 1 0.691498 0.938127 0.119496
1273 1 0.754851 0.880976 0.13389
1274 1 0.81823 0.940307 0.12782
1146 1 0.813616 0.936958 -0.00407766
27 1 0.813041 1.00446 0.0597544
153 1 0.752326 1.00099 0.118224
23 1 0.690301 1.00005 0.0562029
1249 1 0.999709 0.877929 0.120833
1151 1 0.933597 0.878592 0.0602444
1152 1 0.874189 0.940066 0.0580168
1277 1 0.873755 0.873931 0.125835
1278 1 0.934475 0.937542 0.124381
1121 1 0.99787 0.876166 0.00150856
1149 1 0.869841 0.873187 0.00224575
1160 1 0.125905 0.567243 0.187625
1187 1 0.0647066 0.629337 0.184185
1282 1 0.0606262 0.565587 0.249527
1288 1 0.124704 0.565673 0.314185
1315 1 0.0613462 0.62546 0.309915
1317 1 0.123726 0.626535 0.247071
1283 1 0.0604379 0.495325 0.312231
1291 1 0.309848 0.500179 0.310279
1163 1 0.312198 0.497083 0.187892
1159 1 0.187891 0.508379 0.188469
1164 1 0.255854 0.557455 0.188074
1191 1 0.190284 0.626454 0.188768
1195 1 0.314366 0.628335 0.193322
1286 1 0.188175 0.565018 0.252593
1290 1 0.309521 0.559661 0.251382
1292 1 0.249332 0.562649 0.320402
1319 1 0.18686 0.625115 0.314899
1321 1 0.246513 0.626074 0.250691
1323 1 0.311327 0.623857 0.314939
1287 1 0.186074 0.496498 0.310678
1289 1 0.251294 0.497465 0.24695
1293 1 0.377153 0.503999 0.246909
1168 1 0.372065 0.564298 0.186049
1199 1 0.439119 0.624509 0.189874
1294 1 0.44149 0.566023 0.251356
1296 1 0.373979 0.559388 0.311893
1325 1 0.3749 0.62149 0.249973
1327 1 0.438128 0.625004 0.313499
1329 1 0.503972 0.622535 0.255407
1172 1 0.502737 0.559539 0.188909
1297 1 0.501081 0.506689 0.254122
1301 1 0.624871 0.499539 0.249187
1171 1 0.568426 0.496744 0.185825
1299 1 0.562216 0.500245 0.313496
1300 1 0.496036 0.560609 0.315727
1176 1 0.627821 0.562432 0.186101
1203 1 0.563017 0.625645 0.188211
1298 1 0.567446 0.558373 0.247737
1304 1 0.624514 0.56671 0.316463
1331 1 0.562407 0.622194 0.316765
1333 1 0.623792 0.62219 0.255128
1303 1 0.684541 0.49979 0.317701
1305 1 0.746438 0.503193 0.253849
1179 1 0.810924 0.499716 0.188161
1180 1 0.744517 0.563499 0.187493
1207 1 0.688272 0.623032 0.189927
1211 1 0.805997 0.624505 0.189974
1302 1 0.684774 0.561739 0.252217
1306 1 0.812052 0.559511 0.247067
1308 1 0.7457 0.558938 0.312597
1335 1 0.684487 0.622832 0.314245
1337 1 0.748917 0.625316 0.250512
1339 1 0.806282 0.62247 0.314446
1307 1 0.808137 0.500599 0.316123
1281 1 1.00099 0.499462 0.24634
1309 1 0.873863 0.497979 0.248908
1284 1 0.997778 0.56544 0.312184
1313 1 0.997683 0.624427 0.250577
1156 1 0.998212 0.563967 0.188545
1184 1 0.873729 0.560983 0.181791
1215 1 0.934557 0.627727 0.185451
1310 1 0.934328 0.559954 0.244065
1312 1 0.871101 0.561754 0.313905
1341 1 0.87226 0.621944 0.247909
1343 1 0.936124 0.620748 0.312855
1311 1 0.93619 0.502927 0.309139
1192 1 0.126598 0.688567 0.186724
1219 1 0.0632063 0.756748 0.185555
1224 1 0.129944 0.815032 0.191594
1314 1 0.0589072 0.688761 0.248907
1320 1 0.126818 0.686374 0.313971
1346 1 0.0631404 0.817026 0.24858
1347 1 0.059627 0.756551 0.308676
1349 1 0.12449 0.751764 0.249413
1352 1 0.125016 0.812587 0.315502
1345 1 0.000615362 0.751218 0.248849
1348 1 -0.00204713 0.814319 0.309923
1196 1 0.254337 0.687202 0.192939
1223 1 0.186231 0.748372 0.185042
1227 1 0.311517 0.752434 0.185815
1228 1 0.246317 0.81125 0.186989
1318 1 0.188309 0.688177 0.250298
1322 1 0.314671 0.692573 0.248992
1324 1 0.252844 0.689338 0.312839
1350 1 0.190593 0.810663 0.250596
1351 1 0.189504 0.750772 0.310376
1353 1 0.253372 0.750845 0.250675
1354 1 0.315643 0.815192 0.247917
1355 1 0.311693 0.747094 0.314261
1356 1 0.253723 0.813193 0.30862
1200 1 0.378171 0.686094 0.195256
1231 1 0.43893 0.749978 0.185476
1232 1 0.377038 0.81214 0.18498
1326 1 0.441219 0.684449 0.250318
1328 1 0.375061 0.686855 0.313675
1357 1 0.377246 0.753395 0.251747
1358 1 0.442933 0.812974 0.248793
1359 1 0.440393 0.747091 0.311224
1360 1 0.372852 0.813935 0.309199
1236 1 0.501183 0.812546 0.187731
1204 1 0.497568 0.6859 0.189931
1364 1 0.50311 0.812221 0.316006
1361 1 0.500137 0.749593 0.247439
1332 1 0.504371 0.688286 0.311979
1208 1 0.625062 0.685753 0.184869
1235 1 0.559104 0.743378 0.187042
1240 1 0.623868 0.805843 0.187368
1330 1 0.564076 0.684133 0.252071
1336 1 0.626166 0.687151 0.315571
1362 1 0.563968 0.809756 0.245982
1363 1 0.562718 0.75037 0.313947
1365 1 0.629259 0.745035 0.245533
1368 1 0.625727 0.807736 0.319132
1212 1 0.751054 0.687823 0.190303
1239 1 0.688995 0.750663 0.183967
1243 1 0.807544 0.752454 0.186466
1244 1 0.748136 0.814852 0.187261
1334 1 0.68779 0.687191 0.251956
1338 1 0.811528 0.687048 0.252581
1340 1 0.748573 0.683179 0.314442
1366 1 0.687016 0.811666 0.250257
1367 1 0.687592 0.745919 0.313441
1369 1 0.749975 0.752112 0.252701
1370 1 0.817354 0.814309 0.25237
1371 1 0.816218 0.751955 0.315752
1372 1 0.750031 0.810855 0.310863
1220 1 1.00045 0.817746 0.188534
1316 1 0.994067 0.685557 0.312508
1188 1 0.998728 0.688679 0.184598
1216 1 0.871928 0.685573 0.188267
1247 1 0.94163 0.751967 0.191464
1248 1 0.875822 0.812954 0.185595
1342 1 0.937707 0.688198 0.250997
1344 1 0.876425 0.684671 0.309473
1373 1 0.875017 0.748639 0.248245
1374 1 0.937898 0.812751 0.249609
1375 1 0.933632 0.751979 0.312517
1376 1 0.878419 0.815639 0.309653
1251 1 0.0605066 0.876766 0.191518
1256 1 0.122006 0.935158 0.188301
1378 1 0.0576674 0.940738 0.251455
1379 1 0.0604383 0.874861 0.311734
1381 1 0.124861 0.878339 0.25014
1384 1 0.126411 0.939312 0.310952
1380 1 -0.00196485 0.937342 0.313193
1252 1 0.00485169 0.938615 0.186889
1377 1 -0.00288514 0.878784 0.251033
261 1 0.122887 1.00302 0.24888
259 1 0.0626018 0.998181 0.312596
263 1 0.186885 1.00045 0.309345
135 1 0.183286 0.998871 0.188511
1255 1 0.185413 0.879221 0.186771
1259 1 0.314131 0.876172 0.188602
1260 1 0.247366 0.936604 0.189062
1382 1 0.18867 0.939277 0.248591
1383 1 0.187198 0.877619 0.307943
1385 1 0.249522 0.873703 0.246589
1386 1 0.308128 0.938308 0.249732
1387 1 0.314521 0.877452 0.311106
1388 1 0.24637 0.93661 0.31013
267 1 0.309494 0.999136 0.314508
139 1 0.314053 0.998287 0.191001
1263 1 0.440512 0.875806 0.182209
1264 1 0.378286 0.941054 0.183719
1389 1 0.376794 0.879156 0.248155
1390 1 0.436856 0.937223 0.247584
1391 1 0.438178 0.874275 0.306652
1392 1 0.37253 0.939005 0.313471
143 1 0.444559 0.999882 0.189609
275 1 0.568588 0.999649 0.314489
1396 1 0.502533 0.934597 0.311498
1268 1 0.504685 0.939069 0.186189
1393 1 0.502512 0.874788 0.246485
1267 1 0.564335 0.870163 0.181974
1272 1 0.628795 0.938208 0.18046
1394 1 0.566713 0.939806 0.256006
1395 1 0.56593 0.87097 0.312583
1397 1 0.620858 0.872713 0.247273
1400 1 0.630232 0.935719 0.312688
147 1 0.566501 0.997712 0.190934
277 1 0.629086 0.997675 0.24798
283 1 0.815724 0.999313 0.313248
1271 1 0.689425 0.87302 0.189246
1275 1 0.817187 0.87476 0.189167
1276 1 0.749841 0.942291 0.188823
1398 1 0.682595 0.934216 0.245562
1399 1 0.688057 0.872874 0.315012
1401 1 0.750684 0.873186 0.248586
1402 1 0.812249 0.935329 0.252517
1403 1 0.810654 0.873347 0.313424
1404 1 0.747693 0.933655 0.317543
151 1 0.693535 1.00286 0.181472
155 1 0.817308 1.00105 0.187242
159 1 0.936724 0.996831 0.185906
1279 1 0.936656 0.876128 0.1901
1280 1 0.874768 0.941398 0.190539
1405 1 0.876146 0.877968 0.252388
1406 1 0.937415 0.94005 0.248422
1407 1 0.938412 0.877958 0.312077
1408 1 0.875117 0.934986 0.31353
1410 1 0.0641747 0.564733 0.374003
1416 1 0.126506 0.56155 0.435427
1443 1 0.0614363 0.62927 0.4358
1445 1 0.126201 0.62925 0.376645
1573 1 0.125626 0.622583 0.502111
1413 1 0.121558 0.496923 0.370911
1538 1 0.0564975 0.564036 0.505712
1414 1 0.185748 0.56432 0.372709
1418 1 0.315117 0.56571 0.374724
1420 1 0.249583 0.564044 0.435717
1447 1 0.186629 0.625602 0.43509
1449 1 0.247448 0.62901 0.378385
1451 1 0.308507 0.631398 0.439178
1419 1 0.311397 0.498411 0.437599
1417 1 0.251134 0.501798 0.375492
1577 1 0.24666 0.627078 0.49871
1422 1 0.434259 0.557784 0.380499
1424 1 0.373555 0.567412 0.438724
1453 1 0.37501 0.625264 0.373987
1455 1 0.442317 0.623719 0.43861
1421 1 0.374756 0.499314 0.380669
1423 1 0.430332 0.502344 0.442804
1553 1 0.496178 0.498815 0.501788
1549 1 0.36803 0.501668 0.501125
1429 1 0.623617 0.497788 0.378152
1557 1 0.62676 0.495816 0.501108
1425 1 0.499107 0.49616 0.378162
1457 1 0.498278 0.624243 0.376976
1428 1 0.497711 0.559892 0.439178
1427 1 0.56122 0.498751 0.440359
1426 1 0.557338 0.558968 0.377978
1432 1 0.620071 0.563231 0.440543
1459 1 0.556383 0.62326 0.438599
1461 1 0.622671 0.6208 0.379734
1554 1 0.558464 0.562737 0.507838
1431 1 0.680089 0.500732 0.438312
1430 1 0.683521 0.557488 0.378686
1434 1 0.81082 0.563171 0.380443
1436 1 0.743071 0.55734 0.440463
1463 1 0.687979 0.623142 0.439708
1465 1 0.748095 0.6246 0.380643
1467 1 0.81252 0.626725 0.438836
1435 1 0.812829 0.497358 0.438226
1433 1 0.749444 0.499563 0.377736
1437 1 0.875479 0.494135 0.374861
1441 1 0.995792 0.623959 0.376805
1412 1 1.00095 0.565663 0.439692
1438 1 0.934337 0.560083 0.373079
1440 1 0.875077 0.562411 0.438819
1469 1 0.870267 0.622715 0.375535
1471 1 0.9355 0.62373 0.441426
1597 1 0.875643 0.624009 0.501152
1566 1 0.937521 0.565089 0.504738
1537 1 1.00382 0.501609 0.498253
1473 1 -0.00619461 0.754559 0.371352
1476 1 -0.00397614 0.812296 0.434117
1442 1 0.0582536 0.688596 0.370227
1448 1 0.126499 0.688408 0.438319
1474 1 0.059468 0.814814 0.373878
1475 1 0.0619154 0.750083 0.436074
1477 1 0.118945 0.749504 0.372724
1480 1 0.12285 0.805906 0.433623
1446 1 0.189975 0.690416 0.372607
1450 1 0.313086 0.68815 0.374147
1452 1 0.245921 0.688012 0.440097
1478 1 0.190505 0.810368 0.37399
1479 1 0.183071 0.747905 0.436048
1481 1 0.249385 0.749013 0.374098
1482 1 0.311515 0.811656 0.371747
1483 1 0.310436 0.751968 0.432529
1484 1 0.24713 0.808932 0.437977
1609 1 0.244752 0.749662 0.500221
1574 1 0.187661 0.686533 0.497155
1606 1 0.183024 0.81284 0.499789
1454 1 0.442846 0.689612 0.376505
1456 1 0.374127 0.690183 0.432328
1485 1 0.379436 0.751932 0.370296
1486 1 0.4429 0.813333 0.372198
1487 1 0.436971 0.749751 0.435958
1488 1 0.374063 0.811825 0.435307
1489 1 0.499152 0.74889 0.379294
1492 1 0.498022 0.811512 0.439269
1460 1 0.501022 0.686115 0.43967
1458 1 0.565146 0.683508 0.375613
1464 1 0.627653 0.687441 0.437237
1496 1 0.626719 0.814426 0.436862
1490 1 0.561202 0.811122 0.376684
1493 1 0.6253 0.743437 0.377797
1491 1 0.564408 0.7497 0.440348
1617 1 0.501445 0.747417 0.502688
1500 1 0.752568 0.808641 0.442834
1499 1 0.814936 0.752082 0.443952
1498 1 0.815053 0.812495 0.377536
1497 1 0.750784 0.749238 0.38266
1495 1 0.68897 0.745283 0.439656
1494 1 0.687813 0.809673 0.379552
1468 1 0.745574 0.684893 0.441786
1466 1 0.809446 0.686456 0.3764
1462 1 0.68959 0.686201 0.373584
1594 1 0.814758 0.687607 0.5043
1590 1 0.683329 0.686954 0.501654
1625 1 0.748085 0.74865 0.501112
1504 1 0.878845 0.810692 0.439936
1501 1 0.873327 0.74636 0.377976
1503 1 0.936579 0.747807 0.432266
1444 1 1.00188 0.689783 0.436363
1502 1 0.936646 0.815979 0.377402
1472 1 0.874596 0.686514 0.44033
1470 1 0.936123 0.679515 0.379573
1629 1 0.877919 0.748123 0.50281
1506 1 0.0658654 0.934715 0.373587
1507 1 0.0631884 0.871075 0.434261
1509 1 0.124938 0.871393 0.372582
1512 1 0.123673 0.934944 0.432658
387 1 0.0665805 1.00235 0.440953
1637 1 0.125062 0.86902 0.498622
1634 1 0.0710543 0.938214 0.498159
393 1 0.246858 0.999932 0.371625
1641 1 0.248039 0.866275 0.502671
1510 1 0.18544 0.935332 0.376218
1511 1 0.185108 0.869847 0.436907
1513 1 0.250949 0.872207 0.369409
1514 1 0.309403 0.938462 0.372122
1515 1 0.305877 0.872325 0.432681
1516 1 0.246514 0.93263 0.434093
395 1 0.306981 0.998494 0.436855
1638 1 0.185038 0.934054 0.506132
1521 1 0.500812 0.878231 0.377029
1517 1 0.372289 0.870661 0.375988
1520 1 0.37224 0.934428 0.433646
1646 1 0.433728 0.934285 0.493883
1519 1 0.435435 0.87005 0.4384
1518 1 0.437145 0.936786 0.379273
529 1 0.494204 0.996024 0.502725
397 1 0.373407 0.999908 0.376325
403 1 0.566703 1.00095 0.435952
1650 1 0.560731 0.936178 0.502615
1528 1 0.629256 0.939129 0.439872
1525 1 0.623058 0.879896 0.375935
1522 1 0.564032 0.938361 0.376589
1523 1 0.565444 0.879738 0.438724
1524 1 0.503355 0.939234 0.440281
405 1 0.627859 0.997894 0.377269
407 1 0.69177 0.99932 0.439155
1658 1 0.818728 0.938189 0.504595
1532 1 0.75398 0.938707 0.437365
1531 1 0.814018 0.869048 0.440745
1530 1 0.81292 0.938949 0.376968
1529 1 0.750211 0.872893 0.38034
1527 1 0.687987 0.875465 0.441874
1526 1 0.688264 0.936704 0.376051
1657 1 0.755495 0.877298 0.501249
537 1 0.757376 0.996634 0.502728
541 1 0.879431 0.999241 0.503091
415 1 0.942008 1.00045 0.442302
1536 1 0.873869 0.935242 0.441613
1535 1 0.936572 0.87829 0.438
1534 1 0.935015 0.942427 0.376684
1533 1 0.86987 0.875968 0.378148
1508 1 1.00764 0.943997 0.435063
1505 1 0.996451 0.878071 0.376661
1544 1 0.124524 0.562483 0.563977
1571 1 0.061179 0.627578 0.564701
1666 1 0.0628287 0.562357 0.618668
1701 1 0.123321 0.628244 0.620727
1697 1 -0.00126051 0.621228 0.62465
1539 1 0.0603 0.498487 0.563234
1669 1 0.124708 0.50205 0.627101
1548 1 0.250145 0.560231 0.563937
1575 1 0.186103 0.627467 0.565866
1579 1 0.30786 0.624927 0.558432
1670 1 0.185618 0.561742 0.627872
1674 1 0.309941 0.556634 0.625236
1705 1 0.250907 0.625249 0.625263
1543 1 0.188541 0.502474 0.560757
1673 1 0.245299 0.500464 0.625115
1547 1 0.312589 0.500949 0.566292
1542 1 0.188142 0.560299 0.501645
1546 1 0.313437 0.563242 0.497568
1552 1 0.370439 0.568978 0.561841
1583 1 0.439577 0.629533 0.562795
1678 1 0.434002 0.561802 0.62997
1709 1 0.368959 0.622704 0.626259
1713 1 0.496261 0.626932 0.626803
1551 1 0.429499 0.500095 0.563179
1556 1 0.497539 0.561924 0.564685
1677 1 0.375098 0.497659 0.627025
1585 1 0.499113 0.627315 0.50674
1550 1 0.438186 0.564367 0.504
1581 1 0.376087 0.626915 0.497156
1681 1 0.495753 0.497684 0.624654
1560 1 0.623391 0.561194 0.566953
1587 1 0.563096 0.623325 0.568458
1682 1 0.557756 0.558006 0.625088
1717 1 0.623364 0.619615 0.629666
1685 1 0.623371 0.499576 0.622319
1589 1 0.621425 0.624511 0.499964
1555 1 0.560026 0.494259 0.566081
1564 1 0.747934 0.567503 0.565124
1591 1 0.683009 0.624397 0.561896
1595 1 0.811467 0.621841 0.562609
1686 1 0.688238 0.563034 0.627005
1690 1 0.811533 0.559776 0.626564
1721 1 0.750466 0.624413 0.62435
1562 1 0.812185 0.558205 0.501216
1593 1 0.748323 0.623224 0.499774
1558 1 0.685322 0.56166 0.502068
1540 1 0.995048 0.561459 0.563828
1568 1 0.876424 0.564236 0.56552
1599 1 0.938355 0.629376 0.563188
1694 1 0.937124 0.565113 0.625899
1725 1 0.872388 0.621682 0.625688
1569 1 1.00297 0.629973 0.500908
1567 1 0.933617 0.497906 0.562592
1693 1 0.871256 0.497752 0.621549
1601 1 -0.00356291 0.74771 0.50102
1576 1 0.122425 0.689942 0.556973
1603 1 0.0585795 0.75099 0.564244
1608 1 0.120893 0.80867 0.565161
1698 1 0.0626639 0.68745 0.624637
1730 1 0.0620889 0.807268 0.624265
1733 1 0.126401 0.742838 0.625785
1604 1 -0.00226387 0.809232 0.563954
1570 1 0.0609069 0.68785 0.499525
1605 1 0.121402 0.750044 0.497842
1602 1 0.0617084 0.809366 0.498299
1580 1 0.250777 0.687858 0.558887
1607 1 0.184729 0.744942 0.559591
1611 1 0.31039 0.751551 0.556327
1612 1 0.249021 0.810989 0.571827
1702 1 0.192778 0.688004 0.624233
1706 1 0.31142 0.688182 0.62396
1734 1 0.184861 0.804933 0.624427
1737 1 0.251164 0.745681 0.626486
1738 1 0.313658 0.810213 0.623457
1578 1 0.314455 0.689034 0.498474
1610 1 0.313161 0.809162 0.493641
1584 1 0.373915 0.687666 0.561796
1615 1 0.437661 0.751099 0.563161
1616 1 0.370991 0.810182 0.556291
1710 1 0.436748 0.69032 0.629642
1741 1 0.37741 0.753095 0.625054
1742 1 0.44092 0.813449 0.623692
1588 1 0.504321 0.68866 0.567942
1613 1 0.376169 0.747625 0.494242
1582 1 0.4408 0.685837 0.502014
1614 1 0.437149 0.808857 0.498522
1618 1 0.55972 0.813108 0.501594
1745 1 0.505082 0.749006 0.625066
1620 1 0.499568 0.81343 0.563472
1592 1 0.623918 0.685093 0.560993
1619 1 0.566366 0.750492 0.562069
1624 1 0.626268 0.813782 0.561074
1714 1 0.564096 0.688449 0.626178
1746 1 0.56673 0.819043 0.631331
1749 1 0.627355 0.756135 0.627834
1621 1 0.625736 0.752152 0.501327
1586 1 0.563272 0.689305 0.501814
1596 1 0.746181 0.688308 0.563771
1623 1 0.687335 0.74965 0.565558
1627 1 0.820694 0.747036 0.564761
1628 1 0.747949 0.815142 0.558635
1718 1 0.686686 0.688203 0.626122
1722 1 0.815501 0.68698 0.62679
1750 1 0.689738 0.817169 0.626261
1753 1 0.748563 0.755003 0.623837
1754 1 0.814962 0.813207 0.625546
1626 1 0.815762 0.810508 0.503358
1622 1 0.686153 0.812586 0.499846
1729 1 0.993941 0.747909 0.625967
1572 1 0.99881 0.69219 0.564959
1600 1 0.874342 0.685073 0.565398
1631 1 0.935637 0.749851 0.565181
1632 1 0.879639 0.81288 0.563616
1726 1 0.934986 0.685485 0.625387
1757 1 0.875678 0.7518 0.625184
1758 1 0.937928 0.814838 0.625598
1598 1 0.942708 0.688172 0.499893
1630 1 0.943158 0.814651 0.496709
1635 1 0.0608511 0.87054 0.563456
1640 1 0.122917 0.9307 0.563731
1762 1 0.0668024 0.937279 0.626538
1765 1 0.127304 0.8698 0.626523
1761 1 0.00258007 0.874968 0.627346
645 1 0.128821 0.996504 0.620939
523 1 0.318159 0.996145 0.561036
1639 1 0.184821 0.872388 0.565154
1643 1 0.311556 0.872408 0.561031
1644 1 0.250646 0.932896 0.562544
1766 1 0.186803 0.936363 0.620539
1769 1 0.249092 0.875964 0.62645
1770 1 0.314742 0.932485 0.62147
1642 1 0.308733 0.933528 0.496366
1647 1 0.42923 0.872211 0.559265
1648 1 0.371787 0.934805 0.556144
1773 1 0.373232 0.871779 0.621985
1774 1 0.437518 0.936236 0.622543
1652 1 0.500554 0.932442 0.557107
527 1 0.430387 0.99786 0.56099
525 1 0.370184 0.996667 0.493275
653 1 0.374384 0.999065 0.625035
1645 1 0.369937 0.872841 0.498953
1777 1 0.500098 0.878116 0.624883
1651 1 0.566455 0.871093 0.562198
1656 1 0.624308 0.938834 0.559477
1778 1 0.560616 0.936604 0.626737
1781 1 0.626061 0.877739 0.624296
1649 1 0.497956 0.873245 0.498338
1653 1 0.627436 0.872425 0.499799
657 1 0.497205 0.99412 0.623217
531 1 0.561967 1.00269 0.559711
661 1 0.620509 0.998069 0.625347
1655 1 0.688077 0.871986 0.562727
1659 1 0.8183 0.874652 0.563065
1660 1 0.753383 0.934856 0.562743
1782 1 0.69493 0.933909 0.625152
1785 1 0.755081 0.8743 0.626065
1786 1 0.813853 0.936788 0.626904
535 1 0.687891 0.996237 0.564877
1654 1 0.695161 0.939746 0.501213
1636 1 1.00171 0.933059 0.558829
1663 1 0.938523 0.874144 0.557832
1664 1 0.879535 0.934149 0.563173
1789 1 0.872768 0.875763 0.627863
1790 1 0.934702 0.93445 0.621862
543 1 0.944419 0.998439 0.561337
1662 1 0.942259 0.936698 0.498191
1661 1 0.875969 0.870498 0.500245
1633 1 1.00491 0.875904 0.495794
1672 1 0.125414 0.566719 0.685108
1699 1 0.058264 0.626924 0.687101
1794 1 0.0620067 0.565565 0.748834
1800 1 0.120161 0.56166 0.815018
1827 1 0.0601022 0.62877 0.813692
1829 1 0.126383 0.624762 0.751001
1668 1 -6.4041e-05 0.56153 0.68834
1799 1 0.187228 0.504835 0.81527
1803 1 0.308631 0.500148 0.817843
1676 1 0.252247 0.560225 0.687096
1703 1 0.190265 0.623157 0.688448
1707 1 0.309737 0.627313 0.689596
1798 1 0.188231 0.568689 0.747006
1802 1 0.313452 0.564547 0.753563
1804 1 0.251511 0.563337 0.813029
1831 1 0.190054 0.622102 0.810302
1833 1 0.253438 0.623627 0.753112
1835 1 0.312897 0.628575 0.815208
1801 1 0.248107 0.507609 0.755241
1671 1 0.186436 0.502394 0.690109
1675 1 0.311033 0.501169 0.692619
1679 1 0.434072 0.497629 0.691472
1680 1 0.37279 0.56046 0.690631
1711 1 0.431907 0.624015 0.691396
1806 1 0.4377 0.567741 0.753441
1808 1 0.366095 0.558932 0.818474
1837 1 0.375444 0.624484 0.754422
1839 1 0.434977 0.617924 0.816247
1812 1 0.502242 0.56128 0.812217
1805 1 0.370907 0.498507 0.752145
1683 1 0.552946 0.495324 0.685343
1811 1 0.560984 0.497936 0.818557
1684 1 0.493136 0.562237 0.688917
1841 1 0.498636 0.627075 0.751991
1688 1 0.624892 0.553491 0.687961
1715 1 0.563403 0.616582 0.691858
1810 1 0.565375 0.559437 0.753138
1816 1 0.622014 0.559465 0.815327
1843 1 0.568705 0.621539 0.807283
1845 1 0.627494 0.623202 0.748694
1815 1 0.686574 0.501393 0.816232
1819 1 0.813707 0.497814 0.811836
1692 1 0.7484 0.559034 0.689767
1719 1 0.685871 0.619526 0.686733
1723 1 0.808862 0.626249 0.684081
1814 1 0.68564 0.55721 0.751825
1818 1 0.815504 0.559676 0.748844
1820 1 0.752914 0.556075 0.812548
1847 1 0.690361 0.623874 0.814478
1849 1 0.747536 0.620757 0.752315
1851 1 0.812772 0.62585 0.811118
1691 1 0.815647 0.496841 0.687458
1695 1 0.932111 0.500655 0.685057
1796 1 0.99957 0.564847 0.81245
1825 1 0.998923 0.621497 0.749584
1696 1 0.87093 0.562358 0.686429
1727 1 0.936263 0.626924 0.685916
1822 1 0.937033 0.563437 0.749719
1824 1 0.873225 0.559657 0.813541
1853 1 0.868475 0.627767 0.747954
1855 1 0.935425 0.622717 0.811837
1704 1 0.126985 0.686932 0.688515
1731 1 0.0612601 0.751891 0.688371
1736 1 0.124857 0.806953 0.686108
1826 1 0.0581737 0.687537 0.749027
1832 1 0.122228 0.685856 0.811899
1858 1 0.0681777 0.811371 0.753421
1859 1 0.0628431 0.744528 0.808874
1861 1 0.126981 0.747103 0.750464
1864 1 0.123551 0.810968 0.813681
1732 1 0.00306482 0.810867 0.687605
1700 1 0.00226074 0.688038 0.683685
1708 1 0.249368 0.684915 0.693167
1735 1 0.187192 0.749003 0.686671
1739 1 0.318882 0.749655 0.689205
1740 1 0.249996 0.813697 0.684686
1830 1 0.190743 0.688676 0.751379
1834 1 0.314531 0.687523 0.751147
1836 1 0.250734 0.685108 0.808672
1862 1 0.182254 0.812759 0.74813
1863 1 0.191491 0.756427 0.811511
1865 1 0.25455 0.753018 0.748547
1866 1 0.314911 0.815223 0.75074
1867 1 0.317251 0.747834 0.810458
1868 1 0.253224 0.808476 0.814695
1712 1 0.375967 0.685622 0.691277
1743 1 0.435934 0.74471 0.689508
1744 1 0.375549 0.813046 0.686018
1838 1 0.440758 0.685232 0.753718
1840 1 0.375379 0.688961 0.81212
1869 1 0.379351 0.752855 0.74987
1870 1 0.438396 0.816416 0.75215
1871 1 0.440228 0.74846 0.813319
1872 1 0.378485 0.814975 0.813003
1873 1 0.502173 0.749684 0.748753
1748 1 0.500887 0.808138 0.687989
1844 1 0.501698 0.685657 0.809481
1716 1 0.503041 0.689659 0.688266
1876 1 0.502642 0.810739 0.812283
1720 1 0.629583 0.68769 0.685444
1747 1 0.562716 0.750157 0.686561
1752 1 0.623987 0.812686 0.693199
1842 1 0.567593 0.686831 0.748175
1848 1 0.626427 0.688187 0.815236
1874 1 0.558643 0.812821 0.746579
1875 1 0.563598 0.746422 0.813216
1877 1 0.62953 0.747174 0.749161
1880 1 0.631375 0.811156 0.814105
1724 1 0.751223 0.69078 0.682666
1751 1 0.692428 0.752172 0.688673
1755 1 0.816022 0.752012 0.687181
1756 1 0.757411 0.819201 0.690825
1846 1 0.692172 0.682701 0.748385
1850 1 0.81001 0.692579 0.749866
1852 1 0.75193 0.688318 0.810995
1878 1 0.69231 0.813464 0.749974
1879 1 0.695278 0.74929 0.813492
1881 1 0.756361 0.755238 0.750374
1882 1 0.813911 0.817161 0.75449
1883 1 0.812188 0.749252 0.817013
1884 1 0.750002 0.812684 0.817078
1857 1 0.998535 0.753842 0.750882
1828 1 0.997246 0.688514 0.811542
1860 1 1.00433 0.813399 0.812402
1728 1 0.871673 0.687184 0.687757
1759 1 0.938954 0.752037 0.687483
1760 1 0.878676 0.812033 0.685198
1854 1 0.936642 0.68843 0.751685
1856 1 0.869728 0.683523 0.811881
1885 1 0.872565 0.752907 0.75299
1886 1 0.93217 0.812987 0.750273
1887 1 0.936734 0.751879 0.8128
1888 1 0.873782 0.817911 0.815157
1763 1 0.0634754 0.873096 0.689934
1768 1 0.126128 0.935142 0.686593
1890 1 0.0615661 0.938733 0.752748
1891 1 0.0628705 0.878071 0.813484
1893 1 0.12571 0.879928 0.751897
1896 1 0.122647 0.940369 0.814012
771 1 0.0640242 1.00329 0.811814
1764 1 0.00153237 0.936104 0.689031
777 1 0.250803 0.997945 0.745164
779 1 0.310196 0.995742 0.813584
647 1 0.191881 0.998584 0.685313
1767 1 0.186535 0.874486 0.687496
1771 1 0.308086 0.874928 0.683563
1772 1 0.249846 0.938299 0.680024
1894 1 0.19241 0.93513 0.749332
1895 1 0.190246 0.871935 0.810216
1897 1 0.252733 0.87421 0.747372
1898 1 0.314804 0.938392 0.744833
1899 1 0.316135 0.873148 0.809754
1900 1 0.252934 0.93145 0.812404
651 1 0.315925 0.996234 0.683195
1775 1 0.440503 0.871218 0.685869
1776 1 0.375446 0.933261 0.684714
1901 1 0.372169 0.875698 0.744168
1902 1 0.435512 0.936232 0.749713
1903 1 0.434594 0.87598 0.807364
1904 1 0.376244 0.936804 0.810766
1908 1 0.496622 0.937632 0.811638
1905 1 0.497599 0.878435 0.750473
655 1 0.433352 0.998217 0.687624
1780 1 0.493519 0.939251 0.685935
1779 1 0.560755 0.875175 0.694238
1784 1 0.619927 0.940583 0.68886
1906 1 0.565673 0.93516 0.752223
1907 1 0.562079 0.873938 0.813633
1909 1 0.628127 0.876462 0.758769
1912 1 0.624702 0.939143 0.816439
1783 1 0.684087 0.875871 0.691242
1787 1 0.819145 0.874472 0.691701
1788 1 0.751237 0.933498 0.68897
1910 1 0.686775 0.940168 0.749141
1911 1 0.690164 0.873899 0.814666
1913 1 0.74643 0.87973 0.7526
1914 1 0.808065 0.940499 0.756084
1915 1 0.814389 0.878354 0.815609
1916 1 0.749132 0.940824 0.81531
667 1 0.811889 0.995273 0.687182
791 1 0.684502 0.999535 0.809857
769 1 1.00059 0.999189 0.745818
1889 1 0.999786 0.87512 0.749377
1892 1 1.00441 0.943218 0.817862
1791 1 0.936646 0.880576 0.688428
1792 1 0.873141 0.940118 0.687684
1917 1 0.878104 0.875306 0.749845
1918 1 0.939727 0.937536 0.74846
1919 1 0.939735 0.877385 0.812917
1920 1 0.8766 0.936852 0.810707
1923 1 0.0633495 0.503677 0.939915
1922 1 0.0599287 0.564179 0.876072
1928 1 0.126741 0.567812 0.940234
1955 1 0.0633589 0.625109 0.937556
1957 1 0.123307 0.627205 0.876878
1927 1 0.188535 0.505484 0.939025
1926 1 0.182095 0.564471 0.87371
1930 1 0.309125 0.56501 0.879403
1932 1 0.247363 0.571303 0.93719
1959 1 0.185511 0.629417 0.933679
1961 1 0.251096 0.625221 0.871914
1963 1 0.309811 0.628545 0.938358
1030 1 0.18793 0.565792 1.00453
1933 1 0.377833 0.494261 0.870393
1935 1 0.435474 0.498305 0.939205
1934 1 0.43605 0.55903 0.875368
1936 1 0.372537 0.55664 0.935198
1965 1 0.369475 0.628633 0.875689
1967 1 0.433724 0.623423 0.934047
1969 1 0.500737 0.616551 0.87399
1042 1 0.566961 0.558028 1.00301
1940 1 0.495626 0.560628 0.93763
1941 1 0.625178 0.499572 0.874604
1938 1 0.561189 0.558706 0.87404
1944 1 0.624176 0.560582 0.937922
1971 1 0.561259 0.622749 0.937224
1973 1 0.626821 0.624939 0.872585
1942 1 0.685624 0.561079 0.87987
1946 1 0.807129 0.559679 0.876253
1948 1 0.751058 0.556264 0.940649
1975 1 0.686998 0.623296 0.938292
1977 1 0.747638 0.621219 0.878902
1979 1 0.806956 0.622285 0.941278
1945 1 0.743002 0.495271 0.879332
1081 1 0.749966 0.625658 1.0019
1046 1 0.689562 0.559343 1.00098
1050 1 0.812801 0.561627 1.00152
1924 1 0.996677 0.559541 0.937998
1953 1 0.997667 0.625732 0.875893
1950 1 0.934436 0.559234 0.875256
1952 1 0.872677 0.562896 0.939738
1981 1 0.869262 0.621569 0.873755
1983 1 0.937036 0.623244 0.936298
1057 1 0.996425 0.62181 0.999928
1921 1 0.998241 0.49883 0.877393
1954 1 0.0586619 0.68932 0.870948
1960 1 0.117864 0.689362 0.939007
1986 1 0.0701323 0.809099 0.87406
1987 1 0.0603019 0.751761 0.935018
1989 1 0.126684 0.749417 0.871437
1992 1 0.123376 0.810947 0.938745
1985 1 0.000939397 0.750742 0.868884
1093 1 0.123846 0.751985 1.00175
1090 1 0.0607636 0.809347 0.996121
1958 1 0.187403 0.688077 0.870398
1962 1 0.313486 0.690779 0.880687
1964 1 0.247218 0.691444 0.939216
1990 1 0.188541 0.811855 0.874341
1991 1 0.183079 0.747802 0.934126
1993 1 0.24836 0.750909 0.874693
1994 1 0.316187 0.811389 0.87234
1995 1 0.309687 0.752355 0.938501
1996 1 0.249786 0.812341 0.93434
1094 1 0.187458 0.813501 0.996986
2000 1 0.373959 0.816132 0.938666
1999 1 0.436403 0.751745 0.932444
1998 1 0.44106 0.810829 0.872891
1997 1 0.376942 0.752898 0.875413
1968 1 0.376256 0.69047 0.937309
1966 1 0.435289 0.685124 0.872899
1101 1 0.376437 0.750975 1.00131
1074 1 0.56183 0.688031 0.996198
1972 1 0.500245 0.685126 0.932948
2004 1 0.498266 0.812878 0.931437
2001 1 0.500315 0.744759 0.87407
1970 1 0.56378 0.686728 0.877633
1976 1 0.625364 0.679056 0.940061
2002 1 0.56569 0.812964 0.870812
2003 1 0.561851 0.752273 0.934877
2005 1 0.626566 0.747206 0.877461
2008 1 0.626765 0.812545 0.939579
1114 1 0.808938 0.811937 1.00153
1974 1 0.691752 0.682837 0.876483
1978 1 0.811713 0.684149 0.87544
1980 1 0.749371 0.686744 0.943569
2006 1 0.692306 0.81568 0.878345
2007 1 0.69138 0.749799 0.935059
2009 1 0.751831 0.749987 0.876396
2010 1 0.808056 0.810632 0.877262
2011 1 0.810014 0.748098 0.935376
2012 1 0.752138 0.810662 0.937152
1113 1 0.752808 0.747767 1.00135
1110 1 0.686928 0.808005 0.999537
1956 1 1.00609 0.682918 0.939527
1988 1 0.998402 0.814881 0.936054
1982 1 0.9345 0.687492 0.874456
1984 1 0.869938 0.687063 0.933339
2013 1 0.873195 0.75101 0.871049
2014 1 0.938203 0.809375 0.877483
2015 1 0.939885 0.747096 0.935924
2016 1 0.877963 0.813328 0.940464
1117 1 0.879088 0.751847 0.996214
2020 1 0.00215452 0.936914 0.93769
5 1 0.125122 1.00205 0.997712
899 1 0.0635391 1.00373 0.938987
901 1 0.131437 0.999504 0.876298
2019 1 0.0647554 0.870937 0.934593
2021 1 0.133851 0.872557 0.874392
2024 1 0.129256 0.93245 0.937777
2018 1 0.0700927 0.941713 0.877855
1122 1 0.0648709 0.939073 0.995932
1125 1 0.122508 0.871721 0.999142
905 1 0.250533 1.00178 0.873774
1130 1 0.318087 0.935974 0.997245
2026 1 0.311749 0.936023 0.878714
2022 1 0.191495 0.937707 0.87791
903 1 0.190814 1.00522 0.937744
2023 1 0.190493 0.870996 0.935393
2028 1 0.254407 0.939059 0.935402
2025 1 0.25252 0.872252 0.87449
2027 1 0.31068 0.872693 0.93788
907 1 0.309791 1.0015 0.934257
9 1 0.250664 1.00031 1.00372
1126 1 0.189113 0.938022 0.998448
1129 1 0.254514 0.874812 0.995883
913 1 0.498061 0.995527 0.879054
2036 1 0.50179 0.936192 0.938402
2032 1 0.380404 0.933519 0.93302
2030 1 0.435701 0.930867 0.873449
2029 1 0.373059 0.875028 0.874388
2031 1 0.440424 0.87259 0.933144
1137 1 0.500973 0.876227 0.994641
2034 1 0.563828 0.936567 0.872947
2035 1 0.565414 0.871238 0.934781
2037 1 0.630469 0.874591 0.875654
2040 1 0.627285 0.937501 0.936952
1141 1 0.629388 0.874193 1.00075
2033 1 0.50219 0.873881 0.875545
923 1 0.809137 1.00209 0.93219
919 1 0.689749 0.999196 0.937977
2044 1 0.754489 0.935105 0.935801
2043 1 0.813442 0.874166 0.936028
2042 1 0.816462 0.936885 0.869612
2041 1 0.751283 0.876397 0.874396
2039 1 0.685696 0.878331 0.93855
2038 1 0.690963 0.938651 0.876527
1142 1 0.688149 0.935746 1.00096
1145 1 0.751152 0.875457 0.997257
2048 1 0.87782 0.942339 0.932018
2047 1 0.940031 0.87244 0.936311
2046 1 0.938897 0.939705 0.873698
2045 1 0.877396 0.87731 0.878955
2017 1 1.0061 0.877027 0.87358
897 1 0.999829 1.00481 0.874971
29 1 0.873759 1.00254 0.996457
1150 1 0.93797 0.936958 0.992786
| [
"[email protected]"
] | |
14ffbcfc6415f4bf0327420712bd59e4b3c160cd | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-1541.py | 2810ab83f51df7539b7286b2cc88ea87391ad9a7 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,348 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [$ID], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"[email protected]"
] | |
99d407e913ff467ad42df621f0b9be1d4a91cb8f | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_2/models/object_store_user.py | 511b6638e591a3c91097395f049ed12ee8273e6b | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,119 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.2, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_2 import models
class ObjectStoreUser(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'id': 'str',
'access_keys': 'list[FixedReference]',
'account': 'FixedReference',
'created': 'int'
}
attribute_map = {
'name': 'name',
'id': 'id',
'access_keys': 'access_keys',
'account': 'account',
'created': 'created'
}
required_args = {
}
def __init__(
self,
name=None, # type: str
id=None, # type: str
access_keys=None, # type: List[models.FixedReference]
account=None, # type: models.FixedReference
created=None, # type: int
):
"""
Keyword args:
name (str): Name of the object (e.g., a file system or snapshot).
id (str): A non-modifiable, globally unique ID chosen by the system.
access_keys (list[FixedReference]): References of the user's access keys.
account (FixedReference): Reference of the associated account.
created (int): Creation timestamp of the object.
"""
if name is not None:
self.name = name
if id is not None:
self.id = id
if access_keys is not None:
self.access_keys = access_keys
if account is not None:
self.account = account
if created is not None:
self.created = created
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ObjectStoreUser`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ObjectStoreUser, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ObjectStoreUser):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
d22c1861f13411c3d3e664e9a8bfcc28701f4a32 | add7f191d38538f0ecca582264ee733d8f5d0e99 | /tests/strategies/test_strategy.py | 74b0acb55b9c834b446c7e4c5445e7dcddad0538 | [] | no_license | netfily/simian-wallet | 2feab3080adf3cf59882aba9931c70bcb38c2c38 | f4b2eb1688411cddd0b6bd703a43f7c7123cc3bf | refs/heads/master | 2023-02-11T03:19:07.382213 | 2021-01-12T20:26:29 | 2021-01-12T20:26:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | def test_strategy_from_owner(wallet, do_nothing, empty_strategy, owner):
data = do_nothing.nothing.encode_input(0)
tx = wallet.execute(do_nothing, empty_strategy, data, {'from': owner})
strategy_value = int(repr(tx.return_value[1]), 16)
assert strategy_value == 666
def test_user_cant_use_strategy(wallet, do_nothing, empty_strategy, owner, user):
sig = do_nothing.signatures['nothing']
wallet.permit(user, do_nothing, sig, {'from': owner})
data = do_nothing.nothing.encode_input(0)
tx = wallet.execute(do_nothing, empty_strategy, data, {'from': user})
strategy_value = tx.return_value[1]
assert strategy_value == "0x"
def test_permit_user_strategy(wallet, do_nothing, empty_strategy, owner, user, strategy_sig):
sig = do_nothing.signatures['nothing']
wallet.permit(user, do_nothing, sig, {'from': owner})
wallet.permit(user, empty_strategy, strategy_sig, {'from': owner})
data = do_nothing.nothing.encode_input(0)
tx = wallet.execute(do_nothing, empty_strategy, data, {'from': user})
strategy_value = int(repr(tx.return_value[1]), 16)
assert strategy_value == 666
def test_permit_user_all_strategies(
wallet, empty_strategy, empty_strategy2, owner, user, all_addr, strategy_sig
):
assert wallet.canCall(user, empty_strategy, strategy_sig) is False
assert wallet.canCall(user, empty_strategy2, strategy_sig) is False
wallet.permit(user, all_addr, strategy_sig, {'from': owner})
assert wallet.canCall(user, empty_strategy, strategy_sig) is True
assert wallet.canCall(user, empty_strategy2, strategy_sig) is True
| [
"[email protected]"
] | |
fe0bf6676519e72dbc2a8a07ce3b89292806afe0 | 2352bc07e12b0256913559cf3485a360569ccd5e | /practice/python-tutorial-master/26cv/data_enhancement/resize_demo.py | e9fc3f21cbde46712f831ed1aa864e567fd482db | [
"Apache-2.0"
] | permissive | Dis-count/Python_practice | 166ae563be7f6d99a12bdc0e221c550ef37bd4fd | fa0cae54e853157a1d2d78bf90408c68ce617c1a | refs/heads/master | 2022-12-12T03:38:24.091529 | 2021-12-22T09:51:59 | 2021-12-22T09:51:59 | 224,171,833 | 2 | 1 | null | 2022-12-08T05:29:38 | 2019-11-26T11:07:00 | Jupyter Notebook | UTF-8 | Python | false | false | 955 | py | # -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description:
"""
import cv2
import os
from PIL import Image
def display_cv(image_path):
img = cv2.imread(image_path)
height, width = img.shape[:2]
print(height, width)
    # shrink the image
size = (200, 200)
print(size)
shrink = cv2.resize(img, size, interpolation=cv2.INTER_AREA)
    # enlarge the image
fx = 1.6
fy = 1.2
enlarge = cv2.resize(img, (0, 0), fx=fx, fy=fy, interpolation=cv2.INTER_CUBIC)
    # display the results
cv2.imshow("src", img)
cv2.imshow("shrink", shrink)
cv2.imshow("enlarge", enlarge)
cv2.waitKey(0)
def display_pil(image_path):
img = Image.open(image_path)
    # shrink the image
size = (200, 200)
print(size)
    new_img = img.resize(size, Image.BILINEAR)
new_img.show()
new_img.save('data/resize_a.png')
if __name__ == '__main__':
# display_cv('flower.png')
display_pil('data/flower.png')
| [
"[email protected]"
] | |
6d375108ec41d11c6aa91b2ff0650d73350efe98 | 633944f913050debf0764c2a29cf3e88f912670e | /v8/depot_tools/bootstrap-3.8.0b1.chromium.1_bin/python3/lib/python3.8/distutils/command/build_ext.py | 4e19404fea4b5556898da5500e5fc862a1e3693f | [
"BSD-3-Clause",
"bzip2-1.0.6",
"SunPro",
"Apache-2.0"
] | permissive | bopopescu/V8-lgtm | 0474c2ff39baf754f556ef57619ceae93e7320fd | da307e2f7abfca5fa0e860a809de6cd07fd1b72b | refs/heads/master | 2022-02-16T19:10:54.008520 | 2019-09-25T07:51:13 | 2019-09-25T07:51:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | ../../../../../.cipd/pkgs/2/_current/lib/python3.8/distutils/command/build_ext.py | [
"[email protected]"
] | |
982882bc61f19ad91edbfded17ce4f4cf73ba97e | d2e029233e08ea2b7f806728fb6fdb4313992d1d | /Object Orianted Programming In Python/@property.py | f4c9aceb926378d9cdc9dc96b285b0f1c7d11e2f | [] | no_license | pvr30/Python-Tutorial | f0ccc6c6af2346afc656e5f1f98bae69a58bda6d | 3c4b968d0e0efbf454fbf9a9f98cd630a288b2d9 | refs/heads/master | 2023-06-02T10:08:50.891627 | 2021-06-21T16:36:11 | 2021-06-21T16:36:11 | 378,997,069 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # @property
class Student:
def __init__(self, name, school):
self.name = name
self.school = school
self.marks = []
@property
def average(self):
return sum(self.marks) / len(self.marks)
vishal = Student("Vishal Parmar","Axar")
print(vishal.name)
vishal.marks.append(90)
vishal.marks.append(80)
vishal.marks.append(55)
# Using @property we can expose a method as a read-only attribute (a computed value).
# Instead of vishal.average() we can write vishal.average.
print(vishal.average)
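
# --- A minimal follow-up sketch (illustrative addition, not from the original lesson) ---
# @property pairs with @<name>.setter when you also want validated assignment.
class Temperature:
    def __init__(self, celsius):
        self._celsius = celsius

    @property
    def celsius(self):
        # Read like a value, no brackets: t.celsius
        return self._celsius

    @celsius.setter
    def celsius(self, value):
        # Assign like a value: t.celsius = 25 (validation runs automatically)
        if value < -273.15:
            raise ValueError("Temperature below absolute zero is not possible")
        self._celsius = value

t = Temperature(20)
t.celsius = 25
print(t.celsius)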
"""
You can do that with any method that doesn’t take any arguments.
But remember, this method only returns a value calculated from the object’s properties.
If you have a method that performs actions
(e.g. saves to a database or interacts with other systems),
it is usually better to keep the brackets and call it as a regular method.
Normally:
* Brackets: this method does things, performs actions.
* No brackets: this is a value
(or a value calculated from existing values, in the case of `@property`).
""" | [
"[email protected]"
] | |
0a9fa79b93775e749b18b7dc2076725cce8b1a3a | addf291a1a4bad5d823e62422cdda4b73faa7a14 | /src/util/convenient_funcs.py | 4bbf2b61f62e541d7a99c01d77f3799da01d0cc8 | [] | no_license | zhengxxn/AutoEssayScoring_NN | a8504f0ff975568746a8435760bea12bef410b09 | faa030fe8fd8cf0612c01d4d7e94e25e76404aeb | refs/heads/master | 2022-12-19T11:50:40.651591 | 2020-09-09T06:41:33 | 2020-09-09T06:41:33 | 294,025,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,153 | py | import pandas as pd
# from sklearn.model_selection import train_test_split
import re
from pathlib import Path
from collections import Counter
import pickle
import numpy as np
def tensor2str(prediction, vocab):
str = []
for i in range(0, prediction.size(0)):
ch = vocab.itos[prediction[i]]
if ch == '<eos>':
break
else:
str.append(ch)
return " ".join(str)
def convert_xml_to_plaintext(src_file, trg_file):
with open(src_file, 'r') as f:
with open(trg_file, 'w') as wf:
newlines = []
lines = f.readlines()
for (i, line) in enumerate(lines):
newline = re.sub('<seg id=\"[0-9]+\"> | </seg>', '', line, 2)
if '<' not in newline:
newlines.append(newline)
wf.writelines(newlines)
def save_to_tsv(file_path_1, file_path_2, tsv_file_path, domain=None):
with open(file_path_1, encoding='utf-8') as f:
src = f.read().split('\n')[:-1]
with open(file_path_2, encoding='utf-8') as f:
trg = f.read().split('\n')[:-1]
if domain is not None:
raw_data = {'src': [line for line in src], 'trg': [line for line in trg], 'domain': [domain for line in src]}
else:
raw_data = {'src': [line for line in src], 'trg': [line for line in trg]}
df = pd.DataFrame(raw_data)
df.to_csv(tsv_file_path, index=False, sep='\t')
def new_save_to_tsv(config, tsv_file_path):
raw_data = {}
for key in config.keys():
file_name = config[key]
with open(file_name, encoding='utf-8') as f:
lines = f.read().split('\n')[:-1]
value = [line for line in lines]
raw_data[key] = value
df = pd.DataFrame(raw_data)
df.to_csv(tsv_file_path, index=False, sep='\t')
def get_path_prefix(path):
return re.sub('/[^/]+$', '', path, 1)
def create_path(path):
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
def de_bpe(str):
return re.sub(r'@@ |@@ ?$', '', str)
def generate_vocab_counter(file):
c = Counter()
with open(file, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
word, freq = line.split(' ')
c[word] = int(freq)
return c
def print_model(model):
print(model)
for name, param in model.named_parameters():
print(name, param.size())
def combine_sentence_to_segment(sents: list, max_segment_len=400):
segments = []
segment = ''
for sent in sents:
if segment == '':
segment = sent
elif len(segment.split(' ')) + len(sent.split(' ')) > max_segment_len:
segments.append(segment)
segment = ''
segment = segment + sent
else:
segment = segment + ' ' + sent
segments.append(segment)
return segments
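# Hypothetical quick check (illustrative addition, not part of the original module):
# combine_sentence_to_segment(['a b', 'c d'], max_segment_len=3)
# -> ['a b', 'c d']   (2 + 2 words would exceed the limit, so the sentences split)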
def get_feature_from_ids(ids, file_name):
    with open('/home/user_data55/zhengx/project/data/auto_score/train.feature', 'rb') as train_f:
        train_features = pickle.load(train_f)
    with open('/home/user_data55/zhengx/project/data/auto_score/dev.feature', 'rb') as dev_f:
        dev_features = pickle.load(dev_f)
    features = []
    for id in ids:
        if id in train_features:
            features.append(train_features[id])
        else:
            features.append(dev_features[id])
    return features
def get_feature_from_test_ids(ids, filename):
    with open('/home/user_data55/zhengx/project/data/auto_score/test.feature', 'rb') as test_f:
        test_features = pickle.load(test_f)
    features = []
    for id in ids:
        features.append(test_features[id])
    return features
def more_uniform(values):
mean = np.average(values)
for i, value in enumerate(values):
gap = value - mean
if 0 < gap < 1:
value += 0.5
        if -1 < gap < 0:
value -= 0.5
values[i] = value
return values | [
"[email protected]"
] | |
16ef4452b53fc204e60807d73a35d1d2f2f6929b | 3d9b939c50c8b68bc7110f35a9a5100c7ca163e3 | /ColGrid.py | 1356826faf275842d8532c7bb90fb8a892eea720 | [
"MIT"
] | permissive | mbuckaway/CrossMgr | bbab53e9e858b72b99a9fb239787f8932e50539d | 4c64e429eb3215fda1b685c5e684c56f5d0c02cf | refs/heads/master | 2020-09-16T00:42:25.100361 | 2020-01-15T02:24:49 | 2020-01-15T02:24:49 | 223,599,605 | 1 | 0 | MIT | 2020-01-08T01:45:22 | 2019-11-23T14:12:15 | Python | UTF-8 | Python | false | false | 8,941 | py | import wx
import six
import wx.grid as Grid
import copy
#---------------------------------------------------------------------------
class ColTable( Grid.GridTableBase ):
"""
A custom wx.Grid Table using user supplied data
"""
def __init__( self ):
"""
data is a list, indexed by col, containing a list of row values
"""
self.attrs = {} # Set of unique cell attributes.
self.rightAlign = False
self.leftAlignCols = set()
self.colRenderer = {}
# The base class must be initialized *first*
Grid.GridTableBase.__init__(self)
# Column-oriented data.
# textColour and backgroundColour are store as a dict indexed by (row, col).
# Colour is a wx.Colour.
self.data = []
self.colnames = []
self.textColour = {}
self.backgroundColour = {}
def __del__( self ):
pass
def SetRightAlign( self, ra = True ):
self.rightAlign = ra
self.attrs = {}
def SetLeftAlignCols( self, col, la = True ):
if la:
self.leftAlignCols.add( col )
else:
try:
self.leftAlignCols.remove( col )
except KeyError:
pass
self.attrs = {}
def SetColRenderer( self, col, renderer ):
self.colRenderer[col] = renderer
self.attrs = {}
def _adjustDimension( self, grid, current, new, isCol ):
if isCol:
delmsg, addmsg = Grid.GRIDTABLE_NOTIFY_COLS_DELETED, Grid.GRIDTABLE_NOTIFY_COLS_APPENDED
else:
delmsg, addmsg = Grid.GRIDTABLE_NOTIFY_ROWS_DELETED, Grid.GRIDTABLE_NOTIFY_ROWS_APPENDED
if new < current:
msg = Grid.GridTableMessage( self, delmsg, new, current-new )
grid.ProcessTableMessage( msg )
elif new > current:
msg = Grid.GridTableMessage( self, addmsg, new-current )
grid.ProcessTableMessage( msg )
def Set( self, grid, data = None, colnames = None, textColour = None, backgroundColour = None ):
if colnames is not None:
self._adjustDimension( grid, len(self.colnames), len(colnames), True )
self.colnames = list(colnames)
if data is not None:
current = max( len(c) for c in self.data ) if self.data else 0
new = max( len(c) for c in data ) if data else 0
self._adjustDimension( grid, current, new, False )
self.data = copy.copy(data)
if textColour is not None:
self.textColour = dict(textColour)
if backgroundColour is not None:
self.backgroundColour = dict(backgroundColour)
self.attrs = {}
def SetColumn( self, grid, iCol, colData ):
self.data[iCol] = copy.copy(colData)
self.UpdateValues( grid )
def SortByColumn( self, iCol, descending = False ):
if not self.data:
return
colLen = len(self.data[0])
if not all(len(colData) == colLen for colData in self.data):
raise ValueError( 'Cannot sort with different column lengths' )
allNumeric = True
for e in self.data[iCol]:
try:
i = float(e)
except:
allNumeric = False
break
if allNumeric:
elementIndex = [(float(e), i) for i, e in enumerate(self.data[iCol])]
else:
elementIndex = [(e, i) for i, e in enumerate(self.data[iCol])]
elementIndex.sort()
for c in range(len(self.data)):
self.data[c] = [self.data[c][i] for e, i in elementIndex]
if descending:
self.data[c].reverse()
def GetData( self ):
return self.data
def GetColNames( self ):
return self.colnames
def isEmpty( self ):
return True if not self.data else False
def GetNumberCols(self):
try:
return len(self.colnames)
except TypeError:
return 0
def GetNumberRows(self):
try:
return max( len(c) for c in self.data )
except (TypeError, ValueError):
return 0
def GetColLabelValue(self, col):
try:
return self.colnames[col]
except (TypeError, IndexError):
return ''
def GetRowLabelValue(self, row):
return six.text_type(row+1)
def IsEmptyCell( self, row, col ):
try:
v = self.data[col][row]
return v is None or v == ''
except (TypeError, IndexError):
return True
def GetRawValue(self, row, col):
return '' if self.IsEmptyCell(row, col) else self.data[col][row]
def GetValue(self, row, col):
return six.text_type(self.GetRawValue(row, col))
def SetValue(self, row, col, value):
		# Nothing to do - everything is read-only.
pass
def DeleteCols( self, pos = 0, numCols = 1, updateLabels = True, grid = None ):
oldCols = len(self.colnames) if self.colnames else 0
if self.data:
del self.data[pos:pos+numCols]
if self.colnames:
del self.colnames[pos:pos+numCols]
posMax = pos + numCols
for a in ['textColour', 'backgroundColour']:
if not getattr(self, a, None):
continue
colD = {}
for (r, c), colour in six.iteritems(getattr(self, a)):
if c < pos:
colD[(r, c)] = colour
elif posMax <= c:
colD[(r, c-numCols)] = colour
setattr( self, a, colD )
newCols = len(self.colnames) if self.colnames else 0
self._adjustDimension( grid, oldCols, newCols, True )
self.attrs = {}
def GetAttr(self, row, col, someExtraParameter ):
hCellAlign = None
if col in self.leftAlignCols:
hCellAlign = wx.ALIGN_LEFT
elif self.rightAlign:
hCellAlign = wx.ALIGN_RIGHT
rc = (row, col)
textColour = self.textColour.get(rc, None)
if textColour:
textColour = textColour.GetAsString(wx.C2S_HTML_SYNTAX)
backgroundColour = self.backgroundColour.get(rc, None)
if backgroundColour:
backgroundColour = backgroundColour.GetAsString(wx.C2S_HTML_SYNTAX)
key = (textColour, backgroundColour, col, hCellAlign)
try:
attr = self.attrs[key]
except KeyError:
# Create an attribute for the cache.
attr = Grid.GridCellAttr()
attr.SetReadOnly( True ) # All cells read-only.
if rc in self.textColour:
attr.SetTextColour( self.textColour[rc] )
if rc in self.backgroundColour:
attr.SetBackgroundColour( self.backgroundColour[rc] )
if hCellAlign is not None:
attr.SetAlignment( hAlign=hCellAlign, vAlign=wx.ALIGN_CENTRE )
renderer = self.colRenderer.get(col, None)
if renderer:
attr.SetRenderer( renderer.Clone() )
self.attrs[key] = attr
attr.IncRef()
return attr
def SetAttr( self, row, col, attr ): pass
def SetRowAttr( self, row, attr ): pass
def SetColAttr( self, col, attr ) : pass
def UpdateAttrRows( self, pos, numRows ) : pass
def UpdateAttrCols( self, pos, numCols ) : pass
def ResetView(self, grid):
"""
(Grid) -> Reset the grid view. Call this to redraw the grid.
"""
self.attrs = {}
grid.AdjustScrollbars()
grid.ForceRefresh()
def UpdateValues(self, grid):
"""Update all displayed values"""
# This sends an event to the grid table to update all of the values
msg = Grid.GridTableMessage(self, Grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)
grid.ProcessTableMessage(msg)
# --------------------------------------------------------------------
# Sample Grid
class ColGrid(Grid.Grid):
def __init__(self, parent, data = None, colnames = None, textColour = None, backgroundColour = None, style = 0 ):
"""parent, data, colnames, plugins=None
Initialize a grid using the data defined in data and colnames
"""
# The base class must be initialized *first*
Grid.Grid.__init__(self, parent, style = style)
self._table = ColTable()
self.SetTable( self._table )
self.Set( data, colnames, textColour, backgroundColour )
self.zoomLevel = 1.0
def Reset( self ):
"""reset the view based on the data in the table. Call this when rows are added or destroyed"""
self._table.ResetView(self)
def Set( self, data = None, colnames = None, textColour = None, backgroundColour = None ):
self._table.Set( self, data, colnames, textColour, backgroundColour )
def SetColumn( self, iCol, colData ):
self._table.SetColumn( self, iCol, colData )
def SetColRenderer( self, col, renderer ):
self._table.SetColRenderer( col, renderer )
def GetData( self ):
return self._table.GetData()
def GetColNames( self ):
return self._table.GetColNames()
def DeleteCols( self, pos = 0, numCols = 1, updateLabels = True ):
self._table.DeleteCols(pos, numCols, updateLabels, self)
self.Reset()
def Zoom( self, zoomIn = True ):
factor = 2 if zoomIn else 0.5
if not 1.0/4.0 <= self.zoomLevel * factor <= 4.0:
return
self.zoomLevel *= factor
font = self.GetDefaultCellFont()
font.SetPointSize( int(font.GetPointSize() * factor) )
self.SetDefaultCellFont( font )
font = self.GetLabelFont()
font.SetPointSize( int(font.GetPointSize() * factor) )
self.SetLabelFont( font )
self.SetColLabelSize( int(self.GetColLabelSize() * factor) )
self.AutoSize()
self.Reset()
def SetRightAlign( self, ra = True ):
self._table.SetRightAlign( ra )
self.Reset()
def SetLeftAlignCols( self, cols ):
self._table.leftAlignCols = set()
for c in cols:
self._table.leftAlignCols.add( c )
self.Reset()
def SortByColumn( self, iCol, descending = False ):
self._table.SortByColumn( iCol, descending )
self.Refresh()
def clearGrid( self ):
self.Set( data = [], colnames = [], textColour = {}, backgroundColour = {} )
self.Reset()
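
# A minimal usage sketch (illustrative addition, not part of the original module).
# data is column-oriented: one list of cell values per column.
if __name__ == '__main__':
	app = wx.App( False )
	frame = wx.Frame( None, title='ColGrid demo' )
	grid = ColGrid( frame, data=[['1','2','3'], ['a','b','c']], colnames=['Num','Letter'] )
	grid.Reset()
	frame.Show()
	app.MainLoop()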
| [
"[email protected]"
] | |
516a664d5e69b3e6ccc8b980b298b28226e04d80 | aa9297175621fcd499cad5a0373aaad15f33cde8 | /udemy-py/practica-clases/mundo_pc/raton.py | 526bdec66619a3f912fd0fdfad898366d48d57f1 | [] | no_license | eflipe/python-exercises | a64e88affe8f9deb34e8aa29a23a68c25e7ba08a | b7a429f57a5e4c5dda7c77db5721ca66a401d0a3 | refs/heads/master | 2023-04-26T19:19:28.674350 | 2022-07-19T20:53:09 | 2022-07-19T20:53:09 | 192,589,885 | 0 | 0 | null | 2023-04-21T21:23:14 | 2019-06-18T18:06:14 | HTML | UTF-8 | Python | false | false | 703 | py | from dispositivo_de_entrada import DispositivoEntrada
class Raton(DispositivoEntrada):
contador_producto = 0
def __init__(self, marca, tipo_entrada):
Raton.contador_producto += 1
self._id_producto = Raton.contador_producto
super().__init__(marca, tipo_entrada)
def __str__(self):
return f'\tID raton: {self._id_producto} \
\n\t\t\t\tMarca: {self._marca} \
\n\t\t\t\tTipo de entrada: {self._tipo_entrada}\n'
if __name__ == '__main__':  # acts as a quick self-test
obj_raton1 = Raton(marca='HP', tipo_entrada="USB")
obj_raton2 = Raton(marca='Acer', tipo_entrada="Bluetooth")
print(obj_raton1)
print(obj_raton2)
| [
"[email protected]"
] | |
aee1247903d18ea9a88a475d42322f840a9bcfbd | 09cead98874a64d55b9e5c84b369d3523c890442 | /py200622_python2/day12_py200730/datatype_2_decimal.py | ab59ad921dea94213327cd36e00e7fdbfcac34c5 | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | """
numbers - decimal
Floats are stored as binary fractions, so most decimal fractions cannot be represented exactly by the computer.
"""
a = 1.1 + 2.2
# print(a)
b = 3.3
# print(b)
if a == b:
print("a==b")
else:
print("a!=b")
a = 1.1 + 0.1
b = 1.3 - 0.1
if a == b:
print("a==b")
else:
print("a!=b")
print()
# question
# 1.1 + 2.2 == 3.3
print(1.1 + 2.2 == 3.3)
print(float(1.1)+float(2.2)==float(3.3))
print()
print(1.1)
print(2.2)
print(3.3)
print(1.1+2.2)
print()
print(1.1+2.2 > 3.3)
print(1.1 + 2.2 == 3.3)
print(1.1+2.2 < 3.3)
print()
# other example
print("=== example 2 ===")
f11 = 1.0
f12 = 0.1
f2 = 0.9
print("f11={}, f12={} and f2={}".format(f11,f12,f2))
print("{}-{} == {} ?".format(f11, f12, (f11-f12 == f2)))
print("f11-f12 > f2 ?",f11-f12 > f2)
print("f11-f12 < f2 ?",f11-f12 < f2)
print()
# decimal
print("=== example 3 ===")
f11 = 1.0
f12 = 0.3
f2 = 0.7
print("f11={}, f12={} and f2={}".format(f11,f12,f2))
print("f11-f12 == f2 ?",f11-f12 == f2)
print("f11-f12 > f2 ?",f11-f12 > f2)
print("f11-f12 < f2 ?",f11-f12 < f2)
print()
# fraction
print("=== example 4 ===")
f11 = 1.0
f12 = 0.33
f2 = 0.67
print("f11={}, f12={} and f2={}".format(f11,f12,f2))
print("f11-f12 == f2 ?",f11-f12 == f2)
print("f11-f12 > f2 ?",f11-f12 > f2)
print("f11-f12 < f2 ?",f11-f12 < f2)
print()
print(1.0-0.33)
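
# A short follow-up sketch (illustrative addition, not part of the original lesson):
# two standard ways to compare decimal values reliably.
import math
from decimal import Decimal

# math.isclose compares floats within a tolerance instead of exactly.
print(math.isclose(1.1 + 2.2, 3.3))  # True

# Decimal stores base-10 fractions exactly when constructed from strings.
print(Decimal("1.1") + Decimal("2.2") == Decimal("3.3"))  # True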
| [
"[email protected]"
] | |
44189b3313b77752f57281d61124938352559d1a | 0377a4135f9e8940809a62186b229295bed9e9bc | /201. 数字范围按位与/solution.py | cae779cbe0ac247a557b52a9f0b2723b028eedab | [] | no_license | neko-niko/leetcode | 80f54a8ffa799cb026a7f60296de26d59a0826b0 | 311f19641d890772cc78d5aad9d4162dedfc20a0 | refs/heads/master | 2021-07-10T10:24:57.284226 | 2020-09-13T11:28:45 | 2020-09-13T11:28:45 | 198,792,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | class Solution:
    def rangeBitwiseAnd(self, m: int, n: int) -> int:
        # The AND of all numbers in [m, n] keeps only the common binary
        # prefix of m and n: repeatedly clearing the lowest set bit of n
        # with n & (n-1) shrinks n until it is <= m, leaving that prefix.
        while m < n:
            n = n & (n-1)
        return n
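
# Hypothetical quick check (illustrative addition, not part of the original submission):
# 5, 6, 7 are 101, 110, 111 in binary; their common prefix is 100, i.e. 4.
# print(Solution().rangeBitwiseAnd(5, 7))  # -> 4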
| [
"[email protected]"
] | |
c23bf011047c5d486f451c689ad1ad9bb5b648e7 | 060ce17de7b5cdbd5f7064d1fceb4ded17a23649 | /fn_mcafee_esm/fn_mcafee_esm/util/config.py | 18fc57d089d0070ed18677b9df04b68fd225e8a0 | [
"MIT"
] | permissive | ibmresilient/resilient-community-apps | 74bbd770062a22801cef585d4415c29cbb4d34e2 | 6878c78b94eeca407998a41ce8db2cc00f2b6758 | refs/heads/main | 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 | MIT | 2023-03-29T20:40:31 | 2017-08-25T14:07:33 | Python | UTF-8 | Python | false | false | 1,020 | py | # -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
"""Generate a default configuration-file section for fn_mcafee_esm"""
from __future__ import print_function
def config_section_data():
"""Produce the default configuration section for app.config,
when called by `resilient-circuits config [-c|-u]`
"""
config_data = u"""[fn_mcafee_esm]
# url example: https://127.0.0.1
esm_url=<your_esm_url>
esm_username=<your_esm_username>
esm_password=<your_esm_password>
# If your ESM server uses a cert which is not automatically trusted by your machine, set verify_cert=False.
verify_cert=[True|False]
## ESM Polling settings
# How often polling should happen. Value is in seconds. To disable polling, set this to zero.
esm_polling_interval=0
#incident_template=<location_of_template_file> # If not set uses default template.
# Optional settings for access to McAfee ESM via a proxy.
#http_proxy=http://proxy:80
#https_proxy=http://proxy:80
"""
return config_data
| [
"[email protected]"
] |