blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 3-616 | content_id stringlengths 40-40 | detected_licenses sequencelengths 0-112 | license_type stringclasses 2 values | repo_name stringlengths 5-115 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringclasses 777 values | visit_date timestamp[us] 2015-08-06 10:31:46 to 2023-09-06 10:44:38 | revision_date timestamp[us] 1970-01-01 02:38:32 to 2037-05-03 13:00:00 | committer_date timestamp[us] 1970-01-01 02:38:32 to 2023-09-06 01:08:06 | github_id int64 4.92k-681M ⌀ | star_events_count int64 0-209k | fork_events_count int64 0-110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] 2012-06-04 01:52:49 to 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us] 2008-05-22 07:58:19 to 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3-10.2M | extension stringclasses 188 values | content stringlengths 3-10.2M | authors sequencelengths 1-1 | author_id stringlengths 1-132 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f925dec040d95821cef191779da6306070d8ebd1 | 74951991a9e1dbe92d4999da9060409a9492bdc3 | /minimum-number-of-operations-to-move-all-balls-to-each-box/minimum-number-of-operations-to-move-all-balls-to-each-box.py | 8b91d314c3fcf1107e5a03f5f58cacc5ca30453a | [] | no_license | drpuig/Leetcode-1 | fd800ee2f13c7ce03fa57c8a1d10b3aa6976d7c0 | 4ee104f3069c380e1756dd65f6ff6004554e6c0e | refs/heads/main | 2023-07-15T08:57:32.971194 | 2021-08-21T08:29:24 | 2021-08-21T08:29:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | class Solution:
def minOperations(self, boxes: str) -> List[int]:
# similar to 238. Product of Array Except Self and 42. Trapping Rain Water
# example for leftCost:
# input 11010
# leftCount 01223
# leftCost 01358
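        # mirrored sketch for the right-to-left pass below (added for clarity,
        # same input; digits verified by hand):
        # rightCount 21100
        # rightCost  42100
        # final ans  43458  -> [4, 3, 4, 5, 8]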
ans = [0]*len(boxes)
leftCount, leftCost, rightCount, rightCost, n = 0, 0, 0, 0, len(boxes)
for i in range(1, n):
if boxes[i-1] == '1': leftCount += 1
leftCost += leftCount # each step move to right, the cost increases by # of 1s on the left
ans[i] = leftCost
for i in range(n-2, -1, -1):
if boxes[i+1] == '1': rightCount += 1
rightCost += rightCount
ans[i] += rightCost
return ans | [
"[email protected]"
] | |
f9adf60909c5eb13e5acb5e65c6af8986fc22867 | 28bf7793cde66074ac6cbe2c76df92bd4803dab9 | /answers/Anuraj Pariya/Day 3/question 2.py | e1eb01c954fac70baaec7abeb51bcb72b21b949c | [
"MIT"
] | permissive | Codechef-SRM-NCR-Chapter/30-DaysOfCode-March-2021 | 2dee33e057ba22092795a6ecc6686a9d31607c9d | 66c7d85025481074c93cfda7853b145c88a30da4 | refs/heads/main | 2023-05-29T10:33:31.795738 | 2021-06-10T14:57:30 | 2021-06-10T14:57:30 | 348,153,476 | 22 | 135 | MIT | 2021-06-10T14:57:31 | 2021-03-15T23:37:26 | Java | UTF-8 | Python | false | false | 356 | py | def uniqueCharacters(num):
for i in range(len(num)):
for j in range(i + 1,len(num)):
if(num[i] == num[j]):
return False
return True
num = input('Enter a number: ')
if uniqueCharacters(num):
    print("The string", num, "has all unique characters")
else:
    print("The string", num, "has duplicate characters")
| [
"[email protected]"
] | |
1e73fcf13b62a58f6664ed0b3991ee7998376d37 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02842/s205208232.py | ff1472f4186e61c2bc8dcd3915d423c7c49f881b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | import math
n = int(input())
x = int(n / 1.08)
if math.floor(x * 1.08) == n:
    print(x)
elif math.floor((x - 1) * 1.08) == n:
    print(x - 1)
elif math.floor((x + 1) * 1.08) == n:
    print(x + 1)
else:
    print(":(")
| [
"[email protected]"
] | |
50e7908500919233f20330f73cca0b3ef23c2b71 | 6b045457b0ea97f950eaef8373f417617be0bdd6 | /edexOsgi/com.raytheon.uf.edex.plugin.goesr/utility/common_static/base/derivedParameters/functions/satRgbRecipeDiff.py | 45781926363996d2c0eb651e84109c0ed370a9dd | [] | no_license | Unidata/awips2-goesr | 76228dda89e2d867318f2d6bffb10a897afe4083 | 61cc76f83643fea62dbfe59ba26d04e1ec9bc0ac | refs/heads/unidata_18.2.1 | 2023-09-04T06:36:35.949636 | 2023-05-02T18:59:52 | 2023-05-02T18:59:52 | 72,454,342 | 2 | 5 | null | 2023-05-02T18:59:53 | 2016-10-31T16:15:04 | Java | UTF-8 | Python | false | false | 5,460 | py | '''
<!--
This is an absolute override file, indicating that a higher priority
version of the file will completely replace a lower priority version
of the file.
-->
<!-- TOWRdocs Header
Derived Parameter Python file for implementing channel difference RGB recipes.
-->
<!-- TOWRdocs Description
This method implements channel difference RGB recipes outlined in EUMETSAT's "Best
Practices for RGB Displays of Multispectral Imagery" document
(http://oiswww.eumetsat.int/~idds/html/doc/best_practices.pdf). Two arrays
of satellite data are passed to this method along with various recipe options
outlined below. The channel difference is calculated as physicalElement1 -
physicalElement2. An array of display values is returned that corresponds to a
single color component of an RGB product.
-->
<!-- TOWRdocs Status
This is a new derived parameters file.
It is used to implement channel difference RGB recipes.
-->
<!-- TOWRdocs POC
Kevin M. McGrath
-->
CONTACTS:
This code was co-developed via the AWIPS II Experimental Products Development
Team (EPDT) by personnel from NASA SPoRT, CIRA, and the NWS:
Jason Burks/CIRA/MDL ([email protected])
Nancy Eustice/NWS ([email protected])
Kevin McGrath/NASA SPoRT/Jacobs ([email protected])
Deb Molenar/NOAA/NESDIS/RAMMB ([email protected])
Matt Smith/NASA SPoRT/UAH ([email protected])
Nate Smith/NWS ([email protected])
INPUT PARAMETERS:
@param physicalElement1:
Array of satellite data.
@param physicalElement2:
Array of satellite data.
@param calNumber:
This integer corresponds to methods in satRgbRecipeCalibration.py,
which converts satellite data values to physical values(e.g.,
IR counts to brightness temperatures). For most data, calNumber is 0
(no calibration required) because these values are already calibrated
when passed to this method. The units of the data passed to this method
is controlled by the the "unit=" option in the derived parameter definition
.xml files. As an example with GOES-R IR data, we need the data to be calibrated
to brightness temperature in units of Kelvin. The following line in a derived
parameter definition .xml file would accomplish this:
<Field abbreviation="CH-08-6.19um" unit="K"/>
Here's an example requesting GOES-13 IR data (unit="IRPixel") in units of Kelvin:
<Field abbreviation="Imager 11 micron IR" unit="IRPixel"/>
In the case of visible imagery, we want the data to be in albedo from 0 to 100.
The following line in a derived parameter definition .xml file would accomplish this
for GOES-R visible data (unit="%"):
<Field abbreviation="CH-06-2.25um" unit="%"/>
If no "unit=" option is included in the "Field abbreviation" derived parameter
definition, raw values will be passed to this method.
@param minCalibratedValue:
The calibrated satellite data values are clipped to this minimum
value.
@param maxCalibratedValue:
The calibrated satellite data values are clipped to this maximum
value.
@param gamma:
Gamma value used for stretching.
@param invert:
Invert the final display values (255 - value)? (0 = no, 1 = yes)
RETURNS:
@return: Display values
@rtype: numpy array (int8)
DEPENDENCIES:
* Numpy
* The satRgbRecipeCalibration module found in satRgbRecipeCalibration.py:
This is only required and imported if we need to calibrate the satellite
data (calNumber != 0).
MODIFICATIONS:
'''
import numpy as np
def execute(physicalElement1, physicalElement2, calNum, minCalibratedValue, maxCalibratedValue, gamma, invert):
#########################
# Create calibrated arrays.
if (int(calNum) == 0):
# No need to calibrate the data.
a1_calibrated = physicalElement1
a2_calibrated = physicalElement2
else:
# Import the calibration module.
import satRgbRecipeCalibration as calibration
# Define calibration method to call.
calTypeString = 'calType_' + str(int(calNum))
try:
calMethodToCall = getattr(calibration, calTypeString)
except AttributeError:
            return 0
# Calibrate the data by calling calType_<calNum> method.
a1_calibrated = calMethodToCall(physicalElement1)
a2_calibrated = calMethodToCall(physicalElement2)
#########################
# Calculate the difference between a1_calibrated and a2_calibrated.
diff_calibrated = a1_calibrated - a2_calibrated
#########################
# Clip the calibrated values.
diff_calibrated_clipped = np.clip(diff_calibrated, minCalibratedValue, maxCalibratedValue)
#########################
    # Generate display values by implementing the EUMETSAT recipe.
dispValue_float = 255.*(np.power( (diff_calibrated_clipped - minCalibratedValue)/(maxCalibratedValue - minCalibratedValue), (1./gamma)))
#########################
# Invert, if selected.
if (invert):
dispValue_float = 255. - dispValue_float
#########################
# Convert from float to int8.
dispValue_byte = np.array(dispValue_float, dtype=np.int8)
#########################
# CAVE interprets byte values of 0 as "No Data". Force values of 0 to 1.
dispValue_byte[dispValue_byte == 0] = 1
#########################
# For any pixels where physicalElement1 or physicalElement2 is zero (no data), set
# the displayValue_byte to 0 (no data) so that the pixel is transparent.
dispValue_byte[(physicalElement1 == 0) | (physicalElement2 == 0)] = 0
return dispValue_byte
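# Hypothetical usage sketch (argument names and values below are illustrative
# assumptions, not taken from any recipe definition file):
#   red = execute(bt_band1, bt_band2, calNum=0, minCalibratedValue=-25.0,
#                 maxCalibratedValue=0.0, gamma=1.0, invert=0)
# would produce one 8-bit color component of a channel-difference RGB.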
| [
"[email protected]"
] | |
e3c648df17473aa2c6ec699a3c3a228f309b458b | cc096d321ab5c6abf54fdcea67f10e77cd02dfde | /flex-backend/pypy/rpython/rctypes/achar_p.py | 679fb44c613f75c7b294d3527c0d94da6c01241f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | limweb/flex-pypy | 310bd8fcd6a9ddc01c0b14a92f0298d0ae3aabd2 | 05aeeda183babdac80f9c10fca41e3fb1a272ccb | refs/heads/master | 2021-01-19T22:10:56.654997 | 2008-03-19T23:51:59 | 2008-03-19T23:51:59 | 32,463,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | from pypy.rpython.rctypes.implementation import CTypesCallEntry, CTypesObjEntry
from pypy.annotation.model import SomeString
from ctypes import c_char_p
class CallEntry(CTypesCallEntry):
"Annotation and rtyping of calls to c_char_p."
_about_ = c_char_p
def specialize_call(self, hop):
string_repr = hop.rtyper.type_system.rstr.string_repr
r_char_p = hop.r_result
hop.exception_cannot_occur()
v_result = r_char_p.allocate_instance(hop.llops)
if len(hop.args_s):
v_value, = hop.inputargs(string_repr)
r_char_p.setstring(hop.llops, v_result, v_value)
return v_result
class ObjEntry(CTypesObjEntry):
"Annotation and rtyping of c_char_p instances."
_type_ = c_char_p
s_return_trick = SomeString(can_be_None=True)
def get_field_annotation(self, s_char_p, fieldname):
assert fieldname == 'value'
return self.s_return_trick
def get_repr(self, rtyper, s_char_p):
from pypy.rpython.rctypes import rchar_p
return rchar_p.CCharPRepr(rtyper, s_char_p, rchar_p.CCHARP)
| [
"lucio.torre@dbd81ab4-9648-0410-a770-9b81666e587d"
] | lucio.torre@dbd81ab4-9648-0410-a770-9b81666e587d |
6fbe95249a65618956af92f25e82a132dbcd9f39 | 2940f5416082dadd9c646cd9a46d2d0a99883efb | /venv/Lib/site-packages/scipy/_lib/tests/test_linear_assignment.py | 5ba729dcac445604773fab01e74e2df735b11eb7 | [
"BSD-3-Clause",
"Python-2.0",
"Qhull",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause",
"GPL-3.0-or-later",
"BSD-3-Clause-Open-MPI",
"GCC-exception-3.1",
"GPL-3.0-only"
] | permissive | tpike3/SugarScape | 4813e4fefbfb0a701f5913d74f045fd0eaed1942 | 39efe4007fba2b12b75c72f7795827a1f74d640b | refs/heads/main | 2021-06-20T03:55:46.288721 | 2021-01-20T17:06:35 | 2021-01-20T17:06:35 | 168,583,530 | 11 | 3 | MIT | 2021-01-20T17:19:53 | 2019-01-31T19:29:40 | Jupyter Notebook | UTF-8 | Python | false | false | 3,420 | py | from itertools import product
from numpy.testing import assert_array_equal
import numpy as np
import pytest
from scipy.optimize import linear_sum_assignment
from scipy.sparse import csr_matrix, random
from scipy.sparse.csgraph import min_weight_full_bipartite_matching
# Tests that combine scipy.optimize.linear_sum_assignment and
# scipy.sparse.csgraph.min_weight_full_bipartite_matching
@pytest.mark.parametrize('solver_type,sign,test_case', product(
[(linear_sum_assignment, np.array),
(min_weight_full_bipartite_matching, csr_matrix)],
[-1, 1],
[
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
[150, 400, 300]),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
[150, 2, 300]),
        # Square
        ([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
[10, 1, 7]),
        # Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
[10, 1, 4]),
        # Square
([[10, float("inf"), float("inf")],
[float("inf"), float("inf"), 1],
[float("inf"), 7, float("inf")]],
[10, 1, 7]),
])
)
def test_two_methods_give_expected_result_on_small_inputs(
solver_type, sign, test_case
):
solver, array_type = solver_type
cost_matrix, expected_cost = test_case
maximize = sign == -1
cost_matrix = sign * array_type(cost_matrix)
expected_cost = sign * np.array(expected_cost)
row_ind, col_ind = solver(cost_matrix, maximize=maximize)
assert_array_equal(row_ind, np.sort(row_ind))
assert_array_equal(expected_cost,
np.array(cost_matrix[row_ind, col_ind]).flatten())
cost_matrix = cost_matrix.T
row_ind, col_ind = solver(cost_matrix, maximize=maximize)
assert_array_equal(row_ind, np.sort(row_ind))
assert_array_equal(np.sort(expected_cost),
np.sort(np.array(
cost_matrix[row_ind, col_ind])).flatten())
def test_two_methods_give_same_result_on_many_sparse_inputs():
# As opposed to the test above, here we do not spell out the expected
# output; only assert that the two methods give the same result.
# Concretely, the below tests 100 cases of size 100x100, out of which
# 36 are infeasible.
np.random.seed(1234)
for _ in range(100):
lsa_raises = False
mwfbm_raises = False
sparse = random(100, 100, density=0.06,
data_rvs=lambda size: np.random.randint(1, 100, size))
# In csgraph, zeros correspond to missing edges, so we explicitly
# replace those with infinities
dense = np.full(sparse.shape, np.inf)
dense[sparse.row, sparse.col] = sparse.data
sparse = sparse.tocsr()
try:
row_ind, col_ind = linear_sum_assignment(dense)
lsa_cost = dense[row_ind, col_ind].sum()
except ValueError:
lsa_raises = True
try:
row_ind, col_ind = min_weight_full_bipartite_matching(sparse)
mwfbm_cost = sparse[row_ind, col_ind].sum()
except ValueError:
mwfbm_raises = True
# Ensure that if one method raises, so does the other one.
assert lsa_raises == mwfbm_raises
if not lsa_raises:
assert lsa_cost == mwfbm_cost
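# Tiny equivalence sketch (illustrative, not part of the parametrized cases):
#   dense = np.array([[4.0, 1.0], [2.0, 0.5]])
#   linear_sum_assignment(dense) and
#   min_weight_full_bipartite_matching(csr_matrix(dense))
#   both select the entries 1.0 and 2.0, for a minimal total cost of 3.0.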
| [
"[email protected]"
] | |
5d9ea69a2479861b2b8aeffb32b1e02a1968905c | 4a81e33fe6d214f2efaeb97b03b5b05fae12b0d8 | /demos/great-expectations/venv/lib/python3.8/site-packages/prometheus_client/metrics_core.py | 77b3e446711a2d90ba41f6fb060b6e95520439fb | [] | no_license | franciscojavierarceo/Python | 29aaea28642dde151255c5b4a813158e975a073d | 02715ca6f19fd3c76cefa12de92deeae4ddf9684 | refs/heads/main | 2023-08-27T14:23:04.376095 | 2023-08-27T10:30:37 | 2023-08-27T10:30:37 | 33,146,755 | 7 | 9 | null | 2023-02-16T06:40:35 | 2015-03-30T20:38:00 | Jupyter Notebook | UTF-8 | Python | false | false | 15,538 | py | import re
from typing import Dict, List, Optional, Sequence, Tuple, Union
from .samples import Exemplar, Sample, Timestamp
METRIC_TYPES = (
'counter', 'gauge', 'summary', 'histogram',
'gaugehistogram', 'unknown', 'info', 'stateset',
)
METRIC_NAME_RE = re.compile(r'^[a-zA-Z_:][a-zA-Z0-9_:]*$')
METRIC_LABEL_NAME_RE = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
RESERVED_METRIC_LABEL_NAME_RE = re.compile(r'^__.*$')
class Metric:
"""A single metric family and its samples.
This is intended only for internal use by the instrumentation client.
Custom collectors should use GaugeMetricFamily, CounterMetricFamily
and SummaryMetricFamily instead.
"""
def __init__(self, name: str, documentation: str, typ: str, unit: str = ''):
if unit and not name.endswith("_" + unit):
name += "_" + unit
if not METRIC_NAME_RE.match(name):
raise ValueError('Invalid metric name: ' + name)
self.name: str = name
self.documentation: str = documentation
self.unit: str = unit
if typ == 'untyped':
typ = 'unknown'
if typ not in METRIC_TYPES:
raise ValueError('Invalid metric type: ' + typ)
self.type: str = typ
self.samples: List[Sample] = []
def add_sample(self, name: str, labels: Dict[str, str], value: float, timestamp: Optional[Union[Timestamp, float]] = None, exemplar: Optional[Exemplar] = None) -> None:
"""Add a sample to the metric.
Internal-only, do not use."""
self.samples.append(Sample(name, labels, value, timestamp, exemplar))
def __eq__(self, other: object) -> bool:
return (isinstance(other, Metric)
and self.name == other.name
and self.documentation == other.documentation
and self.type == other.type
and self.unit == other.unit
and self.samples == other.samples)
def __repr__(self) -> str:
return "Metric({}, {}, {}, {}, {})".format(
self.name,
self.documentation,
self.type,
self.unit,
self.samples,
)
def _restricted_metric(self, names):
"""Build a snapshot of a metric with samples restricted to a given set of names."""
samples = [s for s in self.samples if s[0] in names]
if samples:
m = Metric(self.name, self.documentation, self.type)
m.samples = samples
return m
return None
class UnknownMetricFamily(Metric):
"""A single unknown metric and its samples.
For use by custom collectors.
"""
def __init__(self,
name: str,
documentation: str,
value: Optional[float] = None,
labels: Optional[Sequence[str]] = None,
unit: str = '',
):
Metric.__init__(self, name, documentation, 'unknown', unit)
if labels is not None and value is not None:
raise ValueError('Can only specify at most one of value and labels.')
if labels is None:
labels = []
self._labelnames = tuple(labels)
if value is not None:
self.add_metric([], value)
def add_metric(self, labels: Sequence[str], value: float, timestamp: Optional[Union[Timestamp, float]] = None) -> None:
"""Add a metric to the metric family.
Args:
labels: A list of label values
value: The value of the metric.
"""
self.samples.append(Sample(self.name, dict(zip(self._labelnames, labels)), value, timestamp))
# For backward compatibility.
UntypedMetricFamily = UnknownMetricFamily
class CounterMetricFamily(Metric):
"""A single counter and its samples.
For use by custom collectors.
"""
def __init__(self,
name: str,
documentation: str,
value: Optional[float] = None,
                 labels: Optional[Sequence[str]] = None,
created: Optional[float] = None,
unit: str = '',
):
# Glue code for pre-OpenMetrics metrics.
if name.endswith('_total'):
name = name[:-6]
Metric.__init__(self, name, documentation, 'counter', unit)
if labels is not None and value is not None:
raise ValueError('Can only specify at most one of value and labels.')
if labels is None:
labels = []
self._labelnames = tuple(labels)
if value is not None:
self.add_metric([], value, created)
def add_metric(self,
labels: Sequence[str],
value: float,
created: Optional[float] = None,
timestamp: Optional[Union[Timestamp, float]] = None,
) -> None:
"""Add a metric to the metric family.
Args:
labels: A list of label values
value: The value of the metric
created: Optional unix timestamp the child was created at.
"""
self.samples.append(Sample(self.name + '_total', dict(zip(self._labelnames, labels)), value, timestamp))
if created is not None:
self.samples.append(Sample(self.name + '_created', dict(zip(self._labelnames, labels)), created, timestamp))
class GaugeMetricFamily(Metric):
"""A single gauge and its samples.
For use by custom collectors.
"""
def __init__(self,
name: str,
documentation: str,
value: Optional[float] = None,
labels: Optional[Sequence[str]] = None,
unit: str = '',
):
Metric.__init__(self, name, documentation, 'gauge', unit)
if labels is not None and value is not None:
raise ValueError('Can only specify at most one of value and labels.')
if labels is None:
labels = []
self._labelnames = tuple(labels)
if value is not None:
self.add_metric([], value)
def add_metric(self, labels: Sequence[str], value: float, timestamp: Optional[Union[Timestamp, float]] = None) -> None:
"""Add a metric to the metric family.
Args:
labels: A list of label values
value: A float
"""
self.samples.append(Sample(self.name, dict(zip(self._labelnames, labels)), value, timestamp))
class SummaryMetricFamily(Metric):
"""A single summary and its samples.
For use by custom collectors.
"""
def __init__(self,
name: str,
documentation: str,
count_value: Optional[int] = None,
sum_value: Optional[float] = None,
labels: Optional[Sequence[str]] = None,
unit: str = '',
):
Metric.__init__(self, name, documentation, 'summary', unit)
if (sum_value is None) != (count_value is None):
raise ValueError('count_value and sum_value must be provided together.')
if labels is not None and count_value is not None:
raise ValueError('Can only specify at most one of value and labels.')
if labels is None:
labels = []
self._labelnames = tuple(labels)
# The and clause is necessary only for typing, the above ValueError will raise if only one is set.
if count_value is not None and sum_value is not None:
self.add_metric([], count_value, sum_value)
def add_metric(self,
labels: Sequence[str],
count_value: int,
sum_value: float,
timestamp:
Optional[Union[float, Timestamp]] = None
) -> None:
"""Add a metric to the metric family.
Args:
labels: A list of label values
count_value: The count value of the metric.
sum_value: The sum value of the metric.
"""
self.samples.append(Sample(self.name + '_count', dict(zip(self._labelnames, labels)), count_value, timestamp))
self.samples.append(Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp))
class HistogramMetricFamily(Metric):
"""A single histogram and its samples.
For use by custom collectors.
"""
def __init__(self,
name: str,
documentation: str,
buckets: Optional[Sequence[Union[Tuple[str, float], Tuple[str, float, Exemplar]]]] = None,
sum_value: Optional[float] = None,
labels: Optional[Sequence[str]] = None,
unit: str = '',
):
Metric.__init__(self, name, documentation, 'histogram', unit)
if sum_value is not None and buckets is None:
raise ValueError('sum value cannot be provided without buckets.')
if labels is not None and buckets is not None:
raise ValueError('Can only specify at most one of buckets and labels.')
if labels is None:
labels = []
self._labelnames = tuple(labels)
if buckets is not None:
self.add_metric([], buckets, sum_value)
def add_metric(self,
labels: Sequence[str],
buckets: Sequence[Union[Tuple[str, float], Tuple[str, float, Exemplar]]],
sum_value: Optional[float],
timestamp: Optional[Union[Timestamp, float]] = None) -> None:
"""Add a metric to the metric family.
Args:
labels: A list of label values
buckets: A list of lists.
Each inner list can be a pair of bucket name and value,
or a triple of bucket name, value, and exemplar.
The buckets must be sorted, and +Inf present.
sum_value: The sum value of the metric.
"""
for b in buckets:
bucket, value = b[:2]
exemplar = None
if len(b) == 3:
exemplar = b[2] # type: ignore
self.samples.append(Sample(
self.name + '_bucket',
dict(list(zip(self._labelnames, labels)) + [('le', bucket)]),
value,
timestamp,
exemplar,
))
# Don't include sum and thus count if there's negative buckets.
if float(buckets[0][0]) >= 0 and sum_value is not None:
# +Inf is last and provides the count value.
self.samples.append(
Sample(self.name + '_count', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp))
self.samples.append(
Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp))
class GaugeHistogramMetricFamily(Metric):
"""A single gauge histogram and its samples.
For use by custom collectors.
"""
def __init__(self,
name: str,
documentation: str,
buckets: Optional[Sequence[Tuple[str, float]]] = None,
gsum_value: Optional[float] = None,
labels: Optional[Sequence[str]] = None,
unit: str = '',
):
Metric.__init__(self, name, documentation, 'gaugehistogram', unit)
if labels is not None and buckets is not None:
raise ValueError('Can only specify at most one of buckets and labels.')
if labels is None:
labels = []
self._labelnames = tuple(labels)
if buckets is not None:
self.add_metric([], buckets, gsum_value)
def add_metric(self,
labels: Sequence[str],
buckets: Sequence[Tuple[str, float]],
gsum_value: Optional[float],
timestamp: Optional[Union[float, Timestamp]] = None,
) -> None:
"""Add a metric to the metric family.
Args:
labels: A list of label values
buckets: A list of pairs of bucket names and values.
The buckets must be sorted, and +Inf present.
gsum_value: The sum value of the metric.
"""
for bucket, value in buckets:
self.samples.append(Sample(
self.name + '_bucket',
dict(list(zip(self._labelnames, labels)) + [('le', bucket)]),
value, timestamp))
# +Inf is last and provides the count value.
self.samples.extend([
Sample(self.name + '_gcount', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp),
# TODO: Handle None gsum_value correctly. Currently a None will fail exposition but is allowed here.
Sample(self.name + '_gsum', dict(zip(self._labelnames, labels)), gsum_value, timestamp), # type: ignore
])
class InfoMetricFamily(Metric):
"""A single info and its samples.
For use by custom collectors.
"""
def __init__(self,
name: str,
documentation: str,
value: Optional[Dict[str, str]] = None,
labels: Optional[Sequence[str]] = None,
):
Metric.__init__(self, name, documentation, 'info')
if labels is not None and value is not None:
raise ValueError('Can only specify at most one of value and labels.')
if labels is None:
labels = []
self._labelnames = tuple(labels)
if value is not None:
self.add_metric([], value)
def add_metric(self,
labels: Sequence[str],
value: Dict[str, str],
timestamp: Optional[Union[Timestamp, float]] = None,
) -> None:
"""Add a metric to the metric family.
Args:
labels: A list of label values
value: A dict of labels
"""
self.samples.append(Sample(
self.name + '_info',
dict(dict(zip(self._labelnames, labels)), **value),
1,
timestamp,
))
class StateSetMetricFamily(Metric):
"""A single stateset and its samples.
For use by custom collectors.
"""
def __init__(self,
name: str,
documentation: str,
value: Optional[Dict[str, bool]] = None,
labels: Optional[Sequence[str]] = None,
):
Metric.__init__(self, name, documentation, 'stateset')
if labels is not None and value is not None:
raise ValueError('Can only specify at most one of value and labels.')
if labels is None:
labels = []
self._labelnames = tuple(labels)
if value is not None:
self.add_metric([], value)
def add_metric(self,
labels: Sequence[str],
value: Dict[str, bool],
timestamp: Optional[Union[Timestamp, float]] = None,
) -> None:
"""Add a metric to the metric family.
Args:
labels: A list of label values
value: A dict of string state names to booleans
"""
labels = tuple(labels)
for state, enabled in sorted(value.items()):
v = (1 if enabled else 0)
self.samples.append(Sample(
self.name,
dict(zip(self._labelnames + (self.name,), labels + (state,))),
v,
timestamp,
))
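# Minimal custom-collector sketch using the families above (illustrative;
# the registry wiring is assumed and not defined in this module):
#
#     class MyCollector:
#         def collect(self):
#             c = CounterMetricFamily('my_requests', 'Requests served.',
#                                     labels=['method'])
#             c.add_metric(['get'], 42.0)
#             yield c
#
#     REGISTRY.register(MyCollector())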
| [
"[email protected]"
] | |
109b2be6ef02aea21c97f308754df2fdaeffe5d9 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-2603.py | 1825fa8c36018790097910ebb46e5047bcf0ef5b | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,291 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
        elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
] | |
d8aacd22dc55729c0394ca6fdd998d7aef004f28 | 3bedfe030662300844861d8f4d0ba52c1e43f950 | /fit_farquhar_model/plot_priors.py | af76c6018455b51fd775409540fd1529586fb42a | [] | no_license | shaoxiuma/FitFarquharModel | d200ae38d3fd6ab73b1db4910ef7c15fe16bb0f3 | fd3766feaea65e80025df9bf5a9257e68805f696 | refs/heads/master | 2021-01-06T06:45:36.683836 | 2016-07-14T09:41:05 | 2016-07-14T09:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import truncnorm as tn
import pymc
#mu = 25.0
#sigma = 11.25
#a = 1.0
#b = 650.0
#vals = tn(a=a, b=b, loc=mu, scale=sigma)
#plt.hist(vals.rvs(100000), bins=50)
#plt.xlim(0, 100)
#plt.show()
N = 10000
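# Note: pymc2's TruncatedNormal is parameterized by precision tau = 1/sigma**2,
# so e.g. tau=1.0/61.25**2 below corresponds to a standard deviation of 61.25.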
Vcmax = [pymc.TruncatedNormal('Vcmax25', \
mu=100.0, tau=1.0/61.25**2, a=0.0, b=650.0).value \
for i in xrange(N)]
Jfac = [pymc.TruncatedNormal('Jfac', mu=1.8, tau=1.0/0.5**2, \
a=0.0, b=5.0).value for i in xrange(N)]
Rdfac = [pymc.Uniform('Rdfac', lower=0.005, upper=0.05).value \
for i in xrange(N)]
Eaj = [pymc.TruncatedNormal('Eaj', mu=40000.0, tau=1.0/10000.0**2, a=0.0,
b=199999.9).value for i in xrange(N)]
Eav = [pymc.TruncatedNormal('Eav', mu=60000.0, tau=1.0/10000.0**2, a=0.0,
b=199999.9).value for i in xrange(N)]
Ear = [pymc.TruncatedNormal('Ear', mu=34000.0, tau=1.0/10000.0**2, a=0.0,
b=199999.9).value for i in xrange(N)]
delSj = [pymc.TruncatedNormal('delSj', mu=640.0, tau=1.0/10.0**2, a=300.0,
b=800.0).value for i in xrange(N)]
delSv = [pymc.TruncatedNormal('delSv', mu=640.0, tau=1.0/10.0**2,
a=300.0, b=800.0).value for i in xrange(N)]
plt.rcParams['figure.subplot.hspace'] = 0.3
plt.rcParams['figure.subplot.wspace'] = 0.3
plt.rcParams['font.size'] = 10
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['xtick.labelsize'] = 10.0
plt.rcParams['ytick.labelsize'] = 10.0
plt.rcParams['axes.labelsize'] = 10.0
var_names = ["Vcmax", "Jfac", "Rdfac", "Eaj", "Eav", "Ear", "delSj", "delSv"]
vars = [Vcmax, Jfac, Rdfac, Eaj, Eav, Ear, delSj, delSv]
fig = plt.figure(figsize=(10,10))
bins = 50
for index, var in enumerate(var_names):
ax = fig.add_subplot(4,2,(index+1))
ax.set_title(var_names[index])
ax.hist(vars[index], bins=bins)
fig.savefig("/Users/mdekauwe/Desktop/priors.png", dpi=150)
plt.show() | [
"[email protected]"
] | |
ca06a6bb356622e06b1d115ea2cc65695ec2f9e8 | ae80a18e9f834e0346d8ffeac0a6efad58bfa36f | /tensorflow/python/keras/engine/base_layer.py | f63fb4e69445c5ce95de80516cee59f52800f6e3 | [
"Apache-2.0"
] | permissive | dangchaojin/tensorflow | 4e248eb0dccb5e4fd2c4cf5fe4022b15953be7dc | d29b2faa5fe9c79679171bcafe930b819f74dca5 | refs/heads/master | 2020-04-17T07:44:53.714475 | 2019-01-18T09:03:42 | 2019-01-18T09:14:09 | 166,382,351 | 1 | 0 | Apache-2.0 | 2019-01-18T09:47:33 | 2019-01-18T09:47:32 | null | UTF-8 | Python | false | false | 73,430 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect # Necessary supplement to tf_inspect to deal with variadic args.
import itertools
import numpy as np
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
# A module that only depends on `keras.layers` import these from here.
from tensorflow.python.keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.checkpointable import layer_utils as checkpointable_layer_utils
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.layers.Layer')
class Layer(checkpointable.Checkpointable):
"""Base layer class.
This is the class from which all layers inherit.
A layer is a class implementing common neural networks operations, such
as convolution, batch norm, etc. These operations require managing weights,
losses, updates, and inter-layer connectivity.
Users will just instantiate a layer and then treat it as a callable.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Save configuration in member variables
* `build()`: Called once from `__call__`, when we know the shapes of inputs
and `dtype`. Should have the calls to `add_weight()`, and then
call the super's `build()` (which sets `self.built = True`, which is
nice in case the user wants to call `build()` manually before the
first `__call__`).
* `call()`: Called in `__call__` after making sure `build()` has been called
once. Should actually perform the logic of applying the layer to the
input tensors (which should be passed in as the first argument).
Arguments:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
dynamic: Set this to `True` if your layer should only be run eagerly, and
should not be used to generate a static computation graph.
This would be the case for a Tree-RNN or a recursive network,
for example, or generally for any layer that manipulates tensors
using Python control flow. If `False`, we assume that the layer can
safely be used to generate a static computation graph.
Read-only properties:
name: The name of the layer (string).
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
updates: List of update ops of this layer.
losses: List of losses added by this layer.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
Mutable properties:
trainable: Whether the layer should be trained (boolean).
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
"""
@checkpointable.no_automatic_dependency_tracking
def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,
**kwargs):
# These properties should be set by the user via keyword arguments.
# note that 'dtype', 'input_shape' and 'batch_input_shape'
# are only applicable to input layers: do not pass these keywords
# to non-input layers.
allowed_kwargs = {
'input_shape',
'batch_input_shape',
'batch_size',
'weights',
'activity_regularizer',
}
# Validate optional keyword arguments.
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError('Keyword argument not understood:', kwarg)
# Mutable properties
# Indicates whether the layer's weights are updated during training
# and whether the layer's updates are run during training
self.trainable = trainable
# A stateful layer is a layer whose updates are run during inference too,
# for instance stateful RNNs.
self.stateful = False
# Indicates whether `build` needs to be called upon layer call, to create
# the layer's weights.
self.built = False
# Provides information about which inputs are compatible with the layer.
self.input_spec = None
self.supports_masking = False
self._init_set_name(name)
self._activity_regularizer = kwargs.pop('activity_regularizer', None)
if not hasattr(self, '_trainable_weights'):
self._trainable_weights = []
if not hasattr(self, '_non_trainable_weights'):
self._non_trainable_weights = []
self._updates = []
# A list of zero-argument lambdas which return Tensors, used for variable
# regularizers.
self._callable_losses = []
# A list of symbolic Tensors containing activity regularizers and losses
# manually added through `add_loss` in graph-building mode.
self._losses = []
# A list of loss values containing activity regularizers and losses
# manually added through `add_loss` during eager execution. It is cleared
# after every batch.
# Because we plan on eventually allowing a same model instance to be trained
# in eager mode or graph mode alternatively, we need to keep track of
# eager losses and symbolic losses via separate attributes.
self._eager_losses = []
# A list of metric instances corresponding to the symbolic metric tensors
# added using the `add_metric` API.
self._metrics = []
# TODO(psv): Remove this property.
# A dictionary that maps metric names to metric result tensors. The results
# are the running averages of metric values over an epoch.
self._metrics_tensors = {}
self._dtype = None if dtype is None else dtypes.as_dtype(dtype).name
self._call_fn_args = function_utils.fn_args(self.call)
self._compute_previous_mask = ('mask' in self._call_fn_args or
hasattr(self, 'compute_mask'))
self._call_convention = (base_layer_utils
.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
if not hasattr(self, '_layers'):
self._layers = [] # Dependencies tracked via attribute assignment.
# These lists will be filled via successive calls
# to self._add_inbound_node().
self._inbound_nodes = []
self._outbound_nodes = []
call_argspec = tf_inspect.getfullargspec(self.call)
if 'training' in call_argspec.args:
self._expects_training_arg = True
else:
self._expects_training_arg = False
# Whether the `call` method can be used to build a TF graph without issues.
self._dynamic = dynamic
# Manage input shape information if passed.
if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
# In this case we will later create an input layer
# to insert before the current layer
if 'batch_input_shape' in kwargs:
batch_input_shape = tuple(kwargs['batch_input_shape'])
elif 'input_shape' in kwargs:
if 'batch_size' in kwargs:
batch_size = kwargs['batch_size']
else:
batch_size = None
batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
self._batch_input_shape = batch_input_shape
# Manage initial weight values if passed.
if 'weights' in kwargs:
self._initial_weights = kwargs['weights']
else:
self._initial_weights = None
def build(self, input_shape):
"""Creates the variables of the layer (optional, for subclass implementers).
This is a method that implementers of subclasses of `Layer` or `Model`
can override if they need a state-creation step in-between
layer instantiation and layer call.
This is typically used to create the weights of `Layer` subclasses.
Arguments:
input_shape: Instance of `TensorShape`, or list of instances of
`TensorShape` if the layer expects a list of inputs
(one instance per input).
"""
self.built = True
@doc_controls.for_subclass_implementers
def call(self, inputs, **kwargs): # pylint: disable=unused-argument
"""This is where the layer's logic lives.
Arguments:
inputs: Input tensor, or list/tuple of input tensors.
**kwargs: Additional keyword arguments.
Returns:
A tensor or list/tuple of tensors.
"""
return inputs
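  # A minimal subclass sketch following the recommendations in the class
  # docstring (illustrative only; `Linear` is not part of this module):
  #
  #   class Linear(Layer):
  #
  #     def __init__(self, units=32, **kwargs):
  #       super(Linear, self).__init__(**kwargs)
  #       self.units = units
  #
  #     def build(self, input_shape):
  #       self.kernel = self.add_weight(
  #           'kernel', shape=[int(input_shape[-1]), self.units])
  #       super(Linear, self).build(input_shape)
  #
  #     def call(self, inputs):
  #       return math_ops.matmul(inputs, self.kernel)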
@doc_controls.for_subclass_implementers
def add_weight(self,
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
constraint=None,
partitioner=None,
use_resource=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE,
**kwargs):
"""Adds a new variable to the layer.
Arguments:
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: initializer instance (callable).
regularizer: regularizer instance (callable).
trainable: whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
Note, if the current variable scope is marked as non-trainable
then this parameter is ignored and any added variables are also
marked as non-trainable. `trainable` defaults to `True` unless
`synchronization` is set to `ON_READ`.
constraint: constraint instance (callable).
partitioner: Partitioner to be passed to the `Checkpointable` API.
use_resource: Whether to use `ResourceVariable`.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
**kwargs: Additional keyword arguments. Accepted values are `getter` and
`collections`.
Returns:
The created variable. Usually either a `Variable` or `ResourceVariable`
instance. If `partitioner` is not `None`, a `PartitionedVariable`
instance is returned.
Raises:
RuntimeError: If called with partitioned variable regularization and
eager execution is enabled.
ValueError: When giving unsupported dtype and no initializer or when
trainable has been set to True with synchronization set as `ON_READ`.
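
    Example (a minimal sketch of typical usage from inside `build`; the
    attribute and shape names are illustrative only):

    ```python
    def build(self, input_shape):
      self.kernel = self.add_weight(
          name='kernel',
          shape=(int(input_shape[-1]), self.units),
          initializer=initializers.glorot_uniform(),
          regularizer=regularizers.l2(1e-4),
          trainable=True)
    ```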
"""
# Validate optional keyword arguments.
for kwarg in kwargs:
if kwarg not in ['getter', 'collections']:
raise TypeError('Unknown keyword argument:', kwarg)
getter = kwargs.pop('getter', None)
collections = kwargs.pop('collections', None)
if dtype is None:
dtype = self.dtype or backend.floatx()
dtype = dtypes.as_dtype(dtype)
if self._dtype is None:
self._dtype = dtype.base_dtype.name
initializer = initializers.get(initializer)
regularizer = regularizers.get(regularizer)
constraint = constraints.get(constraint)
if synchronization == tf_variables.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.')
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
# Initialize variable when no initializer provided
if initializer is None:
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = initializers.glorot_uniform()
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
initializer = initializers.zeros()
# NOTE: do we need to support handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError('An initializer for variable %s of type %s is required'
' for layer %s' % (name, dtype.base_dtype, self.name))
variable = self._add_variable_with_custom_getter(
name=name,
shape=shape,
# TODO(allenl): a `make_variable` equivalent should be added as a
# `Checkpointable` method.
getter=getter or base_layer_utils.make_variable,
# Manage errors in Layer rather than Checkpointable.
overwrite=True,
initializer=initializer,
dtype=dtype,
constraint=constraint,
trainable=trainable and self.trainable,
partitioner=partitioner,
use_resource=use_resource,
collections=collections,
synchronization=synchronization,
aggregation=aggregation)
backend.track_variable(variable)
if regularizer is not None:
# TODO(fchollet): in the future, this should be handled at the
# level of variable creation, and weight regularization losses
# should be variable attributes.
self._handle_weight_regularization(name, variable, regularizer)
if trainable:
self._trainable_weights.append(variable)
else:
self._non_trainable_weights.append(variable)
return variable
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable)
containing the configuration of a layer.
The same layer can be reinstantiated later
(without its trained weights) from this configuration.
The config of a layer does not include connectivity
information, nor the layer class name. These are handled
by `Network` (one layer of abstraction above).
Returns:
Python dictionary.
"""
config = {'name': self.name, 'trainable': self.trainable}
if hasattr(self, '_batch_input_shape'):
config['batch_input_shape'] = self._batch_input_shape
if hasattr(self, 'dtype'):
config['dtype'] = self.dtype
return config
@classmethod
def from_config(cls, config):
"""Creates a layer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same layer from the config
dictionary. It does not handle layer connectivity
(handled by Network), nor weights (handled by `set_weights`).
Arguments:
config: A Python dictionary, typically the
output of get_config.
Returns:
A layer instance.
"""
return cls(**config)
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer.
Assumes that the layer will be built
to match that input shape provided.
Arguments:
input_shape: Shape tuple (tuple of integers)
or list of shape tuples (one per output tensor of the layer).
Shape tuples can include None for free dimensions,
instead of an integer.
Returns:
An output shape tuple.
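
    Example (an illustrative sketch of a typical override; it assumes
    `tensor_shape` from `tensorflow.python.framework` and a `self.units`
    attribute, both illustrative here):

    ```python
    def compute_output_shape(self, input_shape):
      input_shape = tensor_shape.TensorShape(input_shape).as_list()
      return tensor_shape.TensorShape(input_shape[:-1] + [self.units])
    ```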
"""
if context.executing_eagerly():
# In this case we build the model first in order to do shape inference.
# This is acceptable because the framework only calls
# `compute_output_shape` on shape values that the layer would later be
# built for. It would however cause issues in case a user attempts to
# use `compute_output_shape` manually (these users will have to
# implement `compute_output_shape` themselves).
self.build(input_shape)
with context.graph_mode():
graph = func_graph.FuncGraph('graph')
with graph.as_default():
if isinstance(input_shape, list):
inputs = [base_layer_utils.generate_placeholders_from_shape(shape)
for shape in input_shape]
else:
inputs = base_layer_utils.generate_placeholders_from_shape(
input_shape)
try:
if self._expects_training_arg:
outputs = self(inputs, training=False)
else:
outputs = self(inputs)
except TypeError:
raise NotImplementedError('We could not automatically infer '
'the static shape of the layer\'s output.'
' Please implement the '
'`compute_output_shape` method on your '
'layer (%s).' % self.__class__.__name__)
if isinstance(outputs, list):
return [output.shape for output in outputs]
else:
return outputs.shape
raise NotImplementedError
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
"""Computes an output mask tensor.
Arguments:
inputs: Tensor or list of tensors.
mask: Tensor or list of tensors.
Returns:
None or a tensor (or list of tensors,
one per output tensor of the layer).
"""
if not self.supports_masking:
if mask is not None:
if isinstance(mask, list):
if any(m is not None for m in mask):
raise TypeError('Layer ' + self.name + ' does not support masking, '
'but was passed an input_mask: ' + str(mask))
else:
raise TypeError('Layer ' + self.name + ' does not support masking, '
'but was passed an input_mask: ' + str(mask))
# masking not explicitly supported: return None as mask
return None
# if masking is explicitly supported, by default
# carry over the input mask
return mask
def __call__(self, inputs, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Arguments:
inputs: input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
Note:
- The following optional keyword arguments are reserved for specific uses:
* `training`: Boolean scalar tensor of Python boolean indicating
whether the `call` is meant for training or inference.
* `mask`: Boolean input mask.
- If the layer's `call` method takes a `mask` argument (as some Keras
layers do), its default value will be set to the mask generated
for `inputs` by the previous layer (if `inputs` did come from
a layer that generated a corresponding mask, i.e. if it came from
a Keras layer with masking support).
Raises:
ValueError: if the layer's `call` method returns None (an invalid value).
"""
input_list = nest.flatten(inputs)
if context.executing_eagerly():
# Accept NumPy inputs by converting to Tensors when executing eagerly.
if all(isinstance(x, (np.ndarray, float, int)) for x in input_list):
inputs = nest.map_structure(ops.convert_to_tensor, inputs)
input_list = nest.flatten(inputs)
# We will attempt to build a TF graph if & only if all inputs are symbolic.
# This is always the case in graph mode. It can also be the case in eager
# mode when all inputs can be traced back to `keras.Input()` (when building
# models using the functional API).
build_graph = tf_utils.are_all_symbolic_tensors(input_list)
if build_graph:
# Only create Keras history if at least one tensor originates from a
# `keras.Input`. Otherwise this Layer may be being used outside the Keras
# framework.
if base_layer_utils.uses_keras_input_layers(inputs):
base_layer_utils.create_keras_history(inputs)
# Handle Keras mask propagation from previous layer to current layer.
previous_mask = None
if build_graph and (not hasattr(self, '_compute_previous_mask') or
self._compute_previous_mask):
previous_mask = base_layer_utils.collect_previous_mask(inputs)
if not hasattr(self, '_call_fn_args'):
self._call_fn_args = function_utils.fn_args(self.call)
if ('mask' in self._call_fn_args and 'mask' not in kwargs and
not generic_utils.is_all_none(previous_mask)):
# The previous layer generated a mask, and mask was not explicitly passed
# to __call__, hence we set previous_mask as the default value.
kwargs['mask'] = previous_mask
with ops.name_scope(self._name_scope()):
if not self.built:
# Build layer if applicable (if the `build` method has been overridden).
self._maybe_build(inputs)
# We must set self.built since user defined build functions are not
# constrained to set self.built.
self.built = True
# Check input assumptions set after layer building, e.g. input shape.
if build_graph:
# Symbolic execution on symbolic tensors. We will attempt to build
# the corresponding TF subgraph inside `backend.get_graph()`
input_spec.assert_input_compatibility(
self.input_spec, inputs, self.name)
graph = backend.get_graph()
with graph.as_default():
if not self.dynamic:
try:
outputs = self.call(inputs, *args, **kwargs)
except TypeError as e:
messages = ['`tf.Tensor` as a Python `bool` is not allowed',
'Tensor objects are only iterable when eager']
for msg in messages:
if msg in str(e):
raise TypeError('You are attempting to use Python control '
'flow in a layer that was not declared to be '
'dynamic. Pass `dynamic=True` to the class '
'constructor.\nEncountered error:\n"""\n' +
str(e) + '\n"""')
raise e
else:
# We will use static shape inference to return symbolic tensors
# matching the specifications of the layer outputs.
# Since `self.dynamic` is True, we will never attempt to
# run the underlying TF graph (which is disconnected).
# TODO(fchollet): consider py_func as an alternative, which
# would enable us to run the underlying graph if needed.
outputs = self._symbolic_call(inputs)
if outputs is None:
raise ValueError('A layer\'s `call` method should return a '
'Tensor or a list of Tensors, not None '
'(layer: ' + self.name + ').')
self._handle_activity_regularization(inputs, outputs)
self._set_mask_metadata(inputs, outputs, previous_mask)
if base_layer_utils.have_all_keras_metadata(inputs):
inputs, outputs = self._set_connectivity_metadata_(
inputs, outputs, args, kwargs)
if hasattr(self, '_set_inputs') and not self.inputs:
# Subclassed network: explicitly set metadata normally set by
# a call to self._set_inputs().
# TODO(b/120997007): This should be done in Eager as well, but
# causes garbage collection issues because of the placeholders
# created on the default Keras graph.
self._set_inputs(inputs, outputs)
else:
# Eager execution on data tensors.
outputs = self.call(inputs, *args, **kwargs)
self._handle_activity_regularization(inputs, outputs)
return outputs
if not context.executing_eagerly():
# Optionally load weight values specified at layer instantiation.
# TODO(fchollet): consider enabling this with eager execution too.
if (hasattr(self, '_initial_weights') and
self._initial_weights is not None):
self.set_weights(self._initial_weights)
del self._initial_weights
return outputs
@property
def dtype(self):
return self._dtype
@property
def name(self):
return self._name
@property
def dynamic(self):
return self._dynamic
@property
def activity_regularizer(self):
"""Optional regularizer function for the output of this layer."""
return self._activity_regularizer
@activity_regularizer.setter
def activity_regularizer(self, regularizer):
"""Optional regularizer function for the output of this layer."""
self._activity_regularizer = regularizer
@property
def trainable_weights(self):
if self.trainable:
nested = self._gather_children_attribute('trainable_weights')
return self._trainable_weights + nested
else:
return []
@property
def non_trainable_weights(self):
if self.trainable:
nested = self._gather_children_attribute('non_trainable_weights')
return self._non_trainable_weights + nested
else:
nested = self._gather_children_attribute('weights')
return self._trainable_weights + self._non_trainable_weights + nested
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.trainable_weights + self.non_trainable_weights
@property
def updates(self):
if not self.trainable and not self.stateful:
return []
return self._updates + self._gather_children_attribute('updates')
@property
def losses(self):
"""Losses which are associated with this `Layer`.
Variable regularization tensors are created when this property is accessed,
so it is eager safe: accessing `losses` under a `tf.GradientTape` will
propagate gradients back to the corresponding variables.
Returns:
A list of tensors.
"""
collected_losses = []
if context.executing_eagerly():
collected_losses.extend(self._eager_losses)
else:
collected_losses.extend(self._losses)
for regularizer in self._callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
collected_losses.append(loss_tensor)
return collected_losses + self._gather_children_attribute('losses')
@doc_controls.for_subclass_implementers
def add_loss(self, losses, inputs=None):
"""Add loss tensor(s), potentially dependent on layer inputs.
Some losses (for instance, activity regularization losses) may be dependent
on the inputs passed when calling a layer. Hence, when reusing the same
layer on different inputs `a` and `b`, some entries in `layer.losses` may
be dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
The `get_losses_for` method allows retrieving the losses relevant to a
specific set of inputs.
Note that `add_loss` is not supported when executing eagerly. Instead,
variable regularizers may be added through `add_variable`. Activity
regularization is not supported directly (but such losses may be returned
from `Layer.call()`).
Arguments:
losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
may also be zero-argument callables which create a loss tensor.
inputs: Ignored when executing eagerly. If anything other than None is
passed, it signals the losses are conditional on some of the layer's
inputs, and thus they should only be run where these inputs are
available. This is the case for activity regularization losses, for
instance. If `None` is passed, the losses are assumed
to be unconditional, and will apply across all dataflows of the layer
(e.g. weight regularization losses).
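
    Example (an illustrative sketch; `self.kernel` is a hypothetical weight):

    ```python
    def call(self, inputs):
      # Unconditional (weight) loss: a zero-argument callable, `inputs` is None.
      self.add_loss(lambda: math_ops.reduce_sum(self.kernel * self.kernel))
      # Conditional (activity) loss: depends on the inputs of this call.
      self.add_loss(math_ops.reduce_mean(inputs), inputs=inputs)
      return inputs
    ```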
"""
losses = generic_utils.to_list(losses)
def _tag_unconditional(loss):
if callable(loss):
loss = loss()
if loss is None:
return None # Will be filtered out when computing the .losses property
if not tensor_util.is_tensor(loss):
loss = ops.convert_to_tensor(loss, dtype=backend.floatx())
loss._unconditional_loss = (inputs is None) # pylint: disable=protected-access
return loss
for loss in losses:
if callable(loss):
self._callable_losses.append(
functools.partial(_tag_unconditional, loss))
else:
if context.executing_eagerly():
self._eager_losses.append(_tag_unconditional(loss))
else:
self._losses.append(_tag_unconditional(loss))
@doc_controls.for_subclass_implementers
def add_metric(self, value, aggregation=None, name=None):
"""Adds metric tensor to the layer.
Args:
value: Metric tensor.
aggregation: Sample-wise metric reduction function. If `aggregation=None`,
it indicates that the metric tensor provided has been aggregated
already, e.g. `model.add_metric(BinaryAccuracy(name='acc')(y_true,
y_pred))`. If `aggregation='mean'`, the given metric tensor will be
sample-wise reduced using the `mean` function, e.g. `model.add_metric(
tf.reduce_mean(outputs), name='output_mean', aggregation='mean')`.
name: String metric name.
Raises:
ValueError: If `aggregation` is anything other than None or `mean`.
"""
if aggregation is not None and aggregation != 'mean':
raise ValueError(
'We currently support only `mean` sample-wise metric aggregation. '
'You provided aggregation=`%s`' % aggregation)
if tf_utils.is_symbolic_tensor(value):
self._symbolic_add_metric(value, aggregation, name)
else:
self._eager_add_metric(value, aggregation, name)
@doc_controls.for_subclass_implementers
def add_update(self, updates, inputs=None):
"""Add update op(s), potentially dependent on layer inputs.
Weight updates (for instance, the updates of the moving mean and variance
in a BatchNormalization layer) may be dependent on the inputs passed
when calling a layer. Hence, when reusing the same layer on
different inputs `a` and `b`, some entries in `layer.updates` may be
dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
The `get_updates_for` method allows retrieving the updates relevant to a
specific set of inputs.
This call is ignored when eager execution is enabled (in that case, variable
updates are run on the fly and thus do not need to be tracked for later
execution).
Arguments:
updates: Update op, or list/tuple of update ops.
inputs: If anything other than None is passed, it signals the updates
are conditional on some of the layer's inputs,
and thus they should only be run where these inputs are available.
This is the case for BatchNormalization updates, for instance.
If None, the updates will be taken into account unconditionally,
and you are responsible for making sure that any dependency they might
have is available at runtime.
A step counter might fall into this category.
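
    Example (an illustrative sketch; the non-trainable counter weight is
    hypothetical):

    ```python
    def build(self, input_shape):
      self.count = self.add_weight(
          'count', shape=(), trainable=False, initializer=initializers.zeros())

    def call(self, inputs):
      # Unconditional update (`inputs` is None): run on every execution.
      self.add_update(self.count.assign_add(1.))
      return inputs
    ```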
"""
if context.executing_eagerly():
return # Updates already applied when in eager mode.
def process_update(x):
if isinstance(x, ops.Operation):
return x
elif hasattr(x, 'op'):
return x.op
else:
return ops.convert_to_tensor(x)
updates = generic_utils.to_list(updates)
updates = [process_update(x) for x in updates]
self._updates += updates
if inputs is None:
for u in updates:
u._unconditional_update = True # pylint: disable=protected-access
else:
for u in updates:
u._unconditional_update = False # pylint: disable=protected-access
def set_weights(self, weights):
"""Sets the weights of the layer, from Numpy arrays.
Arguments:
weights: a list of Numpy arrays. The number
of arrays and their shapes must match the
number and shapes of the weights
of the layer (i.e. it should match the
output of `get_weights`).
Raises:
ValueError: If the provided weights list does not match the
layer's specifications.
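
    Example (an illustrative round-trip with `get_weights`; `layer` stands
    for any built layer):

    ```python
    weights = layer.get_weights()  # list of NumPy arrays
    layer.set_weights([np.zeros_like(w) for w in weights])
    ```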
"""
params = self.weights
if len(params) != len(weights):
raise ValueError('You called `set_weights(weights)` on layer "' +
self.name + '" with a weight list of length ' +
str(len(weights)) + ', but the layer was expecting ' +
str(len(params)) + ' weights. Provided weights: ' +
str(weights)[:50] + '...')
if not params:
return
weight_value_tuples = []
param_values = backend.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError('Layer weight shape ' + str(pv.shape) +
' not compatible with '
'provided weight shape ' + str(w.shape))
weight_value_tuples.append((p, w))
backend.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current weights of the layer.
Returns:
Weights values as a list of numpy arrays.
"""
params = self.weights
return backend.batch_get_value(params)
def get_updates_for(self, inputs):
"""Retrieves updates relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of update ops of the layer that depend on `inputs`.
Raises:
RuntimeError: If called in Eager mode.
"""
# Updates disabled if layer is not trainable and not explicitly stateful.
if not self.trainable and not self.stateful:
return []
if inputs is None:
# Requesting unconditional updates.
return [x for x in self.updates if x._unconditional_update] # pylint: disable=protected-access
# Requesting input-conditional updates.
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, self.updates)
updates = []
for update in self.updates:
if update in reachable:
updates.append(update)
return updates
def get_losses_for(self, inputs):
"""Retrieves losses relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of loss tensors of the layer that depend on `inputs`.
Raises:
RuntimeError: If called in Eager mode.
"""
if inputs is None:
# Requesting unconditional losses.
return [x for x in self.losses if x._unconditional_loss] # pylint: disable=protected-access
# Requesting input-conditional losses.
inputs = nest.flatten(inputs)
# Retrieve the set of tensors in the TF graph that depend on `inputs`.
# The losses we want to return will be part of this set.
# To avoid unnecessary work, we stop the search in case all of
# `self.losses` have been retrieved.
reachable = tf_utils.get_reachable_from_inputs(inputs, self.losses)
losses = []
for loss in self.losses:
if loss in reachable:
losses.append(loss)
return losses
def get_input_mask_at(self, node_index):
"""Retrieves the input mask tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple inputs).
"""
inputs = self.get_input_at(node_index)
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
def get_output_mask_at(self, node_index):
"""Retrieves the output mask tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple outputs).
"""
output = self.get_output_at(node_index)
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
@property
def input_mask(self):
"""Retrieves the input mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Input mask tensor (potentially None) or list of input
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layer.
"""
inputs = self.input
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
@property
def output_mask(self):
"""Retrieves the output mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Output mask tensor (potentially None) or list of output
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layer.
"""
output = self.output
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
def get_input_shape_at(self, node_index):
"""Retrieves the input shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_shapes',
'input shape')
def get_output_shape_at(self, node_index):
"""Retrieves the output shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_shapes',
'output shape')
def get_input_at(self, node_index):
"""Retrieves the input tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_tensors',
'input')
def get_output_at(self, node_index):
"""Retrieves the output tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_tensors',
'output')
@property
def input(self):
"""Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layer.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name +
' is not connected, no input to return.')
return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
@property
def output(self):
"""Retrieves the output tensor(s) of a layer.
Only applicable if the layer has exactly one output,
i.e. if it is connected to one incoming layer.
Returns:
Output tensor or list of output tensors.
Raises:
AttributeError: if the layer is connected to more than one incoming
layer.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
@property
def input_shape(self):
"""Retrieves the input shape(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer, or if all inputs
have the same shape.
Returns:
Input shape, as an integer shape tuple
(or list of shape tuples, one tuple per input tensor).
Raises:
AttributeError: if the layer has no defined input_shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined input shape.')
all_input_shapes = set(
[str(node.input_shapes) for node in self._inbound_nodes])
if len(all_input_shapes) == 1:
return self._inbound_nodes[0].input_shapes
else:
raise AttributeError('The layer "' + str(self.name) +
'" has multiple inbound nodes, '
'with different input shapes. Hence '
'the notion of "input shape" is '
'ill-defined for the layer. '
'Use `get_input_shape_at(node_index)` '
'instead.')
def count_params(self):
"""Count the total number of scalars composing the weights.
Returns:
An integer count.
Raises:
ValueError: if the layer isn't yet built
(in which case its weights aren't yet defined).
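
    Example: a dense layer mapping 100 features to 10 units holds a
    (100, 10) kernel and a (10,) bias, so `count_params()` returns
    100 * 10 + 10 = 1010.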
"""
if not self.built:
if self.__class__.__name__ == 'Sequential':
self.build() # pylint: disable=no-value-for-parameter
else:
raise ValueError('You tried to call `count_params` on ' + self.name +
', but the layer isn\'t built. '
'You can build it manually via: `' + self.name +
'.build(batch_input_shape)`.')
return int(sum(np.prod(w.shape.as_list()) for w in self.weights))
@property
def output_shape(self):
"""Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined output shape.')
all_output_shapes = set(
[str(node.output_shapes) for node in self._inbound_nodes])
if len(all_output_shapes) == 1:
return self._inbound_nodes[0].output_shapes
else:
raise AttributeError('The layer "%s"'
' has multiple inbound nodes, '
'with different output shapes. Hence '
'the notion of "output shape" is '
'ill-defined for the layer. '
'Use `get_output_shape_at(node_index)` '
'instead.' % self.name)
@property
@doc_controls.do_not_doc_inheritable
def inbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._inbound_nodes
@property
@doc_controls.do_not_doc_inheritable
def outbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._outbound_nodes
##############################################################################
# Methods & attributes below are public aliases of other methods. #
##############################################################################
def apply(self, inputs, *args, **kwargs):
"""Apply the layer on a input.
This is an alias of `self.__call__`.
Arguments:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
return self.__call__(inputs, *args, **kwargs)
@doc_controls.for_subclass_implementers
def add_variable(self, *args, **kwargs):
"""Alias for `add_weight`."""
return self.add_weight(*args, **kwargs)
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Alias of `self.weights`.
Returns:
A list of variables.
"""
return self.weights
@property
def trainable_variables(self):
return self.trainable_weights
@property
def non_trainable_variables(self):
return self.non_trainable_weights
##############################################################################
# Methods & attributes below are all private and only used by the framework. #
##############################################################################
def _name_scope(self):
return self.name
def _init_set_name(self, name, zero_based=True):
if not name:
self._name = base_layer_utils.unique_layer_name(
generic_utils.to_snake_case(self.__class__.__name__),
zero_based=zero_based)
else:
self._name = name
def _get_existing_metric(self, name=None):
match = [m for m in self._metrics if m.name == name]
if not match:
return
if len(match) > 1:
raise ValueError(
'Please provide different names for the metrics you have added. '
'We found {} metrics with the name: "{}"'.format(len(match), name))
return match[0]
def _eager_add_metric(self, value, aggregation=None, name=None):
# If the given metric is available in `metrics` list we just update state
# on it, otherwise we create a new metric instance and
# add it to the `metrics` list.
match = self._get_existing_metric(name)
if match:
match(value) # Update the metric state.
return
else:
if aggregation is None:
raise ValueError('We do not support adding an aggregated metric tensor '
'in `call` in eager execution.')
metric_obj, _ = base_layer_utils.create_mean_metric(value, name)
self._metrics.append(metric_obj)
def _symbolic_add_metric(self, value, aggregation=None, name=None):
if aggregation is None:
# Iterate over the metrics and check if the given metric exists already.
# This can happen when a metric instance is created in subclassed model
# layer `__init__` and we have tracked that instance already in
# model.__setattr__.
match = self._get_existing_metric(name)
if match:
result_tensor = value
if match.name not in self._metrics_tensors:
self._metrics_tensors[match.name] = result_tensor
return
else:
raise ValueError(
'We currently do not support reusing a metric instance.')
else:
# We track the instance using the metadata on the result tensor.
result_tensor = value
metric_obj = result_tensor._metric_obj
else:
# If a non-aggregated tensor is given as input (i.e. `aggregation` is
# explicitly set to `mean`), we wrap the tensor in `Mean` metric.
metric_obj, result_tensor = base_layer_utils.create_mean_metric(
value, name)
self._metrics.append(metric_obj)
self._metrics_tensors[metric_obj.name] = result_tensor
def _handle_weight_regularization(self, name, variable, regularizer):
"""Create lambdas which compute regularization losses."""
def _loss_for_variable(v):
"""Creates a regularization loss `Tensor` for variable `v`."""
with ops.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
return regularization
if isinstance(variable, tf_variables.PartitionedVariable):
for v in variable:
self.add_loss(functools.partial(_loss_for_variable, v))
else:
self.add_loss(functools.partial(_loss_for_variable, variable))
def _handle_activity_regularization(self, inputs, outputs):
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if self._activity_regularizer:
output_list = nest.flatten(outputs)
with ops.name_scope('ActivityRegularizer'):
for output in output_list:
activity_loss = self._activity_regularizer(output)
batch_size = math_ops.cast(
array_ops.shape(output)[0], activity_loss.dtype)
# Make activity regularization strength batch-agnostic.
mean_activity_loss = activity_loss / batch_size
self.add_loss(mean_activity_loss, inputs=inputs)
def _set_mask_metadata(self, inputs, outputs, previous_mask):
# In some cases the mask of the outputs has already been computed by
# inner layers and does not need to be recomputed by this layer.
mask_already_computed = all(
hasattr(x, '_keras_mask') for x in generic_utils.to_list(outputs))
if hasattr(self, 'compute_mask') and not mask_already_computed:
output_mask = self.compute_mask(inputs, previous_mask)
else:
output_mask = None
if isinstance(outputs, (list, tuple)):
if output_mask is None:
output_mask = [None for _ in range(len(outputs))]
for x, m in zip(outputs, output_mask):
try:
x._keras_mask = m # pylint: disable=protected-access
except AttributeError:
pass # C type such as dict. Masking not supported in this case.
else:
try:
outputs._keras_mask = output_mask # pylint: disable=protected-access
except AttributeError:
pass # C type such as dict. Masking not supported in this case.
def _set_connectivity_metadata_(self, inputs, outputs, args, kwargs):
call_convention = getattr(
self, '_call_convention',
base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
if args:
if call_convention == (base_layer_utils
.CallConvention.EXPLICIT_INPUTS_ARGUMENT):
raise TypeError(
'This layer ("{}") takes an `inputs` argument in `call()`, '
'and only the `inputs` argument may be specified as a positional '
'argument. Pass everything else as a keyword argument '
'(those arguments will not be tracked '
'as inputs to the layer).'.format(self.name))
elif call_convention == (base_layer_utils
.CallConvention.SINGLE_POSITIONAL_ARGUMENT):
raise TypeError(
'This layer ("{}") takes a single positional argument in `call()`,'
' which is by convention the `inputs` argument, '
'and only this argument may be specified as a positional argument. '
'Pass everything else as a keyword argument '
'(those arguments will not be tracked '
'as inputs to the layer).'.format(self.name))
# If the layer returns tensors from its inputs, unmodified,
# we copy them to avoid loss of tensor metadata.
output_ls = nest.flatten(outputs)
inputs_ls = nest.flatten(inputs)
output_ls_copy = []
for x in output_ls:
if x in inputs_ls:
with ops.name_scope(self.name):
x = array_ops.identity(x)
output_ls_copy.append(x)
outputs = nest.pack_sequence_as(outputs, output_ls_copy)
inputs, kwargs = self._inputs_from_call_args(
call_args=(inputs,) + args, call_kwargs=kwargs)
# Add an inbound node to the layer, so it can keep track of this call.
# This updates the layer history of the output tensor(s).
kwargs.pop('mask', None) # `mask` should not be serialized.
self._add_inbound_node(
input_tensors=inputs, output_tensors=outputs, arguments=kwargs)
return inputs, outputs
def _inputs_from_call_args(self, call_args, call_kwargs):
"""Get Layer inputs from __call__ *args and **kwargs.
Args:
call_args: The positional arguments passed to __call__.
call_kwargs: The keyword argument dict passed to __call__.
Returns:
A tuple of (inputs, non_input_kwargs). These may be the same objects as
were passed in (call_args and call_kwargs).
"""
call_convention = getattr(
self, '_call_convention',
base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
if (call_convention in (
base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT,
base_layer_utils.CallConvention.SINGLE_POSITIONAL_ARGUMENT)):
assert len(call_args) == 1 # TypeError raised earlier in __call__.
return call_args[0], call_kwargs
else:
call_arg_spec = tf_inspect.getfullargspec(self.call)
# There is no explicit "inputs" argument expected or provided to
# call(). Arguments which have default values are considered non-inputs,
# and arguments without are considered inputs.
if call_arg_spec.defaults:
if call_arg_spec.varargs is not None:
raise TypeError(
'Layers may not accept both positional arguments and '
'arguments with default values (unable to determine which '
'are inputs to the layer). '
'Issue occurred with layer "%s"' % (self.name))
keyword_arg_names = set(
call_arg_spec.args[-len(call_arg_spec.defaults):])
else:
keyword_arg_names = set()
# Training is never an input argument name, to allow signatures like
# call(x, training).
keyword_arg_names.add('training')
_, unwrapped_call = tf_decorator.unwrap(self.call)
bound_args = inspect.getcallargs(
unwrapped_call, *call_args, **call_kwargs)
if call_arg_spec.varkw is not None:
var_kwargs = bound_args.pop(call_arg_spec.varkw)
bound_args.update(var_kwargs)
keyword_arg_names = keyword_arg_names.union(var_kwargs.keys())
all_args = call_arg_spec.args
if all_args and bound_args[all_args[0]] is self:
# Ignore the 'self' argument of methods
bound_args.pop(call_arg_spec.args[0])
all_args = all_args[1:]
non_input_arg_values = {}
input_arg_values = []
remaining_args_are_keyword = False
for argument_name in all_args:
if argument_name in keyword_arg_names:
remaining_args_are_keyword = True
else:
if remaining_args_are_keyword:
raise TypeError(
'Found a positional argument in a layer call after a non-input '
'argument. All arguments after "training" must be keyword '
'arguments, and are not tracked as inputs to the layer. '
'Issue occurred with layer "%s"' % (self.name))
if remaining_args_are_keyword:
non_input_arg_values[argument_name] = bound_args[argument_name]
else:
input_arg_values.append(bound_args[argument_name])
if call_arg_spec.varargs is not None:
input_arg_values.extend(bound_args[call_arg_spec.varargs])
return input_arg_values, non_input_arg_values
def _add_inbound_node(self,
input_tensors,
output_tensors,
arguments=None):
"""Internal method to create an inbound node for the layer.
Arguments:
input_tensors: list of input tensors.
output_tensors: list of output tensors.
arguments: dictionary of keyword arguments that were passed to the
`call` method of the layer at the call that created the node.
"""
inbound_layers = nest.map_structure(lambda t: t._keras_history[0],
input_tensors)
node_indices = nest.map_structure(lambda t: t._keras_history[1],
input_tensors)
tensor_indices = nest.map_structure(lambda t: t._keras_history[2],
input_tensors)
# Create node, add it to inbound nodes.
Node(
self,
inbound_layers=inbound_layers,
node_indices=node_indices,
tensor_indices=tensor_indices,
input_tensors=input_tensors,
output_tensors=output_tensors,
arguments=arguments)
# Update tensor history metadata.
# The metadata attribute consists of
# 1) a layer instance
# 2) a node index for the layer
# 3) a tensor index for the node.
# This allows layer reuse (multiple nodes per layer) and multi-output
# or multi-input layers (e.g. a layer can return multiple tensors,
# and each can be sent to a different layer).
for i, tensor in enumerate(nest.flatten(output_tensors)):
tensor._keras_history = (self, len(self._inbound_nodes) - 1, i) # pylint: disable=protected-access
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
"""Private utility to retrieves an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Arguments:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
"""
if not self._inbound_nodes:
raise RuntimeError('The layer has never been called '
'and thus has no defined ' + attr_name + '.')
if not len(self._inbound_nodes) > node_index:
raise ValueError('Asked to get ' + attr_name + ' at node ' +
str(node_index) + ', but the layer has only ' +
str(len(self._inbound_nodes)) + ' inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if isinstance(values, list) and len(values) == 1:
return values[0]
else:
return values
def _maybe_build(self, inputs):
# Check input assumptions set before layer building, e.g. input rank.
input_spec.assert_input_compatibility(
self.input_spec, inputs, self.name)
input_list = nest.flatten(inputs)
if input_list and self._dtype is None:
try:
self._dtype = input_list[0].dtype.base_dtype.name
except AttributeError:
pass
input_shapes = None
if all(hasattr(x, 'shape') for x in input_list):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
# Only call `build` if the user has manually overridden the build method.
if not hasattr(self.build, '_is_default'):
self.build(input_shapes)
def _symbolic_call(self, inputs):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
output_shapes = self.compute_output_shape(input_shapes)
return nest.map_structure(
lambda shape: backend.placeholder(shape, dtype=self.dtype),
output_shapes)
def __setattr__(self, name, value):
if (not getattr(self, '_setattr_tracking', True) or
getattr(self, '_is_graph_network', False)):
super(Layer, self).__setattr__(name, value)
return
# Append value to self._layers if relevant
if (isinstance(value, Layer) or
checkpointable_layer_utils.has_weights(value)):
# Initialize `_layers` here in case `__init__` has not yet been called.
if not hasattr(self, '_layers'):
self._layers = []
# We need to check object identity to avoid de-duplicating empty
# container types which compare equal.
if not any((layer is value for layer in self._layers)):
self._layers.append(value)
if hasattr(value, '_use_resource_variables'):
# Legacy layers (V1 tf.layers) must always use
# resource variables.
value._use_resource_variables = True
# Append value to list of trainable / non-trainable weights if relevant
if isinstance(value, tf_variables.Variable):
# Users may add extra weights/variables
# simply by assigning them to attributes (invalid for graph networks)
if not hasattr(self, '_trainable_weights'):
self._trainable_weights = []
if not hasattr(self, '_non_trainable_weights'):
self._non_trainable_weights = []
if value not in self._trainable_weights + self._non_trainable_weights:
if value.trainable:
self._trainable_weights.append(value)
else:
self._non_trainable_weights.append(value)
super(Layer, self).__setattr__(name, value)
def _gather_children_attribute(self, attribute):
assert attribute in {'weights', 'trainable_weights',
'non_trainable_weights', 'updates', 'losses'}
if hasattr(self, '_layers'):
return list(itertools.chain.from_iterable(
getattr(layer, attribute) for layer in self._layers))
return []
# This is a hack so that the is_layer (within
# training/checkpointable/layer_utils.py) check doesn't get the weights attr.
# TODO(b/110718070): Remove when fixed.
def _is_layer(self):
return True
class Node(object):
"""A `Node` describes the connectivity between two layers.
Each time a layer is connected to some new input,
a node is added to `layer._inbound_nodes`.
Each time the output of a layer is used by another layer,
a node is added to `layer._outbound_nodes`.
Arguments:
outbound_layer: the layer that takes
`input_tensors` and turns them into `output_tensors`
(the node gets created when the `call`
method of the layer is called).
inbound_layers: a list of layers, the same length as `input_tensors`,
the layers from where `input_tensors` originate.
node_indices: a list of integers, the same length as `inbound_layers`.
`node_indices[i]` is the origin node of `input_tensors[i]`
(necessary since each inbound layer might have several nodes,
e.g. if the layer is being shared with a different data stream).
tensor_indices: a list of integers,
the same length as `inbound_layers`.
`tensor_indices[i]` is the index of `input_tensors[i]` within the
output of the inbound layer
(necessary since each inbound layer might
have multiple tensor outputs, with each one being
independently manipulable).
input_tensors: list of input tensors.
output_tensors: list of output tensors.
arguments: dictionary of keyword arguments that were passed to the
`call` method of the layer at the call that created the node.
`node_indices` and `tensor_indices` are basically fine-grained coordinates
describing the origin of the `input_tensors`.
A node from layer A to layer B is added to:
- A._outbound_nodes
- B._inbound_nodes
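
  Example (an illustrative sketch; `Input` and `Dense` are the usual Keras
  symbols, shown only to demonstrate the node bookkeeping):

  ```python
  a = Input(shape=(32,))
  b = Input(shape=(32,))
  shared = Dense(4)
  ya = shared(a)  # creates shared._inbound_nodes[0]
  yb = shared(b)  # creates shared._inbound_nodes[1]
  # ya._keras_history == (shared, 0, 0); yb._keras_history == (shared, 1, 0)
  ```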
"""
def __init__(self,
outbound_layer,
inbound_layers,
node_indices,
tensor_indices,
input_tensors,
output_tensors,
arguments=None):
# Layer instance (NOT a sequence)
if isinstance(outbound_layer, (list, tuple, dict)):
raise ValueError('`outbound_layer` should be a layer instance, '
'not a list, tuple, or dict.')
# this is the layer that takes a nested structure of input tensors
# and turns them into a nested structure of output tensors.
# the current node will be added to
# the inbound_nodes of outbound_layer.
self.outbound_layer = outbound_layer
# The following 3 properties describe where
# the input tensors come from: which layers,
# and for each layer, which node and which
# tensor output of each node.
# Nested structure of layer instances.
self.inbound_layers = inbound_layers
# Nested structure of integers, 1:1 mapping with inbound_layers.
self.node_indices = node_indices
# Nested structure of integers, 1:1 mapping with inbound_layers.
self.tensor_indices = tensor_indices
# Following 2 properties:
# tensor inputs and outputs of outbound_layer.
# Nested structure of tensors. 1:1 mapping with inbound_layers.
self.input_tensors = input_tensors
# Nested structure of tensors, created by outbound_layer.call().
self.output_tensors = output_tensors
# Following 2 properties: input and output shapes.
# Nested structure of shape tuples, shapes of input_tensors.
self.input_shapes = nest.map_structure(backend.int_shape, input_tensors)
# Nested structure of shape tuples, shapes of output_tensors.
self.output_shapes = nest.map_structure(backend.int_shape, output_tensors)
# Optional keyword arguments to layer's `call`.
self.arguments = arguments
# Add nodes to all layers involved.
for layer in nest.flatten(inbound_layers):
if layer is not None:
# For compatibility with external Keras, we use the deprecated
# accessor here.
layer.outbound_nodes.append(self)
# For compatibility with external Keras, we use the deprecated
# accessor here.
outbound_layer.inbound_nodes.append(self)
def iterate_inbound(self):
"""Returns a list of tuples representing the inbound data.
Returns:
List of tuples like: (inbound_layer, node_index, tensor_index, tensor).
"""
return zip(
nest.flatten(self.inbound_layers), nest.flatten(self.node_indices),
nest.flatten(self.tensor_indices), nest.flatten(self.input_tensors))
def get_config(self):
inbound_names = nest.map_structure(
lambda layer: layer.name if layer else None, self.inbound_layers)
return {
'outbound_layer': self.outbound_layer.name,
'inbound_layers': inbound_names,
'node_indices': self.node_indices,
'tensor_indices': self.tensor_indices
}
class TensorFlowOpLayer(Layer):
"""Wraps a TensorFlow Operation in a Layer.
This class is used internally by the Functional API. When a user
uses a raw TensorFlow Operation on symbolic tensors originating
from an `Input` Layer, the resultant operation will be wrapped
with this Layer object in order to make the operation compatible
with the Keras API.
This Layer will create a new, identical operation (except for inputs
and outputs) every time it is called. If `run_eagerly` is `True`,
the op creation and calculation will happen inside an Eager function.
Instances of this Layer are created when `autolambda` is called, which
is whenever a Layer's `__call__` encounters symbolic inputs that do
not have Keras metadata, or when a Network's `__init__` encounters
outputs that do not have Keras metadata.
Attributes:
node_def: String, the serialized NodeDef of the Op this layer will wrap.
constants: Dict of NumPy arrays, the values of any Tensors needed for this
Operation that do not originate from a Keras `Input` Layer. Since all
placeholders must come from Keras `Input` Layers, these Tensors must be
treated as constant in the Functional API.
name: String, the name of the Layer.
trainable: Bool, whether this Layer is trainable. Currently Variables are
not supported, and so this parameter has no effect.
dtype: The default dtype of this Layer. Inherited from `Layer` and has no
effect on this class; however, it is used in `get_config`.
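
  Example (an illustrative sketch; assumes the usual `tf` and `Input`
  symbols):

  ```python
  x = Input(shape=(3,))
  y = tf.sqrt(x)  # raw TF op applied to a Keras symbolic tensor
  # `y` carries no Keras metadata, so when it is used as a Network output
  # the `Sqrt` op is wrapped in a TensorFlowOpLayer automatically.
  ```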
"""
def __init__(self,
node_def,
constants=None,
name=None,
trainable=True,
dtype=None):
super(TensorFlowOpLayer, self).__init__(
name=name, trainable=trainable, dtype=dtype)
self.node_def = node_def_pb2.NodeDef.FromString(node_def)
self.constants = constants or {}
def call(self, inputs):
if context.executing_eagerly():
return self._defun_call(inputs)
return self._make_op(inputs)
def _make_op(self, inputs):
inputs = nest.flatten(inputs)
graph = inputs[0].graph
with graph.as_default():
for index, constant in self.constants.items():
constant = ops.convert_to_tensor(constant)
inputs.insert(index, constant)
self.node_def.name = graph.unique_name(self.node_def.name)
c_op = ops._create_c_op(graph, self.node_def, inputs, control_inputs=[])
op = graph._create_op_from_tf_operation(c_op)
if len(op.outputs) == 1:
return op.outputs[0]
return op.outputs
@function.defun
def _defun_call(self, inputs):
"""Wraps the op creation method in an Eager function for `run_eagerly`."""
return self._make_op(inputs)
def get_config(self):
config = super(TensorFlowOpLayer, self).get_config()
config.update({
'node_def': self.node_def.SerializeToString(),
'constants': self.constants
})
return config
def default(method):
"""Decorates a method to detect overrides in subclasses."""
method._is_default = True
return method
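# Example (illustrative): a method decorated with `default` is detectable via
# its `_is_default` attribute, which is how `Layer._maybe_build` decides
# whether `build` was overridden by a subclass:
#
#   class Base(object):
#     @default
#     def build(self):
#       pass
#
#   class Child(Base):
#     def build(self):  # override: not marked as default
#       pass
#
#   hasattr(Base.build, '_is_default')   # -> True
#   hasattr(Child.build, '_is_default')  # -> False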
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = input_spec.InputSpec # pylint:disable=invalid-name
# File: /algorithms/BAT-algorithms/Linklist/把链表分隔成 k 部分.py (repo: williamsyb/mycookbook)
"""
Problem:
Split a linked list into k parts. The parts should be as equal in length
as possible, and earlier parts should be at least as long as later ones.
Example:
Input:
root = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], k = 3
Output: [[1, 2, 3, 4], [5, 6, 7], [8, 9, 10]]
"""
from Linklist.utils import *
def split_list_to_parts(root, k):
cur = root
count = 0  # count the number of nodes
while cur is not None:
count += 1
cur = cur.next
mod = count % k
size = int(count / k)
cut_way = [size] * k  # split into k parts; cut_way records each part's node count
for i in range(mod):
cut_way[i] += 1
result = [None] * k
cur = root
i = 0
# cut the list into k segments according to cut_way and store them in result
while cur is not None and i < k:
result[i] = cur
for j in range(cut_way[i] - 1):
cur = cur.next
next1 = cur.next
cur.next = None
cur = next1
i += 1
return result
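# Complexity note: one pass counts the nodes and one pass cuts the list,
# i.e. O(n) time overall with O(k) extra space for cut_way/result.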
if __name__ == '__main__':
head = build_l([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
for l in split_list_to_parts(head, 3):
print_l(l)
# File: /AaSXX4SKNdZ7mgqK7_17.py (repo: daniel-reich/turbo-robot)
"""
Check the principles of minimalist code in the [intro to the first
challenge](https://edabit.com/challenge/2XLjgZhmACph76Pkr).
In the **Code** tab you will find a code that is missing a single character in
order to pass the tests. However, your goal is to submit a function as
**minimalist** as possible. Use the tips in the tips section below.
Write a function that returns the **first truthy argument** passed to the
function. If all arguments are falsy, return the string `"not found"`. The
function will be called with a **minimum of one** and a **maximum of four**
arguments: `a`, `b`, `c`, `d`.
### Tips
The operator `or` can be used to assign or return the first truthy value among
two or more elements. If no truthy value is found, the last element will be
returned.
For example, the code:
def one_of_these(a, b, c):
return a if a else b if b else c
Can be simplified to:
def one_of_these(a, b, c):
return a or b or c
### Bonus
Once a truthy value is found, the rest of the elements will not be checked.
This can be used to define a sort of default value that will be returned if
all of the previous elements happen to be false or empty:
txt1 = ""
txt2 = "Edabit"
txt1 or "Empty string" ➞ "Empty string"
txt2 or "Empty string" ➞ "Edabit"
### Notes
* This is an open series: there isn't a definite list of features for the challenges. Please, do not hesitate to leave your **suggestions** in the **Comments**.
* _ **Readability**_ is indeed a subjective concept. **Let's discuss it!** Feel free to leave your opinion in the **Comments**.
* You can find all the exercises in this series [over here](https://edabit.com/collection/8F3LA2Mwrf5bp7kse).
"""
def first_one(a, b=None, c=None, d=None):
return a or b or c or d or 'not found'
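# Illustrative checks (not part of the original solution):
#   first_one(0, "", None, "edabit")  ➞ "edabit"
#   first_one(0, "", None, 0)         ➞ "not found"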
# File: /Courses, Trainings, Books & Exams/PYTHON 3 - Scientific Training/5-1 Defining and Calling Functions -03.py (repo: btrif/Python_dev_repo)
#!/usr/bin/python
def parrot(voltage, state='a stiff', action='voom'):
print("-- This parrot wouldn't", action,)
print("if you put", voltage, "volts through it.",)
print("E's", state, "!")
# Define a dictionary
d = {"voltage": "4.000.000", "state": "bleedin' demised", "action": "VOOM"}
# call the function parrot with the dictionary
parrot(**d)
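# The ** unpacking above is equivalent to spelling out the keywords explicitly:
# parrot(voltage="4.000.000", state="bleedin' demised", action="VOOM")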
| [
"[email protected]"
] | |
3197e0ded1fcd0b9cbfc65013a98c0b902ec901a | becf6e96bd866e4a8ee964bc1901225e4fa3fb46 | /thornoise/noise.py | 264f7672212363d1214a68567be5297050cf0eaa | [
"MIT"
] | permissive | YannThorimbert/RpgMap | 80be6a4aea9e8af41a8ff61657a8fcfddd12dd62 | c8c4746e1c99930142d8742e0aa6975dde7efa90 | refs/heads/main | 2023-04-20T17:50:03.875740 | 2021-05-05T12:47:52 | 2021-05-05T12:47:52 | 354,846,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,210 | py | from __future__ import print_function, division
import random, math
import matplotlib.pyplot as plt
def generate_constraints(n_octaves, S, chunk):
"""Generates random height constraints used by terrain generation.
THIS IS NOT THE ACTUAL TERRAIN GENERATION FUNCTION."""
min_res = int(S / 2**(n_octaves-1))
hmap_size = S//min_res + 1
random.seed(chunk)
h = [[random.random() for x in range(hmap_size)] for y in range(hmap_size)]
#
XCOORD, YCOORD = chunk
#left
random.seed((XCOORD,YCOORD))
for y in range(hmap_size):
h[0][y] = random.random()
#right
random.seed((XCOORD+1,YCOORD))
for y in range(hmap_size):
h[-1][y] = random.random()
#top
random.seed((XCOORD,YCOORD))
for x in range(hmap_size):
h[x][0] = random.random()
#bottom
random.seed((XCOORD,YCOORD+1))
for x in range(hmap_size):
h[x][-1] = random.random()
random.seed((XCOORD,YCOORD))
h[0][0] = random.random()
random.seed((XCOORD+1,YCOORD+1))
h[-1][-1] = random.random()
random.seed((XCOORD,YCOORD+1))
h[0][-1] = random.random()
random.seed((XCOORD+1,YCOORD))
h[-1][0] = random.random()
return h, min_res
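def _demo_shared_border():
    """Illustrative check (an addition, not part of the original module): edges are
    seeded with the coordinates of the chunk that shares them, so the right edge of
    chunk (0, 0) equals the left edge of chunk (1, 0) and chunks tile seamlessly."""
    h_left, _ = generate_constraints(4, 64, (0, 0))
    h_right, _ = generate_constraints(4, 64, (1, 0))
    assert h_left[-1] == h_right[0]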
def generate_terrain(size, n_octaves=None, chunk=(0,0), persistance=2.):
"""Returns a <S> times <S> array of heigth values for <n_octaves>, using
<chunk> as seed."""
S = size
if n_octaves is None: #automatic max number of octaves
n_octaves = int(math.log(S,2))
h, min_res = generate_constraints(n_octaves, S, chunk) #h is the hmap constraint
terrain = [[0. for x in range(S)] for y in range(S)] #actual heightmap
res = int(S) #resolution for the current octave
step = res//min_res #space step in pixels for the current octave
change_cell = True #indicates when polynomial coeffs have to be recomputed
amplitude = persistance
for i in range(n_octaves):
delta = 1./res #size of current cell
x_rel = 0. #x-pos in the current cell
for x in range(S): #here x is coord of pixel
y_rel = 0. #y-pos in the current cell
x2 = x_rel*x_rel;
smoothx = 3.*x2 - 2.*x_rel*x2;
for y in range(S):
y2 = y_rel*y_rel
smoothy = 3.*y2 - 2.*y_rel*y2
diag_term = x_rel*y_rel - smoothx*y_rel - smoothy*x_rel
if change_cell:
idx0, idy0 = int(x/res)*step, int(y/res)*step
idx1, idy1 = idx0+step, idy0+step
h00 = h[idx0][idy0]
h01 = h[idx0][idy1]
h10 = h[idx1][idy0]
h11 = h[idx1][idy1]
#
dx = h10 - h00
dy = h01 - h00
A = dx - h11 + h01
change_cell = False
dh = h00 + smoothx*dx + smoothy*dy + A*diag_term
terrain[x][y] += amplitude*dh
#
y_rel += delta
if y_rel >= 1.: #periodicity
change_cell = True
y_rel = 0.
x_rel += delta
if x_rel >= 1.: #periodicity
change_cell = True
x_rel = 0.
res //= 2
step = res//min_res
amplitude /= persistance
return terrain
def normalize(terrain):
"""Normalize in place the values of <terrain>."""
M = max([max(line) for line in terrain])
m = min([min(line) for line in terrain])
S = len(terrain)
for x in range(S):
for y in range(S):
terrain[x][y] = (terrain[x][y] - m)/(M-m)
return terrain
resolution = 128
terrain = generate_terrain(size=resolution, n_octaves=8, persistance=1.5)
normalize(terrain)
#here we add an offset (don't add offset if you just want natural noise)
offset_amplitude = -2.5/resolution
for x in range(resolution):
for y in range(resolution):
terrain[x][y] += x*offset_amplitude
plt.imshow(terrain, cmap="Blues")
plt.show()
#note: matplotlib swap the axes compared to the matrix format used here
#cool cmaps for terrain : "terrain", "gist_earth", ... (see https://matplotlib.org/users/colormaps.html)
| [
"[email protected]"
] | |
f948d2c486bf549cd835bfca0a5694c3e2e0688a | 0f5c047bdb5cd8aee5b6aac3447e7cf75c6eedcc | /weighted/weighted_lev_gen.py | 752dfb8ad94d66988658319bec58e4935880d9ff | [] | no_license | dengl11/CS166-Project | fcde43fe5a044757432faa53ef79dcaa4ed46132 | f05c5de12a0cfe6939e55149cc69d82ceca41a1c | refs/heads/master | 2020-03-18T23:13:56.963216 | 2018-06-13T22:21:19 | 2018-06-13T22:21:19 | 135,390,090 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,569 | py | import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
from util import *
from trie import *
from levenshtein import *
from match import *
from generator import *
from config import *
from lev_dfa_gen import *
from weighted_gen import *
class WeighedLevTrieGenerator(LevTrieDFAGenerator):
def __init__(self, costs, corpus_dfa = None):
self.corpus_dfa = corpus_dfa or load_data(corpus_dfa_path)
WeightedGenerator.__init__(self, costs)
def construct_levenshten(self, word, k):
nfa = self.construct_levenshten_nfa(word, k)
return DFA.from_nfa(nfa)
def construct_levenshten_nfa(self, word, k):
"""
Args:
word:
k : max edit distance
Return:
"""
n = len(word)
m = dict() # {(n_char, n_err): state_name}
for nc in range(n + 1):
for ne in range(k + 1):
m[(nc, ne)] = str((nc, ne))
transitions = defaultdict(lambda: defaultdict(lambda: set()))
for i in range(n + 1):
ch = word[i] if (i < n) else None
for e in range(k + 1):
credit = k - e # remaining credits
curr = m[(i, e)]
right = m[(i + 1, e)] if ch else None
if credit >= self.c_insert: # can insert
up = m[(i, e + self.c_insert)]
for c in ALPHABETS: transitions[curr][c].add(up)
if not right: continue
transitions[curr][ch].add(right) # correct char: right arrow
if credit >= self.c_delete: # can delete
next_del = m[(i + 1, e + self.c_delete)]
transitions[curr][""].add(next_del) # deletions - epsilon: diagonal arrow
if credit < self.c_subs: continue
next_subs = m[(i + 1, e + self.c_subs)]
for c in ALPHABETS:
                    transitions[curr][c].add(next_subs)
nfa = NFA(states = set(m.values()),\
transitions = transitions,\
initial_state = m[(0, 0)],\
final_states = {m[(n, j)] for j in range(k + 1)},\
input_symbols = set(ALPHABETS))
return nfa
def gen_candidates(self, w, k):
"""get candidates of w within edit distance of k
Args:
w:
Return:
"""
lev_dfa = self.construct_levenshten(w, k)
return list(match(self.corpus_dfa, lev_dfa))
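# Hypothetical usage sketch: the exact shape of `costs` is defined by the
# WeightedGenerator base class (not shown in this file), so the dict below is
# an assumption rather than a documented interface:
# gen = WeighedLevTrieGenerator(costs={"insert": 1, "delete": 1, "subs": 2})
# candidates = gen.gen_candidates("color", k=2)  # corpus words within weighted distance 2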
| [
"[email protected]"
] | |
eb87d7b2fc7943fe13c436379b5f7a34d0faaee1 | 356b5455a4fb86c49c800a6181323b7fabef2b08 | /ppci/lang/llvmir/nodes.py | 8b38582de2ec730e14e850ad41aa7fed34d291b8 | [
"BSD-2-Clause"
] | permissive | obround/ppci | be7d1ce7832513629ee1301e7b67c0ceda38d668 | ba0840bc5f4ffd889f882a814fb26f88cd854379 | refs/heads/master | 2023-02-11T13:47:35.439871 | 2021-01-05T22:33:08 | 2021-01-05T22:33:08 | 327,131,704 | 0 | 0 | BSD-2-Clause | 2021-01-05T22:08:23 | 2021-01-05T22:08:23 | null | UTF-8 | Python | false | false | 15,154 | py | """ LLVM-ir nodes """
class Module:
""" Holds all information related to a module """
def __init__(self, context):
self.context = context
self.data_layout = DataLayout()
self.functions = OwnedList(self)
self.vmap = {}
self.global_list = OwnedList(self)
def add_global_variable(self, v):
self.global_list.append(v)
self.vmap[v.name] = v
class Value:
""" Root of most nodes """
def __init__(self, ty):
self.ty = ty
@property
def context(self):
return self.ty.context
def set_name(self, name):
""" Set name and update symbol table """
sym_tab = self.symbol_table
sym_tab[name] = self
self.name = name
@property
def symbol_table(self):
if isinstance(self, Instruction):
basic_block = self.parent
function = basic_block.parent
return function.vmap
elif isinstance(self, Argument):
function = self.parent
return function.vmap
else:
raise NotImplementedError(str(self))
class OwnedList(list):
""" Special list that sets the parent attribute upon append """
def __init__(self, owner):
super().__init__()
self.owner = owner
def append(self, x):
x.parent = self.owner
super().append(x)
class BasicBlock(Value):
""" A sequence of non-interrupted instructions """
def __init__(self, context, label, function):
super().__init__(context.label_ty)
self.label = label
self.instructions = OwnedList(self)
self.parent = function
@classmethod
def create(cls, context, name, function):
return BasicBlock(context, name, function)
class Argument(Value):
pass
class UndefValue(Value):
""" An undefined value """
@classmethod
def get(cls, ty):
return UndefValue(ty)
class User(Value):
pass
class Constant(User):
@classmethod
def get_null_value(cls, ty):
if ty.type_id == integer_ty_id:
return ConstantInt.get(ty, 0)
elif ty.type_id in [vector_ty_id, array_ty_id]:
return ConstantAggregateZero.get(ty)
else: # pragma: no cover
raise NotImplementedError(str(ty))
class ConstantInt(Constant):
def __init__(self, ty, value):
super().__init__(ty)
self.value = value
@classmethod
def get(cls, ty, value):
return ConstantInt(ty, value)
@classmethod
def get_true(cls, context):
""" Get the constant value for true """
return cls.get(context.int1_ty, 1)
@classmethod
def get_false(cls, context):
return cls.get(context.int1_ty, 0)
class ConstantFP(Constant):
def __init__(self, ty, val):
super().__init__(ty)
self.val = val
@classmethod
def get(cls, ty, val):
return ConstantFP(ty, val)
class ConstantAggregateZero(Constant):
def __init__(self, ty):
super().__init__(ty)
@classmethod
def get(cls, ty):
return ConstantAggregateZero(ty)
class ConstantVector(Constant):
def __init__(self, elts):
assert len(elts)
assert all(e.ty is elts[0].ty for e in elts)
ty = VectorType.get(elts[0].ty, len(elts))
super().__init__(ty)
self.elts = elts
@classmethod
def get(cls, elts):
return ConstantVector(elts)
class GlobalValue(Constant):
pass
class GlobalObject(GlobalValue):
pass
class GlobalVariable(GlobalObject):
def __init__(self, ty, name, module=None):
super().__init__(ty)
self.name = name
if module:
module.add_global_variable(self)
class Function(GlobalObject):
def __init__(self, ty, module):
super().__init__(ty)
module.functions.append(self)
self.vmap = {}
self.basic_blocks = OwnedList(self)
self.arguments = OwnedList(self)
for param_type in ty.params:
self.arguments.append(Argument(param_type))
@classmethod
def create(cls, function_type, name, module):
return Function(function_type, module)
class Instruction(User):
@property
def is_terminator(self):
return isinstance(self, TerminatorInst)
class BinaryOperator(Instruction):
def __init__(self, op, lhs, rhs, ty):
super().__init__(ty)
self.op = op
self.lhs = lhs
self.rhs = rhs
@staticmethod
def create(op, lhs, rhs):
return BinaryOperator(op, lhs, rhs, lhs.ty)
class CmpInst(Instruction):
FCMP_FALSE = 0
FCMP_OEQ = 1
FCMP_OGT = 2
FCMP_OGE = 3
FCMP_OLT = 4
FCMP_OLE = 5
FCMP_ONE = 6
FCMP_ORD = 7
FCMP_UNO = 8
FCMP_UEQ = 9
FCMP_TRUE = 15
ICMP_EQ = 32
ICMP_NE = 33
ICMP_UGT = 34
ICMP_UGE = 35
ICMP_ULT = 36
ICMP_ULE = 37
ICMP_SGT = 38
ICMP_SGE = 39
ICMP_SLT = 40
ICMP_SLE = 41
def __init__(self, pred, lhs, rhs):
super().__init__(self.make_cmp_result_type(lhs.ty))
self.pred = pred
self.lhs = lhs
self.rhs = rhs
@classmethod
def make_cmp_result_type(cls, opnd_type):
if isinstance(opnd_type, VectorType):
return VectorType.get(opnd_type.context.int1_ty, opnd_type.num)
else:
return opnd_type.context.int1_ty
class FCmpInst(CmpInst):
pass
class ICmpInst(CmpInst):
pass
class ExtractElementInst(Instruction):
def __init__(self, val, index):
super().__init__(val.ty.el_type)
self.val = val
self.index = index
class GetElementPtrInst(Instruction):
def __init__(self, ty, ptr, indices):
ret_ty = self.get_gep_return_type(ptr, indices)
super().__init__(ret_ty)
self.ptr = ptr
self.indices = indices
@staticmethod
def get_indexed_type(agg, idx_list):
""" Return the type after all indexing magic """
for index in idx_list:
agg = agg.get_type_at_index(index)
return agg
@classmethod
def get_gep_return_type(cls, ptr, idx_list):
""" Get the pointer type returned by the GEP """
ty2 = cls.get_indexed_type(ptr.ty, idx_list)
ptr_ty = PointerType.get(ty2, 0)
return ptr_ty
class InsertElementInst(Instruction):
"""Insert element instruction.
Returns a new vector with element at index replaced.
"""
def __init__(self, vec, elt, index):
super().__init__(vec.ty)
self.vec = vec
self.elt = elt
self.index = index
class PhiNode(Instruction):
pass
class SelectInst(Instruction):
def __init__(self, op0, op1, op2):
super().__init__(op1.ty)
self.op0 = op0
self.op1 = op1
self.op2 = op2
@classmethod
def create(cls, op0, op1, op2):
return SelectInst(op0, op1, op2)
class ShuffleVectorInst(Instruction):
def __init__(self, v1, v2, mask):
super().__init__(VectorType.get(v1.ty.el_type, mask.ty.num))
self.v1 = v1
self.v2 = v2
self.mask = mask
class StoreInst(Instruction):
def __init__(self, val, ptr):
self.val = val
self.ptr = ptr
class TerminatorInst(Instruction):
pass
class BranchInst(TerminatorInst):
def __init__(self, op1, op2=None, op0=None):
# super().__init__()
self.op1 = op1
        self.op2 = op2
        self.op0 = op0
class CallInst(Instruction):
def __init__(self, ty, name, arguments):
super().__init__(ty)
self.fname = name
self.arguments = arguments
class ReturnInst(TerminatorInst):
def __init__(self, ty, value=None):
super().__init__(ty)
self.value = value
class SwitchInst(TerminatorInst):
pass
class UnaryInstruction(Instruction):
pass
class AllocaInst(UnaryInstruction):
def __init__(self, ty, size, alignment):
super().__init__(PointerType.get_unequal(ty))
self.allocated_ty = ty
self.size = size
class CastInst(Instruction):
def __init__(self, op, val, dest_ty):
super().__init__(dest_ty)
self.op = op
self.val = val
@staticmethod
def create(op, val, dest_ty):
return CastInst(op, val, dest_ty)
class LoadInst(Instruction):
def __init__(self, val, ptr):
super().__init__(ptr.ty.el_type)
self.val = val
self.ptr = ptr
void_ty_id = 0
half_ty_id = 1
float_ty_id = 2
double_ty_id = 3
fp128_ty_id = 5
label_ty_id = 7
integer_ty_id = 11
function_ty_id = 12
struct_ty_id = 13
array_ty_id = 14
pointer_ty_id = 15
vector_ty_id = 16
class Type:
""" The type class """
def __init__(self, context, type_id):
self.context = context
self.type_id = type_id
@property
def is_void(self):
return self.type_id == void_ty_id
@property
def is_label(self):
return self.type_id == label_ty_id
@property
def is_integer(self):
return self.type_id == integer_ty_id
@property
def is_floating_point(self):
return self.type_id in [half_ty_id, float_ty_id, double_ty_id]
@staticmethod
def get_void_ty(context):
return context.void_ty
@staticmethod
def get_label_ty(context):
return context.label_ty
class FunctionType(Type):
def __init__(self, result_type, params, is_var_arg=False):
super().__init__(result_type.context, function_ty_id)
self.result_type = result_type
self.params = params
@classmethod
def get(cls, result_type, params=(), is_var_arg=False):
# context = result_type.context
return FunctionType(result_type, params, is_var_arg)
class IntegerType(Type):
def __init__(self, context, bits):
super().__init__(context, integer_ty_id)
self.bits = bits
@staticmethod
def get(context, num_bits):
""" Get the integer type with the given number of bits """
if num_bits not in context.integer_types:
context.integer_types[num_bits] = IntegerType(context, num_bits)
return context.integer_types[num_bits]
class CompositeType(Type):
pass
class StructType(CompositeType):
""" Structure type """
def __init__(self, context):
super().__init__(context, struct_ty_id)
def get_type_at_index(self, idx):
raise NotImplementedError()
@classmethod
def get(cls, context, e_types, is_packed):
""" Get struct type with certain elements """
key = (tuple(e_types), is_packed)
if key in context.struct_types:
st = context.struct_types[key]
else:
st = StructType(context)
st.body = e_types
context.struct_types[key] = st
return st
class SequentialType(CompositeType):
def __init__(self, ty_id, el_type):
super().__init__(el_type.context, ty_id)
self.el_type = el_type
def get_type_at_index(self, idx):
return self.el_type
class PointerType(SequentialType):
def __init__(self, pointed_type, address_space):
super().__init__(pointer_ty_id, pointed_type)
@classmethod
def get(cls, ty, address_space):
context = ty.context
key = ("pointer", id(ty))
if key not in context.type_map:
context.type_map[key] = PointerType(ty, address_space)
return context.type_map[key]
@classmethod
def get_unequal(cls, ty):
return cls.get(ty, 0)
class ArrayType(SequentialType):
def __init__(self, elmty, num):
super().__init__(array_ty_id, elmty)
self.num = num
@staticmethod
def get(elmty, num):
context = elmty.context
key = ("array", num, id(elmty))
if key not in context.type_map:
context.type_map[key] = ArrayType(elmty, num)
return context.type_map[key]
class VectorType(SequentialType):
def __init__(self, elmty, num):
super().__init__(vector_ty_id, elmty)
self.num = num
@staticmethod
def get(elmty, num):
context = elmty.context
key = ("vector", num, id(elmty))
if key not in context.type_map:
context.type_map[key] = VectorType(elmty, num)
return context.type_map[key]
class Context:
""" LLVM context """
def __init__(self):
self.void_ty = Type(self, void_ty_id)
self.half_ty = Type(self, half_ty_id)
self.float_ty = Type(self, float_ty_id)
self.double_ty = Type(self, double_ty_id)
self.label_ty = Type(self, label_ty_id)
self.integer_types = {}
self.int1_ty = IntegerType.get(self, 1)
self.int8_ty = IntegerType.get(self, 8)
self.int16_ty = IntegerType.get(self, 16)
self.int32_ty = IntegerType.get(self, 32)
self.int64_ty = IntegerType.get(self, 64)
self.int128_ty = IntegerType.get(self, 128)
self.vector_types = []
self.struct_types = {}
self.type_map = {}
class DataLayout:
def __init__(self):
self.pointers = {0: 8}
def get_type_alloc_size(self, ty):
# TODO: implement sensible logic here
return self.get_type_size_in_bits(ty) // 8
def get_type_size_in_bits(self, ty):
if ty.type_id == integer_ty_id:
return ty.bits
elif ty.type_id == half_ty_id:
return 16
elif ty.type_id == float_ty_id:
return 32
elif ty.type_id == double_ty_id:
return 64
elif ty.type_id == pointer_ty_id:
return self.pointers[0] * 8
elif ty.type_id == array_ty_id:
return self.get_type_size_in_bits(ty.el_type) * ty.num
elif ty.type_id == vector_ty_id:
return self.get_type_size_in_bits(ty.el_type) * ty.num
else:
raise NotImplementedError(str(ty) + str(ty.type_id))
@classmethod
def from_string(cls, txt):
data_layout = DataLayout()
data_layout.parse_specifier(txt)
return data_layout
def reset(self, layout_description):
self.pointers[0] = 8 # size in bytes of pointer
self.parse_specifier(layout_description)
def parse_specifier(self, desc):
for part in desc.split("-"):
toks = part.split(":")
specifier = toks[0][0]
if specifier == "e":
self.big_endian = False
elif specifier == "E":
self.big_endian = True
elif specifier == "m":
if toks[1] == "e":
self.mangling = "ELF"
else:
raise NotImplementedError(toks[1])
elif specifier in "ivfa":
abi_align = int(toks[1])
print(abi_align)
# pref_align = int(toks[2])
elif specifier == "n":
# Native integer types
                legal_int_widths = [int(toks[0][1:])] + [int(p) for p in toks[1:]]
print(legal_int_widths)
elif specifier == "S":
                pass  # 'S' gives the natural stack alignment in bits; not needed here
else:
raise NotImplementedError(part)
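# Minimal usage sketch (illustrative only: the layout string below is a typical
# x86-64 ELF spec assumed for the demo, not something defined in this module;
# parse_specifier also prints a few intermediate values while parsing):
if __name__ == '__main__':
    dl = DataLayout.from_string("e-m:e-i64:64-f80:128-n8:16:32:64-S128")
    ctx = Context()
    print(dl.get_type_size_in_bits(ctx.int32_ty))  # -> 32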
| [
"[email protected]"
] | |
a38ad724f73002d0f6a61cdd841bd780cfb1b1fc | 6d7336a48936a15484798d59cb863d8419e9578c | /setup.py | 84631162f58c4005edc139c3796016fad04db54b | [
"MIT"
] | permissive | bbengfort/cellular-automata | 48f8f07925e1fb3b4466f090127f44f8f761c799 | c58ab58773796037979da70806b9d19797a64926 | refs/heads/master | 2021-01-01T15:35:44.698790 | 2014-02-07T20:48:49 | 2014-02-07T20:48:49 | 16,409,041 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 2,347 | py | #!/usr/bin/env python
# setup
# Setup script for cellular-automata
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Fri Jan 31 09:15:08 2014 -0500
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: setup.py [] [email protected] $
"""
Setup script for cellular-automata
"""
##########################################################################
## Imports
##########################################################################
try:
from setuptools import setup
from setuptools import find_packages
except ImportError:
raise ImportError("Could not import \"setuptools\"."
"Please install the setuptools package.")
##########################################################################
## Package Information
##########################################################################
packages = find_packages(where=".", exclude=("tests", "bin", "docs", "fixtures",))
requires = []
with open('requirements.txt', 'r') as reqfile:
for line in reqfile:
requires.append(line.strip())
classifiers = (
'Development Status :: 3 - Alpha',
'Environment :: MacOS X',
'Environment :: Console',
'Environment :: Other Environment',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Multimedia :: Graphics',
'Topic :: Artistic Software',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Scientific/Engineering :: Artificial Life',
)
config = {
"name": "PyCellularAutomata",
"version": "0.2",
"description": "a Python Cellular Automata visualization library",
"author": "Benjamin Bengfort",
"author_email": "[email protected]",
"url": "https://github.com/bbengfort/cellular-automata",
"packages": packages,
"install_requires": requires,
"classifiers": classifiers,
"zip_safe": False,
"scripts": ["bin/pyca",],
}
##########################################################################
## Run setup script
##########################################################################
if __name__ == '__main__':
setup(**config)
| [
"[email protected]"
] | |
55f1881da1b8b571d4ead59e470624bd417dde15 | eb2df6020f5759feee3d6d78c5f8c78999454a09 | /scheduled_jobs/trulight_energy/capacity_curves/read_capacity.py | 7a06b2ec665d19e94399d9f1b55cf383d3f12c1f | [] | no_license | mywork-dragon/dave-energy | 7a08f855d245c2d90a9c13aa85fc3b9f28ae9294 | 4b3430be6ef6957389ab05be3a17a0245f5d6662 | refs/heads/master | 2023-07-28T02:55:26.791724 | 2021-09-06T11:44:30 | 2021-09-06T11:44:30 | 365,872,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
from scheduled_jobs.trulight_energy.run import run
from scheduled_jobs.trulight_energy.run import TrulightAPIType
CAPACITY_CURVE_URL = "/api/CapacityCurve"
if __name__ == "__main__":
run(CAPACITY_CURVE_URL, TrulightAPIType.Capacity) | [
"[email protected]"
] | |
9b080ae18cafe06d4113940f5175b3baafc4f009 | 311c4dc3034b108049c0d21adad71144433c539d | /cars/tests.py | 7215f9f9f078bb99be857970d1c462350d43c22c | [] | no_license | Osama-Yousef/cars-api-permissions-postgres | 34acfd9fdc0545e147d07e821f767e1e414149f9 | 2292f10e6ae53b6da3f0848ed1832d1fe1346c4d | refs/heads/master | 2022-12-22T07:21:24.926870 | 2020-09-28T15:00:47 | 2020-09-28T15:00:47 | 299,285,921 | 1 | 0 | null | 2020-09-28T15:00:48 | 2020-09-28T11:25:05 | Python | UTF-8 | Python | false | false | 907 | py | from django.test import TestCase
from django.contrib.auth import get_user_model
from .models import Car
# Create your tests here.
class CarTest(TestCase):
@classmethod
def setUpTestData(cls):
test_user = get_user_model().objects.create_user(username='testuser', password='password')
test_user.save()
car = Car.objects.create(
author = test_user,
title = 'ferrari enzo',
body = 'amazing car through the years'
)
car.save() # Save the object to mock Database
    def test_car_content(self):
car = Car.objects.get(id=1)
actual_author = str(car.author)
actual_title = str(car.title)
actual_body = str(car.body)
self.assertEqual(actual_author, 'testuser')
self.assertEqual(actual_title, 'ferrari enzo')
self.assertEqual(actual_body, 'amazing car through the years') | [
"[email protected]"
] | |
0ae56314edbd5ce6556f06888ff948ebbb88126a | abd65dd8249cd4c9d50c7dae9b572306d32c94dc | /crm_campaign_blog/__init__.py | 822186dcd9a90c8bf4d7fd26462e7cb9034df31e | [] | no_license | intrepidux/odoo-marketing | 17e74dda84fa4dac2f34503e2527e0639a84c922 | 192266a6748c09aeadec7a45f28288d3503fb836 | refs/heads/master | 2023-06-10T06:07:01.894348 | 2020-10-19T09:15:34 | 2020-10-19T09:15:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12 | py | import blog
| [
"[email protected]"
] | |
4dd7ac5371dafbffb4120681c5a84bd01d820d3f | 7860f554d0a51f8af87f71c0a927d12f618b2197 | /all_topic/esay_topic/38. 外观数列.py | 2ee250f25b470d91b81d208e631bc16b3aa019bd | [] | no_license | starrye/LeetCode | 79586b5984a4c28b9a641a0108a41dccacd9375a | 83c589464e0caad960679aea259681c965218d13 | refs/heads/master | 2022-07-26T21:05:42.616460 | 2022-06-23T10:10:38 | 2022-06-23T10:10:38 | 158,910,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,603 | py | #!/usr/local/bin/python3
# -*- coding:utf-8 -*-
"""
@author:
@file: 38. 外观数列.py
@time: 2020/8/10 16:15
@desc:
"""
from typing import List
"""
Given a positive integer n (1 <= n <= 30), output the n-th term of the count-and-say sequence.
Note: each term of the integer sequence is represented as a string.
The "count-and-say" sequence is a sequence of integers starting from the digit 1, where every term describes the previous one. The first five terms are:
1.     1
2.     11
3.     21
4.     1211
5.     111221
The first term is the digit 1.
Describing the previous term, 1, as "one 1" gives 11.
Describing the previous term, 11, as "two 1s" gives 21.
Describing the previous term, 21, as "one 2, one 1" gives 1211.
Describing the previous term, 1211, as "one 1, one 2, two 1s" gives 111221.
Example 1:
Input: 1
Output: "1"
Explanation: this is the base case.
Example 2:
Input: 4
Output: "1211"
Explanation: when n = 3 the sequence is "21", which contains the groups "2" and "1". "2" reads as "12" (count = 1, value = 2) and "1" reads as "11". Joining "12" and "11" gives "1211".
"""
"""
Each outer-loop iteration takes the number the previous person said and derives what the next person says.
Each inner-loop iteration walks through the digits of the previous person's number.
Start with the previous term set to '1'.
At the top of every outer iteration, reset the next term to an empty string, set the pending character to the first digit of the previous term, and set its occurrence count to 1.
In the inner loop, scan the previous term: if a digit equals the pending character, increment the count;
otherwise append the count and the pending character to the next term, then update the pending character and reset the count to 1.
Don't forget to append the last character of the previous term together with its count after the loop!
Author: qsctech-sange
Link: https://leetcode-cn.com/problems/count-and-say/solution/ji-su-jie-bu-di-gui-zhi-ji-lu-qian-hou-liang-ren-p/
Source: LeetCode (力扣)
The copyright belongs to the author. Contact the author for commercial reuse; cite the source for non-commercial reuse.
"""
class Solution:
def countAndSay(self, n: int) -> str:
        # set the initial term
pre_str = "1"
for i in range(1, n):
cur_str, count, tmp_str = "", 1, pre_str[0]
for j in pre_str[1:]:
if j == tmp_str:
count += 1
else:
cur_str += str(count) + tmp_str
count = 1
tmp_str = j
cur_str += str(count) + tmp_str
pre_str = cur_str
return pre_str
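# Illustrative checks against the first terms of the sequence (added for clarity):
assert Solution().countAndSay(1) == "1"
assert Solution().countAndSay(4) == "1211"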
a = Solution().countAndSay(3)
print(a) | [
"[email protected]"
] | |
03a9735ba626fe96b762c8a33362a10ef2117c76 | 21e177a4d828f4e0a003e9424c4952dbc0b47d29 | /testlints/test_lint_sub_ca_certificate_policies_marked_critical.py | fdded72a9b8ed872ca9ac65bd20ff499b974937f | [] | no_license | 846468230/Plint | 1071277a55144bb3185347a58dd9787562fc0538 | c7e7ca27e5d04bbaa4e7ad71d8e86ec5c9388987 | refs/heads/master | 2020-05-15T12:11:22.358000 | 2019-04-19T11:46:05 | 2019-04-19T11:46:05 | 182,255,941 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | import sys
sys.path.append("..")
from lints import base
from lints import lint_sub_ca_certificate_policies_marked_critical
import unittest
import os
from cryptography import x509
from cryptography.hazmat.backends import default_backend
class TestSubCaPolicyCrit(unittest.TestCase):
'''test lint_sub_ca_certificate_policies_marked_critical.py'''
def test_SubCaPolicyCrit(self):
        certPath = '..\\testCerts\\subCAWCertPolicyCrit.pem'
lint_sub_ca_certificate_policies_marked_critical.init()
with open(certPath, "rb") as f:
cert = x509.load_pem_x509_certificate(f.read(), default_backend())
out = base.Lints["w_sub_ca_certificate_policies_marked_critical"].Execute(cert)
self.assertEqual(base.LintStatus.Warn,out.Status)
def test_SubCaPolicyNotCrit(self):
        certPath = '..\\testCerts\\subCAWCertPolicyNoCrit.pem'
lint_sub_ca_certificate_policies_marked_critical.init()
with open(certPath, "rb") as f:
cert = x509.load_pem_x509_certificate(f.read(), default_backend())
out = base.Lints["w_sub_ca_certificate_policies_marked_critical"].Execute(cert)
self.assertEqual(base.LintStatus.Pass,out.Status)
if __name__=="__main__":
unittest.main(verbosity=2) | [
"[email protected]"
] | |
7db12f0089bc848a4c09ea3a04ec1d7797dfce8a | 675cdd4d9d2d5b6f8e1383d1e60c9f758322981f | /supervised_learning/0x11-attention/8-transformer_decoder_block.py | 6297de99d6e9a896c8f3eaa4dd43d2e1acc22042 | [] | no_license | AndresSern/holbertonschool-machine_learning-1 | 5c4a8db28438d818b6b37725ff95681c4757fd9f | 7dafc37d306fcf2ea0f5af5bd97dfd78d388100c | refs/heads/main | 2023-07-11T04:47:01.565852 | 2021-08-03T04:22:38 | 2021-08-03T04:22:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,821 | py | #!/usr/bin/env python3
""" Tranformer decoder block"""
import tensorflow as tf
MultiHeadAttention = __import__('6-multihead_attention').MultiHeadAttention
class DecoderBlock(tf.keras.layers.Layer):
""" create an encoder block for a transformer: """
def __init__(self, dm, h, hidden, drop_rate=0.1):
"""
ARGS:
-dm - the dimensionality of the model
-h - the number of heads
-hidden - the number of hidden units
in the fully connected layer
-drop_rate - the dropout rate
"""
super().__init__()
self.mha1 = MultiHeadAttention(dm, h)
self.mha2 = MultiHeadAttention(dm, h)
self.dense_hidden = tf.keras.layers.Dense(units=hidden,
activation='relu')
self.dense_output = tf.keras.layers.Dense(units=dm)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(drop_rate)
self.dropout2 = tf.keras.layers.Dropout(drop_rate)
self.dropout3 = tf.keras.layers.Dropout(drop_rate)
def call(self, x, encoder_output, training, look_ahead_mask, padding_mask):
""" Function that returns a tensor containing the block’s output
ARGS:
-x :{tensor} shape (batch, target_seq_len, dm)
containing the input to the decoder block
-encoder_output :{tensor} shape (batch, input_seq_len, dm)
containing the output of the encoder
-training: {boolean} : to determine if the model is training
-look_ahead_mask: the mask to be applied to the
first multi head attention layer
-padding_mask: the mask to be applied to the
second multi head attention layer
Returns:
-a tensor of shape (batch, target_seq_len, dm)
containing the block’s output
"""
attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2, attn_weights_block2 = self.mha2(out1, encoder_output,
encoder_output,
padding_mask)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1)
ffn_output = self.dense_hidden(out2)
ffn_output = self.dense_output(ffn_output)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(ffn_output + out2)
return out3
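# Quick shape check (illustrative sketch; the hyper-parameters and tensor sizes are
# arbitrary, and MultiHeadAttention is assumed to accept a None mask):
if __name__ == '__main__':
    block = DecoderBlock(dm=512, h=8, hidden=2048)
    x = tf.random.uniform((64, 50, 512))        # (batch, target_seq_len, dm)
    enc_out = tf.random.uniform((64, 60, 512))  # (batch, input_seq_len, dm)
    out = block(x, enc_out, training=False,
                look_ahead_mask=None, padding_mask=None)
    print(out.shape)  # (64, 50, 512)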
| [
"[email protected]"
] | |
ea1f2d359cf39f1aa47f4fd798f27d8dfcc65e11 | f05e629875840e53e80f5b9f5ed954e44df20808 | /knp_utils/models.py | 46004f6b7e778d231b60f61c9a2cf249d5217fd9 | [
"MIT"
] | permissive | fumankaitori/knp-utils-py | 75ea6ab252a013dce7f7133073ac97d2eb410602 | af1e0eb0990e6cdf1fb340407ebbe97f9044ab81 | refs/heads/master | 2020-06-25T18:36:16.162389 | 2019-01-06T06:06:24 | 2019-01-06T06:06:24 | 199,391,725 | 0 | 0 | null | 2019-07-29T06:26:20 | 2019-07-29T06:26:20 | null | UTF-8 | Python | false | false | 29,696 | py | #! -*- coding: utf-8 -*-
# package module
from knp_utils.logger_unit import logger
# else
from datetime import datetime
import subprocess
import traceback
import six
import re
import shutil
import pexpect
import os
import sys
from typing import List, Dict, Any, Tuple, Optional
from six import text_type
import zlib
# errors
from knp_utils.errors import ParserIntializeError
if six.PY2:
ConnectionRefusedError = Exception
TimeoutError = Exception
class UnixProcessHandler(object):
"""This class is a handler of any UNIX process. The class keeps UNIX process running.
"""
def __init__(self,
unix_command,
option=None,
pattern='EOS',
timeout_second=10):
# type: (text_type,text_type,text_type,int)->None
self.unix_command = unix_command
self.timeout_second = timeout_second
self.pattern = pattern
self.option = option
self.launch_process(unix_command)
def __del__(self):
if hasattr(self, "process_analyzer"):
self.process_analyzer.kill(sig=9)
def launch_process(self, command):
# type: (text_type)->None
"""* What you can do
- It starts jumanpp process and keep it.
"""
if not self.option is None:
command_plus_option = self.unix_command + " " + self.option
else:
command_plus_option = self.unix_command
if six.PY3:
if shutil.which(command) is None:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid
else:
doc_command_string = "echo '' | {}".format(command)
command_check = os.system(doc_command_string)
if not command_check == 0:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid
def restart_process(self):
# type: ()->None
""""""
if not self.option is None:
command_plus_option = self.unix_command + " " + self.option
else:
command_plus_option = self.unix_command
self.process_analyzer.kill(sig=9)
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid
def stop_process(self):
# type: ()->bool
"""* What you can do
- You're able to stop the process which this instance has now.
"""
if hasattr(self, "process_analyzer"):
self.process_analyzer.kill(sig=9)
else:
pass
return True
def __query(self, input_string):
# type: (text_type)->text_type
"""* What you can do
- It takes the result of Juman++
- This function monitors time which takes for getting the result.
"""
input_encoded = input_string
self.process_analyzer.sendline(input_encoded)
buffer = ""
while True:
line_string = self.process_analyzer.readline() # type: text_type
if line_string.strip() == input_string:
"""Skip if process returns the same input string"""
continue
elif line_string.strip() == self.pattern:
buffer += line_string
return buffer
else:
buffer += line_string
def __notify_handler(self, signum, frame):
raise Exception("""It takes longer time than {time} seconds. You're able to try,
1. Change your setting of 'timeout_second' parameter
2. Run restart_process() method when the exception happens.""".format(**{"time": self.timeout_second}))
def query(self, input_string):
# type: (text_type)->text_type
"""* What you can do
"""
try:
return self.__query(input_string=input_string)
except UnicodeDecodeError:
logger.error(msg=traceback.format_exc())
raise Exception()
class SubprocessHandler(object):
"""A old fashion way to keep connection into UNIX process"""
def __init__(self, command, timeout_second=None):
# type: (text_type,int)->None
subproc_args = {'stdin': subprocess.PIPE, 'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT, 'cwd': '.',
'close_fds': sys.platform != "win32"}
try:
env = os.environ.copy()
self.process = subprocess.Popen('bash -c "%s"' % command, env=env,
shell=True, **subproc_args)
except OSError:
raise ParserIntializeError(message='Failed to initialize parser.', path_to_parser=command)
self.command = command
(self.stdouterr, self.stdin) = (self.process.stdout, self.process.stdin)
if timeout_second is None:
self.timeout_second = 10000000
else:
self.timeout_second = timeout_second
def __del__(self):
self.process.stdin.close()
self.process.stdout.close()
try:
self.process.kill()
self.process.wait()
except OSError:
pass
def query(self, sentence, eos_pattern, document_type):
# type: (text_type, text_type, text_type)->text_type
assert (isinstance(sentence, six.text_type))
if document_type == 'juman':
if isinstance(sentence, six.text_type) and six.PY2:
# python2で入力がunicodeだった場合の想定 #
self.process.stdin.write(sentence.encode('utf-8') + '\n'.encode('utf-8'))
elif isinstance(sentence, str) and six.PY2:
self.process.stdin.write(sentence + '\n'.encode('utf-8'))
elif isinstance(sentence, str) and six.PY3:
self.process.stdin.write(sentence.encode('utf-8') + '\n'.encode('utf-8'))
elif document_type=='knp':
if isinstance(sentence, six.text_type) and six.PY2:
# python2で入力がunicodeだった場合の想定 #
self.process.stdin.write(sentence.encode('utf-8'))
elif isinstance(sentence, str) and six.PY2:
self.process.stdin.write(sentence)
elif isinstance(sentence, str) and six.PY3:
self.process.stdin.write(sentence.encode('utf-8'))
self.process.stdin.flush()
        result = b""  # an empty byte buffer (identical in Python 2 and 3)
start_time = datetime.now()
eos_pattern_byte = eos_pattern.encode('utf-8')
no_file_pattern_byte = r'No\ssuch\sfile\sor\sdirectory'.encode('utf-8')
while True:
line = self.stdouterr.readline()[:-1]
result = result + line + "\n".encode('utf-8')
if re.search(eos_pattern_byte, line):
break
if re.search(pattern=no_file_pattern_byte, string=result):
raise ParserIntializeError(message=result, path_to_parser=self.command)
elapsed_time = (datetime.now() - start_time).seconds
if elapsed_time > self.timeout_second:
raise TimeoutError("It wastes longer time than {}".format(self.timeout_second))
result_unicode = result.decode('utf-8')
return result_unicode
class KnpSubProcess(object):
"""This class defines process to run KNP analysis."""
def __init__(self,
knp_command,
juman_command,
knp_options=None,
juman_options=None,
knp_server_host=None,
knp_server_port=None,
juman_server_host=None,
juman_server_port=None,
is_use_jumanpp=False,
process_mode='subprocess',
path_juman_rc=None,
eos_pattern="EOS",
timeout_second=60):
# type: (str,str,str,str,str,int,str,int,bool,str,str,str,int)->None
"""* Parameters
- knp_command: Path into Bin of KNP
- juman_command: Path into Bin of Juman(or Juman++)
- knp_options: Option strings of KNP(or KNP++)
- juman_options: Option string of Juman(or Juman++)
- knp_server_host: Host address where KNP server is working
- knp_server_port: Port number where KNP server is working
- juman_server_host: Host address where Juman server is working
- juman_server_port: Port number where Juman server is working
- is_use_jumanpp: Bool flag to use Juman++ instead of Juman. You're supposed to install Juman++ beforehand.
- process_mode: Way to call UNIX commands. 1; You call UNIX commands everytime. 2; You keep UNIX commands running.
- path_juman_rc: Path into Jumanrc file.
"""
PROCESS_MODE = ('everytime', 'pexpect', 'subprocess')
self.knp_command = knp_command
self.juman_command = juman_command
self.juman_options = juman_options
self.knp_options = knp_options
self.knp_server_host = knp_server_host
self.knp_server_port = knp_server_port
self.juman_server_host = juman_server_host
self.juman_server_port = juman_server_port
self.path_juman_rc = path_juman_rc
self.is_use_jumanpp = is_use_jumanpp
self.process_mode = process_mode
self.eos_pattern = eos_pattern
self.timeout_second = timeout_second
# Check jumanrc file path #
if not self.path_juman_rc is None and not os.path.exists(self.path_juman_rc):
raise Exception("No jumanrc file at {}".format(self.path_juman_rc))
else:
pass
# Check flag combination & show warning message for a user #
if not self.path_juman_rc is None:
if self.is_use_jumanpp:
logger.warning("path_juman_rc is invalid when is_use_jumanpp is True.")
elif not self.juman_server_host is None:
logger.warning("path_juman_rc is invalid when you use juman server mode.")
else:
pass
else:
pass
if not self.process_mode in PROCESS_MODE:
raise Exception("No process_mode named {}".format(self.process_mode))
else:
pass
        if os.name == 'nt' and self.process_mode == 'pexpect':
            logger.warning(msg="process_mode='pexpect' is not available on Windows; falling back to process_mode='everytime'.")
            self.process_mode = 'everytime'
# choose a way to call unix commands #
        if (not self.juman_server_host is None and not self.juman_server_port is None) and (not self.knp_server_host is None and not self.knp_server_port is None):
self.__launch_server_model()
elif self.process_mode == 'pexpect':
self.__launch_pexpect_mode()
elif self.process_mode == 'everytime':
self.__launch_everytime_mode()
elif self.process_mode == 'subprocess':
self.__launch_subprocess_model()
else:
raise Exception("It failed to initialize. Check your configurations.")
def __launch_subprocess_model(self):
# type: ()->None
"""* What you can do
- It defines process with subprocess handler
"""
self.validate_arguments()
if self.juman_options is None:
self.juman = SubprocessHandler(command='{}'.format(self.juman_command), timeout_second=self.timeout_second)
else:
self.juman = SubprocessHandler(command='{} {}'.format(self.juman_command, self.juman_options), timeout_second=self.timeout_second)
if self.knp_options is None:
self.knp = SubprocessHandler(command='{} -tab'.format(self.knp_command), timeout_second=self.timeout_second)
else:
self.knp = SubprocessHandler(command='{} {}'.format(self.knp_command, self.knp_options), timeout_second=self.timeout_second)
def __launch_pexpect_mode(self, is_keep_process=True):
# type: (bool)->None
"""* What you can do
- It defines process with pexpect
- For KNP
- with keep process running (experimental)
- with launching KNP command every time
"""
self.validate_arguments()
# set juman/juman++ unix process #
if self.is_use_jumanpp:
self.juman = UnixProcessHandler(unix_command=self.juman_command,
timeout_second=self.timeout_second,
option=self.juman_options)
else:
if not self.path_juman_rc is None:
option_string = ' '.join(['-r', self.path_juman_rc])
elif not self.juman_options is None:
option_string = self.juman_options
else:
option_string = None
self.juman = UnixProcessHandler(unix_command=self.juman_command,
option=option_string,
timeout_second=self.timeout_second)
# set KNP process #
if is_keep_process:
            self.knp = SubprocessHandler(command='{} -tab'.format(self.knp_command))
elif not self.knp_options is None:
self.knp = [self.knp_command, self.knp_options]
else:
self.knp = [self.knp_command, '-tab']
def __launch_everytime_mode(self):
""""""
self.validate_arguments()
# set juman/juman++ unix command #
if self.is_use_jumanpp:
self.juman = [self.juman_command]
else:
if not self.path_juman_rc is None:
self.juman = [self.juman_command, '-B', '-e2', '-r', self.path_juman_rc]
elif not self.juman_options is None:
self.juman = [self.juman_command] + self.juman_options.split()
else:
self.juman = [self.juman_command, '-B', '-e2']
# set KNP unix command #
if not self.knp_options is None:
self.knp = [self.knp_command] + self.knp_options.split()
else:
self.knp = [self.knp_command, '-tab']
def __launch_server_model(self):
""""""
self.juman = [self.juman_command, '-C', '{}:{}'.format(self.juman_server_host, self.juman_server_port)]
self.knp = [self.knp_command, '-C', '{}:{}'.format(self.knp_server_host, self.knp_server_port), '-tab']
def validate_arguments(self):
        # TODO: reconsider how these arguments should be validated
if six.PY3:
if shutil.which(self.juman_command) is None:
raise Exception("No command at {}".format(self.juman_command))
if shutil.which(self.knp_command) is None:
raise Exception("No command at {}".format(self.juman_command))
elif six.PY2:
doc_command_string = "echo '' | {}".format(self.juman_command)
command_check = os.system(doc_command_string)
if not command_check == 0:
raise Exception("No command at {}".format(self.juman_command))
doc_command_string = "echo '' | {}".format(self.knp_command)
command_check = os.system(doc_command_string)
if not command_check == 0:
raise Exception("No command at {}".format(self.knp_command))
# Check options either of under Python2.x or Python3.x
if not self.juman_options is None:
echo_process = ["echo", '']
echo_ps = subprocess.Popen(echo_process, stdout=subprocess.PIPE)
p = subprocess.Popen([self.juman_command] + self.juman_options.split(),
stdin=echo_ps.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False)
error_lines = p.stderr.readlines() # type: List[bytes]
error_lines_str = [line.decode() for line in error_lines]
for line in error_lines_str:
if re.match('^usage:', line.lower()):
raise Exception("Invalid options: {} {}".format(self.juman_command, self.juman_options))
if not self.knp_options is None:
echo_process = ["echo", '']
echo_ps = subprocess.Popen(echo_process, stdout=subprocess.PIPE)
echo_ps.wait()
if self.juman_options is None:
juman_command = [self.juman_command]
else:
juman_command = [self.juman_command] + self.juman_options.split()
juman_ps = subprocess.Popen(juman_command, stdin=echo_ps.stdout, stdout=subprocess.PIPE)
juman_ps.wait()
p = subprocess.Popen([self.knp_command] + self.knp_options.split(),
stdin=juman_ps.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False)
error_lines = p.stderr.readlines() # type: List[bytes]
error_lines_str = [line.decode() for line in error_lines]
for line in error_lines_str:
if re.match('^usage:', line.lower()):
raise Exception("Invalid options: {} {}".format(self.knp_command, self.knp_options))
def __run_subprocess_mode(self, input_string):
# type: (text_type)->Tuple[bool,text_type]
assert isinstance(self.juman, SubprocessHandler)
assert isinstance(self.knp, SubprocessHandler)
try:
juman_result = self.juman.query(input_string, '^EOS', 'juman')
knp_result = self.knp.query(juman_result, '^EOS', 'knp')
return (True, knp_result)
except UnicodeDecodeError:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error with UnicodeDecodeError traceback={}'.format(traceback_message))
except TimeoutError:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error with TimeoutErro traceback={}'.format(traceback_message))
except Exception:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error traceback={}'.format(traceback_message))
def __run_pexpect_mode(self, input_string):
# type: (text_type)->Tuple[bool,text_type]
"""* What you can do
- It calls Juman in UNIX process.
- It calls KNP
- with keep process running
- with launching KNP command everytime
"""
assert isinstance(self.juman, UnixProcessHandler)
try:
juman_result = self.juman.query(input_string=input_string)
except:
return (False, 'error traceback={}'.format(traceback.format_exc()))
if isinstance(self.knp, SubprocessHandler):
try:
                parsed_result = self.knp.query(sentence=juman_result.strip(), eos_pattern='^EOS', document_type='knp')
return (True, parsed_result)
except:
traceback_message = traceback.format_exc()
return (False, 'error traceback={}'.format(traceback_message))
else:
# Delete final \n of Juman result document. This \n causes error at KNP #
echo_process = ["echo", juman_result.strip()]
try:
echo_ps = subprocess.Popen(echo_process, stdout=subprocess.PIPE)
echo_ps.wait()
parsed_result = subprocess.check_output(self.knp, stdin=echo_ps.stdout)
if six.PY2:
return (True, parsed_result)
else:
return (True, parsed_result.decode('utf-8'))
except subprocess.CalledProcessError:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error with CalledProcessError. traceback={}'.format(traceback_message))
except UnicodeDecodeError:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error with UnicodeDecodeError traceback={}'.format(traceback_message))
except Exception:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error traceback={}'.format(traceback_message))
def __run_everytime_mode(self, input_string):
# type: (text_type)->Tuple[bool,text_type]
assert isinstance(self.juman, list)
assert isinstance(self.knp, list)
echo_process = ["echo", input_string]
try:
echo_ps = subprocess.Popen(echo_process, stdout=subprocess.PIPE)
echo_ps.wait()
juman_ps = subprocess.Popen(self.juman, stdin=echo_ps.stdout, stdout=subprocess.PIPE)
juman_ps.wait()
parsed_result = subprocess.check_output(self.knp, stdin=juman_ps.stdout)
if six.PY2:
return (True, parsed_result)
else:
return (True, parsed_result.decode('utf-8'))
except subprocess.CalledProcessError:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error with CalledProcessError. traceback={}'.format(traceback_message))
except UnicodeDecodeError:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error with UnicodeDecodeError traceback={}'.format(traceback_message))
except Exception:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error traceback={}'.format(traceback_message))
def __run_server_model(self, input_string):
# type: (text_type)->Tuple[bool,text_type]
assert isinstance(self.juman, list)
assert isinstance(self.knp, list)
echo_process = ["echo", input_string]
try:
echo_ps = subprocess.Popen(echo_process, stdout=subprocess.PIPE)
echo_ps.wait()
juman_ps = subprocess.Popen(self.juman, stdin=echo_ps.stdout, stdout=subprocess.PIPE)
juman_ps.wait()
parsed_result = subprocess.check_output(self.knp, stdin=juman_ps.stdout)
if six.PY2:
return (True, parsed_result)
else:
return (True, parsed_result.decode('utf-8'))
except subprocess.CalledProcessError:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error with CalledProcessError. traceback={}'.format(traceback_message))
except UnicodeDecodeError:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error with UnicodeDecodeError traceback={}'.format(traceback_message))
except Exception:
traceback_message = traceback.format_exc()
logger.error("Error with command={}".format(traceback.format_exc()))
return (False, 'error traceback={}'.format(traceback_message))
def run_command(self, text):
# type: (text_type)->Tuple[bool,text_type]
"""* What you can do
- You run analysis of Juman(Juman++) and KNP.
- You have 2 ways to call commands.
"""
        if (not self.juman_server_host is None and not self.juman_server_port is None) and (not self.knp_server_host is None and not self.knp_server_port is None):
return self.__run_server_model(text)
elif self.process_mode == 'pexpect':
return self.__run_pexpect_mode(text)
elif self.process_mode == 'everytime':
return self.__run_everytime_mode(text)
elif self.process_mode == 'subprocess':
return self.__run_subprocess_mode(text)
else:
raise Exception("It failed to initialize. Check your configurations.")
## For keeping old version
Params = KnpSubProcess
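# Minimal usage sketch (assumes the `juman` and `knp` binaries are installed and on PATH):
# handler = KnpSubProcess(knp_command='knp', juman_command='juman',
#                         process_mode='subprocess', timeout_second=60)
# is_success, knp_output = handler.run_command(u'これはテストです。')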
class DocumentObject(object):
__slots__ = ('record_id', 'status', 'text',
'is_success', 'timestamp', 'updated_at',
'sub_id', 'sentence_index', 'parsed_result', 'document_args', 'is_compress')
def __init__(self,
record_id,
text,
status,
parsed_result=None,
is_success=None,
sub_id=None,
sentence_index=None,
timestamp=datetime.now(),
updated_at=datetime.now(),
document_args=None):
# type: (int,text_type,bool,Optional[text_type],bool,str,int,datetime,datetime,Dict[str, Any]) -> None
"""
:param record_id: unique id in backend DB
:param text: input text to be parsed.
:param status: boolean flag to describe status of knp parsing.
:param parsed_result: parsing result text.
:param is_success: boolean flag to describe status of knp parsing.
:param sub_id: id in the original given text.
This is used when the original input text is too long and the original text is separated.
:param sentence_index: sentence index when the original input text is separated.
:param timestamp:
:param updated_at:
:param document_args: dict object which is attribute information for input document.
"""
self.record_id = record_id
self.status = status
self.timestamp = timestamp
self.updated_at = updated_at
self.is_success = is_success
self.sentence_index = sentence_index
self.document_args = document_args
if six.PY2:
try:
if isinstance(text, str):
self.text = text.decode('utf-8')
else:
self.text = text
except UnicodeDecodeError:
logger.error(traceback.format_exc())
try:
if isinstance(sub_id, str):
self.sub_id = sub_id.decode('utf-8')
else:
self.sub_id = sub_id
except UnicodeDecodeError:
logger.error(traceback.format_exc())
try:
if isinstance(parsed_result, str):
self.parsed_result = parsed_result.decode('utf-8')
else:
self.parsed_result = parsed_result
except UnicodeDecodeError:
logger.error(traceback.format_exc())
else:
self.text = text
self.sub_id = sub_id
self.parsed_result = parsed_result
def set_knp_parsed_result(self, t_parsed_result):
# type: (Tuple[bool,text_type])->None
"""* What you can do
- It sets KNP parsed result
"""
if t_parsed_result[0] is False:
# If it has something system error, tuple[0] is False #
is_success_flag = False
else:
# It checks KNP result has error message or NOT #
is_success_flag = self.__check_knp_result(parsed_result=t_parsed_result[1])
self.is_success = is_success_flag
self.parsed_result = t_parsed_result[1]
@staticmethod
def __check_knp_result(parsed_result):
# type: (text_type)->bool
"""* What you can do
- It checks if knp result is error or not
"""
if parsed_result is None:
return False
elif 'error' in parsed_result.lower():
return False
else:
return True
def to_dict(self):
# type: ()->Dict[str,Any]
"""* What you can do
- You see parsed result with dict format
"""
return {
"record_id": self.record_id,
"sub_id": self.sub_id,
"status": self.status,
"text": self.text,
"is_success": self.is_success,
"parsed_result": self.parsed_result,
"timestamp": self.timestamp,
"update_at": self.updated_at,
"document_args": self.document_args
}
class ResultObject(object):
def __init__(self,
seq_document_obj,
path_working_db,
db_handler):
# type: (List[DocumentObject],Optional[str],Any)->None
self.seq_document_obj = seq_document_obj
self.path_working_db = path_working_db
self.db_handler = db_handler
def to_dict(self):
# type: ()->List[Dict[str,Any]]
"""* What you can do
- You get parsed result with dict format
"""
return [doc_obj.to_dict() for doc_obj in self.seq_document_obj]
| [
"[email protected]"
] | |
57a43a810299204f78640312238dbe2c58d1d9a6 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/list_secrets_request.py | ec09eb7b62cb6671442582b7035c6ecf09abdefa | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,501 | py | # coding: utf-8
import pprint
import re
import six
class ListSecretsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'limit': 'str',
'marker': 'str'
}
attribute_map = {
'limit': 'limit',
'marker': 'marker'
}
def __init__(self, limit=None, marker=None):
"""ListSecretsRequest - a model defined in huaweicloud sdk"""
self._limit = None
self._marker = None
self.discriminator = None
if limit is not None:
self.limit = limit
if marker is not None:
self.marker = marker
@property
def limit(self):
"""Gets the limit of this ListSecretsRequest.
        Number of records returned per page. Default: 50.
:return: The limit of this ListSecretsRequest.
:rtype: str
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListSecretsRequest.
        Number of records returned per page. Default: 50.
:param limit: The limit of this ListSecretsRequest.
:type: str
"""
self._limit = limit
@property
def marker(self):
"""Gets the marker of this ListSecretsRequest.
        Resource ID from which the paginated query starts; if empty, the first page is returned.
:return: The marker of this ListSecretsRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this ListSecretsRequest.
        Resource ID from which the paginated query starts; if empty, the first page is returned.
:param marker: The marker of this ListSecretsRequest.
:type: str
"""
self._marker = marker
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListSecretsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
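# Example (hypothetical usage) -- request the first page of up to 20 secrets:
#     request = ListSecretsRequest(limit='20')
#     request.marker = None  # an empty marker queries the first page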
| [
"[email protected]"
] | |
415113fb75120a82dfd4e56463217dc72faec19b | 10abe784a87ce1f9b37383471af15c69db7087ed | /snippets/input_3.py | cc99892ca9e2792c2974c9f1a1221bbd023922d8 | [] | no_license | The-Cooper-Union-CS102/Lesson-7-Introduction-To-Python | e5f27dadbea27dda83bde7893771d39c0d4cb064 | e663bb2e5373b19fd79553757c0ab52e9600518a | refs/heads/main | 2023-01-24T13:03:21.161831 | 2020-11-24T22:51:54 | 2020-11-24T22:51:54 | 303,213,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | opened_file = open('README.md')
content = opened_file.read()
print(f'Number of characters: {len(content)}')
words = content.split()
print(f'Number of words: {len(words)}')
lines = content.split('\n')
print(f'Number of lines: {len(lines)}')
print(f'Number of unique words: {len(set(words))}')
opened_file.close()
| [
"[email protected]"
] | |
5c6af0f82f0c47d71d0ca32753a263aaa76c23a5 | 3784268a19831d75753392c726ed59118dde632d | /cifar/eval_all.py | cef8a0014979778f66cbd7f671d7f481776959c7 | [
"MIT"
] | permissive | joaomonteirof/e2e_verification | d789399e026a9e6505395a7decae79da0057f3f4 | 867f7a2fbdb2ac9154c31e1e63762b9a58b32d7e | refs/heads/master | 2022-12-08T13:24:03.653512 | 2020-09-02T03:28:42 | 2020-09-02T03:28:42 | 186,659,374 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,461 | py | from __future__ import print_function
import argparse
import torch
from torchvision import datasets, transforms
from models import vgg, resnet, densenet
import numpy as np
import os
import sys
import glob
from utils import *
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Cifar10 - Evaluation of set of cps')
parser.add_argument('--cp-path', type=str, default=None, metavar='Path', help='Path for checkpoints')
parser.add_argument('--data-path', type=str, default='./data/', metavar='Path', help='Path to data')
parser.add_argument('--model', choices=['vgg', 'resnet', 'densenet'], default='resnet')
parser.add_argument('--dropout-prob', type=float, default=0.25, metavar='p', help='Dropout probability (default: 0.25)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
args = parser.parse_args()
args.cuda = True if not args.no_cuda and torch.cuda.is_available() else False
transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize([x / 255 for x in [125.3, 123.0, 113.9]], [x / 255 for x in [63.0, 62.1, 66.7]])])
validset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
labels_list = [x[1] for x in validset]
idxs_enroll, idxs_test, labels = create_trials_labels(labels_list)
print('\n{} trials created out of which {} are target trials'.format(len(idxs_enroll), np.sum(labels)))
cp_list = glob.glob(args.cp_path+'*.pt')
best_model, best_e2e_eer = None, float('inf')
for cp in cp_list:
ckpt = torch.load(cp, map_location = lambda storage, loc: storage)
        try:
dropout_prob, n_hidden, hidden_size, softmax = ckpt['dropout_prob'], ckpt['n_hidden'], ckpt['hidden_size'], ckpt['sm_type']
except KeyError as err:
print("Key Error: {0}".format(err))
print('\nProbably old cp has no info regarding classifiers arch!\n')
try:
n_hidden, hidden_size, softmax = get_classifier_config_from_cp(ckpt)
dropout_prob = args.dropout_prob
except:
print('\nSkipping cp {}. Could not load it.'.format(cp))
continue
if args.model == 'vgg':
model = vgg.VGG('VGG16', nh=n_hidden, n_h=hidden_size, dropout_prob=dropout_prob, sm_type=softmax)
elif args.model == 'resnet':
model = resnet.ResNet18(nh=n_hidden, n_h=hidden_size, dropout_prob=dropout_prob, sm_type=softmax)
elif args.model == 'densenet':
model = densenet.densenet_cifar(nh=n_hidden, n_h=hidden_size, dropout_prob=dropout_prob, sm_type=softmax)
try:
model.load_state_dict(ckpt['model_state'], strict=True)
except:
print('\nSkipping model {}'.format(cp.split('/')[-1]))
continue
if args.cuda:
device = get_freer_gpu()
model = model.cuda(device)
cos_scores = []
e2e_scores = []
out_e2e = []
out_cos = []
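        # Cache one embedding per example index so each image is forwarded
        # through the model only once across all trials.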
mem_embeddings = {}
model.eval()
with torch.no_grad():
for i in range(len(labels)):
enroll_ex = str(idxs_enroll[i])
try:
emb_enroll = mem_embeddings[enroll_ex]
except KeyError:
enroll_ex_data = validset[idxs_enroll[i]][0].unsqueeze(0)
if args.cuda:
enroll_ex_data = enroll_ex_data.cuda(device)
emb_enroll = model.forward(enroll_ex_data).detach()
mem_embeddings[str(idxs_enroll[i])] = emb_enroll
test_ex = str(idxs_test[i])
try:
emb_test = mem_embeddings[test_ex]
except KeyError:
test_ex_data = validset[idxs_test[i]][0].unsqueeze(0)
if args.cuda:
test_ex_data = test_ex_data.cuda(device)
emb_test = model.forward(test_ex_data).detach()
mem_embeddings[str(idxs_test[i])] = emb_test
e2e_scores.append( model.forward_bin(torch.cat([emb_enroll, emb_test],1)).squeeze().item() )
cos_scores.append( torch.nn.functional.cosine_similarity(emb_enroll, emb_test).mean().item() )
out_e2e.append([str(idxs_enroll[i]), str(idxs_test[i]), e2e_scores[-1]])
out_cos.append([str(idxs_enroll[i]), str(idxs_test[i]), cos_scores[-1]])
e2e_scores = np.asarray(e2e_scores)
cos_scores = np.asarray(cos_scores)
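        # Score fusion: rescale cosine scores from [-1, 1] to [0, 1], then
        # average them with the end-to-end scores.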
all_scores = (e2e_scores + 0.5*(cos_scores+1.))*0.5
labels = np.asarray(labels)
model_id = cp.split('/')[-1]
print('\nEval of model {}:'.format(model_id))
e2e_eer, e2e_auc, avg_precision, acc, threshold = compute_metrics(labels, e2e_scores)
print('\nE2E:')
        print('EER, AUC, Average Precision, Accuracy and corresponding threshold: {}, {}, {}, {}, {}'.format(e2e_eer, e2e_auc, avg_precision, acc, threshold))
cos_eer, cos_auc, avg_precision, acc, threshold = compute_metrics(labels, cos_scores)
print('\nCOS:')
        print('EER, AUC, Average Precision, Accuracy and corresponding threshold: {}, {}, {}, {}, {}'.format(cos_eer, cos_auc, avg_precision, acc, threshold))
fus_eer, fus_auc, avg_precision, acc, threshold = compute_metrics(labels, all_scores)
print('\nFUS:')
        print('EER, AUC, Average Precision, Accuracy and corresponding threshold: {}, {}, {}, {}, {}'.format(fus_eer, fus_auc, avg_precision, acc, threshold))
if e2e_eer<best_e2e_eer:
best_model, best_e2e_eer, best_e2e_auc, best_cos_eer, best_cos_auc, best_fus_eer, best_fus_auc = model_id, e2e_eer, e2e_auc, cos_eer, cos_auc, fus_eer, fus_auc
print('Best model and corresponding E2E eer and auc: {} - {} - {}'.format(best_model, best_e2e_eer, best_e2e_auc))
print('Corresponding COS eer and auc: {} - {} - {}'.format(best_model, best_cos_eer, best_cos_auc))
print('Corresponding FUS eer and auc: {} - {} - {}'.format(best_model, best_fus_eer, best_fus_auc))
| [
"[email protected]"
] | |
34cd44260e0cfe1b01d0c68d5483e76202bc1907 | 92e3a6424326bf0b83e4823c3abc2c9d1190cf5e | /scripts/icehouse/opt/stack/heat/heat/openstack/common/db/exception.py | 27987c57b6aeadb45b9547d26ff7820317516fa3 | [
"Apache-2.0"
] | permissive | AnthonyEzeigbo/OpenStackInAction | d6c21cf972ce2b1f58a93a29973534ded965d1ea | ff28cc4ee3c1a8d3bbe477d9d6104d2c6e71bf2e | refs/heads/master | 2023-07-28T05:38:06.120723 | 2020-07-25T15:19:21 | 2020-07-25T15:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions."""
from heat.openstack.common.gettextutils import _
class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception))
class DBDuplicateEntry(DBError):
"""Wraps an implementation specific exception."""
    def __init__(self, columns=None, inner_exception=None):
        # Avoid a shared mutable default argument.
        self.columns = columns or []
        super(DBDuplicateEntry, self).__init__(inner_exception)
class DBDeadlock(DBError):
def __init__(self, inner_exception=None):
super(DBDeadlock, self).__init__(inner_exception)
class DBInvalidUnicodeParameter(Exception):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
class DbMigrationError(DBError):
"""Wraps migration specific exception."""
def __init__(self, message=None):
super(DbMigrationError, self).__init__(str(message))
class DBConnectionError(DBError):
"""Wraps connection specific exception."""
pass
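# Typical wrapping pattern (hypothetical usage; driver exception names vary):
#     try:
#         session.execute(statement)
#     except IntegrityError as exc:
#         raise DBDuplicateEntry(columns=['uuid'], inner_exception=exc)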
| [
"[email protected]"
] | |
ae802470108ec86a3a76c105e3ae18dd8acbfab3 | 904fd519e3f10c8a21653d3fdea34915d2e708e2 | /dat/migrations/0001_initial.py | 586992533e52fb421a095ca0084fdeddfc2485fe | [
"Apache-2.0"
] | permissive | Kgermando/es-script | 5d4c74996dd83a2e91fb462f3ceb4943c887811c | f1b10ecf2c805e8875a025e7033c724e236f6cd1 | refs/heads/main | 2023-06-26T00:54:31.351588 | 2021-07-26T15:40:14 | 2021-07-26T15:40:14 | 366,509,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | # Generated by Django 3.1.7 on 2021-06-11 05:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contacts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Dat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('questions1', models.CharField(default='-', max_length=200, null=True)),
('questions2', models.CharField(default='-', max_length=200, null=True)),
('Statut', models.CharField(choices=[('---------', '--------------'), ('Accord', 'Accord'), ('Déjà payé son crédit', 'Déjà payé son crédit'), ('Refus', 'Refus'), ('Rappel', 'Rappel (interlocuteur demande de rappeler)'), ('Injoignable', 'Injoignable'), ('Absent', 'Absent'), ('Faux numéro', 'Faux numéro'), ('Réfléchir', 'Réfléchir')], max_length=30, null=True)),
('Bound', models.CharField(max_length=20, null=True)),
('Contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contacts.contact')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
fc31d030779e0d6f5442220445962d8dc9cd3eaf | c0ca5fe06bce08f9ab739cd4bdcddd782b915898 | /Python_tests/decoraror_kwargs/spookiplugins.py | 6fcf735fc81f09b74b864cfc0566b0d1b31bdb8a | [] | no_license | PhilippeCarphin/tests | bac30ceb9d2d8135b34af3e520fe2a9030028044 | 9716f53a41ed025c2d346be90c19866c59a03daa | refs/heads/master | 2023-07-06T15:09:53.048447 | 2023-07-04T23:52:43 | 2023-07-04T23:52:43 | 53,367,022 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,215 | py | from pyunix import make_unix_function
import subprocess
# noinspection PyUnusedLocal
@make_unix_function
def decorated_arg_function(humeur=None,
couleur=8,
                           bonjour=False):
"""EXAMPLE SPECIFICATION : A function that doesn't do anything.
    # TODO The behavior will be that declaring a function this way delegates,
    or something simpler than that, to
    a factory function that does this:
make_unix_function(initial_words, arg_spec):
git_clone('https://github.com/philippecarphin/configurations')
git_clone('https://github.com/philippecarphin/configurations', target='~/Documents/GitHub/config')
    mv(src='somefile', dst='somefile')
    mv(src='somefile', dst='somefile')
{target: NO_DEFAULT, link_name= NO_DEFAULT})
    It is decorated to make a function that checks the arguments, builds a
string and makes a call to the spooki plugin by the same name.
humeur is a mandatory argument, an exception will be raised if a call is
made without it.
couleur has a default value, it doesn't need to be specified.
bonjour is a flag if it is set to true then the flag will be appended
to the command
"""
pass
class PyUnixArg:
def __init__(self, argname):
        self.argname = argname
class PyUnixOption:
# option
# optarg
pass
class PyUnixPosarg:
# argname
# default
pass
class PyUnixFlag:
pass
class PyArgList:
# just a list of words
# like for mv or rm
# except mv is more complicated. Then you have
# to add something like a last_arg property along with the option_before
# property. If last_arg is true, then the function will have
# unix_move(target=os.path.join(...), files_to_move=my_file_list)
# unix_move(files_to_move=my_file_list, target=os.path.join(...))
# So I would specify one kwarg that is a PyArgList, and one that is a PyUnixOption.
# and I would get the function above.
    pass
# class PyUnixKwarg(name=this, other_prop=that, option_prefix='--', with_equal=False)  # design sketch, not valid Python
# def make_unix_function(initial_words, args = ((name, default), (name, default), ...), kwargs, options_before=True)
# def make_unix_function(initial_words, list_arg_spec):
#     # list_arg_spec is a list of PyUnix*-style arg specs
def make_unix_function(initial_words, argspec=[()], kwargspec={}, options_before=True, varargs=False):
# Validate that argspec is a list of 2-element tuples
# validate that kwargspec is a dictionary of
# {
# PyUnixKwarg(name=this, other_prop=that, option_prefix='--', with_equal=False, flag=False)
# }
# Both argspec and kwargspec will be lists of these. Optio
# Argspec same 'struct' but an ordered list of them instead of a dictionary of them.
if options_before:
def new_func(*args, **kwargs):
option_words = make_option_words(kwargspec, kwargs) # A list
arg_words = list(args)
command_words = initial_words + option_words + arg_words
subprocess.run(command_words)
else:
def new_func(*args, **kwargs):
option_words = make_option_words(kwargspec, kwargs) # A list
arg_words = list(args)
command_words = initial_words + arg_words + option_words
            subprocess.run(command_words)
    # make_option_words is still an unwritten sketch helper; return the
    # built wrapper so the decorator form above can work at all.
    return new_func
# noinspection PyUnusedLocal
@make_unix_function
def git(temperature="cold",
pressure=130,
some_flag=False) -> IMO:
""" This docstring will be given over to the new function """
pass
# noinspection PyUnusedLocal
@make_unix_function
def annotated_func(humidity: int = 8,
                   stickyness: str = "hello",
                   sweatiness: str = None,
                   sunny: bool = False):
pass
if __name__ == "__main__":
# print("CALLING DECORATED FUNCTION")
decorated_arg_function(humeur='piss-bucket', couleur=8, bonjour=True)
# print("CALLING DECORATED FUNCTION")
# decorated_arg_function(humeur='Joyeux', bonjour=True)
# print("CALLING DECORATED FUNCTION")
# decorated_arg_function(couleur='rouge', humeur='piss-bucket', bonjour=True)
windchill()
windchill(some_flag=True)
annotated_func(humidity=8, stickyness="9", sweatiness="asdf", sunny=True)
| [
"[email protected]"
] | |
bdc0db1a009ab22c4fed0cab30158cc6d04d80fb | dde0d75db42c19390f2625a7888586e4d2a14fd7 | /devel/.private/cob_object_detection_msgs/lib/python2.7/dist-packages/cob_object_detection_msgs/msg/_TrainObjectResult.py | 4334f1679464dd809bb35e2ddf81fa5c4c2cac43 | [] | no_license | dhemp09/uml-robotics | 16460efe8195a3f9a6a8296047f4fd4d9df0de80 | 862132e00e221b0a86bc283e7568efa984be673f | refs/heads/master | 2020-03-26T09:44:04.033762 | 2018-08-15T18:11:18 | 2018-08-15T18:11:18 | 144,762,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,963 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from cob_object_detection_msgs/TrainObjectResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TrainObjectResult(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "cob_object_detection_msgs/TrainObjectResult"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# The results
"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(TrainObjectResult, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
| [
"[email protected]"
] | |
276311ba192111ad3852fba51761d00a98d969e2 | e0a51ac08f13f4d3d89ccd770225a9ca0cecb80a | /seucorretor/cidades/admin.py | 6d8a0d0175ea14209d65b26b1205eee421665a9c | [] | no_license | MarcosDihl/corretaza-buscador | 8bbc94a81f7414a3cbc4a1b7ce7b841431209b1c | a3579059839f32c585dda05775fa525fdd34121e | refs/heads/master | 2022-04-04T03:36:47.360708 | 2018-01-31T03:05:13 | 2018-01-31T03:05:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.contrib import admin
from .models import Cidade, Regiao, Bairro
admin.site.register(Cidade)
admin.site.register(Regiao)
admin.site.register(Bairro)
| [
"[email protected]"
] | |
d12d58a30488fa6fba934eb791e0ccbbffd9631b | ed63c99ccb0beebcfe9bff2ef68e9c86877fa7d8 | /vocoder/train.py | b2449f66ffe8fcb05e7c7aeccb502f70c6c114b5 | [
"MIT"
] | permissive | X-CCS/Real-Time-Voice-Cloning-1 | d25588a852b87849f9a517d587a3a36d086bbae0 | ae4aa2aa1605168d2f04275e1a45f6de2d88f3f0 | refs/heads/master | 2022-02-28T03:29:26.135339 | 2019-10-23T12:01:10 | 2019-10-23T12:01:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,376 | py | from vocoder.models.fatchord_version import WaveRNN
from vocoder.vocoder_dataset import VocoderDataset, collate_vocoder
from vocoder.distribution import discretized_mix_logistic_loss
from vocoder.display import stream, simple_table
from vocoder.gen_wavernn import gen_testset
from torch.utils.data import DataLoader
from pathlib import Path
from torch import optim
import torch.nn.functional as F
import vocoder.hparams as hp
import numpy as np
import time
def train(run_id: str, syn_dir: Path, voc_dir: Path, models_dir: Path, ground_truth: bool,
save_every: int, backup_every: int, force_restart: bool):
# Check to make sure the hop length is correctly factorised
assert np.cumprod(hp.voc_upsample_factors)[-1] == hp.hop_length
# Instantiate the model
print("Initializing the model...")
model = WaveRNN(
rnn_dims=hp.voc_rnn_dims,
fc_dims=hp.voc_fc_dims,
bits=hp.bits,
pad=hp.voc_pad,
upsample_factors=hp.voc_upsample_factors,
feat_dims=hp.num_mels,
compute_dims=hp.voc_compute_dims,
res_out_dims=hp.voc_res_out_dims,
res_blocks=hp.voc_res_blocks,
hop_length=hp.hop_length,
sample_rate=hp.sample_rate,
mode=hp.voc_mode
).cuda()
# Initialize the optimizer
optimizer = optim.Adam(model.parameters())
for p in optimizer.param_groups:
p["lr"] = hp.voc_lr
loss_func = F.cross_entropy if model.mode == "RAW" else discretized_mix_logistic_loss
# Load the weights
model_dir = models_dir.joinpath(run_id)
model_dir.mkdir(exist_ok=True)
weights_fpath = model_dir.joinpath(run_id + ".pt")
if force_restart or not weights_fpath.exists():
print("\nStarting the training of WaveRNN from scratch\n")
model.save(weights_fpath, optimizer)
else:
print("\nLoading weights at %s" % weights_fpath)
model.load(weights_fpath, optimizer)
print("WaveRNN weights loaded from step %d" % model.step)
# Initialize the dataset
metadata_fpath = syn_dir.joinpath("train.txt") if ground_truth else \
voc_dir.joinpath("synthesized.txt")
mel_dir = syn_dir.joinpath("mels") if ground_truth else voc_dir.joinpath("mels_gta")
wav_dir = syn_dir.joinpath("audio")
dataset = VocoderDataset(metadata_fpath, mel_dir, wav_dir)
test_loader = DataLoader(dataset,
batch_size=1,
shuffle=True,
pin_memory=True)
# Begin the training
simple_table([('Batch size', hp.voc_batch_size),
('LR', hp.voc_lr),
('Sequence Len', hp.voc_seq_len)])
for epoch in range(1, 350):
data_loader = DataLoader(dataset,
collate_fn=collate_vocoder,
batch_size=hp.voc_batch_size,
num_workers=2,
shuffle=True,
pin_memory=True)
start = time.time()
running_loss = 0.
for i, (x, y, m) in enumerate(data_loader, 1):
x, m, y = x.cuda(), m.cuda(), y.cuda()
# Forward pass
y_hat = model(x, m)
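            # RAW mode: logits over quantized sample bins are reshaped to
            # (batch, classes, time, 1) for cross entropy; MOL mode instead
            # fits a discretized mixture of logistics to the float samples.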
if model.mode == 'RAW':
y_hat = y_hat.transpose(1, 2).unsqueeze(-1)
elif model.mode == 'MOL':
y = y.float()
y = y.unsqueeze(-1)
# Backward pass
loss = loss_func(y_hat, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
speed = i / (time.time() - start)
avg_loss = running_loss / i
step = model.get_step()
k = step // 1000
if backup_every != 0 and step % backup_every == 0:
model.checkpoint(model_dir, optimizer)
if save_every != 0 and step % save_every == 0:
model.save(weights_fpath, optimizer)
msg = "| Epoch: %04d (%d/%d) | Loss: %.4f | %.1f steps/s | Step: %d | " % (
epoch, i, len(data_loader), avg_loss, speed, k
)
stream(msg)
gen_testset(model, test_loader, hp.voc_gen_at_checkpoint, hp.voc_gen_batched,
hp.voc_target, hp.voc_overlap, model_dir)
print("")
| [
"[email protected]"
] | |
baf29f1497792eadf8baea08bff17463c2572a94 | 5f6c16e89cf58304c2e70f1e34f14110fcec636c | /python-swagger-sdk/test/test_secret_network_rest_api.py | 67db62646ec81cd1341b284912b0e92b3292fe27 | [] | no_license | mohammedpatla/secretapi | 481c97901a5e92ca02e29470ab683df80ea0f26a | df420498bd0ae37fd1a152c3877a1342275a8f43 | refs/heads/master | 2022-12-25T01:55:18.038954 | 2020-10-04T23:13:54 | 2020-10-04T23:13:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | # coding: utf-8
"""
API for Secret Network by ChainofSecrets.org
A REST interface for state queries, transaction generation and broadcasting. # noqa: E501
OpenAPI spec version: 3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.secret_network_rest_api import SecretNetworkRESTApi # noqa: E501
from swagger_client.rest import ApiException
class TestSecretNetworkRESTApi(unittest.TestCase):
"""SecretNetworkRESTApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.secret_network_rest_api.SecretNetworkRESTApi() # noqa: E501
def tearDown(self):
pass
def test_node_info_get(self):
"""Test case for node_info_get
The properties of the connected node # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
62cb616462e647828c49ec52cc887a48020ea0e7 | 9398d8433fdb29ee630a6ee43a07bc36a2adbd88 | /openstackclient/tests/compute/v2/fakes.py | ecf7f599ba8d1d43e321c07e2f7ed7f030142719 | [] | no_license | bopopescu/OpenStack_Liberty_Control | ca5a21d0c32c55dc8c517f5c7c9938ce575a4888 | 0f6ec1b4d38c47776fdf8935266bcaef2464af4c | refs/heads/master | 2022-12-03T10:41:53.210667 | 2016-03-29T06:25:58 | 2016-03-29T06:25:58 | 282,089,815 | 0 | 0 | null | 2020-07-24T01:04:15 | 2020-07-24T01:04:14 | null | UTF-8 | Python | false | false | 9,083 | py | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import mock
import uuid
from openstackclient.tests import fakes
from openstackclient.tests.identity.v2_0 import fakes as identity_fakes
from openstackclient.tests.image.v2 import fakes as image_fakes
from openstackclient.tests.network.v2 import fakes as network_fakes
from openstackclient.tests import utils
from openstackclient.tests.volume.v2 import fakes as volume_fakes
extension_name = 'Multinic'
extension_namespace = 'http://docs.openstack.org/compute/ext/'\
'multinic/api/v1.1'
extension_description = 'Multiple network support'
extension_updated = '2014-01-07T12:00:0-00:00'
extension_alias = 'NMN'
extension_links = '[{"href":'\
'"https://github.com/openstack/compute-api", "type":'\
' "text/html", "rel": "describedby"}]'
EXTENSION = {
'name': extension_name,
'namespace': extension_namespace,
'description': extension_description,
'updated': extension_updated,
'alias': extension_alias,
'links': extension_links,
}
floating_ip_num = 100
fix_ip_num = 100
injected_file_num = 100
injected_file_size_num = 10240
injected_path_size_num = 255
key_pair_num = 100
core_num = 20
ram_num = 51200
instance_num = 10
property_num = 128
secgroup_rule_num = 20
secgroup_num = 10
project_name = 'project_test'
QUOTA = {
'project': project_name,
'floating-ips': floating_ip_num,
'fix-ips': fix_ip_num,
'injected-files': injected_file_num,
'injected-file-size': injected_file_size_num,
'injected-path-size': injected_path_size_num,
'key-pairs': key_pair_num,
'cores': core_num,
'ram': ram_num,
'instances': instance_num,
'properties': property_num,
'secgroup_rules': secgroup_rule_num,
'secgroups': secgroup_num,
}
QUOTA_columns = tuple(sorted(QUOTA))
QUOTA_data = tuple(QUOTA[x] for x in sorted(QUOTA))
service_host = 'host_test'
service_binary = 'compute_test'
service_status = 'enabled'
SERVICE = {
'host': service_host,
'binary': service_binary,
'status': service_status,
}
class FakeComputev2Client(object):
def __init__(self, **kwargs):
self.images = mock.Mock()
self.images.resource_class = fakes.FakeResource(None, {})
self.servers = mock.Mock()
self.servers.resource_class = fakes.FakeResource(None, {})
self.services = mock.Mock()
self.services.resource_class = fakes.FakeResource(None, {})
self.extensions = mock.Mock()
self.extensions.resource_class = fakes.FakeResource(None, {})
self.flavors = mock.Mock()
self.flavors.resource_class = fakes.FakeResource(None, {})
self.quotas = mock.Mock()
self.quotas.resource_class = fakes.FakeResource(None, {})
self.quota_classes = mock.Mock()
self.quota_classes.resource_class = fakes.FakeResource(None, {})
self.volumes = mock.Mock()
self.volumes.resource_class = fakes.FakeResource(None, {})
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
class TestComputev2(utils.TestCommand):
def setUp(self):
super(TestComputev2, self).setUp()
self.app.client_manager.compute = FakeComputev2Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
self.app.client_manager.identity = identity_fakes.FakeIdentityv2Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
self.app.client_manager.image = image_fakes.FakeImagev2Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
self.app.client_manager.network = network_fakes.FakeNetworkV2Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
self.app.client_manager.volume = volume_fakes.FakeVolumeClient(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
class FakeServer(object):
"""Fake one or more compute servers."""
@staticmethod
def create_one_server(attrs={}, methods={}):
"""Create a fake server.
:param Dictionary attrs:
A dictionary with all attributes
:param Dictionary methods:
A dictionary with all methods
:return:
A FakeResource object, with id, name, metadata
"""
# Set default attributes.
server_info = {
'id': 'server-id-' + uuid.uuid4().hex,
'name': 'server-name-' + uuid.uuid4().hex,
'metadata': {},
}
# Overwrite default attributes.
server_info.update(attrs)
server = fakes.FakeResource(info=copy.deepcopy(server_info),
methods=methods,
loaded=True)
return server
@staticmethod
def create_servers(attrs={}, methods={}, count=2):
"""Create multiple fake servers.
:param Dictionary attrs:
A dictionary with all attributes
:param Dictionary methods:
A dictionary with all methods
:param int count:
The number of servers to fake
:return:
A list of FakeResource objects faking the servers
"""
servers = []
for i in range(0, count):
servers.append(FakeServer.create_one_server(attrs, methods))
return servers
@staticmethod
def get_servers(servers=None, count=2):
"""Get an iterable MagicMock object with a list of faked servers.
If servers list is provided, then initialize the Mock object with the
list. Otherwise create one.
:param List servers:
A list of FakeResource objects faking servers
:param int count:
The number of servers to fake
:return:
An iterable Mock object with side_effect set to a list of faked
servers
"""
if servers is None:
servers = FakeServer.create_servers(count)
return mock.MagicMock(side_effect=servers)
class FakeFlavorResource(fakes.FakeResource):
"""Fake flavor object's methods to help test.
The flavor object has three methods to get, set, unset its properties.
Need to fake them, otherwise the functions to be tested won't run properly.
"""
# Fake properties.
_keys = {'property': 'value'}
def set_keys(self, args):
self._keys.update(args)
def unset_keys(self, keys):
for key in keys:
self._keys.pop(key, None)
def get_keys(self):
return self._keys
class FakeFlavor(object):
"""Fake one or more flavors."""
@staticmethod
def create_one_flavor(attrs={}):
"""Create a fake flavor.
:param Dictionary attrs:
A dictionary with all attributes
:return:
A FakeFlavorResource object, with id, name, ram, vcpus, properties
"""
# Set default attributes.
flavor_info = {
'id': 'flavor-id-' + uuid.uuid4().hex,
'name': 'flavor-name-' + uuid.uuid4().hex,
'ram': 8192,
'vcpus': 4,
}
# Overwrite default attributes.
flavor_info.update(attrs)
flavor = FakeFlavorResource(info=copy.deepcopy(flavor_info),
loaded=True)
return flavor
@staticmethod
def create_flavors(attrs={}, count=2):
"""Create multiple fake flavors.
:param Dictionary attrs:
A dictionary with all attributes
:param int count:
The number of flavors to fake
:return:
A list of FakeFlavorResource objects faking the flavors
"""
flavors = []
for i in range(0, count):
flavors.append(FakeFlavor.create_one_flavor(attrs))
return flavors
@staticmethod
def get_flavors(flavors=None, count=2):
"""Get an iterable MagicMock object with a list of faked flavors.
If flavors list is provided, then initialize the Mock object with the
list. Otherwise create one.
:param List flavors:
A list of FakeFlavorResource objects faking flavors
:param int count:
The number of flavors to fake
:return:
An iterable Mock object with side_effect set to a list of faked
flavors
"""
if flavors is None:
            flavors = FakeFlavor.create_flavors(count)
return mock.MagicMock(side_effect=flavors)
| [
"[email protected]"
] | |
8566f9e75de89e699548243255089d570f8f980a | a59ec95fddc064ea9a554ad41e4ac8e82376701a | /myshop/orders/forms.py | 85b29fe6f1e2dd581d3dc42576bb2d6f250431df | [] | no_license | Nicholas86/PythonDemos | 449c08713c7c03633719a4ae7287b127783d7574 | 4f06639cc65a5e10cc993335d3d34e2d60aac983 | refs/heads/master | 2021-01-22T21:07:11.457179 | 2017-08-18T06:40:44 | 2017-08-18T06:40:44 | 100,681,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
from django import forms
from .models import *
# Create a model form based on the Order model
class OrderCreateForm(forms.ModelForm):
class Meta:
model = Order
        # Fields shown on the form: the values the user must fill in
fields = ['first_name', 'last_name', 'email', 'address','postal_code', 'city']
| [
"[email protected]"
] | |
58cb0959aa62afa5c2cbef4ea5407a68d55606ce | cceeb787cf02dfee98f6b913e0815a5250505a29 | /special/special_m_CochranQ检验.py | 9ec770271c99a0204bdee065914558bcfc190c82 | [] | no_license | shandakangping/BioStat | 4782bdf599ddc4b44dcb9c4ad100459edbd2d221 | c2ecc1282bc12228b88eeba66551aec87d30fd8e | refs/heads/master | 2022-03-28T05:37:50.322348 | 2019-12-05T04:20:17 | 2019-12-05T04:20:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | #CochranQ检验 https://spssau.com/front/spssau/helps/medicalmethod/cochranQ.html
import pandas as pd
from statsmodels.sandbox.stats.runs import cochrans_q
def core(x):
'''
x: pd.DataFrame()
'''
n = len(x)
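    # Build a per-column frequency table (counts of each outcome) and the
    # matching percentage table, stacked under a two-level header.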
freq = x.apply(lambda a:a.value_counts()).T
perc = freq.apply(lambda a:a/n)
f1 = freq.T
f1 = f1.reset_index()
f1.index = ['频数', '频数']
f1 = f1.reset_index().set_index(['level_0','index'])
f2 = perc.T
f2 = f2.reset_index()
f2.index = ['百分比', '百分比']
f2 = f2.reset_index().set_index(['level_0','index'])
f = f1.append(f2).T
f.columns.names = [None, None]
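    # Cochran's Q tests whether the success proportions of the k related
    # binary samples (the columns of x) differ significantly.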
z, p = cochrans_q(x)
df = pd.Series({'样本量':n, 'CochransQ 统计量':z, 'p':p, 'df':x.shape[1]-1})
df = df.to_frame().T.set_index('样本量')
res = {'频数分析结果':f, 'CochranQ检验结果': df}
return res
if __name__ == '__main__':
d = '''村长 村民1 村民2 村民3 村民4 村民5 村民6 村民7 村民8 村民9 村民10
村长1 0 1 1 0 0 1 1 1 1 1
村长2 1 1 0 0 0 1 1 1 1 1
村长3 0 1 1 1 1 0 0 0 0 1
村长4 0 0 0 0 1 1 0 0 1 0'''
df = pd.DataFrame([i.split('\t') for i in d.split('\n')]).T.set_index(0).T
df = df.set_index('村长')
df.columns.name = None
df = df.astype(int)
    res = core(df) | [
"[email protected]"
] | |
12a26396e8dab5f0018ea9c4f98927ab88a5a63b | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /pzQXHMqizBmaLDCHc_9.py | 15c6a97cb23a0e43aaf7b064fe8045b18ba3fc93 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,858 | py | """
It's a Pokemon battle! Your task is to calculate the damage that a particular
move would do using the following formula (not the actual one from the game):
damage = 50 * (attack / defense) * effectiveness
* attack = your attack power
* defense = the opponent's defense
* effectiveness = the effectiveness of the attack based on the matchup (see explanation below)
Effectiveness:
Attacks can be super effective, neutral, or not very effective depending on
the matchup. For example, water would be super effective against fire, but not
very effective against grass.
* Super effective: 2x damage
* Neutral: 1x damage
* Not very effective: 0.5x damage
To prevent this challenge from being tedious, you'll only be dealing with four
types: fire, water, grass, and electric. Here is the effectiveness of each
matchup:
* fire > grass
* fire < water
* fire = electric
* water < grass
* water < electric
* grass = electric
The function you must implement takes in:
* your type
* the opponent's type
* your attack power
* the opponent's defense
### Examples
calculate_damage("fire", "water", 100, 100) ➞ 25
calculate_damage("grass", "fire", 35, 5) ➞ 175
calculate_damage("electric", "fire", 100, 100) ➞ 50
### Notes
Any type against itself is not very effective. Also, assume that the
relationships between different types are symmetric (if A is super effective
against B, then B is not very effective against A).
"""
def calculate_damage(your_type, opponent_type, attack, defense):
eff = (("fire", "grass"), ("water", "fire"), ("grass", "water"), ("electric", "water"))
if (your_type, opponent_type) in eff:
effectiveness = 2
elif (opponent_type, your_type) in eff:
effectiveness = 0.5
else:
effectiveness = 1
return 50 * (attack / defense) * effectiveness
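# Quick sanity checks mirroring the docstring examples (hypothetical usage):
if __name__ == "__main__":
    assert calculate_damage("fire", "water", 100, 100) == 25
    assert calculate_damage("grass", "fire", 35, 5) == 175
    assert calculate_damage("electric", "fire", 100, 100) == 50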
| [
"[email protected]"
] | |
294569d8a26a8235760f1210848bfc69ed7e87ec | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_242/ch150_2020_04_13_20_26_06_768478.py | 39ac017bfbff2072d7991d338db44e25d37255e3 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | import math
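# Approximates pi via the Basel series: the sum of 6/k**2 for k = 1..n
# converges to pi**2, so the square root of the partial sum approaches pi.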
def calcula_pi(n):
contador=1
soma=0
while contador<=n:
soma+=6/(contador**2)
contador+=1
pi = math.sqrt(soma)
return pi
| [
"[email protected]"
] | |
b8e9300a6881004ad7de71e3459c045a7a356399 | 6cb25fcb5ce0e4f3b8cfb1165abe15d3de7fce82 | /link/tasks.py | 17747a2aee50b47ae5cd6eecc5882eb6e64a79f8 | [] | no_license | RoodrigoRoot/bills_celery | 4d3f904821f49629f2c024772899943e06042451 | 3ec5c5806c9b1ec75ff6ec3b287bf4d0a2537164 | refs/heads/master | 2022-12-24T09:04:46.917201 | 2020-02-10T22:41:55 | 2020-02-10T22:41:55 | 239,628,828 | 0 | 0 | null | 2022-12-08T03:35:29 | 2020-02-10T22:42:03 | Python | UTF-8 | Python | false | false | 330 | py | from celery import shared_task
from ftplib import FTP
@shared_task
def get_bills_moth():
#month = "manual_en.pdf"
#ftp = FTP("demo.wftpserver.com")
#ftp.login("demo-user", "demo-user")
#ftp.cwd("download")
#ftp.retrbinary("RETR " + month ,open(month, 'wb').write)
print("Factura descargada")
return 0 | [
"[email protected]"
] | |
e0d214ad9218a623239cde1c8e194ac4ca8332fb | 7d44d7e8b12263ed3f692fba4f19eaff0420c5c0 | /earth/adventurers.py | d812355915f9774a05acd68bdb8c1dd2cbebff3e | [
"MIT"
] | permissive | ryltsev/earth | 64a59b5af20477d0e9c1e2585d8772223d6b7421 | 2787eeb37692f1c82bc12cb24a4c1574826204a7 | refs/heads/master | 2023-08-18T15:37:25.506828 | 2019-05-26T09:11:40 | 2019-05-26T09:11:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,272 | py | import time
import typing
import attr
from .year import Months
from .travel import fly, AirportProblem
class Busy(RuntimeError):
"""Raised when Adventurer is busy."""
def pack(adventurer):
print(f"{adventurer.profile} {adventurer.name} is packing 👜")
@attr.s(auto_attribs=True, kw_only=True)
class Adventurer:
name: str
location: str
profile: str
_availability: typing.List = attr.ib(repr=False)
_calendar: typing.Dict = attr.ib(repr=False, init=False)
_getting_ready: typing.List[typing.Callable] = attr.ib(repr=False)
_ready: bool = attr.ib(repr=False, default=False, init=False)
def __str__(self):
return f"{self.profile} {self.name}"
@_getting_ready.default
def default_activities(self):
return [pack]
@_availability.default
def default_availability(self):
return list(Months)
def __attrs_post_init__(self):
self._calendar = {
month: month in self._availability for month in Months
}
def hello(self):
print(f"{self.profile} Hello, my name is {self.name}!")
def rsvp(self, event):
available = self._calendar[event.month]
if not available:
raise Busy(f"{self} sorry, I'm busy!")
self._calendar[event.month] = False
def get_ready(self):
if self._ready is not True:
for activity in self._getting_ready:
activity(self)
self._ready = True
return self._ready
def travel_to(self, event):
if self.location != event.location:
try:
location = fly(self.location, event.location)
except AirportProblem as exc:
print(f"{self}'s flight was cancelled 😞 {exc}")
else:
print(
f"{self} is travelling: "
f"{self.location} ✈️ {event.location}"
)
self.location = location
def new_panda(name, **kwargs):
def eat(panda):
for i in range(4):
print(f"{panda.profile} {panda.name} is eating... 🌱")
time.sleep(5)
kwargs.setdefault("location", "Asia")
return Adventurer(
name=name, profile="🐼", getting_ready=[eat, pack], **kwargs
)
def new_bear(name, **kwargs):
kwargs.setdefault("location", "North America")
kwargs.setdefault("availability", [Months.JUN, Months.JUL, Months.AUG])
return Adventurer(name=name, profile="🐻", **kwargs)
def new_tiger(name, **kwargs):
# Tigers travel light; do not pack
kwargs.setdefault("location", "Asia")
return Adventurer(name=name, profile="🐯", getting_ready=[], **kwargs)
def new_koala(name, **kwargs):
kwargs.setdefault("location", "Australia")
return Adventurer(name=name, profile="🐨", **kwargs)
def new_lion(name, **kwargs):
kwargs.setdefault("location", "Africa")
return Adventurer(name=name, profile="🦁", **kwargs)
def new_frog(name, **kwargs):
kwargs.setdefault("location", "South America")
return Adventurer(name=name, profile="🐸", **kwargs)
def new_fox(name, **kwargs):
kwargs.setdefault("location", "Europe")
return Adventurer(name=name, profile="🦊", getting_ready=[pack], **kwargs)
| [
"[email protected]"
] | |
dc2c6fac31db0f166a99c5899a1563edaf631d31 | f682bf62d7de4afeeadf7c4f93dd51cfe51a78ec | /vikuraa/NumberDisplay.py | e781b41148dfc47b4834f3122482c00767f9e032 | [] | no_license | aliaafee/vikuraa | 1159d19573f043baa8401510888c22920d9edf04 | df02493249b563c2f14ecd517ef8cbd09f1641a0 | refs/heads/master | 2020-05-29T13:13:35.554124 | 2017-02-20T18:21:48 | 2017-02-20T18:21:48 | 82,592,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,457 | py | import wx
class NumberDisplay(wx.Panel):
def __init__(self, parent, rows, first_row_big=True, size=wx.Size(-1,-1)):
wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER, size=size)
self.SetBackgroundColour('WHITE')
self.label = {}
self.value = {}
        gs = wx.FlexGridSizer(len(rows), 2, 0, 0)
self.fontBig = wx.Font(
24,
family=wx.MODERN,
style=wx.NORMAL,
weight=wx.FONTWEIGHT_BOLD)
self.fontMed = wx.Font(
16,
family=wx.MODERN,
style=wx.NORMAL,
weight=wx.FONTWEIGHT_BOLD)
for i in range(len(rows)):
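            # One row per caption: a plain label on the left and a
            # right-aligned value; the first row may use the larger font.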
self.label[i] = wx.StaticText(
self,
label = rows[i],
size = wx.Size(-1,-1),
style=0)
self.value[i] = wx.StaticText(
self,
label = '',
size = wx.Size(-1,-1),
style=0)
if i == 0:
if first_row_big == True:
self.value[i].SetFont(self.fontBig)
else:
self.value[i].SetFont(self.fontMed)
else:
self.value[i].SetFont(self.fontMed)
gs.Add(self.label[i],
proportion=0,
flag=wx.ALL| wx.ALIGN_CENTER_VERTICAL,
border=5)
gs.Add(self.value[i],
proportion=0,
flag=wx.ALL| wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT,
border=5)
for i in range(len(self.label)):
gs.AddGrowableRow(i)
gs.AddGrowableCol(0)
self.SetSizer(gs)
def SetValue(self, row, value):
self.value[row].SetLabel(value)
self.Layout()
def GetStaticText(self, row):
return self.value[row]
if __name__ == "__main__":
app = wx.App()
frame = wx.Frame(None)
pnl = NumberDisplay(frame,
['Total Due',
'Incl. 6% GST',
'Tender',
'Balance'])
pnl.SetValue(0, 'Rf 9412.00')
sz = wx.BoxSizer()
sz.Add(pnl,1,wx.ALL|wx.EXPAND,10)
frame.SetSizer(sz)
frame.Show()
app.MainLoop()
| [
"[email protected]"
] | |
2bd17aa550e72879b0baf592355a60b8120b372f | 1f9611710ffaf5e0493799f716d3006c2f27b412 | /tests/test_madlib.py | dea7c141a360f9b41dc1300064bd9d61a683a647 | [] | no_license | majdalkilany/madlib-cli.1 | 2ea8d37cfe39e60151480c15ccad2841f58f53d8 | 48fb9e0b5aa1a9d29081e50304ff1e208ede3921 | refs/heads/master | 2022-12-02T19:14:57.538714 | 2020-08-22T14:39:52 | 2020-08-22T14:39:52 | 289,232,834 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | from madlib_cli_1.madlib import read_file ,parse ,merge_and_write_file
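# Note: user_input_test below is not collected by pytest (its name lacks the
# test_ prefix) and references user_input, which is not imported above.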
def test_read():
expected = open('assets/mad_file.txt').read().strip()
received = read_file()
assert expected == received
def user_input_test():
accepted = 'please enter name >> '
actual = user_input(['name'])
assert accepted == actual
def test_Parse():
expected = ["majd","27"]
received = parse( "hello i ma {majd}, I am {27} years old")
assert expected == received
def testMerge():
words = ['smart', 'boxes', 'hungry', 'eat']
text = 'A {} {} had a {} dog so they {} them'
received = merge_and_write_file(words, text)
expected = 'A smart boxes had a hungry dog so they eat them'
assert expected == received
| [
"[email protected]"
] | |
8e338265078e527e3fb0019e38f7b755c53fe0fc | 67f9452cc0511c3d5ed501b65c31d6e8a5e7576b | /set36thas.py | 84906297a569828ec146edbd10ea10608db02ad9 | [] | no_license | thasleem-banu/python | 621bdc73722bcaa336bcbd95cd27d9aabfe7dd97 | 1003c3f32776d4ccf3ab1a1d98256c1158ca5670 | refs/heads/master | 2020-06-07T17:02:11.887671 | 2019-07-28T15:16:26 | 2019-07-28T15:16:26 | 193,060,927 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | sou=int(input())
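# Read an element count, then that many integers; sort ascending and print
# them space-separated (competitive-programming style I/O).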
finl=list(map(int,input().split()[:sou]))
finl.sort()
for i in finl:
print(i,end=" ")
| [
"[email protected]"
] | |
452b9e305dc8e43d3f7fc84b3e781c57477f5b93 | feb9d0e303dd6d45a160aef20c6f045871b56fcf | /networks/resnet.py | 6c3733d5cd23ba7c2fc17c02a903f1bddedae73d | [] | no_license | TrendingTechnology/REFILLED | c0fe8bef1120668ca715c5dce7f819903ca66f49 | c5e077f3b4708384908919253da6fdece08f1dab | refs/heads/master | 2022-10-13T18:43:28.721174 | 2020-06-12T08:59:24 | 2020-06-12T08:59:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,537 | py | # -*- coding: utf-8 -*-
"""
@Author: Su Lu
@Date: 2019-07-15 15:21:44
"""
import numpy as np
import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F
def conv_init(m):
"""
Introduction of function
------------------------
This function inits parameters in a layer.
Parameters
----------
m: torch.nn.Module
a layer containing parameters to be inited
Returns
-------
NONE
"""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_uniform_(m.weight, gain = np.sqrt(2))
elif classname.find('BatchNorm') != -1:
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class LambdaLayer(nn.Module):
"""
Introduction of class
---------------------
This class implements lambda layer which completes a specified computing
process according to a given function.
Variables
---------
f: function
a function defining how to calculate output
Attributes
----------
f: function
a function defining how to calculate output
Methods
-------
forward([x]): torch.Tensor
forward process of LambdaLayer
"""
def __init__(self, f):
super(LambdaLayer, self).__init__()
self.f = f
def forward(self, x):
"""
Introduction of method
----------------------
This method implements forward process of LambdaLayer.
Parameters
----------
x: torch.Tensor
input of LambdaLayer
Returns
-------
y: torch.Tensor
output of LambdaLayer
"""
y = self.f(x)
return y
class BasicBlock(nn.Module):
"""
Introduction of class
---------------------
This class implements basic block in residual network.
Variables
---------
in_channels_of_basic_block: int
number of input channels of basic block
out_channels_of_basic_block: int
number of output channels of basic block
stride: int
stride used by convolutional layer of basic block
Attributes
----------
in_channels_of_basic_block: int
number of input channels of basic block
out_channels_of_basic_block: int
number of output channels of basic block
stride: int
stride used by convolutional layer of basic block
conv1: torch.nn.Conv2d
first convolutional layer
bn1: torch.nn.BatchNorm2d
first batch normalization layer
conv2: torch.nn.Conv2d
second convolutional layer
bn2: torch.nn.BatchNorm2d
second batch normalization layer
shortcut: torch.nn.Sequential
shortcut in basic block
Methods
-------
forward([x]): torch.autograd.Variable
forward process of basic block
"""
def __init__(self, in_channels_of_basic_block, out_channels_of_basic_block, stride):
super(BasicBlock, self).__init__()
self.in_channels = in_channels_of_basic_block
self.out_channels = out_channels_of_basic_block
self.stride = stride
self.conv1 = nn.Conv2d(in_channels = in_channels_of_basic_block, out_channels = out_channels_of_basic_block,
kernel_size = (3, 3), stride = (stride, stride), padding = (1, 1), bias = False)
self.bn1 = nn.BatchNorm2d(num_features = out_channels_of_basic_block)
self.conv2 = nn.Conv2d(in_channels = out_channels_of_basic_block, out_channels = out_channels_of_basic_block,
kernel_size = (3, 3), stride = (1, 1), padding = (1, 1), bias = False)
self.bn2 = nn.BatchNorm2d(num_features = out_channels_of_basic_block)
self.shortcut = nn.Sequential()
# size of feature map changes or number of channels changes
if stride != 1 or in_channels_of_basic_block != out_channels_of_basic_block:
self.shortcut = LambdaLayer(
lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, out_channels_of_basic_block // 4,
out_channels_of_basic_block // 4), 'constant', 0))
def forward(self, x):
"""
Introduction of method
----------------------
This method implements forward process of BasicBlock.
Parameters
----------
x: torch.Tensor
input of BasicBlock
Returns
-------
y: torch.Tensor
output of BasicBlock
"""
y = self.conv1(x)
y = self.bn1(y)
y = F.relu(y)
y = self.conv2(y)
y = self.bn2(y)
y += self.shortcut(x)
y = F.relu(y)
return y
class ResNet(nn.Module):
"""
Introduction of class
---------------------
This class implements residual network.
Variables
---------
depth: int
total number of simple layers in wide residual network
number_of_classes: int
number of classes in a classification task
Attributes
----------
depth: int
total number of simple layers in wide residual network
number_of_classes: int
number of classes in a classification task
conv1: torch.nn.Conv2d
first convolutional layers in wide residual network
bn1: torch.nn.BatchNorm2d
batch normalization layer
layer1: torch.nn.Sequential
first layer composed of several basic blocks
layer2: torch.nn.Sequential
second layer composed of several basic blocks
layer3: torch.nn.Sequential
third layer composed of several basic blocks
fc: torch.nn.Linear
full connected(linear) layer
Methods
-------
generate_layer([in_channels_of_layer, out_channels_of_layer,
number_of_blocks, stride_of_first_block]): torch.nn.Sequential
generate a whole layer composed of several basic blocks and some parameters defining
this layer and basic blocks are given to the method
forward([x]): torch.Tensor
forward process of residual network
forward_embedding([x]): torch.Tensor
forward process of residual network in embedding
"""
def __init__(self, depth, number_of_classes):
super(ResNet, self).__init__()
self.depth = depth
self.number_of_classes = number_of_classes
# depth must be of form (6n + 2)
# number of convolutional layers in a basic block = 2
# number of layers in a wide residual network = 3
# number of blocks in each layer = n
# number of other simple layers = 2
assert((depth - 2) % 6 == 0)
# calculate number of blocks in each layer
number_of_blocks_in_each_layer = int((depth - 2) / 6)
# define number of channels after each block
number_of_channels_after_each_layer = [16, 16, 32, 64]
self.conv1 = nn.Conv2d(in_channels = 3, out_channels = number_of_channels_after_each_layer[0],
kernel_size = (3, 3), stride = (1, 1), padding = (1, 1), bias = False)
self.bn1 = nn.BatchNorm2d(num_features = number_of_channels_after_each_layer[0])
# generate 3 layers
self.layer1 = self.generate_layer(in_channels_of_layer = number_of_channels_after_each_layer[0],
out_channels_of_layer = number_of_channels_after_each_layer[1], number_of_blocks = number_of_blocks_in_each_layer,
stride_of_first_block = 1)
self.layer2 = self.generate_layer(in_channels_of_layer = number_of_channels_after_each_layer[1],
out_channels_of_layer = number_of_channels_after_each_layer[2], number_of_blocks = number_of_blocks_in_each_layer,
stride_of_first_block = 2)
self.layer3 = self.generate_layer(in_channels_of_layer = number_of_channels_after_each_layer[2],
out_channels_of_layer = number_of_channels_after_each_layer[3], number_of_blocks = number_of_blocks_in_each_layer,
stride_of_first_block = 2)
# generate linear layer
self.fc = nn.Linear(in_features = number_of_channels_after_each_layer[3], out_features = number_of_classes)
def generate_layer(self, in_channels_of_layer, out_channels_of_layer, number_of_blocks,
stride_of_first_block):
"""
Introduction of method
----------------------
This method generates a whole layer using basic blocks.
Parameters
----------
in_channels_of_layer: int
number of input channels of layer
out_channels_of_layer: int
number of output channels of layer
number_of_blocks: int
number of basic blocks in a single layer
stride_of_first_block: int
stride used by first basic block in this layer, stride of other basic blocks is 1
Returns
-------
layer: torch.nn.Sequential
a whole layer generated using basic blocks
"""
strides_of_each_block = [stride_of_first_block] + [1] * (number_of_blocks - 1)
blocks = []
# generate a layer with number_of_blocks blocks
for i in range(0, number_of_blocks):
# generate the first basic block in this layer
if i == 0:
blocks.append(BasicBlock(in_channels_of_basic_block = in_channels_of_layer, out_channels_of_basic_block = out_channels_of_layer,
stride = strides_of_each_block[i]))
# generate other basic blocks
else:
blocks.append(BasicBlock(in_channels_of_basic_block = out_channels_of_layer, out_channels_of_basic_block = out_channels_of_layer,
stride = strides_of_each_block[i]))
# generate the whole layer using blocks
layer = nn.Sequential(*blocks)
return layer
def forward(self, x):
"""
Introduction of method
----------------------
This method implements forward process of residual network.
Parameters
----------
x: torch.Tensor
input of residual network
Returns
-------
y: torch.Tensor
output of residual network
"""
y = self.conv1(x)
y = self.bn1(y)
y = F.relu(y)
y = self.layer1(y)
y = self.layer2(y)
y = self.layer3(y)
y = F.avg_pool2d(y, y.size()[3])
y = y.view(y.size()[0], -1)
y = self.fc(y)
return y
def forward_embedding(self, x):
"""
Introduction of method
----------------------
This method implements forward process of residual network used in embedding.
Parameters
----------
x: torch.Tensor
input of residual network
Returns
-------
y: torch.Tensor
output of residual network in embedding
"""
y = self.conv1(x)
y = self.bn1(y)
y = F.relu(y)
y = self.layer1(y)
y = self.layer2(y)
y = self.layer3(y)
y = F.avg_pool2d(y, y.size()[3])
y = y.view(y.size()[0], -1)
return y | [
"[email protected]"
] | |
4717ab086f2fef8ceb6be1f76e7cc08bce0871f3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02400/s039065215.py | adef60ad100287d511a75c9a9b9032286684a3a2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | import math
r = input()
print '%.10f %.10f' %(r*r*math.pi , r*2*math.pi) | [
"[email protected]"
] | |
458e59de4379f587cb0638e512f274a0e96dade3 | 7efe824669f3d87fa48e775d13d2a0fb6e9c005e | /tensorflow/contrib/eager/python/datasets_test.py | 2917eaac97c48d59f8e5857ec8daff994558ed57 | [
"Apache-2.0"
] | permissive | codeteenager/tensorflow | 1c90bc1acf22b28c648c41176d18c28179f59dc4 | fc8118a853c26f0773145f2f1a3fba2fc968ea73 | refs/heads/master | 2020-03-25T01:41:40.765379 | 2018-08-02T06:18:03 | 2018-08-02T06:21:39 | 143,251,668 | 1 | 1 | Apache-2.0 | 2018-08-02T06:25:59 | 2018-08-02T06:25:58 | null | UTF-8 | Python | false | false | 14,389 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
import numpy as np
from tensorflow.contrib import lookup
from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.contrib.data.python.ops import threadpool
from tensorflow.contrib.data.python.ops import unique
from tensorflow.contrib.eager.python import datasets
from tensorflow.python.data import Dataset
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.training import saver
from tensorflow.python.training.checkpointable import util as checkpointable_utils
class IteratorTest(test.TestCase):
def testBasic(self):
got = []
for t in datasets.Iterator(Dataset.range(4)):
got.append(t.numpy())
self.assertAllEqual([0, 1, 2, 3], got)
def testBasicOneShotIterator(self):
got = []
for t in Dataset.range(4).make_one_shot_iterator():
got.append(t.numpy())
self.assertAllEqual([0, 1, 2, 3], got)
def testBasicImplicitIterator(self):
got = []
for t in Dataset.range(4):
got.append(t.numpy())
self.assertAllEqual([0, 1, 2, 3], got)
def testGetNext(self):
iterator = datasets.Iterator(Dataset.range(4))
self.assertEqual(0, iterator.get_next().numpy())
self.assertEqual(1, iterator.get_next().numpy())
self.assertEqual(2, iterator.get_next().numpy())
self.assertEqual(3, iterator.get_next().numpy())
with self.assertRaises(errors.OutOfRangeError):
iterator.get_next()
def testGetNextOneShotIterator(self):
iterator = Dataset.range(4).make_one_shot_iterator()
self.assertEqual(0, iterator.get_next().numpy())
self.assertEqual(1, iterator.get_next().numpy())
self.assertEqual(2, iterator.get_next().numpy())
self.assertEqual(3, iterator.get_next().numpy())
with self.assertRaises(errors.OutOfRangeError):
iterator.get_next()
def testMultipleIteratorsOnTheSameDataset(self):
ds = Dataset.range(4)
it1 = datasets.Iterator(ds)
it2 = datasets.Iterator(ds)
got = [x.numpy() for x in it1]
self.assertAllEqual([0, 1, 2, 3], got)
got = [x.numpy() for x in it2]
self.assertAllEqual([0, 1, 2, 3], got)
def testNestedOutputs(self):
ds = Dataset.zip((Dataset.range(4), Dataset.zip((Dataset.range(4),
Dataset.range(4)))))
total = 0
# The Iterator will return a nested structure of Tensor objects.
# Some funkiness to compare against simple integers.
for (i, x) in enumerate(datasets.Iterator(ds)):
want = (i, (i, i))
got = (x[0].numpy(), (x[1][0].numpy(), x[1][1].numpy()))
self.assertEqual(got, want)
total += 1
self.assertEqual(4, total)
def testMapAndFilter(self):
def even(x):
return math_ops.equal(math_ops.mod(x, 2), 0)
it = datasets.Iterator(Dataset.range(8).map(math_ops.square).filter(even))
got = [x.numpy() for x in it]
self.assertAllEqual([0, 4, 16, 36], got)
def testMapCaptureLookupTable(self):
default_val = -1
keys = constant_op.constant(['brain', 'salad', 'surgery'])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
dataset = Dataset.from_tensor_slices(['brain', 'salad', 'surgery'])
dataset = dataset.map(table.lookup)
it = datasets.Iterator(dataset)
got = [x.numpy() for x in it]
self.assertAllEqual([0, 1, 2], got)
def testMultipleIteratorsOnADatasetThatUsesFunctions(self):
ds = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6]).map(math_ops.square)
got1 = [x.numpy() for x in datasets.Iterator(ds)]
self.assertAllEqual([1, 4, 9, 16, 25, 36], got1)
got2 = [x.numpy() for x in datasets.Iterator(ds)]
self.assertAllEqual(got1, got2)
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testSparseTensorElements(self):
components = (sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1], [2, 2]]),
values=np.array([1, 2, 3]),
dense_shape=np.array([3, 3])))
expected = [
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([1]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[1]]),
values=np.array([2]),
dense_shape=np.array([3]))),
(sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[2]]),
values=np.array([3]),
dense_shape=np.array([3]))),
]
for i, result in enumerate(
datasets.Iterator(Dataset.from_tensor_slices(components))):
self.assertSparseValuesEqual(expected[i][0], result[0])
self.assertSparseValuesEqual(expected[i][1], result[1])
def testPyFunc(self):
def my_map(inp):
return [[x + 1 for x in inp]]
ds = Dataset.range(4).map(
lambda x: script_ops.py_func(my_map, [[x]], dtypes.int64))
got = [x.numpy() for x in datasets.Iterator(ds)]
self.assertAllEqual([[1], [2], [3], [4]], got)
def testTensorsPlacedOnDevice(self):
ds = Dataset.from_tensors([0., 1.])
with ops.device(test.gpu_device_name()):
x = datasets.Iterator(ds).next()
x = math_ops.add(x, x)
self.assertAllEqual([0., 2.], x.numpy())
def testGpuTensor(self):
ds = Dataset.from_tensors([0., 1.])
with ops.device(test.gpu_device_name()):
for x in ds:
y = math_ops.add(x, x)
self.assertAllEqual([0., 2.], y.numpy())
def testGpuDefinedDataset(self):
with ops.device(test.gpu_device_name()):
ds = Dataset.from_tensors([0., 1.])
for x in ds:
y = math_ops.add(x, x)
self.assertAllEqual([0., 2.], y.numpy())
def testTensorsExplicitPrefetchToDevice(self):
ds = Dataset.from_tensor_slices([0., 1.])
ds = ds.apply(prefetching_ops.prefetch_to_device(test.gpu_device_name()))
with self.assertRaisesRegexp(TypeError, 'prefetch_to_device'):
datasets.Iterator(ds)
for i, x in enumerate(ds):
with ops.device(test.gpu_device_name()):
x = math_ops.add(x, x)
self.assertEqual(float(i) + float(i), x.numpy())
def testOverrideThreadPool(self):
def get_thread_id(_):
# Python creates a dummy thread object to represent the current
# thread when called from an "alien" thread (such as a
# `PrivateThreadPool` thread in this case). It does not include
# the TensorFlow-given display name, but it has a unique
# identifier that maps one-to-one with the underlying OS thread.
return np.array(threading.current_thread().ident).astype(np.int64)
for num_threads in [1, 2, 4, 8, 16]:
dataset = (
Dataset.range(1000).map(
lambda x: script_ops.py_func(get_thread_id, [x], dtypes.int64),
num_parallel_calls=32).apply(unique.unique()))
dataset = threadpool.override_threadpool(
dataset,
threadpool.PrivateThreadPool(
num_threads, display_name='private_thread_pool_%d' % num_threads))
thread_ids = []
for next_element in datasets.Iterator(dataset):
thread_ids.append(next_element)
self.assertEqual(len(thread_ids), len(set(thread_ids)))
self.assertGreater(len(thread_ids), 0)
# NOTE(mrry): We don't control the thread pool scheduling, and
# so cannot guarantee that all of the threads in the pool will
# perform work.
self.assertLessEqual(len(thread_ids), num_threads)
def testSaveRestore(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
dataset = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
dataset = dataset.map(math_ops.square).batch(2)
iterator = datasets.Iterator(dataset)
checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)
self.assertAllEqual([1, 4], iterator.get_next().numpy())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual([9, 16], iterator.get_next().numpy())
self.assertAllEqual([25, 36], iterator.get_next().numpy())
checkpoint.restore(save_path)
self.assertAllEqual([9, 16], iterator.get_next().numpy())
self.assertAllEqual([25, 36], iterator.get_next().numpy())
def testSaveRestoreMultipleIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
dataset = Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
dataset = dataset.map(math_ops.square).batch(2)
iterator_1 = datasets.Iterator(dataset)
iterator_2 = datasets.Iterator(dataset)
dataset_2 = Dataset.range(10)
iterator_3 = datasets.Iterator(dataset_2)
checkpoint = checkpointable_utils.Checkpoint(
iterator_1=iterator_1, iterator_2=iterator_2, iterator_3=iterator_3)
self.assertAllEqual([1, 4], iterator_1.get_next().numpy())
self.assertEqual(0, iterator_3.get_next().numpy())
self.assertEqual(1, iterator_3.get_next().numpy())
self.assertEqual(2, iterator_3.get_next().numpy())
save_path = checkpoint.save(checkpoint_prefix)
self.assertAllEqual([1, 4], iterator_2.get_next().numpy())
self.assertAllEqual([9, 16], iterator_2.get_next().numpy())
self.assertEqual(3, iterator_3.get_next().numpy())
checkpoint.restore(save_path)
self.assertAllEqual([9, 16], iterator_1.get_next().numpy())
self.assertAllEqual([1, 4], iterator_2.get_next().numpy())
self.assertEqual(3, iterator_3.get_next().numpy())
def testRestoreExhaustedIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
dataset = Dataset.range(3)
iterator = datasets.Iterator(dataset)
checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)
self.assertEqual(0, iterator.get_next().numpy())
self.assertEqual(1, iterator.get_next().numpy())
save_path = checkpoint.save(checkpoint_prefix)
self.assertEqual(2, iterator.get_next().numpy())
checkpoint.restore(save_path)
self.assertEqual(2, iterator.get_next().numpy())
def testRestoreInReconstructedIterator(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
dataset = Dataset.range(10)
for i in range(5):
iterator = datasets.Iterator(dataset)
checkpoint = checkpointable_utils.Checkpoint(iterator=iterator)
checkpoint.restore(saver.latest_checkpoint(checkpoint_directory))
for j in range(2):
self.assertEqual(i * 2 + j, iterator.get_next().numpy())
checkpoint.save(file_prefix=checkpoint_prefix)
class DatasetConstructorBenchmark(test.Benchmark):
def benchmarkSliceRepeatBatchEager(self):
input_size = 10000
batch_size = 100
num_epochs = 100
input_data = np.random.randn(input_size)
dataset = (
Dataset.from_tensor_slices(input_data).repeat(num_epochs)
.batch(batch_size))
iterator = datasets.Iterator(dataset)
ends = [time.time()]
for _ in iterator:
ends.append(time.time())
deltas = np.ediff1d(ends)
median_wall_time = np.median(deltas)
print(
'Slice/repeat/batch eager input size: %d batch size: %d Median wall '
'time per element: %f'
% (input_size, batch_size, median_wall_time))
self.report_benchmark(
iters=len(deltas),
wall_time=median_wall_time,
name='benchmark_slice_repeat_batch_eager_input_%d_batch_%d' %
(input_size, batch_size))
def benchmarkSliceBatchCacheRepeatCallable(self):
input_size = 10000
batch_size = 100
num_epochs = 100
input_data = np.random.randn(input_size)
dataset = (
Dataset.from_tensor_slices(input_data).batch(batch_size).cache()
.repeat(num_epochs))
iterator = datasets.Iterator(dataset)
ends = [time.time()]
for _ in iterator:
ends.append(time.time())
deltas = np.ediff1d(ends)
median_wall_time = np.median(deltas)
print(
'Slice/batch/cache/repeat eager input size: %d batch size: %d Median '
'wall time per element: %f'
% (input_size, batch_size, median_wall_time))
self.report_benchmark(
iters=len(deltas),
wall_time=median_wall_time,
name='benchmark_slice_batch_cache_repeat_eager_input_%d_batch_%d' %
(input_size, batch_size))
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
dbd591edba4b84e2c524efb869936b5da91baed6 | 692eceac2533150b86aa173b451698b7a12ff735 | /PycharmProjects/lession4/hello7.py | d86e22fa46e409bec4414864c2e7482c6a4746b8 | [] | no_license | linlufeng/LufengLearnPython | cb74f34926663dc9b7d4d6789e6e7e044dd73db3 | bedcbf4fea6d048a3903a623a4386ac5d484a70d | refs/heads/master | 2022-09-12T22:14:19.243757 | 2022-08-25T02:54:13 | 2022-08-25T02:54:13 | 200,183,327 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Python Unicode 字符串
a = u'hello word'
print a
b = u'hello\u0020word'
print b | [
"[email protected]"
] | |
51df05dbb475d3d601e27c671a0abd7bfd968318 | a02789088ef6f7134d70b7235fa92ddcab9a667b | /eventsourcing/popo.py | 62bf9a207f000983720a274e3360dcd1d0d436a9 | [
"BSD-3-Clause"
] | permissive | bernardotorres/eventsourcing | 89f4f6aaf62f321ce28f5ec9ddbb60712495aa8b | 002077d185c261920f8ea8a397e292ab73a25319 | refs/heads/main | 2023-08-07T17:38:59.665618 | 2021-09-18T07:04:19 | 2021-09-18T07:04:19 | 407,116,568 | 0 | 0 | BSD-3-Clause | 2021-09-16T10:13:36 | 2021-09-16T10:13:35 | null | UTF-8 | Python | false | false | 4,826 | py | from collections import defaultdict
from threading import Lock
from typing import Any, Dict, Iterable, List, Optional
from uuid import UUID
from eventsourcing.persistence import (
AggregateRecorder,
ApplicationRecorder,
InfrastructureFactory,
IntegrityError,
Notification,
ProcessRecorder,
StoredEvent,
Tracking,
)
class POPOAggregateRecorder(AggregateRecorder):
def __init__(self) -> None:
self.stored_events: List[StoredEvent] = []
self.stored_events_index: Dict[UUID, Dict[int, int]] = defaultdict(dict)
self.database_lock = Lock()
def insert_events(self, stored_events: List[StoredEvent], **kwargs: Any) -> None:
with self.database_lock:
self.assert_uniqueness(stored_events, **kwargs)
self.update_table(stored_events, **kwargs)
def assert_uniqueness(
self, stored_events: List[StoredEvent], **kwargs: Any
) -> None:
new = set()
for s in stored_events:
# Check events don't already exist.
if s.originator_version in self.stored_events_index[s.originator_id]:
raise IntegrityError()
new.add((s.originator_id, s.originator_version))
# Check new events are unique.
if len(new) < len(stored_events):
raise IntegrityError()
def update_table(self, stored_events: List[StoredEvent], **kwargs: Any) -> None:
for s in stored_events:
self.stored_events.append(s)
self.stored_events_index[s.originator_id][s.originator_version] = (
len(self.stored_events) - 1
)
def select_events(
self,
originator_id: UUID,
gt: Optional[int] = None,
lte: Optional[int] = None,
desc: bool = False,
limit: Optional[int] = None,
) -> List[StoredEvent]:
with self.database_lock:
results = []
index = self.stored_events_index[originator_id]
positions: Iterable = index.keys()
if desc:
positions = reversed(list(positions))
for p in positions:
if gt is not None:
if not p > gt:
continue
if lte is not None:
if not p <= lte:
continue
s = self.stored_events[index[p]]
results.append(s)
if len(results) == limit:
break
return results
class POPOApplicationRecorder(ApplicationRecorder, POPOAggregateRecorder):
def select_notifications(self, start: int, limit: int) -> List[Notification]:
with self.database_lock:
results = []
i = start - 1
j = i + limit
for notification_id, s in enumerate(self.stored_events[i:j], start):
n = Notification(
id=notification_id,
originator_id=s.originator_id,
originator_version=s.originator_version,
topic=s.topic,
state=s.state,
)
results.append(n)
return results
def max_notification_id(self) -> int:
with self.database_lock:
return len(self.stored_events)
class POPOProcessRecorder(ProcessRecorder, POPOApplicationRecorder):
def __init__(self) -> None:
super().__init__()
self.tracking_table: Dict[str, int] = defaultdict(None)
def assert_uniqueness(
self, stored_events: List[StoredEvent], **kwargs: Any
) -> None:
super().assert_uniqueness(stored_events, **kwargs)
tracking: Optional[Tracking] = kwargs.get("tracking", None)
if tracking:
last = self.tracking_table.get(tracking.application_name, 0)
if tracking.notification_id <= last:
raise IntegrityError()
def update_table(self, stored_events: List[StoredEvent], **kwargs: Any) -> None:
super().update_table(stored_events, **kwargs)
tracking: Optional[Tracking] = kwargs.get("tracking", None)
if tracking:
self.tracking_table[tracking.application_name] = tracking.notification_id
def max_tracking_id(self, application_name: str) -> int:
with self.database_lock:
try:
return self.tracking_table[application_name]
except KeyError:
return 0
class Factory(InfrastructureFactory):
def aggregate_recorder(self, purpose: str = "events") -> AggregateRecorder:
return POPOAggregateRecorder()
def application_recorder(self) -> ApplicationRecorder:
return POPOApplicationRecorder()
def process_recorder(self) -> ProcessRecorder:
return POPOProcessRecorder()
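# Minimal usage sketch (illustrative; StoredEvent keyword construction is an
# assumption based on the attribute names accessed above):
#
#   from uuid import uuid4
#   recorder = POPOApplicationRecorder()
#   event = StoredEvent(originator_id=uuid4(), originator_version=1,
#                       topic="example:Created", state=b"{}")
#   recorder.insert_events([event])
#   assert recorder.max_notification_id() == 1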
| [
"[email protected]"
] | |
456af233a7576621e75971e4b2de66322b1689c9 | 5a914243183e26f26e445c6f9d0bdf38eacde83a | /em/here_em1.py | b2054ec7f7c21d3da446dab1657a78cc4e948c56 | [] | no_license | linafeng/KDD-demo | b95834a0c9ae3e0e08d1d2fb710bf421b9eda8d5 | 8648a69f49dd8b27060e8349d84d6cde8b070716 | refs/heads/master | 2022-06-16T02:08:10.140190 | 2020-05-07T10:04:23 | 2020-05-07T10:04:23 | 255,523,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | # -*- coding: utf-8 -*-
"""
用全部的特征值矩阵进行聚类训练
"""
import pandas as pd
import csv
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import StandardScaler
# Load the data; the gb18030 encoding avoids garbled Chinese characters
data_ori = pd.read_csv('./heros.csv', encoding='gb18030')
features = [u'最大生命', u'生命成长', u'初始生命', u'最大法力', u'法力成长', u'初始法力', u'最高物攻', u'物攻成长', u'初始物攻', u'最大物防', u'物防成长', u'初始物防',
u'最大每5秒回血', u'每5秒回血成长', u'初始每5秒回血', u'最大每5秒回蓝', u'每5秒回蓝成长', u'初始每5秒回蓝', u'最大攻速', u'攻击范围']
data = data_ori[features]
# Convert the max-attack-speed column from percentage strings to fractions
data.loc[:, u'最大攻速'] = data.loc[:, (u'最大攻速')].apply(lambda x: float(x.strip('%')) / 100)
# Encode the attack-range column: ranged ('远程') = 1, melee ('近战') = 0
data[u'攻击范围'] = data[u'攻击范围'].map({'远程': 1, '近战': 0})
print(data[u'攻击范围'])
# Z-Score normalization: give every feature dimension mean 0 and variance 1
ss = StandardScaler()
data = ss.fit_transform(data)
# Build the GMM clustering model
# n_components is the number of clusters
gmm = GaussianMixture(n_components=3, covariance_type='full')
gmm.fit(data)
# Predict cluster labels for the training data
prediction = gmm.predict(data)
print(prediction)
from sklearn.metrics import calinski_harabaz_score
print('Score with 3 clusters')
print(calinski_harabaz_score(data, prediction))
gmm = GaussianMixture(n_components=30, covariance_type='full')
gmm.fit(data)
# Predict cluster labels for the training data
prediction = gmm.predict(data)
print(prediction)
# Higher scores indicate better clustering: small within-cluster variance, large between-cluster variance
from sklearn.metrics import calinski_harabaz_score
print('Score with 30 clusters')
print(calinski_harabaz_score(data, prediction))
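# A minimal sketch for sweeping n_components (illustrative; scores depend on the data):
# for n in (3, 10, 20, 30):
#     labels = GaussianMixture(n_components=n, covariance_type='full').fit(data).predict(data)
#     print(n, calinski_harabaz_score(data, labels))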
| [
"[email protected]"
] | |
b488e8f76122fb33075b71ea5447d66f8dab36ea | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2986/60705/264139.py | b726ef653c77a48cfa8e0baed697e3d4e6069ce1 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | word1 = input()
word2 = input()
m = len(word1)
n = len(word2)
line = [0 for i in range(0, n+1)]
matrix = [line.copy() for i in range(0, m+1)]
for i in range(0, m+1):
matrix[i][0] = i
for i in range(0, n+1):
matrix[0][i] = i
for i in range(1, m+1):
for j in range(1, n+1):
if word1[i-1] == word2[j-1]:
matrix[i][j] = matrix[i-1][j-1]
else:
matrix[i][j] = 1 + min(matrix[i-1][j-1], matrix[i][j-1], matrix[i-1][j])
print(matrix[m][n])
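# Worked example (illustrative): word1 = "horse", word2 = "ros" prints 3
# (replace 'h' with 'r', delete 'r', delete 'e').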
| [
"[email protected]"
] | |
03d890f8f2dfbdcee709b7d5c2e90d16d30b8606 | 277f976227c7590f6de5e7991d8fbed23b6646fe | /euler/solution/p7.py | 24b6e6d889b48270005e27956ac6af780eaa9deb | [] | no_license | domspad/euler | ca19aae72165eb4d08104ef7a2757115cfdb9a18 | a4901403e442b376c2edd987a1571ab962dadab2 | refs/heads/master | 2021-01-17T14:04:39.198658 | 2016-07-25T23:40:10 | 2016-07-25T23:40:10 | 54,561,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py |
# BRUTE Force - alg from p3... 10 mins
# In [35]: %time p7()
# 10 0
# 20 8
# 40 12
# 80 22
# 160 37
# 320 66
# 640 115
# 1280 207
# 2560 375
# 5120 685
# 10240 1254
# 20480 2312
# 40960 4288
# 81920 8009
# CPU times: user 1.05 s, sys: 24.2 ms, total: 1.07 s
# Wall time: 1.1 s
# Out[35]: 104743
from math import sqrt
def extend_primes(N, primes=None):
"""
    Return all primes less than N (an int > 0); a nonempty `primes` list is extended from its last entry up to N
"""
if primes is None or len(primes) == 0:
        # trial division: check divisors up to sqrt(i) for each candidate i
primes = [ i for i in xrange(2,N)
if not any( ( i % p == 0 for p in xrange(2,int(sqrt(i))+1) ) )]
return primes
else:
start = primes[-1] + 1
more_primes = [ i for i in xrange(start,N)
if not any( ( i % p == 0 for p in xrange(2,int(sqrt(i))+1) ) )]
primes.extend(more_primes)
return primes
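# Illustrative usage of the incremental extension above:
#   primes = extend_primes(20)          # [2, 3, 5, 7, 11, 13, 17, 19]
#   primes = extend_primes(40, primes)  # same list, extended up to 39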
def p7():
primes = []
N = 10
while len(primes) < 10001:
print N, len(primes)
N *= 2
primes = extend_primes(N, primes=primes)
return primes[10000] | [
"[email protected]"
] | |
abb757860c83b8d6040106470b0abcc1405c33f1 | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L4N/4N-4M_MD_NVT_rerun/set_3.py | e43d9b8429c088bc81e4ec4d4db05a1ba78a0721 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import os
dir = '/mnt/scratch/songlin3/run/bace/L4N/MD_NVT_rerun/ti_one-step/4N_4M/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_3.in'
temp_pbs = filesdir + 'temp_3.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_3.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_3.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
b132f75964416e8fb6db865f13619d5a5d4feebc | ca41bc15576624f4be22c777833b6dbf80a3d5f9 | /dolly/tarjetas/views.py | 6f751f0cebaf9d47fe33c218daa4b79650d76b56 | [] | no_license | aris-osorio/dolly | 74840477e01a020dfaaaf3a4e94c4f95f48f690e | 256042bae4d4253fbc93f50aa125047e5090b68c | refs/heads/main | 2023-02-01T14:48:19.840785 | 2020-12-17T07:30:34 | 2020-12-17T07:30:34 | 321,873,299 | 0 | 0 | null | 2020-12-17T06:51:59 | 2020-12-16T04:58:55 | Python | UTF-8 | Python | false | false | 309 | py | from rest_framework import viewsets
from django.shortcuts import render
from tarjetas.models import Tarjeta
from tarjetas.serializers import TarjetaSerializer
# Create your views here.
class TarjetasViewSet(viewsets.ModelViewSet):
serializer_class = TarjetaSerializer
queryset = Tarjeta.objects.all() | [
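# Illustrative router hookup (the URL prefix is a placeholder):
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'tarjetas', TarjetasViewSet)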
"[email protected]"
] | |
1783bd109902241c813c9e0a689496e14d5cedcc | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_089/ch48_2020_04_10_20_07_55_415960.py | 44a694247dd0499ee9a39cc77edfd3fd0ea899fd | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | def eh_crescente(x):
    i = 0
    while (i + 1) < len(x):
        if x[i] < x[i + 1]:
            i = i + 1
        else:
            return False
    return True
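# Exemplos ilustrativos / illustrative examples:
#   eh_crescente([1, 2, 3])  -> True
#   eh_crescente([1, 3, 2])  -> False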
| [
"[email protected]"
] | |
a48aae75de72aac5915ef51f45e6d1a46a853b69 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/recoveryservices/v20201201/get_protection_container.py | f52c0b9299418c861bc4540e9f8abd775c160bd0 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 5,276 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetProtectionContainerResult',
'AwaitableGetProtectionContainerResult',
'get_protection_container',
]
@pulumi.output_type
class GetProtectionContainerResult:
"""
Base class for container with backup items. Containers with specific workloads are derived from this class.
"""
def __init__(__self__, e_tag=None, id=None, location=None, name=None, properties=None, tags=None, type=None):
if e_tag and not isinstance(e_tag, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", e_tag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id represents the complete path to the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name associated with the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
ProtectionContainerResource properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
"""
return pulumi.get(self, "type")
class AwaitableGetProtectionContainerResult(GetProtectionContainerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetProtectionContainerResult(
e_tag=self.e_tag,
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_protection_container(container_name: Optional[str] = None,
fabric_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
vault_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProtectionContainerResult:
"""
Base class for container with backup items. Containers with specific workloads are derived from this class.
:param str container_name: Name of the container whose details need to be fetched.
:param str fabric_name: Name of the fabric where the container belongs.
:param str resource_group_name: The name of the resource group where the recovery services vault is present.
:param str vault_name: The name of the recovery services vault.
"""
__args__ = dict()
__args__['containerName'] = container_name
__args__['fabricName'] = fabric_name
__args__['resourceGroupName'] = resource_group_name
__args__['vaultName'] = vault_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:recoveryservices/v20201201:getProtectionContainer', __args__, opts=opts, typ=GetProtectionContainerResult).value
return AwaitableGetProtectionContainerResult(
e_tag=__ret__.e_tag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
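# Illustrative invocation (all resource names below are placeholders):
#   result = get_protection_container(
#       container_name="my-container",
#       fabric_name="Azure",
#       resource_group_name="my-resource-group",
#       vault_name="my-vault")
#   pulumi.export("containerType", result.type)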
| [
"[email protected]"
] | |
dc8f58995b56a74a71ab55bfcf0f407a1856b1e8 | e21f7d14e564d7fb921277a329ff078e86ad86a2 | /2020/day-02/day_02.py | 77772c41380808b0f53d3cf39eedf829893ccbe2 | [] | no_license | MrGallo/advent-of-code-solutions | 31456a0718303cca6790cf1227831bcb14649e27 | 28e0331e663443ffa2638188437cc7e46d09f465 | refs/heads/master | 2022-07-07T08:49:30.460166 | 2020-12-17T17:22:24 | 2020-12-17T17:22:24 | 160,988,019 | 0 | 1 | null | 2022-06-21T22:26:19 | 2018-12-08T23:34:51 | Python | UTF-8 | Python | false | false | 1,269 | py | from typing import List, Tuple, Callable
Password = Tuple[str, str, int, int]
def main():
with open('input.txt', 'r') as f:
lines = f.read().split("\n")
passwords = []
for line in lines:
password = line.split()[-1] # last 'word'
colon_pos = line.index(':')
minmax, letter = line[:colon_pos].split()
a, b = map(int, minmax.split("-"))
passwords.append((password, letter, a, b))
print(len(valid_passwords(passwords, is_valid_sled_rental_password))) # Part 1: 628
print(len(valid_passwords(passwords, is_valid_toboggan_rental_password))) # Part 2: 705
def valid_passwords(passwords: List[Password], validation_func: Callable) -> List[Password]:
return [password for password in passwords if validation_func(password)]
def is_valid_sled_rental_password(password: Password) -> bool:
text, letter, low, high = password
count = text.count(letter)
return count >= low and count <= high
def is_valid_toboggan_rental_password(password: Password) -> bool:
text, letter, a, b = password
in_pos_one = text[a-1] == letter
in_pos_two = text[b-1] == letter
return in_pos_one and not in_pos_two or in_pos_two and not in_pos_one
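# Illustrative checks on the example entry "1-3 a: abcde" -> ("abcde", "a", 1, 3):
#   is_valid_sled_rental_password(("abcde", "a", 1, 3))      # True: one 'a', within 1..3
#   is_valid_toboggan_rental_password(("abcde", "a", 1, 3))  # True: 'a' at position 1 only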
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
76a65c84f9cf87f1a638d3ca4ab887975e058090 | 9bd1c702f6a764f1227bb92b3c25045e1270e6d7 | /utility/queueUtility/queueFactory.py | 2111478742d447159b4ed8236151ef2c58c2ba4a | [] | no_license | sdz7121211/Crawler | 94b429d343165c959713ff1fffa40c54e41bcb57 | 6b3f1d0f78fed3027f2f075963e433d2c1c13bb8 | refs/heads/master | 2021-01-22T11:46:20.299124 | 2014-01-21T07:04:59 | 2014-01-21T07:04:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import Queue
instanceList = {}
def GetInstance(queue_id):
return instanceList[queue_id]
def CreatInstance(queue_id, maxNum = 0):
obj = Queue.Queue(maxsize = maxNum)
instanceList[queue_id] = obj
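# Minimal usage sketch (illustrative; the queue id is an arbitrary key):
#   CreatInstance('urls', maxNum=100)
#   q = GetInstance('urls')
#   q.put('http://example.com')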
##!/usr/bin/python
## -*- coding: utf-8 -*-
##from queuesControler import queuesControler
#import dataQueue
#
#
##QueuesControlerInstance = None
#QueuesInstance = []
##
##
##def getQueuesControler():
## global QueuesControlerInstance
## if not QueuesControlerInstance:
## QueuesControlerInstance = queuesControler()
## print "鏂板缓 getQueuesControler"
## return QueuesControlerInstance
## else:
## print "杩斿洖宸插瓨鍦ㄧ殑 getQueuesControler"
## return QueuesControlerInstance
#
#def getDataQueueInstance(maxNum = 0,description = None):
# dataQueueInstance = dataQueue.dataQueue(maxNum,description)
# return dataQueueInstance
#
#
#if QueuesInstance == []:
#    print 'initializing the queues'
#    QueuesInstance.append(getDataQueueInstance(1000, "fetch 'site name', 'category URL', 'category name'"))
#    QueuesInstance.append(getDataQueueInstance(1000, "('site name', 'category URL', 'category name') -> fetch the page source that 'category URL' points to"))
#    QueuesInstance.append(getDataQueueInstance(20000, "('site name', 'category URL', 'category name', source of 'category URL') -> fetch the remaining items"))
"[email protected]"
] | |
4e7a333cf591059236324b62a574244c81efc0c5 | 7bdf57cdb8d7a8dd0916e9263e63ef2d20723bec | /src/app/config.py | 29b15c27ea5973e7184897e54f7bfe46ab9bfd75 | [] | no_license | cyr1z/api-generate-table-image | 085b855c6ec48f6b46f602f92cd21ec0f03391e3 | d06e13eb6538ac4d2986903fba086140b11c3832 | refs/heads/main | 2023-07-13T20:50:01.146353 | 2021-08-30T06:03:10 | 2021-08-30T06:03:10 | 401,231,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from os import getenv
from dotenv import load_dotenv
load_dotenv()
class Config:
port = getenv('PORT')
production = getenv('PROD')
project = getenv('PROJECT_NAME', 'default')
base_url = getenv('BASE_URL')
storage_dir = getenv('STORAGE_DIR')
token = getenv('TOKEN')
| [
"[email protected]"
] | |
dc077392fce96fd1e468c011306a5a12dc207654 | f38496cf4d3f8dcf835adc63fbc4d1d975028c7e | /vae.py | 464c73864e81a04ea9dde18e63799697f10bb07d | [] | no_license | Namenaro/Terau | c8c73ff585a01d296b4d334d01b6a1109022b38e | 177f212e88ce3399330baf49b168a0d19be49d98 | refs/heads/master | 2021-01-01T16:29:08.724267 | 2017-08-02T14:24:34 | 2017-08-02T14:24:34 | 97,843,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | # -*- coding: utf-8 -*
# n-1-n autoencoder
class VAE:
def __init__(self):
pass
| [
"[email protected]"
] | |
31234163102d0defea9f5e52da692f3b32689513 | dbbd9e8767f052ce499245fd2d1356b57c714eda | /aper.py | 499f34e45fa2b04ec24075f472c60b0ece264486 | [] | no_license | djbrout/pysmp | 8ef67dbb303c848fdd641d97ca31e2b33c60089d | 59ebed1c5aab1c920ae5a6eccc5219fb0736879f | refs/heads/master | 2020-04-03T20:59:21.629614 | 2019-07-09T22:59:05 | 2019-07-09T22:59:05 | 59,670,805 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,994 | py | #!/usr/bin/env python
#D. Jones - 1/15/14
#S. Rodney - 2014.07.06
"""This code is translated from the IDL Astronomy Users Library
example call:
from PythonPhot import aper
import numpy as np
image = pyfits.getdata(fits_filename)
xpos,ypos = np.array([1450,1400]),np.array([1550,1600])
mag,magerr,flux,fluxerr,sky,skyerr,badflag,outstr = \
aper.aper(image,xpos,ypos,phpadu=1,apr=5,zeropoint=25,
skyrad=[40,50],badpix=[-12000,60000],
exact=True)
"""
import numpy as np
from scipy.stats import sigmaclip, skew
import pixwt
import mmm
where,asfarray,asarray,array,zeros,arange = np.where,np.asfarray,np.asarray,np.array,np.zeros,np.arange
def aper(image,xc,yc, phpadu=1, apr=5, zeropoint=25,
skyrad=[40,60], badpix=[0,0], setskyval = None, minsky=[],
skyisempty=False, exact = False, readnoise = 0,
verbose=True, debug=False,ignoreneg=False):
""" Compute concentric aperture photometry on one ore more stars
(adapted for IDL from DAOPHOT, then translated from IDL to Python).
APER can compute photometry in several user-specified aperture radii.
A separate sky value is computed for each source using specified inner
and outer sky radii.
By default, APER uses a magnitude system where a magnitude of
25 corresponds to 1 flux unit. APER returns both
fluxes and magnitudes.
REQUIRED INPUTS:
image - input image array
xc - scalar x value or 1D array of x coordinates.
yc - scalar y value or 1D array of y coordinates
OPTIONAL KEYWORD INPUTS:
phpadu - Photons per Analog Digital Units, numeric scalar. Converts
the data numbers in IMAGE to photon units. (APER assumes
Poisson statistics.)
apr - scalar or 1D array of photometry aperture radii in pixel units.
zeropoint - zero point for converting flux (in ADU) to magnitudes
skyrad - Two element list giving the inner and outer radii
to be used for the sky annulus
badpix - Two element list giving the minimum and maximum value
of a good pix. If BADPIX[0] is equal to BADPIX[1] then
it is assumed that there are no bad pixels.
exact - By default, APER counts subpixels, but uses a polygon
approximation for the intersection of a circular aperture with
a square pixel (and normalize the total area of the sum of the
pixels to exactly match the circular area). If the /EXACT
keyword, then the intersection of the circular aperture with a
square pixel is computed exactly. The /EXACT keyword is much
slower and is only needed when small (~2 pixels) apertures are
used with very undersampled data.
print - if set and non-zero then APER will also write its results to
a file aper.prt. One can specify the output file name by
setting PRINT = 'filename'.
verbose - Print warnings, status, and ancillary info to the terminal
setskyval - Use this keyword to force the sky to a specified value
rather than have APER compute a sky value. SETSKYVAL
can either be a scalar specifying the sky value to use for
all sources, or a 3 element vector specifying the sky value,
the sigma of the sky value, and the number of elements used
to compute a sky value. The 3 element form of SETSKYVAL
is needed for accurate error budgeting.
RETURNS:
mags - NAPER by NSTAR array giving the magnitude for each star in
each aperture. (NAPER is the number of apertures, and NSTAR
is the number of stars). A flux of 1 digital unit is assigned
a zero point magnitude of 25.
magerr - NAPER by NSTAR array giving error in magnitude
for each star. If a magnitude could not be deter-
mined then ERRAP = 9.99.
flux - NAPER by NSTAR array giving fluxes
fluxerr - NAPER by NSTAR array giving error in each flux
sky - NSTAR element array giving sky value for each star
skyerr - NSTAR element array giving error in sky values
     badflag - NAPER by NSTAR array; nonzero where bad pixels affected the aperture
     outstr - string for each star and aperture reporting the mag and err
PROCEDURES USED:
MMM, PIXWT()
NOTES:
Reasons that a valid magnitude cannot be computed include the following:
(1) Star position is too close (within 0.5 pixels) to edge of the frame
(2) Less than 20 valid pixels available for computing sky
(3) Modal value of sky could not be computed by the procedure MMM
(4) *Any* pixel within the aperture radius is a "bad" pixel
APER was modified in June 2000 in two ways: (1) the /EXACT keyword was
added (2) the approximation of the intersection of a circular aperture
with square pixels was improved (i.e. when /EXACT is not used)
REVISON HISTORY:
Adapted to IDL from DAOPHOT June, 1989 B. Pfarr, STX
Adapted for IDL Version 2, J. Isensee, July, 1990
Code, documentation spiffed up W. Landsman August 1991
TEXTOUT may be a string W. Landsman September 1995
FLUX keyword added J. E. Hollis, February, 1996
SETSKYVAL keyword, increase maxsky W. Landsman, May 1997
Work for more than 32767 stars W. Landsman, August 1997
Converted to IDL V5.0 W. Landsman September 1997
Don't abort for insufficient sky pixels W. Landsman May 2000
Added /EXACT keyword W. Landsman June 2000
Allow SETSKYVAL = 0 W. Landsman December 2000
Set BADPIX[0] = BADPIX[1] to ignore bad pixels W. L. January 2001
Fix chk_badpixel problem introduced Jan 01 C. Ishida/W.L. February 2001
Converted from IDL to python D. Jones January 2014
Adapted for hstphot project S. Rodney July 2014
"""
if verbose > 1:
import time
tstart = time.time()
elif verbose: import time
if debug :
import pdb
pdb.set_trace()
# Set parameter limits
    if minsky == []: minsky = 20  # default minimum number of valid sky pixels
#minsky = -1000.
# Number of columns and rows in image array
s = np.shape(image)
ncol = s[1]
nrow = s[0]
if setskyval is not None :
if not np.iterable(setskyval) :
setskyval = [setskyval,0.,1.]
assert len(setskyval)==3, 'Keyword SETSKYVAL must contain 1 or 3 elements'
skyrad = [ 0., max(apr) + 1]
skyrad = asfarray(skyrad)
if not np.iterable( xc ):
xc = np.array([xc])
yc = np.array([yc])
assert len(xc) == len(yc), 'xc and yc arrays must be identical length.'
if not np.iterable( apr ) :
apr = np.array( [ apr ] )
Naper = len( apr ) # Number of apertures
Nstars = len( xc ) # Number of stars to measure
# String array to display mags for all apertures in one line for each star
outstr = [ '' for star in range(Nstars)]
# Declare arrays
mag = zeros( [ Nstars, Naper])
magerr = zeros( [ Nstars, Naper])
flux = zeros( [ Nstars, Naper])
fluxerr = zeros( [ Nstars, Naper])
badflag = zeros( [ Nstars, Naper])
sky = zeros( Nstars )
skyerr = zeros( Nstars )
area = np.pi*apr*apr # Area of each aperture
if exact:
bigrad = apr + 0.5
smallrad = apr/np.sqrt(2) - 0.5
if setskyval is None :
rinsq = skyrad[0]**2
routsq = skyrad[1]**2
# Compute the limits of the submatrix. Do all stars in vector notation.
lx = (xc-skyrad[1]).astype(int) # Lower limit X direction
ly = (yc-skyrad[1]).astype(int) # Lower limit Y direction
ux = (xc+skyrad[1]).astype(int) # Upper limit X direction
uy = (yc+skyrad[1]).astype(int) # Upper limit Y direction
lx[where(lx < 0)[0]] = 0
ux[where(ux > ncol-1)[0]] = ncol-1
nx = ux-lx+1 # Number of pixels X direction
ly[where(ly < 0)[0]] = 0
uy[where(uy > nrow-1)[0]] = nrow-1
ny = uy-ly +1 # Number of pixels Y direction
dx = xc-lx # X coordinate of star's centroid in subarray
dy = yc-ly # Y coordinate of star's centroid in subarray
# Find the edge of the subarray that is closest to each star
# and then flag any stars that are too close to the edge or off-image
edge = zeros(len(dx))
for i,dx1,nx1,dy1,ny1 in zip(range(len(dx)),dx,nx,dy,ny):
edge[i] = min([(dx[i]-0.5),(nx[i]+0.5-dx[i]),(dy[i]-0.5),(ny[i]+0.5-dy[i])])
badstar = np.where( (xc<0.5) | (xc>ncol-1.5) |
(yc<0.5) | (yc>nrow-1.5), 1, 0 )
if np.any( badstar ) :
nbad = badstar.sum()
print('WARNING - ' + str(nbad) + ' star positions outside image')
if verbose :
tloop = time.time()
for i in range(Nstars): # Compute magnitudes for each star
#print i,Nstars
while True :
# mimic GOTO statements : break out of this while block whenever
# we decide this star is bad
apflux = asarray([np.nan]*Naper)
apfluxerr = asarray([np.nan]*Naper)
apmag = asarray([np.nan]*Naper)
apmagerr = asarray([np.nan]*Naper)
skymod = 0. # Sky mode
skysig = 0. # Sky sigma
skyskw = 0. # Sky skew
error1 = asarray([np.nan]*Naper)
error2 = asarray([np.nan]*Naper)
error3 = array([np.nan]*Naper)
apbad = np.ones( Naper )
if badstar[i]: # star is bad, return NaNs for all values
break
rotbuf = image[ ly[i]:uy[i]+1,lx[i]:ux[i]+1 ] #Extract subarray from image
shapey,shapex = np.shape(rotbuf)[0],np.shape(rotbuf)[1]
# RSQ will be an array, the same size as ROTBUF containing the square of
# the distance of each pixel to the center pixel.
dxsq = ( arange( nx[i] ) - dx[i] )**2
# if ny[i] < 0:
# if verbose:
# print("WARNING : aperture extends outside the image!")
# continue
# if nx[i] < 0:
# if verbose:
# print("WARNING : aperture extends outside the image!")
# continue
try:
rsq = np.ones( [ny[i], nx[i]] )
            except ValueError:
                print('negative dimension')
                break
for ii in range(ny[i]):
rsq[ii,:] = dxsq + (ii-dy[i])**2
if exact:
nbox = range(nx[i]*ny[i])
xx = (nbox % nx[i]).reshape( ny[i], nx[i])
yy = (nbox/nx[i]).reshape(ny[i],nx[i])
x1 = np.abs(xx-dx[i])
y1 = np.abs(yy-dy[i])
else:
r = np.sqrt(rsq) - 0.5 #2-d array of the radius of each pixel in the subarray
rsq,rotbuf = rsq.reshape(shapey*shapex),rotbuf.reshape(shapey*shapex)
if setskyval is None :
# skypix will be 1-d array of sky pixels
skypix = np.zeros( rsq.shape )
# Select pixels within sky annulus,
skypix[where(( rsq >= rinsq ) &
( rsq <= routsq ))[0]] = 1
if badpix[0]!=badpix[1] :
# Eliminate pixels above or below the badpix threshold vals
skypix[where(((rotbuf < badpix[0]) | (rotbuf > badpix[1])) &
(skypix == 1))[0]] = 0
sindex = where(skypix)[0]
nsky = len(sindex)
if ( nsky < minsky ): # Insufficient sky pixels?
if verbose:
print("ERROR: nsky=%i is fewer than minimum %i valid pixels in the sky annulus."%(nsky,minsky))
break
skybuf = rotbuf[ sindex[0:nsky] ]
if skyisempty :
# The sky annulus is (nearly) empty of stars, (as in a diff image)
# so we can simply compute the sigma-clipped mean of all pixels in
# the annulus
                skybufclipped = sigmaclip( skybuf, low=4.0, high=4.0)
                try:
                    # sigmaclip returns (clipped_array, lower, upper)
                    skymod = np.mean( skybufclipped[0] )
                    skysig = np.std( skybufclipped[0] )
                except Exception:
                    print('sigma-clipped sky estimate failed; falling back to mmm')
                    skymod, skysig, skyskw = mmm.mmm(skybuf, readnoise=readnoise, minsky=minsky)
                skyskw = -999  # skew of the clipped sky is not computed here
else:
# Compute the sky mode, sigma and skewness using the
# mean/median/mode algorithm in mmm.py, which assumes that
# most of the outlier pixels are positive.
skymod, skysig, skyskw = mmm.mmm(skybuf,readnoise=readnoise,minsky=minsky)
skyvar = skysig**2 #Variance of the sky brightness
sigsq = skyvar/nsky #Square of standard error of mean sky brightness
if ( skysig < 0.0 ):
# If the modal sky value could not be determined, then all
# apertures for this star are bad. So skip to the next.
break
if skysig > 999.99: skysig = 999 #Don't overload output formats
if skyskw < -99: skyskw = -99
if skyskw > 999.9: skyskw = 999.9
else:
skymod = setskyval[0]
skysig = setskyval[1]
nsky = setskyval[2]
skyvar = skysig**2
sigsq = skyvar/nsky
skyskw = 0
for k in range(Naper): # Find pixels within each aperture
thisapd = array([])
if ( edge[i] >= apr[k] ): #Does aperture extend outside the image?
if exact:
mask = zeros(ny[i]*nx[i])
x1,y1 = x1.reshape(ny[i]*nx[i]),y1.reshape(ny[i]*nx[i])
good = where( ( x1 < smallrad[k] ) & (y1 < smallrad[k] ))[-1]
Ngood = len(good)
if Ngood > 0: mask[good] = 1
bad = where( (x1 > bigrad[k]) | (y1 > bigrad[k] ))[-1]
mask[bad] = -1
gfract = where(mask == 0.0)[0]
Nfract = len(gfract)
if Nfract > 0:
yygfract = yy.reshape(ny[i]*nx[i])[gfract]
xxgfract = xx.reshape(ny[i]*nx[i])[gfract]
mask[gfract] = pixwt.Pixwt(dx[i],dy[i],apr[k],xxgfract,yygfract)
mask[gfract[where(mask[gfract] < 0.0)[0]]] = 0.0
thisap = where(mask > 0.0)[0]
thisapd = rotbuf[thisap]
fractn = mask[thisap]
else:
# approximating the circular aperture shape
rshapey,rshapex = np.shape(r)[0],np.shape(r)[1]
thisap = where( r.reshape(rshapey*rshapex) < apr[k] )[0] # Select pixels within radius
thisapd = rotbuf.reshape(rshapey*rshapex)[thisap]
thisapr = r.reshape(rshapey*rshapex)[thisap]
fractn = apr[k]-thisapr
fractn[where(fractn > 1)[0]] = 1
fractn[where(fractn < 0)[0]] = 0 # Fraction of pixels to count
full = zeros(len(fractn))
full[where(fractn == 1)[0]] = 1.0
gfull = where(full)[0]
Nfull = len(gfull)
gfract = where(1 - full)[0]
factor = (area[k] - Nfull ) / np.sum(fractn[gfract])
fractn[gfract] = fractn[gfract]*factor
else:
if verbose :
print("WARNING : aperture extends outside the image!")
continue
# END "if exact ... else ..."
# Check for any bad pixel values (nan,inf) and those outside
# the user-specified range of valid pixel values. If any
# are found in the aperture, raise the badflux flag.
apbad[k] = 0
if not np.all( np.isfinite(thisapd) ) :
if verbose :
print("WARNING : nan or inf pixels detected in aperture.\n"
"We're setting these to 0, but the photometry"
"may be biased.")
thisapd[np.isfinite()==False] = 0
apbad[k] = 1
fractn = 0
if badpix[0] < badpix[1] :
ibadpix = np.where((thisapd<=badpix[0]) | (thisapd>=badpix[1]))
if len(ibadpix[0]) > 0 :
if verbose :
print("WARNING : pixel values detected in aperture"
" that are outside of the allowed range "
" [%.1f , %.1f] \n"%(badpix[0],badpix[1]) +
"We're treating these as 0, but the "
"photometry may be biased.")
thisapd[ibadpix] = 0
apbad[k] = 1
# Sum the flux over the irregular aperture
apflux[k] = np.sum(thisapd*fractn)
# END for loop over apertures
g = where(np.isfinite(apflux))[0]
Ng = len(g)
if Ng > 0:
# Subtract sky from the integrated brightnesses
apflux[g] = apflux[g] - skymod*area[g]
# Compute flux error
error1[g] = area[g]*skyvar #Scatter in sky values
error2[g] = where( apflux[g]>=0, apflux[g]/phpadu, 0 ) #Random photon noise
error3[g] = sigsq*area[g]**2 #Uncertainty in mean sky brightness
apfluxerr[g] = np.sqrt(error1[g] + error2[g] + error3[g])
good = where (apflux > 0.0)[0] # Are there any valid integrated fluxes?
Ngood = len(good)
if ( Ngood > 0 ) : # convert valid fluxes to mags
apmagerr[good] = 1.0857*apfluxerr[g]/apflux[g] #1.0857 = log(10)/2.5
apmag[good] = zeropoint-2.5*np.log10(apflux[g])
break # Closing the 'while True' loop.
# TODO : make a more informative output string
outstr[i] = '%.3f,%.3f :'%(xc[i],yc[i]) + \
' '.join( [ '%.4f+-%.4f'%(apmag[ii],apmagerr[ii])
for ii in range(Naper) ] )
sky[i] = skymod
skyerr[i] = skysig
mag[i,:] = apmag
magerr[i,:]= apmagerr
flux[i,:] = apflux
fluxerr[i,:]= apfluxerr
badflag[i,:] = apbad
if Nstars == 1 :
sky = sky[0]
skyerr = skyerr[0]
mag = mag[0]
magerr = magerr[0]
flux = flux[0]
fluxerr = fluxerr[0]
badflag = badflag[0]
outstr = outstr[0]
if verbose>1:
print('hstphot.aper took %.3f seconds'%(time.time()-tstart))
print('Each of %i loops took %.3f seconds'%(Nstars,(time.time()-tloop)/Nstars))
return(mag,magerr,flux,fluxerr,sky,skyerr,badflag,outstr)
| [
"[email protected]"
] | |
47debde28beda902a0c661b99da718fe0df26f64 | e2f7350e08784a793044e911670f50fdc560bcbc | /examples/population-annealing-qpu.py | 5229bee006233aecdf36ffa1c65d186d6815a2fa | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | hsadeghidw/dwave-hybrid | 7a5dc4dd9ce7d8544f9276309eb1a4e8b450a5f8 | e667035b5623f122813795d433a40db6e520ff66 | refs/heads/master | 2020-08-21T21:31:19.708429 | 2019-09-19T16:59:50 | 2019-09-19T16:59:50 | 216,250,242 | 0 | 0 | Apache-2.0 | 2019-10-19T18:12:07 | 2019-10-19T18:12:07 | null | UTF-8 | Python | false | false | 2,194 | py | #!/usr/bin/env python
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import neal
import dimod
import hybrid
from hybrid.reference.pt import FixedTemperatureSampler
from hybrid.reference.pa import (
CalculateAnnealingBetaSchedule, ProgressBetaAlongSchedule, EnergyWeightedResampler)
# load a problem
problem = sys.argv[1]
with open(problem) as fp:
bqm = dimod.BinaryQuadraticModel.from_coo(fp)
print("BQM: {} nodes, {} edges, {:.2f} density".format(
len(bqm), len(bqm.quadratic), hybrid.bqm_density(bqm)))
# sweeps per fixed-temperature sampling step
num_sweeps = 1000
# number of generations, or temperatures to progress through
num_iter = 20
# population size
num_samples = 20
# QPU initial sampling: limits the PA workflow to QPU-sized problems
qpu_init = (
hybrid.IdentityDecomposer()
| hybrid.QPUSubproblemAutoEmbeddingSampler(num_reads=num_samples)
| hybrid.IdentityComposer()
) | hybrid.AggregatedSamples(False)
# PA workflow: after initial beta schedule estimation, we do `num_iter` steps
# (one per beta/temperature) of fixed-temperature sampling / weighted resampling
workflow = qpu_init | CalculateAnnealingBetaSchedule(length=num_iter) | hybrid.Loop(
ProgressBetaAlongSchedule() | FixedTemperatureSampler(num_sweeps=num_sweeps) | EnergyWeightedResampler(),
max_iter=num_iter
)
# run the workflow
state = hybrid.State.from_problem(bqm)
solution = workflow.run(state).result()
# show execution profile
hybrid.profiling.print_counters(workflow)
# show results
print("Solution: sample={0.samples.first}, energy={0.samples.first.energy}".format(solution))
| [
"[email protected]"
] | |
1dfbf1e610adedc057bd7bcab7b89bb09b77db5e | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /covid_epidemiology/src/models/shared/feature_utils_test.py | 9166d7a1d131f7f72dbf2916c7e3814c2dde998c | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 11,089 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import numpy as np
import pandas as pd
from pandas_gbq import gbq
import tensorflow as tf
from covid_epidemiology.src.models.shared import feature_utils
from covid_epidemiology.src.models.shared import model_spec
class FeatureUtilsTest(tf.test.TestCase):
def test_filter_data_based_on_location(self):
ts_data = pd.DataFrame([{
"feature_name": "death",
"dt": "2020/01/01",
"country_code": "IR"
}, {
"feature_name": "death",
"dt": "2020/01/01",
"country_code": "US"
}, {
"feature_name": "death",
"dt": "2020/01/01",
"country_code": "CH"
}])
    static_data = pd.DataFrame([{
        "population": 70,
        "country_code": "IR"
    }, {
        "population": 50,
        "country_code": "US"
    }, {
        "population": 10,
        "country_code": "CH"
    }])
    got_static_data, got_ts_data = feature_utils.filter_data_based_on_location(
        static_data=static_data, ts_data=ts_data, locations=["IR", "US"])
    wanted_ts_data = pd.DataFrame([{
        "feature_name": "death",
        "dt": "2020/01/01",
        "country_code": "IR"
    }, {
        "feature_name": "death",
        "dt": "2020/01/01",
        "country_code": "US"
    }])
    wanted_static_data = pd.DataFrame([{
        "population": 70,
        "country_code": "IR"
    }, {
        "population": 50,
        "country_code": "US"
    }])
    pd.testing.assert_frame_equal(got_ts_data, wanted_ts_data)
    pd.testing.assert_frame_equal(got_static_data, wanted_static_data)

  def test_static_feature_map_for_locations(self):
    static_data = {
        "population": {
            "US": 100,
            "CH": 200,
            "IT": 30,
        },
        "land_area": {
            "US": 100,
            "CH": 150,
        }
    }
    static_feature_specs = [
        model_spec.FeatureSpec(name="population", initializer=None),
        model_spec.FeatureSpec(name="land_area", initializer=None),
    ]
    locations = ["US", "CH"]
    expected = np.array([[100, 100], [200, 150]])
    actual = feature_utils.static_feature_to_dense(static_data,
                                                   static_feature_specs,
                                                   locations)
    self.assertAllEqual(actual, expected)

  def test_covariates_as_tensors_for_location(self):
    ts_data = {
        "temperature": {
            "US": [70.5, 73.0],
            "CH": [72.5, 75.3],
        },
        "mobility": {
            "US": [98.4, 70.1],
            "CH": [73.5, 65.3],
            "IT": [83.5, 65.0],
        }
    }
    covariate_feature_specs = [
        model_spec.FeatureSpec(name="temperature", initializer=None),
        model_spec.FeatureSpec(name="mobility", initializer=None),
    ]
    expected = [
        np.array([[70.5, 98.4], [72.5, 73.5]]),
        np.array([[73.0, 70.1], [75.3, 65.3]])
    ]
    actual = feature_utils.covariate_features_to_dense(ts_data,
                                                       covariate_feature_specs,
                                                       ["US", "CH"], 2)
    self.assertAllClose(actual, expected)

  def test_covariates_as_tensors_for_location_filters_nones(self):
    ts_data = {
        "temperature": {
            "US": [70.5, 73.0],
            "CH": [72.5, 75.3],
        },
        "mobility": {
            "US": [98.4, 70.1],
            "CH": [73.5, 65.3],
            "IT": [83.5, 65.0],
        },
        "humidity": {
            "US": [34.3, 38.2],
            "CH": [44.2, 42.4],
            "IT": None,
        }
    }
    covariate_feature_specs = [
        model_spec.FeatureSpec(name="temperature", initializer=None),
        model_spec.FeatureSpec(name="mobility", initializer=None),
        model_spec.FeatureSpec(name="humidity", initializer=None),
    ]
    expected = [
        np.array([[70.5, 98.4, 34.3], [72.5, 73.5, 44.2]]),
        np.array([[73.0, 70.1, 38.2], [75.3, 65.3, 42.4]])
    ]
    actual = feature_utils.covariate_features_to_dense(ts_data,
                                                       covariate_feature_specs,
                                                       ["US", "CH"], 2)
    self.assertAllClose(actual, expected)

  def test_get_categorical_features_mask_for_ts(self):
    covariate_feature_specs = [
        model_spec.FeatureSpec(name="temperature", initializer=None),
        model_spec.FeatureSpec(name="mobility", initializer=None),
        model_spec.FeatureSpec(name="chc_npi_School", initializer=None),
    ]
    categorical_features = ["chc_npi_School"]
    expected = tf.constant(np.array([[0, 0, 1], [0, 0, 1]]))
    actual = feature_utils.get_categorical_features_mask(
        covariate_feature_specs, categorical_features, 2, is_static=False)
    self.assertAllClose(actual, expected)

  def test_periodic_forecast_works_for_weekly_case(self):
    example_periods = np.array([
        [2, 3, 4, 5, 6, 7, 1],
        [0, 0, 0, 0, 1, 1, 0],
    ])
    expected_output = np.array([
        [2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 1],
        [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0],
    ])
    output_array = np.empty((2, 14))
    feature_utils.periodic_forecast(example_periods, output_array, period=7)
    np.testing.assert_equal(output_array, expected_output)

  def test_periodic_forecast_works_shorter_than_period(self):
    num_locations = 5
    num_days = 4
    start_day = 2
    short_example = np.arange(start_day, start_day + num_days)
    most_recent_days = np.tile(short_example, (num_locations, 1))
    most_recent_days[-1, :] += 1
    output_array = np.empty((num_locations, 5))
    expected_output = np.tile([4, 5, 2, 3, 4], (num_locations, 1))
    expected_output[-1, :] += 1
    feature_utils.periodic_forecast(most_recent_days, output_array, period=6)
    np.testing.assert_equal(output_array, expected_output)


class TestForecastFeatures(tf.test.TestCase):

  @classmethod
  def setUpClass(cls):
    super().setUpClass()
    cls.covariates = np.tile(np.arange(28).reshape((-1, 1, 1)), (1, 3, 4))
    cls.feature_specs = {
        "none":
            model_spec.FeatureSpec(
                "none", forecast_method=model_spec.ForecastMethod.NONE),
        "constant":
            model_spec.FeatureSpec(
                "constant", forecast_method=model_spec.ForecastMethod.CONSTANT),
        "week":
            model_spec.FeatureSpec(
                "week",
                forecast_method=model_spec.ForecastMethod.PERIODIC_WEEKLY),
        "xg":
            model_spec.FeatureSpec(
                "xg", forecast_method=model_spec.ForecastMethod.XGBOOST),
    }
    _, n_locations, n_features = cls.covariates.shape
    cls.expected_output = np.zeros((10, n_locations, n_features))
    cls.expected_output[:, :, 1] = 27
    cls.expected_output[:7, :, 2] = np.arange(21, 28).reshape(-1, 1)
    cls.expected_output[7:, :, 2] = np.arange(21, 24).reshape(-1, 1)
    cls.expected_output[:, :, 3] = 1.0

  def test_forecast_features_same_length(self):
    output_time_points = 5
    forecasts = feature_utils.forecast_features(
        self.covariates,
        self.feature_specs,
        num_forecast_steps=output_time_points,
    )
    _, n_locations, n_features = self.covariates.shape
    self.assertTupleEqual(forecasts.shape,
                          (output_time_points, n_locations, n_features))
    np.testing.assert_allclose(forecasts,
                               self.expected_output[:output_time_points, :, :])

  def test_forecast_features_forecast_shorter(self):
    output_time_points = 3
    _, n_locations, n_features = self.covariates.shape
    expected_output = np.zeros((output_time_points, n_locations, n_features))
    expected_output[:, :, 1] = 27
    expected_output[:, :, 2] = np.arange(21, 24).reshape(-1, 1)
    expected_output[:, :, 3] = 1.0
    forecasts = feature_utils.forecast_features(
        self.covariates,
        self.feature_specs,
        num_forecast_steps=output_time_points,
    )
    _, n_locations, n_features = self.covariates.shape
    self.assertTupleEqual(forecasts.shape,
                          (output_time_points, n_locations, n_features))
    np.testing.assert_allclose(forecasts,
                               self.expected_output[:output_time_points, :, :])

  def test_forecast_features_num_threads(self):
    output_time_points = 5
    forecasts = feature_utils.forecast_features(
        self.covariates,
        self.feature_specs,
        num_forecast_steps=output_time_points,
        num_threads=1,
    )
    forecasts_parallel = feature_utils.forecast_features(
        self.covariates,
        self.feature_specs,
        num_forecast_steps=output_time_points,
        num_threads=2,
    )
    _, n_locations, n_features = self.covariates.shape
    self.assertTupleEqual(forecasts.shape,
                          (output_time_points, n_locations, n_features))
    np.testing.assert_allclose(forecasts,
                               self.expected_output[:output_time_points, :, :])
    np.testing.assert_allclose(forecasts, forecasts_parallel)

  def test_read_from_a_project_raises_if_no_projects_specified(self):
    with self.assertRaises(ValueError):
      feature_utils.read_from_a_project("SELECT * from FAKE", [])

  @mock.patch.object(
      pd, "read_gbq", side_effect=gbq.NotFoundException, autospec=pd.read_gbq)
  def test_read_from_a_project_raises_if_not_found_in_any_projects(
      self, read_mock):
    with self.assertRaises(gbq.NotFoundException):
      feature_utils.read_from_a_project("SELECT * from FAKE",
                                        ["test_project", "test_project_2"])
    read_mock.assert_has_calls([
        mock.call("SELECT * from FAKE", project_id="test_project"),
        mock.call("SELECT * from FAKE", project_id="test_project_2")
    ])

  @mock.patch.object(
      pd,
      "read_gbq",
      side_effect=[gbq.NotFoundException,
                   pd.DataFrame([1])],
      autospec=pd.read_gbq)
  def test_read_from_a_project_can_handle_missing_project(self, read_mock):
    output_df = feature_utils.read_from_a_project(
        "SELECT * from FAKE", ["test_project", "test_project_2"])
    read_mock.assert_has_calls([
        mock.call("SELECT * from FAKE", project_id="test_project"),
        mock.call("SELECT * from FAKE", project_id="test_project_2")
    ])
    pd.testing.assert_frame_equal(output_df, pd.DataFrame([1]))


if __name__ == "__main__":
  tf.test.main()
| [
"[email protected]"
] | |
8dccce2cb401bc399d13979fc9d02d0db03644cd | cea3a0be3209626c11f3ec7c235b0a7f7fdbe256 | /appengine_config.py | cad07b958597b5ca5a2a64641a75d268d1cd0720 | [
"LicenseRef-scancode-public-domain"
] | permissive | snarfed/plusstreamfeed | 56a2d1441097dd1660b9c5a46b546505ac8c2fa8 | 01362468594b820d9f57f4830a13df5f1aceaed1 | refs/heads/master | 2020-04-10T15:53:21.389755 | 2019-04-18T14:18:33 | 2019-04-18T23:18:43 | 41,272,334 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | # Load packages from virtualenv
# https://cloud.google.com/appengine/docs/python/tools/libraries27#vendoring
from google.appengine.ext import vendor
vendor.add('local')
from granary.appengine_config import *
| [
"[email protected]"
] | |
e26cbf3aa76e7ccbaad9ffdccf7b920147bf8c64 | cd555725b300579d44c0bd3f6fc8f6a968912dfb | /UF1/Nieto_Alejandro_Gomez_Alejandro_PT15/Exercici2-2.py | 847dc7532d4ef40f9f8a3a48b08218e721990879 | [] | no_license | aleexnl/aws-python | 2da5d8a416927f381618f1d6076d98d5e35b3b5e | 03fce7744b443b2b59a02c261067ecae46ecc3d9 | refs/heads/master | 2022-11-24T08:58:24.686651 | 2020-04-18T15:58:32 | 2020-04-18T15:58:32 | 221,772,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | # Fem una tupla amb els dias que tenen cada mes
dias_mes = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
# Make another tuple holding today's date
avui = (2018, 4, 11)
# Unpack the tuple values into the variables a (year), m (month) and d (day)
a, m, d = avui
# Create a tuple holding Christmas day (December 25)
nadal = (2018, 12, 25)
# Unpack the values into a2, m2 and d2 (just like the previous tuple)
a2, m2, d2 = nadal
# Start with Christmas' day of month plus the days remaining in the current month
suma = d2 + (dias_mes[m-1] - d)
# Finally, loop over the months in between, accumulate their days and print the total
for i in range(m, m2-1):
    suma = suma + dias_mes[i]
print("Days between today and Christmas:", suma)
| [
"[email protected]"
] | |
e1edaf60e374412bee0cbbd1ef24f83c1c72e19a | 0bdd797b3e95429e03108152cacfbb26069f1d76 | /stack/ternaryexpression.py | da9bb4cd80a9007718dd489b83661c65f5fdbb77 | [
"MIT"
] | permissive | mengyangbai/leetcode | 3eda89470c933fdec5ffdfbf9cceb57e85c7e44d | e7a6906ecc5bce665dec5d0f057b302a64d50f40 | refs/heads/master | 2021-06-28T15:39:16.258564 | 2019-06-27T07:30:22 | 2019-06-27T07:30:22 | 142,398,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | class Solution(object):
    def parseTernary(self, expression):
        """
        :type expression: str
        :rtype: str
        """
        stack = []
        expr = list(expression)
        while len(stack) > 1 or expr:
            tail = stack[-5:]
            if len(tail) == 5 and tail[3] == '?' and tail[1] == ':':
                tail = tail[2] if tail[4] == 'T' else tail[0]
                stack = stack[:-5] + [tail]
            else:
                stack.append(expr.pop())
        return stack[0] if stack else None | [
"[email protected]"
] | |
2dc546f6dbc042377bebe95d64321e6f51650677 | af57db915a3e0376a400511417c5750e180de3d5 | /auto/forms.py | 2a3709428dbab0f1cf9f461305895e35748bcd2a | [] | no_license | Aitodev/ais | a8377e572a7cbf902c0d2229e27465af01a202f3 | a463dac02dde4be2586e4d3e77b58b7c0cb89f7e | refs/heads/main | 2023-02-25T06:55:09.594195 | 2021-02-01T16:06:45 | 2021-02-01T16:06:45 | 335,005,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,618 | py | from django import forms
from django.contrib.auth.models import User
from .models import *
from .models import Feedback


class LoginForm(forms.Form):
    username = forms.CharField()
    password = forms.CharField(widget=forms.PasswordInput)


class UserRegistrationForm(forms.ModelForm):
    password = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Repeat password', widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('username', 'first_name')

    def clean_password2(self):
        cd = self.cleaned_data
        if cd['password'] != cd['password2']:
            raise forms.ValidationError('Passwords don\'t match.')
        return cd['password2']


class UserEditForm(forms.ModelForm):
    class Meta:
        model = User
        fields = ('first_name', 'last_name')


class ProfileEditForm(forms.ModelForm):
    class Meta:
        model = Profile
        fields = ('userClient', 'name', 'sirName', 'patronymic', 'sex', 'birthDate', 'country', 'docs',
                  'docNumber', 'gave', 'date', 'inn', 'postalCode', 'region', 'district', 'settlement',
                  'settlementName', 'street', 'buildingNumber', 'housing', 'apartment', 'phoneNumber',
                  'location', 'city', 'carCost', 'postalCode_f', 'region_f', 'district_f', 'settlement_f', 'settlementName_f',
                  'street_f', 'buildingNumber_f', 'housing_f', 'apartment_f', 'employmentType', 'income', 'familyIncome',
                  'password', 'passwordRepeat', 'phoneNumber2', 'phoneNumberSub', 'secretWord')


class PartnerEditForm(forms.ModelForm):
    class Meta:
        model = Partner
        fields = ('userPartner', 'namePartner', 'sirNamePartner', 'patronymicPartner', 'phonePartner', 'emailPartner',
                  'adressPartner', 'secretWordPartner', 'passwordPartner', 'passwordRepeatPartner', 'docNumberPartner',
                  'gavePartner', 'datePartner', 'upload')


class FeedbackForm(forms.ModelForm):
    class Meta:
        model = Feedback
        fields = ['phoneNumber', 'name', 'text']


class VerificationOfDocumentsForm(forms.ModelForm):
    class Meta:
        model = VerificationOfDocuments
        fields = ['applicationForMembership', 'passportSides', 'addressReference']


class MakeAPaymentForm(forms.ModelForm):
    class Meta:
        model = MakeAPayment
        fields = ['entranceFee', 'dateOfPayment', 'paymentMethod', 'document']


class UserList(forms.ModelForm):
    class Meta:
        model = UserList
        fields = ['first_name', 'last_name']
| [
"[email protected]"
] | |
6fd84c7eaaf2f3f098c6eafed22ac366f1da98cb | 5c59dd613315aefbdc26d2494efd9184c177f8a9 | /langevin_thermostat.py | b23b17896f0ae28f3e6aabaf25bd74692e63de0b | [] | no_license | DuaneNielsen/odes | d7f4480e18a5044726e8faacf85eb2be67b08fb3 | 26735aad34d0658755f639464670cae0add3cd02 | refs/heads/master | 2022-12-28T15:34:19.014633 | 2020-10-10T17:43:05 | 2020-10-10T17:43:05 | 302,956,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | from vpython import *
import random
ground = box(color=color.red, size=vector(22, 1, 1), pos=vector(0, -1.5, 0))
top = box(color=color.red, size=vector(22, 1, 1), pos=vector(0, 19.5, 0))
left = box(color=color.red, size=vector(1, 22, 1), pos=vector(-11, 9, 0))
right = box(color=color.red, size=vector(1, 22, 1), pos=vector(11, 9, 0))
s = sphere()
s.pos = vector(-10, 0, 0)
s.velocity = vector(5, 19.2, 0)
s.mass = 1.0
k_boltzmann = 1.0
alpha = 0.2
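# Fluctuation-dissipation note: the random kick drawn in f_langevin below has
# magnitude sqrt(2 * alpha * m * k_B * T / dt) (with k_boltzmann = 1 in these
# units), so together with the -alpha * m * v drag applied in the main loop it
# drives the sphere toward the requested temperature.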
def random_vector():
    return vector(random.gauss(0, 1), random.gauss(0, 1), random.gauss(0, 1))


def f_langevin(alpha, m, temp, dt):
    return sqrt(2 * alpha * m * temp / dt) * random_vector()


t = 0
dt = 0.001
while t < 20:
    s.accel = f_langevin(alpha, s.mass, 400.0, dt) - alpha * s.velocity * s.mass
    s.velocity += s.accel * dt
    s.pos += s.velocity * dt
    t = t + dt
    rate(abs(1.0 / dt))

    """ bounce off the sides of the box"""
    if not -10.0 < s.pos.x < 10.0:
        s.velocity.x = - s.velocity.x
    if not 0.0 < s.pos.y < 19.0:
        s.velocity.y = - s.velocity.y
    if not 0.0 < s.pos.z < 1.0:
        s.velocity.z = - s.velocity.z
| [
"[email protected]"
] | |
f983b13f56d37aa51fd6fad03f39562f4d2a7fd3 | 9e2935a5186914e44a739989c9b5c1a89fef9f65 | /dht11_no_sucess/2.py | 7049346ef0a6743c5245a6eb104bb2cfb894eb16 | [] | no_license | raspberrypitools/sensor | a36001f0eea6e423ea1659c7bb514e244a12bcf6 | 96e731416843c6cba04e3e549a59714123e665e6 | refs/heads/master | 2021-02-17T09:58:22.822618 | 2020-06-12T14:40:33 | 2020-06-12T14:40:33 | 245,087,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,769 | py | #!/usr/bin/python
#-*- coding:utf-8 -*-
import RPi.GPIO as GPIO
import time
channel =7
data = []
j = 0
GPIO.setmode(GPIO.BCM)
time.sleep(1)
GPIO.setup(channel, GPIO.OUT)
GPIO.output(channel, GPIO.LOW)
time.sleep(0.02)
GPIO.output(channel, GPIO.HIGH)
GPIO.setup(channel, GPIO.IN)
print GPIO.input(channel),GPIO.LOW,GPIO.HIGH
#while GPIO.input(channel) == GPIO.LOW:
# continue
#while GPIO.input(channel) == GPIO.HIGH:
# continue
while j < 40:
k = 0
while GPIO.input(channel) == GPIO.LOW:
continue
while GPIO.input(channel) == GPIO.HIGH:
k += 1
if k > 100:
break
if k < 8:
data.append(0)
else:
data.append(1)
j += 1
print "sensor is working."
print data
humidity_bit = data[0:8]
humidity_point_bit = data[8:16]
temperature_bit = data[16:24]
temperature_point_bit = data[24:32]
check_bit = data[32:40]
humidity = 0
humidity_point = 0
temperature = 0
temperature_point = 0
check = 0
for i in range(8):
    humidity += humidity_bit[i] * 2 ** (7-i)
    humidity_point += humidity_point_bit[i] * 2 ** (7-i)
    temperature += temperature_bit[i] * 2 ** (7-i)
    temperature_point += temperature_point_bit[i] * 2 ** (7-i)
    check += check_bit[i] * 2 ** (7-i)
tmp = humidity + humidity_point + temperature + temperature_point
print 'tmp:',tmp
print 'check:',check
if check == tmp:
    print "temperature :", temperature, "*C, humidity :", humidity, "%"
else:
    print "wrong"
    print "temperature :", temperature, "*C, humidity :", humidity, "% check :", check, ", tmp :", tmp
mytemp = '%f' %temperature
myhumi = '%f' %humidity
tmp_output = open('./tmp_data.txt', 'w')
hud_output = open('./hum_data.txt', 'w')
tmp_output.write(mytemp)
hud_output.write(myhumi)
tmp_output.close()
hud_output.close()
GPIO.cleanup()
| [
"[email protected]"
] | |
a891d1335f0277aed08bff930a81a3111785cbcd | 5dd5d4f80a883459ece27066bb88a8a951b1f88a | /examples/volumetric/read_vti.py | bc37309bb1de11d7eca624cf3460946772df96b7 | [
"MIT"
] | permissive | Gjacquenot/vtkplotter | 428cd7c302ca50980a829aa274cf0c4165990267 | dc865f28dec0c6f10de159dc1f8f20dd69ee74cf | refs/heads/master | 2020-05-25T17:19:55.569739 | 2019-05-22T14:56:50 | 2019-05-22T14:56:50 | 170,382,207 | 0 | 0 | MIT | 2019-02-12T19:58:25 | 2019-02-12T19:58:24 | null | UTF-8 | Python | false | false | 873 | py | """
Using normal vtk commands to load an xml vti file,
then use vtkplotter to show the resulting 3d image.
"""
import vtk
from vtkplotter import datadir
# Create the reader for the data.
reader = vtk.vtkXMLImageDataReader()
reader.SetFileName(datadir+"vase.vti")
reader.Update()
img = reader.GetOutput()
# specify the data array in the file to process
# img.GetPointData().SetActiveAttribute('SLCImage', 0)
#################################
from vtkplotter import Volume, load, show, Text
# can set colors and transparencies along the scalar range
vol = Volume(img, c=["gray", "fuchsia", "dg", (0, 0, 1)], alpha=[0.1, 0.2, 0.3, 0.8])
# load command returns an isosurface (vtkActor) of the 3d image
iso = load(datadir+"vase.vti", threshold=140).wire(True).alpha(0.1)
# show command creates and returns an instance of class Plotter
show(vol, iso, Text(__doc__), bg="w")
| [
"[email protected]"
] | |
7f4c31ff4d35f469ac39c66f2f9f5a3237164696 | 6929f9696a8f90b3778d449a199cee8891f3f739 | /python_core/packages_test/package_described.py | 9212ff4dd84afff229f7e04dd0d8c09253353576 | [] | no_license | chemplife/Python | 881d492a4271fb2b423f2dd611eaac53a0efdc34 | 7fdfbf442a915e4f41506503baad4345a52d1e86 | refs/heads/master | 2022-12-31T20:00:22.475985 | 2020-10-19T20:14:43 | 2020-10-19T20:14:43 | 305,503,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,935 | py | """
The directory this file is in is now a Python package.
A Package is a MODULE that has additional functionality:
    - It can have more packages inside it.
    - It can have modules inside it.
To make any directory a package recognisable by the Python interpreter,
    - there needs to be an __init__.py file in it.
Eg:
    app/
        pack1/
            __init__.py
            module1.py
            module2.py
Now,
    import pack1
    -> pack1's code is in the __init__.py file
    -> the code is loaded and executed in sys.modules['pack1']: JUST LIKE A MODULE
    -> 'pack1' will be added to the global namespace of the file: JUST LIKE A MODULE

Package and Module differences:
    - __path__:
        - module_name.__path__: either EMPTY or MISSING
        - package_name.__path__: contains the ABSOLUTE PATH to the Package.
    - __file__:
        - module_name.__file__: ABSOLUTE path to the file that contains the source code of the Module.
        - package_name.__file__: ABSOLUTE path to the __init__.py file.
    - __package__:
        - module_name.__package__: the package the module code is located in. If the Module is in the application root, this is empty.
        - package_name.__package__: contains 'package_name' (its own name).
          (In case of nested packages, the inner package name is: pack1.pack_inner)

    app/
        module_root.py
        pack1/
            __init__.py
            module1.py
            module2.py
            pack_inner/
                __init__.py
                module1_inner.py
                module2_inner.py

    module_root.py:
        import pack1.pack_inner.module1_inner
        - Sequence of loading:
            - Load pack1 and cache it in sys.modules['pack1']
            - Load pack_inner and cache it in sys.modules['pack1.pack_inner']
            - Load module1_inner and cache it in sys.modules['pack1.pack_inner.module1_inner']
            - The global namespace will have 'pack1' in it -> globals()['pack1'] / module_root.__dict__['pack1']
        **** While loading pack1 and pack_inner, their __init__.py files are executed and any dependent modules are loaded as well.

############################################################################################################################
############################################################################################################################
So, it can be EXTREMELY USEFUL.
If we do
    - import pack1 or import pack1.pack_inner
      (by default, the modules in these packages are NOT going to be loaded into sys.modules[],
      so we cannot access them)
    - pack1.module1a.value -> will NOT work (until we 'import pack1.module1a'), nor will any other module in these packages.
    - So, to access all the modules in a package just by importing the package name:
        - import module_name -> inside the __init__.py file of the package
          (because the __init__.py file gets executed when the package is imported,
          it will make all of its imports available).
############################################################################################################################
############################################################################################################################
"""
| [
"[email protected]"
] | |
03a69a90e7668a902e87e4659702760b40bab9f8 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/2553.py | 33b08c886bcd825a1acedf643a39ec999814fd9f | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py |
# function to flip
def flip(s):
    if s == '+':
        return '-'
    else:
        return '+'

# function takes N as input and prints output
def compare(f, s):
    return int(f) < int(s)
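# Worked example for solution() below (illustrative): N = 1320 -> "1299".
# Scanning right-to-left, the descending pair ('2','0') fires first giving
# 1319, then ('3','1') fires giving 1299; each hit decrements the left digit
# and fills the tail with 9s, and the loop re-checks so the borrow propagates.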
def solution(N):
    result = None
    n = list(str(N))
    i = len(n) - 1
    while i > 0:
        # print(i, len(n))
        if compare(n[i], n[i-1]):
            # decrement the left digit of the descending pair
            f = int(n[i - 1])
            # s = int(n[i])
            # and fill positions i onwards with 9s
            n[i-1] = str(f - 1)
            n[i] = "9"
            n = n[:i + 1] + list("9" * (len(n) - (i + 1)))
        else:
            i -= 1
    k = ''.join(n)
    if k[0] == '0':
        result = k[1:]
    else:
        result = k
    return result

# brute force check :P
# def check(N):
#     n = list(str(N))
#     result = True
#     i = len(n) - 1
#     while i > 0:
#         if compare(n[i], n[i - 1]):
#             result = False
#             break
#         i -= 1
#     return result
# def solution_b(N):
#     while not check(N):
#         N -= 1
#     return N

n = int(raw_input())
for i in range(n):
    # read the input number
    N = int(raw_input())
    result = solution(N)
    print("Case #" + str(i + 1) + ": " + str(result))
# print Case #i: str(result) | [
"[email protected]"
] | |
73f04e629564a2b74fae518681395ab7581c4fe8 | 3649dce8b44c72bbfee56adf4e29ca6c5ba2703a | /code_up1290.py | b77336690c08f70b9c8eed863fcd139837ae78b0 | [] | no_license | beOk91/code_up | 03c7aca76e955e3a59d797299749e7fc2457f24a | ca1042ce216cc0a80e9b3d3ad363bc29c4ed7690 | refs/heads/master | 2022-12-06T08:23:00.788315 | 2020-08-20T11:21:59 | 2020-08-20T11:21:59 | 284,844,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | num=int(input())
sum1=0
for i in range(1, num):
    if num % i == 0:
        sum1 += 1
print(sum1) | [
"[email protected]"
] | |
46062115961e2560753e312d28159cdebeda7ca2 | 849cd35166a93259c8bf84f001a3c40d9fe18b98 | /bomj.py | 3ab89c246b9b4a91d31b4921845d35dac34a030e | [] | no_license | Larionov0/Group2-lessons | 98c3d20d7532583ee66e766371235cfe888264c5 | 6426962e9b6766a9470ab1408b95486e63e4c2fa | refs/heads/master | 2023-05-07T01:42:57.290428 | 2021-05-27T17:52:02 | 2021-05-27T17:52:02 | 334,012,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,404 | py | import random
from time import sleep

money = 1000
satiety = max_satiety = 10
satiety = 1
things = [
    ['accessory', 'shorts', 0],
    ['accessory', "cap", 0],
]
store = [
    ['accessory', 'slippers', 50],
    ['accessory', "phone", 200],
    ['accessory', "tracksuit", 300],
    ['accessory', "smartphone", 1000],
    ['accessory', 'laptop', 6000],
    ['accessory', 'apartment', 100000],
    ['food', 'bread', 10, 2],
    ['food', 'ham', 70, 6],
    ['food', 'snail pizza', 700, 10]
]

print("Welcome to Hobo Simulator")
while True:
    if satiety <= 0:
        print("The hobo kicked the bucket :(")
        break
    print("\n\n\n--= Main menu =--")
    print("Money: ", money, 'UAH')
    print("Satiety: ", satiety, '/', max_satiety)
    print("Things:")
    for thing in things:
        print('-', thing[1])
    print('----= Your actions:')
    print("0 - quit the game")
    print("1 - store")
    print("2 - go earn money")
    print("3 - street fights")
    print("4 - scavenge the dump")
    print("5 - eat")
    choice = input("Your choice: ")
    if choice == '1':
        while True:
            print("\n\n--= Store =--")
            print("Money: ", money, 'UAH')
            print("Satiety: ", satiety, '/', max_satiety)
            print("Things:")
            for thing in things:
                print('-', thing[1])
            print('----= Goods:')
            print("0 - back")
            # Print all the goods on the screen
            i = 1
            for thing in store:
                print(i, '-', thing[1], '(', thing[2], 'UAH )')
                i += 1
            # The user picks the number of a good
            choice = int(input("Your choice: "))
            if choice == 0:
                break
            # Fetch the item from the list by the given number
            thing = store[choice - 1]  # e.g. thing = ["smartphone", 1000]
            # Standard purchase logic: withdraw the money
            if money >= thing[2]:
                money -= thing[2]
                things.append(thing)
                print("Purchase successful: ", thing[1])
            else:
                print("Not enough money :(")
    elif choice == '2':
        while True:
            print("\n\n--= Earnings =--")
            print("Money: ", money, 'UAH')
            print("Satiety: ", satiety, '/', max_satiety)
            print("Things:")
            for thing in things:
                print('-', thing[1])
            print('----= Your actions:')
            print("0 - back")
            print("1 - beg (0 - 30 UAH)")
            print("2 - collect bottles (10 - 20 UAH)")
            print("3 - loader (shoes required) (50 UAH)")
            print("4 - manager (phone and suit required) (200 UAH)")
            choice = input("Your choice: ")
            if choice == '0':
                break
            elif choice == '1':
                print("The hobo kneels down and collects tribute...")
                sleep(3)
                zarobitok = random.randint(0, 30)
                money += zarobitok
                print("The hobo earned", zarobitok, "UAH. He now has", money, 'UAH')
                satiety -= 1
            elif choice == '2':
                print("The hobo collects bottles...")
                sleep(3)
                zarobitok = random.randint(10, 20)
                money += zarobitok
                print("The hobo earned", zarobitok, "UAH. He now has", money, 'UAH')
                satiety -= 1
            elif choice == '3':
                if ['accessory', 'slippers', 50] in things:
                    print("The hobo goes to load goods")
                    print("Hauling sacks...")
                    input("<Enter>")
                    print("Dragging crates...")
                    input("<Enter>")
                    print("Loading sacks...")
                    input("<Enter>")
                    print("Polishing the boss's shoes...")
                    input("<Enter>")
                    zarobitok = 50
                    money += zarobitok
                    print("The hobo earned", zarobitok, "UAH. He now has", money, 'UAH')
                    satiety -= 2
                else:
                    print("Not enough things!")
    elif choice == '3':
        pass
    elif choice == '4':
        pass
    elif choice == '5':
        while True:
            print("--= Eating =--")
            print("Money: ", money, 'UAH')
            print("Satiety: ", satiety, '/', max_satiety)
            print("Things:")
            for thing in things:
                print('-', thing[1])
            food = []
            for thing in things:
                if thing[0] == 'food':
                    food.append(thing)
            # Print all the food the player owns
            print("0 - back")
            i = 1
            for dish in food:
                print(f"{i} - {dish[1]} (+{dish[3]} satiety)")
                i += 1
            choice = int(input("Your choice: "))
            if choice == 0:
                break
            dish = food[choice - 1]  # dish = ['food', 'bread', 10, 2]
            print("The hobo ate", dish[1])
            things.remove(dish)
            satiety += dish[3]
            if satiety > max_satiety:
                satiety = max_satiety
    elif choice == '0':
        break
    else:
        pass
| [
"[email protected]"
] | |
8cafb8dc6888632bb689dd46e15eb44721ac98d4 | 210b05500599fe0fbe165c1cd3056e9a11487b0d | /ico/cmd/refund.py | 06da4a97c1ca722f174293455fb0c5996989f604 | [
"Apache-2.0"
] | permissive | streamr-dev/token-launch-smart-contracts | b7fa04c9b33b5c2e371fc88dee1555b74334000e | 209d574ded70b4c382894d09ea886a2704291ce0 | refs/heads/master | 2021-01-01T04:48:56.809663 | 2017-07-10T01:23:33 | 2017-07-10T01:23:33 | 97,254,821 | 11 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,520 | py | """Distribute ETH refunds."""
import csv
import datetime
import json
import os
import time
from decimal import Decimal
import shutil
import click
from eth_utils import from_wei
from eth_utils import is_checksum_address
from eth_utils import to_wei
from populus.utils.accounts import is_account_locked
from populus import Project
from populus.utils.cli import request_account_unlock
from ico.utils import check_succesful_tx
@click.command()
@click.option('--chain', nargs=1, default="mainnet", help='On which chain to deploy - see populus.json')
@click.option('--hot-wallet-address', nargs=1, help='The account that deploys the issuer contract, controls the contract and pays for the gas fees', required=True)
@click.option('--csv-file', nargs=1, help='CSV file containing distribution data', required=True)
@click.option('--address-column', nargs=1, help='Name of CSV column containing Ethereum addresses', default="address")
@click.option('--amount-column', nargs=1, help='Name of CSV column containing decimal token amounts', default="amount")
@click.option('--id-column', nargs=1, help='Name of CSV column containing unique identifier for all refund participants (usually email)', default="email")
@click.option('--limit', nargs=1, help='How many items to import in this batch', required=False, default=1000)
@click.option('--start-from', nargs=1, help='First row to import (zero based)', required=False, default=0)
@click.option('--state-file', nargs=1, help='JSON file where we keep the state', required=True)
def main(chain, hot_wallet_address, csv_file, limit, start_from, address_column, amount_column, id_column, state_file):
"""Distribute ETh refunds.
Reads in funds distribution data as CSV. Then sends funds from a local address.
The refund status is stored as a JSON file.
Example:
refund --chain=kovan --hot-wallet-address=0x001fc7d7e506866aeab82c11da515e9dd6d02c25 --csv-file=refunds.csv --address-column="Refund address" --amount-column="ETH" --id-column="Email" --start-from=0 --limit=2 --state-file=refund-state.json
Example CSV data:
.. code-block:: csv
Email,ETH,Refund address
[email protected],61.52,0x0078EF811B6564c996fD10012579633B1a518b9D
[email protected],111.21,0xf0b91641CCe2ADB4c0D7B90c54E7eE96CCCBc3d1
[email protected],61.52,0x0dAbC71Faa8982bF23eE2c4979d22536F5101065
[email protected],61.52,0x0B8EceBc18153166Beec1b568D510B55B560789D
"""
# Make a backup of the state file
if os.path.exists(state_file):
assert state_file.endswith(".json")
backup_name = state_file.replace(".json", "." + datetime.datetime.utcnow().isoformat() + ".bak.json")
print("Backing up state file to", backup_name)
shutil.copy(state_file, backup_name)
project = Project()
with project.get_chain(chain) as c:
web3 = c.web3
print("Web3 provider is", web3.currentProvider)
print("Hot wallet address is", hot_wallet_address)
print("Hot wallet balance is", from_wei(web3.eth.getBalance(hot_wallet_address), "ether"), "ETH")
# Goes through geth account unlock process if needed
if is_account_locked(web3, hot_wallet_address):
request_account_unlock(c, hot_wallet_address, timeout=3600*6)
assert not is_account_locked(web3, hot_wallet_address)
print("Reading data", csv_file)
with open(csv_file, "rt", encoding='utf-8-sig') as inp:
reader = csv.DictReader(inp)
rows = [row for row in reader]
# Check that we have unique addresses
uniq_ids = set()
for row in rows:
print(row)
id = row[id_column].strip()
if id in uniq_ids:
raise RuntimeError("Id appears twice in input data", id)
uniq_ids.add(id)
addr = row[address_column]
if not is_checksum_address(addr):
print("Not a checksummed address", addr)
# Start distribution
start_time = time.time()
start_balance = from_wei(web3.eth.getBalance(hot_wallet_address), "ether")
print("Total rows", len(rows))
if os.path.exists(state_file):
with open(state_file, "rt") as inp:
state = json.load(inp)
else:
state = {}
for i in range(start_from, min(start_from+limit, len(rows))):
data = rows[i]
addr = data[address_column].strip()
id = data[id_column].strip()
amount = Decimal(data[amount_column].strip())
amount_wei = to_wei(amount, "ether")
if id in state:
print("Already refunded", id, addr, amount)
continue
# Use non-default gas price for speedier processing
gas_price = int(web3.eth.gasPrice * 10)
txid = web3.eth.sendTransaction({"from": hot_wallet_address, "to": addr, "value": amount_wei, "gasPrice": gas_price})
duration = time.time() - start_time
print("Transferring", id, amount_wei, "to", addr, "txid", txid, "duration", duration)
state[id] = txid
with open(state_file, "wt") as out:
json.dump(state, out)
check_succesful_tx(web3, txid)
end_balance = from_wei(web3.eth.getBalance(hot_wallet_address), "ether")
print("Refund cost is", start_balance - end_balance, "ETH")
print("All done! Enjoy your decentralized future.")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
32b5aa500d83bd14aee751d92265569351a866f2 | e4066b34668bbf7fccd2ff20deb0d53392350982 | /project_scrapy/spiders/fishpond.py | a505966943e5272cc2c64493bb9cf1f8f5c9e360 | [] | no_license | sushma535/WebSites | 24a688b86e1c6571110f20421533f0e7fdf6e1a8 | 16a3bfa44e6c7e22ae230f5b336a059817871a97 | refs/heads/master | 2023-08-18T09:09:16.052555 | 2021-10-11T00:41:50 | 2021-10-11T00:41:50 | 415,621,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,545 | py | import scrapy
from scrapy.crawler import CrawlerProcess
import os
import csv
from csv import reader
import re
total_data = {}
class SimilarWeb(scrapy.Spider):
    name = 'SW'
    user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
    start_urls = ['https://www.fishpond.com.au/', 'https://www.similarsites.com/site/fishpond.com.au/']
    csv_columns = ['Category', 'Description', 'Name', 'Url']
    csv_file = 'websites1_data.csv'
    count = 0

    def parse(self, response):
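        # Called once per start URL: the first response contributes the site
        # name, the second its categories/description; the CSV row is written
        # only after both have arrived (self.count == 2).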
        data, desc, cat = '', '', ''
        print('response url:', response.url)
        if response.url == self.start_urls[0]:
            data = response.css('title::text').get()
            if data:
                data = re.sub("\n\t\t", '', data)
                total_data['Name'] = data
                self.count += 1
        elif response.url == self.start_urls[1]:
            cat = response.css(
                'div[class="StatisticsCategoriesDistribution__CategoryTitle-fnuckk-6 jsMDeK"]::text').getall()
            desc = response.css('div[class="SiteHeader__Description-sc-1ybnx66-8 hhZNQm"]::text').get()
            if cat:
                cat = ": ".join(cat[:])
            total_data['Category'] = cat
            total_data['Description'] = desc
            total_data['Url'] = self.start_urls[0]
            self.count += 1
        if self.count == 2:
            print("total data", total_data)
            new_data = [total_data['Category'], total_data['Description'], total_data['Name'],
                        total_data['Url']]
            print("new data", new_data)
            self.row_appending_to_csv_file(new_data)

    def row_appending_to_csv_file(self, data):
        if os.path.exists(self.csv_file):
            need_to_add_headers = False
            with open(self.csv_file, 'a+', newline='') as file:
                file.seek(0)
                csv_reader = reader(file)
                if len(list(csv_reader)) == 0:
                    need_to_add_headers = True
                csv_writer = csv.writer(file)
                if need_to_add_headers:
                    csv_writer.writerow(self.csv_columns)
                csv_writer.writerow(data)
        else:
            with open(self.csv_file, 'w', newline='') as file:
                csv_writer = csv.writer(file)
                csv_writer.writerow(self.csv_columns)  # header
                csv_writer.writerow(data)


process = CrawlerProcess()
process.crawl(SimilarWeb)
process.start()
| [
"[email protected]"
] | |
96c83faea4fa8fcd498552cfd69510ae95d4500b | 368c66467b78adf62da04cb0b8cedd2ef37bb127 | /BOJ/Python/15684_사다리조작.py | ad8c21b700f5ed8bafd0f746b37d65e039941596 | [] | no_license | DJHyun/Algorithm | c8786ddcd8b5693fc9b3b4721fdf1eeda21611c5 | fd6ae800886dac4ec5ff6cf2618bc2c839a76e7a | refs/heads/master | 2020-07-30T16:32:49.344329 | 2020-02-25T07:59:34 | 2020-02-25T07:59:34 | 210,289,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | # baekjoon source = "https://www.acmicpc.net/problem/15684"
def check():
    # Follow every start column down the ladder; True iff column i maps to i.
    for start in range(n):
        col = start
        for row in range(h):
            if ladder[row][col]:
                col += 1
            elif col > 0 and ladder[row][col - 1]:
                col -= 1
        if col != start:
            return False
    return True


def dfs(count, row, col):
    global answer
    if count >= answer:
        return
    if check():
        answer = count
        return
    if count == 3:
        return
    # Try adding one rung at every free position at or after (row, col);
    # scanning forward only avoids generating the same set twice.
    for r in range(row, h):
        c = col if r == row else 0
        while c < n - 1:
            # a rung may not touch an existing one on either side
            # (index -1 and index n are the always-empty padding column)
            if not ladder[r][c] and not ladder[r][c - 1] and not ladder[r][c + 1]:
                ladder[r][c] = 1
                dfs(count + 1, r, c + 2)
                ladder[r][c] = 0
            c += 1


n, m, h = map(int, input().split())
# ladder[row][col] == 1 means a rung between vertical lines col and col+1
# (0-indexed); one extra column keeps the neighbour checks branch-free.
ladder = [[0] * (n + 1) for _ in range(h)]
for _ in range(m):
    a, b = map(int, input().split())
    ladder[a - 1][b - 1] = 1

answer = 4
dfs(0, 0, 0)
print(answer if answer <= 3 else -1)
| [
"[email protected]"
] | |
a179f9c4dfc6c25e8e7b9b0ebef9df266971e387 | 4911cc3eaadd536a234dc9e7e563ad0df8e7ba3c | /src/bd2k/util/test/test_d32.py | aaf9711801ab83db106c1de7ff75ea68a9b4ab52 | [] | no_license | hschmidt/bd2k-python-lib | 56ca4747e2b2cc3baf98c5f0efa343229018ea77 | a662f3c5564299a6c2d86233e6bee741e9e44c3d | refs/heads/master | 2020-06-28T06:04:27.265541 | 2016-11-18T07:22:44 | 2016-11-18T07:22:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | # Copyright (c) 2014 Dominic Tarr
# Copyright (c) 2015 Hannes Schmidt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Inspired by JavaScript code found at https://github.com/dominictarr/d64
from __future__ import absolute_import
from unittest import TestCase
from bd2k.util.d32 import standard as d32
import os
class TestD32( TestCase ):

    def test( self ):
        l = [ os.urandom( i ) for i in xrange( 1000 ) ]
        self.assertEqual( map( d32.decode, sorted( map( d32.encode, l ) ) ), sorted( l ) )
| [
"[email protected]"
] | |
c0f861945d032cb15056a8e6d63ad54fa9f7c2b0 | c10f20abec372f81dbd6468ead208543f60940f1 | /learning/5.Package/5.2.stat.py | de809c5d862f6396d89748097203c556a8f6ae77 | [] | no_license | alenzhd/meachineLearning | 64876e7a6c0b8b39a63a9eb586d306a3489b4447 | 1b66ce2f73b226548f07e45c8537b8286635a048 | refs/heads/master | 2021-08-24T10:55:52.056439 | 2017-12-09T10:26:37 | 2017-12-09T10:26:37 | 112,688,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,425 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from scipy import stats
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
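# calc_statistics below converts raw moments m_k = E[x^k] into central ones:
# E[(x-mu)^3] = m3 - 3*mu*m2 + 2*mu^3 and
# E[(x-mu)^4] = m4 - 4*mu*m3 + 6*mu^2*m2 - 3*mu^4,
# giving skewness E[(x-mu)^3]/sigma^3 and excess kurtosis E[(x-mu)^4]/sigma^4 - 3.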
def calc_statistics(x):
    n = x.shape[0]  # number of samples
    # manual computation
    m = 0
    m2 = 0
    m3 = 0
    m4 = 0
    for t in x:
        m += t
        m2 += t*t
        m3 += t**3
        m4 += t**4
    m /= n
    m2 /= n
    m3 /= n
    m4 /= n
    mu = m
    sigma = np.sqrt(m2 - mu*mu)
    skew = (m3 - 3*mu*m2 + 2*mu**3) / sigma**3
    kurtosis = (m4 - 4*mu*m3 + 6*mu*mu*m2 - 4*mu**3*mu + mu**4) / sigma**4 - 3
    print('Manually computed mean, std, skewness, kurtosis:', mu, sigma, skew, kurtosis)
    # verify against the library functions
    mu = np.mean(x, axis=0)
    sigma = np.std(x, axis=0)
    skew = stats.skew(x)
    kurtosis = stats.kurtosis(x)
    return mu, sigma, skew, kurtosis


if __name__ == '__main__':
    d = np.random.randn(100000)
    print(d)
    mu, sigma, skew, kurtosis = calc_statistics(d)
    print('Library-computed mean, std, skewness, kurtosis:', mu, sigma, skew, kurtosis)

    # 1-D histogram
    mpl.rcParams[u'font.sans-serif'] = 'SimHei'
    mpl.rcParams[u'axes.unicode_minus'] = False
    y1, x1, dummy = plt.hist(d, bins=50, normed=True, color='g', alpha=0.75)
    t = np.arange(x1.min(), x1.max(), 0.05)
    y = np.exp(-t**2 / 2) / math.sqrt(2*math.pi)
    plt.plot(t, y, 'r-', lw=2)
    plt.title(u'Gaussian distribution, sample count: %d' % d.shape[0])
    plt.grid(True)
    # plt.show()

    d = np.random.randn(100000, 2)
    mu, sigma, skew, kurtosis = calc_statistics(d)
    print('Library-computed mean, std, skewness, kurtosis:', mu, sigma, skew, kurtosis)

    # 2-D plot
    N = 30
    density, edges = np.histogramdd(d, bins=[N, N])
    print('total sample count:', np.sum(density))
    density /= density.max()
    x = y = np.arange(N)
    t = np.meshgrid(x, y)
    fig = plt.figure(facecolor='w')
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(t[0], t[1], density, c='r', s=15*density, marker='o', depthshade=True)
    ax.plot_surface(t[0], t[1], density, cmap=cm.Accent, rstride=2, cstride=2, alpha=0.9, lw=0.75)
    ax.set_xlabel(u'X')
    ax.set_ylabel(u'Y')
    ax.set_zlabel(u'Z')
    plt.title(u'Bivariate Gaussian distribution, sample count: %d' % d.shape[0], fontsize=20)
    plt.tight_layout(0.1)
    plt.show()
| [
"[email protected]"
] | |
3f7fb0c976574d21edfe92fca912a67f210f5bfa | 9a9788a67925ba563f835fac204e76dc6cabb5bd | /Products/CMFDefault/browser/discussion/tests/test_discussion.py | ca429f37cfe1f2a3600665d5f7040b2a8ec0931b | [
"ZPL-2.1"
] | permissive | zopefoundation/Products.CMFDefault | d8f5fe6754d90abfaa37460ee2a3b0314167e34a | a176d9aac5a7e04725dbd0f7b76c6ac357062139 | refs/heads/master | 2023-06-21T20:54:29.719764 | 2021-02-05T17:36:40 | 2021-02-05T17:36:40 | 36,096,105 | 0 | 4 | NOASSERTION | 2021-02-04T15:08:40 | 2015-05-22T21:29:53 | Python | UTF-8 | Python | false | false | 950 | py | ##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Test Products.CMFDefault.browser.rss
"""
import unittest
from Testing import ZopeTestCase
from Products.CMFDefault.testing import FunctionalLayer
ftest_suite = ZopeTestCase.FunctionalDocFileSuite('discussion.txt')
ftest_suite.layer = FunctionalLayer
def test_suite():
    return unittest.TestSuite((
        ftest_suite,
        ))
| [
"[email protected]"
] | |
e7a1e8397a95a893a5775826d1166c55a55ea203 | 993ef8924418866f932396a58e3ad0c2a940ddd3 | /Production/python/PrivateSamples/EMJ_UL16_mMed-1200_mDark-20_kappa-0p14_aligned-down_cff.py | 6a4f474763f324fda07dff8fdc7a866ec6a434be | [] | no_license | TreeMaker/TreeMaker | 48d81f6c95a17828dbb599d29c15137cd6ef009a | 15dd7fe9e9e6f97d9e52614c900c27d200a6c45f | refs/heads/Run2_UL | 2023-07-07T15:04:56.672709 | 2023-07-03T16:43:17 | 2023-07-03T16:43:17 | 29,192,343 | 16 | 92 | null | 2023-07-03T16:43:28 | 2015-01-13T13:59:30 | Python | UTF-8 | Python | false | false | 1,972 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16/step4_MINIAODv2_mMed-1200_mDark-20_kappa-0p14_aligned-down_n-500_part-1.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16/step4_MINIAODv2_mMed-1200_mDark-20_kappa-0p14_aligned-down_n-500_part-10.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16/step4_MINIAODv2_mMed-1200_mDark-20_kappa-0p14_aligned-down_n-500_part-11.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16/step4_MINIAODv2_mMed-1200_mDark-20_kappa-0p14_aligned-down_n-500_part-3.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16/step4_MINIAODv2_mMed-1200_mDark-20_kappa-0p14_aligned-down_n-500_part-4.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16/step4_MINIAODv2_mMed-1200_mDark-20_kappa-0p14_aligned-down_n-500_part-5.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16/step4_MINIAODv2_mMed-1200_mDark-20_kappa-0p14_aligned-down_n-500_part-6.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16/step4_MINIAODv2_mMed-1200_mDark-20_kappa-0p14_aligned-down_n-500_part-7.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16/step4_MINIAODv2_mMed-1200_mDark-20_kappa-0p14_aligned-down_n-500_part-8.root',
    'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL16/step4_MINIAODv2_mMed-1200_mDark-20_kappa-0p14_aligned-down_n-500_part-9.root',
] )
| [
"[email protected]"
] | |
96343d7dbf5cef604566654a976175dd9694d385 | aaed251a860f6606fa826ccc057d5bbb13800fe1 | /swagger_client/models/jvm_info.py | ca9db7012cb553c3b0715ee388d53a85989b7044 | [] | no_license | japaniel/insightvm-python | 707f0bd16f20302e99c117f3e562cd5dbf25359e | 9bf8ae98b6d61c1d5c4ab2d8c6c810a68e16bf3d | refs/heads/main | 2023-02-02T04:01:36.204154 | 2020-12-16T01:21:20 | 2020-12-16T01:21:20 | 320,130,684 | 1 | 0 | null | 2020-12-16T01:21:21 | 2020-12-10T01:57:52 | Python | UTF-8 | Python | false | false | 6,254 | py | # coding: utf-8
"""
    InsightVM API

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)  # noqa: E501

    OpenAPI spec version: 3
    Contact: [email protected]
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

import six


class JVMInfo(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'start_time': 'str',
        'uptime': 'str',
        'vendor': 'str',
        'version': 'str'
    }

    attribute_map = {
        'name': 'name',
        'start_time': 'startTime',
        'uptime': 'uptime',
        'vendor': 'vendor',
        'version': 'version'
    }

    def __init__(self, name=None, start_time=None, uptime=None, vendor=None, version=None):  # noqa: E501
        """JVMInfo - a model defined in Swagger"""  # noqa: E501
        self._name = None
        self._start_time = None
        self._uptime = None
        self._vendor = None
        self._version = None
        self.discriminator = None
        if name is not None:
            self.name = name
        if start_time is not None:
            self.start_time = start_time
        if uptime is not None:
            self.uptime = uptime
        if vendor is not None:
            self.vendor = vendor
        if version is not None:
            self.version = version

    @property
    def name(self):
        """Gets the name of this JVMInfo.  # noqa: E501

        The name of the Java Virtual Machine.  # noqa: E501

        :return: The name of this JVMInfo.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this JVMInfo.

        The name of the Java Virtual Machine.  # noqa: E501

        :param name: The name of this JVMInfo.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def start_time(self):
        """Gets the start_time of this JVMInfo.  # noqa: E501

        The date and time the Java Virtual Machine last started.  # noqa: E501

        :return: The start_time of this JVMInfo.  # noqa: E501
        :rtype: str
        """
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        """Sets the start_time of this JVMInfo.

        The date and time the Java Virtual Machine last started.  # noqa: E501

        :param start_time: The start_time of this JVMInfo.  # noqa: E501
        :type: str
        """
        self._start_time = start_time

    @property
    def uptime(self):
        """Gets the uptime of this JVMInfo.  # noqa: E501

        Total up-time of the Java Virtual Machine, in ISO 8601 format. For example: `"PT1H4M24.214S"`.  # noqa: E501

        :return: The uptime of this JVMInfo.  # noqa: E501
        :rtype: str
        """
        return self._uptime

    @uptime.setter
    def uptime(self, uptime):
        """Sets the uptime of this JVMInfo.

        Total up-time of the Java Virtual Machine, in ISO 8601 format. For example: `"PT1H4M24.214S"`.  # noqa: E501

        :param uptime: The uptime of this JVMInfo.  # noqa: E501
        :type: str
        """
        self._uptime = uptime

    @property
    def vendor(self):
        """Gets the vendor of this JVMInfo.  # noqa: E501

        The vendor of the Java Virtual Machine.  # noqa: E501

        :return: The vendor of this JVMInfo.  # noqa: E501
        :rtype: str
        """
        return self._vendor

    @vendor.setter
    def vendor(self, vendor):
        """Sets the vendor of this JVMInfo.

        The vendor of the Java Virtual Machine.  # noqa: E501

        :param vendor: The vendor of this JVMInfo.  # noqa: E501
        :type: str
        """
        self._vendor = vendor

    @property
    def version(self):
        """Gets the version of this JVMInfo.  # noqa: E501

        The version of the Java Virtual Machine.  # noqa: E501

        :return: The version of this JVMInfo.  # noqa: E501
        :rtype: str
        """
        return self._version

    @version.setter
    def version(self, version):
        """Sets the version of this JVMInfo.

        The version of the Java Virtual Machine.  # noqa: E501

        :param version: The version of this JVMInfo.  # noqa: E501
        :type: str
        """
        self._version = version

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(JVMInfo, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, JVMInfo):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"[email protected]"
] | |
5e0de4bfd6bde7090acbcccc3bf5523f1cca16b6 | 75ec986d34d5391d46d6469c513626f69f5d978d | /Incepator/listcomprehensions/listcomprehensions1.py | 68f786bc3bef3a729cc3f9c8b4b0748696eef5ee | [] | no_license | CatrunaMarius/python | d9f8dc221458e4b65c3f801daf3b59aa2b946358 | d063bffb4eafa56ac1e205c2d39fc893ab50e992 | refs/heads/master | 2020-04-24T05:23:22.756002 | 2020-01-06T11:56:12 | 2020-01-06T11:56:12 | 171,703,482 | 0 | 0 | null | 2019-02-20T16:12:39 | 2019-02-20T15:59:08 | null | UTF-8 | Python | false | false | 181 | py | #fillind with random numbers
from random import randint
low = int(input("Enter min: "))
high = int(input("Enter max: "))
a = [randint(low, high) for i in range(10)]
print(a) | [
"[email protected]"
] | |
f039e27d9ed5f302d3c0eadb99c82005051d3598 | 66c9ce87b3462ec49caa8cd5286ffccc0e6f26ee | /dag.py | b415d17786098c013d11d9ce2e1e0304774e8779 | [
"MIT"
] | permissive | kendricktan/dpim | e0b839f8ce9e65c48b020819b5cd96630ec66789 | 0b8c95f625c3b136f3b938ea7663c90bcb73c5e4 | refs/heads/master | 2020-03-12T03:25:16.904858 | 2018-04-21T08:43:45 | 2018-04-21T08:43:45 | 130,424,055 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,979 | py | import hashlib
import crypto
from collections import namedtuple
MIN_WORK = '000'
# directed acyclic graph primitives
# OpenTx
# params:
# account => which blockchain account you trying to open
# hash => hash of the opentx
# work => work done to get the 'valid' txid (starts with X amount of zeros)
OpenTx = namedtuple("OpenTx", "account hash work")
# SendTx
# params:
# prev => previous hash
# hash => hash of the sendtx
# rpk => random pk (for stealth address)
# signature => signature to verify that the sender authorized it
# msg => msg type
# work => work done to get the 'valid' hash (starts with X amount of zeros)
SendTx = namedtuple("SendTx", "prev hash rpk destination signature msg work")
# ReceiveTx
# params:
# prev => previous hash
# hash => hash of the receive tx
# source => source of the receiveTx (hash of the sendtx)
# work => work done to get the 'valid' hash (starts with X amount of zeros)
ReceiveTx = namedtuple("ReceiveTx", "prev hash source work")
# DAG
class DAG:
    def __init__(self, usedtxids={}, cachehash={}, cachedmessages={}, accounts={}):
        """
        params:
            usedtxids       => {}
            cachehash       => {}
            cachedmessages  => {}
            accounts        => {}

        usedtxids is a dictionary containing used send txids
        cachehash is a dictionary where key: hash, value: tx
        cachedmessages is a dictionary where key: hash, value: message
        accounts is a dictionary where each key is an address e.g.

        accounts = {
            'abcdefgh': {
                'latest': 5,
                1: tx(),
                2: tx(),
                3: tx()
            }
        }
        """
        self.usedtxids = usedtxids
        self.accounts = accounts
        self.cachehash = cachehash
        self.cachedmessages = cachedmessages

    def insert_tx(self, pk, tx):
        t = type(tx)
        if t == OpenTx:
            self._insert_open(pk, tx)
        elif t == SendTx:
            self._insert_send(pk, tx)
        elif t == ReceiveTx:
            self._insert_receive(pk, tx)
        self.cachehash[tx.hash] = tx

    def _insert_open(self, pk, tx):
        if not valid_work(tx):
            return
        # Don't overwrite existing account
        if self.accounts.get(pk, None) is not None:
            return
        self.accounts[pk] = {
            'latest': 0,
            0: tx
        }

    def _insert_send(self, pk, tx):
        if not (valid_signature(pk, tx) and valid_work(tx)):
            return
        if not (self.get_latest(pk).hash == tx.prev):
            return
        new_latest = self.accounts[pk]['latest'] + 1
        self.accounts[pk]['latest'] = new_latest
        self.accounts[pk][new_latest] = tx

    def _insert_receive(self, pk, tx):
        if not valid_work(tx):
            return
        if not (self.get_latest(pk).hash == tx.prev):
            return
        new_latest = self.accounts[pk]['latest'] + 1
        self.accounts[pk]['latest'] = new_latest
        self.accounts[pk][new_latest] = tx

    def get_message(self, h):
        return self.cachedmessages.get(h, None)

    def get_messages(self):
        return self.cachedmessages

    def add_message(self, h, decrypted_msg):
        self.cachedmessages[h] = decrypted_msg

    def get_latest(self, pk):
        pk_dict = self.accounts.get(pk, {})
        if pk_dict == {}:
            return None
        latest_no = pk_dict['latest']
        return pk_dict[latest_no]

    def get_account(self, pk):
        return self.accounts.get(pk, {})

    def get_hash(self, h):
        if self.hash_received(h):
            return self.cachehash[h]
        return None

    def hash_received(self, h):
        return h in self.cachehash


# Hashes an opentx
def hash_opentx(opentx):
    bytestr = str.encode("account:{},work:{}".format(
        opentx.account, opentx.work))
    h = hashlib.sha256(bytestr).hexdigest()
    return h


# Hashes a send tx
def hash_sendtx(sendtx):
    bytestr = str.encode(
        "prev:{},destination:{},rpk:{},signature:{},msg:{},work:{}".format(
            sendtx.prev, sendtx.destination, sendtx.rpk, sendtx.signature, sendtx.msg, sendtx.work
        )
    )
    h = hashlib.sha256(bytestr).hexdigest()
    return h


# Hashes a receive tx
def hash_receivetx(receivetx):
    bytestr = str.encode(
        "prev:{},source:{},work:{}".format(
            receivetx.prev, receivetx.source, receivetx.work
        )
    )
    h = hashlib.sha256(bytestr).hexdigest()
    return h


# Hashes tx
def hash_tx(tx):
    t = type(tx)
    if t != OpenTx and t != SendTx and t != ReceiveTx:
        return -1
    if t == OpenTx:
        h = hash_opentx(tx)
    elif t == SendTx:
        h = hash_sendtx(tx)
    elif t == ReceiveTx:
        h = hash_receivetx(tx)
    return h


def prep_signature(sendtx):
    s = "prev:{},destination:{},rpk:{},msg:{}".format(
        sendtx.prev, sendtx.destination, sendtx.rpk, sendtx.msg)
    return s


def sign_sendtx(sk, sendtx):
    sk = crypto.decodeint(sk[:64].decode('hex'))
    msg = prep_signature(sendtx)
    pk = crypto.publickey(sk)
    sig = crypto.signature(msg, sk, pk)
    # Reconstruct named tuple
    tx_dict = sendtx._asdict()
    tx_dict['signature'] = sig.encode('hex')
    return SendTx(**tx_dict)


def valid_work(tx):
    # Tx hash
    h = hash_tx(tx)
    return h[:len(MIN_WORK)] == MIN_WORK


def valid_signature(pk, sendtx):
    sig = sendtx.signature.decode('hex')
    msg = prep_signature(sendtx)
    return crypto.checkvalid(sig, msg, pk[:64].decode('hex'))


def mine_tx(tx):
    # Tx hash
    h = hash_tx(tx)
    # Tx type
    t = type(tx)
    if h == -1:
        return -1
    # Valid work done
    # Python and recursion doesn't work well
    # So i'll have to use a while loop
    while not valid_work(tx):
        d = tx._asdict()
        d['work'] = tx.work + 1
        tx = t(**d)
        h = hash_tx(tx)
    d = tx._asdict()
    d['hash'] = h
    return t(**d)
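# Minimal usage sketch (assumed workflow, not part of the original module):
# mine an OpenTx until its hash carries the MIN_WORK prefix, then insert it.
#
#   open_tx = mine_tx(OpenTx(account="abc123", hash=None, work=0))
#   dag = DAG()
#   dag.insert_tx("abc123", open_tx)      # stored under accounts["abc123"]
#   dag.get_latest("abc123") is open_tx   # True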
| [
"[email protected]"
] | |
716ca38c7a15e1e3551f77c15814d1f2c1cf5fd2 | caed98915a93639e0a56b8296c16e96c7d9a15ab | /Array and Strings/Contains Duplicate.py | 40c416f92193371c87b997873aa6ff57e39222cc | [] | no_license | PiyushChandra17/365-Days-Of-LeetCode | 0647787ec7e8f1baf10b6bfc687bba06f635838c | 7e9e9d146423ca2c5b1c6a3831f21dd85fa376d5 | refs/heads/main | 2023-02-13T10:41:36.110303 | 2021-01-17T11:58:51 | 2021-01-17T11:58:51 | 319,974,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | class Solution:
    def containsDuplicate(self, nums: List[int]) -> bool:
        return len(nums) > len(set(nums))
| [
"[email protected]"
] | |
7d339ebc3f20cf56e6b20c247266259ba6e349f3 | 861db97defcdadae5f10263b9a6d41e7d0b85131 | /ex111/utilidadescev/moeda/__init__.py | bd0def54dfeb7a94e37473fc89d39d79370bb5b3 | [] | no_license | gguilherme42/CursoEmVideo_Python | 0483f092fe6563bc97a260ca07c2d4f1f882ac61 | 54eec56e98ae71dbb2ba02eb6904cd236a42be70 | refs/heads/master | 2021-01-06T15:15:25.194038 | 2020-04-27T22:19:42 | 2020-04-27T22:19:42 | 241,374,850 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | '''
Exercise 110:
Create a package called utilidadezCeV with two
internal modules called moeda and dado.
Move all the functions used in challenges 107,
108 and 109 into the first package and keep everything working.
'''


def moeda(n=0, moeda='R$'):
    return f'{moeda}{n:.2f}'.replace('.', ',')


def aumentar(n=0, a=0, f=True):
    c = (n * a) / 100
    l = c + n
    return l if not f else moeda(l)


def diminuir(n=0, a=0, f=True):
    c = (n * a) / 100
    l = n - c
    return l if not f else moeda(l)


def dobro(n=0, f=True):
    d = n * 2
    return d if not f else moeda(d)


def metade(n=0, f=True):
    m = n / 2
    return m if not f else moeda(m)


def resumo(n, a=0, b=0, f=True):
    print('=' * 30)
    print(f' {"SUMMARY":^25} ')
    print('-' * 30)
    print(f'{"- Increase: ":<5} {aumentar(n, a, f):>10}')
    print(f'{"- Decrease: ":<5} {diminuir(n, b, f):>10}')
    print(f'{"- Double: ":<5} {dobro(n, f):>10}')
    print(f'{"- Half: ":<5} {metade(n, f):>10}')
    print('=' * 30)
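# Example (hypothetical call): resumo(100, 10, 20) prints a framed summary
# with the increased, decreased, doubled and halved values of R$100,00.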
| [
"[email protected]"
] | |
1ab177bf9094897a7f5e1adc529942f3e0d1ed73 | ccfe4e0bb18b46b4dd5ce4503ae54b1eaae3bba5 | /scripts/queue_splitter.py | 3e43004ca4b1cfebde60515da52ec0887e608299 | [
"ISC"
] | permissive | dimitri/skytools | 5cb67f5f581ab16acd8b84d78ba5cc9e05b9cb56 | 37ec8fc09ba897b02a1ca871055eb69a00deceae | refs/heads/master | 2021-01-15T18:05:21.396894 | 2011-10-17T15:06:18 | 2011-10-17T15:06:18 | 1,082,431 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | #! /usr/bin/env python
"""Puts events into queue specified by field from 'queue_field' config parameter.
Config parameters::

    ## Parameters for queue_splitter

    # database locations
    src_db = dbname=sourcedb_test
    dst_db = dbname=destdb_test

    # event fields from where target queue name is read
    #queue_field = extra1
"""
import sys
import pkgloader
pkgloader.require('skytools', '3.0')
import pgq
class QueueSplitter(pgq.SerialConsumer):
__doc__ = __doc__
def __init__(self, args):
pgq.SerialConsumer.__init__(self, "queue_splitter", "src_db", "dst_db", args)
def process_remote_batch(self, db, batch_id, ev_list, dst_db):
cache = {}
queue_field = self.cf.get('queue_field', 'extra1')
for ev in ev_list:
row = [ev.type, ev.data, ev.extra1, ev.extra2, ev.extra3, ev.extra4, ev.time]
            queue = getattr(ev, queue_field)
if queue not in cache:
cache[queue] = []
cache[queue].append(row)
# should match the composed row
fields = ['type', 'data', 'extra1', 'extra2', 'extra3', 'extra4', 'time']
# now send them to right queues
curs = dst_db.cursor()
for queue, rows in cache.items():
pgq.bulk_insert_events(curs, rows, fields, queue)
if __name__ == '__main__':
script = QueueSplitter(sys.argv[1:])
script.start()
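# Illustrative .ini for this service (a sketch: only src_db/dst_db/queue_field
# come from the docstring above; the section header and job_name key are
# assumptions about the usual skytools layout):
#
#   [queue_splitter]
#   job_name = queue_splitter_test
#   src_db = dbname=sourcedb_test
#   dst_db = dbname=destdb_test
#   queue_field = extra1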
| [
"[email protected]"
] | |
1d42b34d4e6c6c75a0456bceecd8e91cee3bddd9 | d17a8870ff8ac77b82d0d37e20c85b23aa29ca74 | /lite/tests/unittest_py/op/test_split_op.py | fed2f35985a15d60758a08054cc66d10dbd122ef | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle-Lite | 4ab49144073451d38da6f085a8c56822caecd5b2 | e241420f813bd91f5164f0d9ee0bc44166c0a172 | refs/heads/develop | 2023-09-02T05:28:14.017104 | 2023-09-01T10:32:39 | 2023-09-01T10:32:39 | 104,208,128 | 2,545 | 1,041 | Apache-2.0 | 2023-09-12T06:46:10 | 2017-09-20T11:41:42 | C++ | UTF-8 | Python | false | false | 9,754 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
from functools import partial
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
import numpy as np
class TestSplitOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.Host, [PrecisionType.FP32, PrecisionType.INT64],
DataLayoutType.NCHW,
thread=[1, 4])
opencl_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_places)
metal_places = [
Place(TargetType.Metal, PrecisionType.FP32,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.Metal, PrecisionType.FP16,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.ARM, PrecisionType.FP32),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=metal_places)
self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)
self.enable_devices_on_nnadapter(device_names=[
"kunlunxin_xtcl", "nvidia_tensorrt", "intel_openvino"
])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
x_dtype = program_config.inputs["input_data"].dtype
#check config
if predictor_config.precision() == PrecisionType.INT64:
if x_dtype != np.int64:
return False
return True
def sample_program_configs(self, draw):
in_shape = draw(
st.sampled_from([[6, 9, 24], [6, 24, 24], [6, 24], [24, 24], [24]
]))
batch = draw(st.integers(min_value=1, max_value=10))
in_shape.insert(0, batch)
sections = draw(
st.sampled_from([[], [3, 3], [2, 4], [10, 14], [2, 2, 2],
[1, 3, 2], [3, 3, 3], [3, 7, 14]]))
input_num = draw(st.sampled_from([0, 1]))
num = draw(st.sampled_from([0, 2, 3]))
input_axis = draw(st.sampled_from([0, 1, 2, 3]))
input_type = draw(st.sampled_from(["float32", "int32", "int64"]))
Out = draw(
st.sampled_from([["output_var0", "output_var1"],
["output_var0", "output_var1", "output_var2"]]))
        # Sections and num cannot both be 0.
assume((num != 0 and len(sections) == 0) or (num == 0 and
len(sections) != 0))
        # the axis must index an existing input dimension
assume(input_axis < len(in_shape))
        # When sections and num are not both 0, sections takes priority.
        # The sum of sections must equal the input size along the split axis.
if len(sections) != 0:
assume(len(Out) == len(sections))
assume(in_shape[input_axis] % len(sections) == 0)
sum = 0
for i in sections:
sum += i
assume(sum == in_shape[input_axis])
if num != 0:
assume(len(Out) == num)
assume(in_shape[input_axis] % num == 0)
if input_num == 0:
assume((len(in_shape) == 2) & (in_shape[1] == 24) & (
sections == [10, 14]) & (len(Out) == 2))
def generate_input(*args, **kwargs):
if input_type == "float32":
return np.random.normal(0.0, 1.0, in_shape).astype(np.float32)
elif input_type == "int32":
return np.random.normal(0.0, 1.0, in_shape).astype(np.int32)
elif input_type == "int64":
return np.random.normal(0.0, 1.0, in_shape).astype(np.int64)
def generate_AxisTensor(*args, **kwargs):
return np.ones([1]).astype(np.int32)
def generate_SectionsTensorList1(*args, **kwargs):
return np.array([10]).astype(np.int32)
def generate_SectionsTensorList2(*args, **kwargs):
return np.array([14]).astype(np.int32)
dics_intput = [{
"X": ["input_data"],
"AxisTensor": ["AxisTensor"],
"SectionsTensorList":
["SectionsTensorList1", "SectionsTensorList2"]
}, {
"X": ["input_data"]
}]
dics_weight = [{
"AxisTensor": TensorConfig(data_gen=partial(generate_AxisTensor)),
"SectionsTensorList1":
TensorConfig(data_gen=partial(generate_SectionsTensorList1)),
"SectionsTensorList2":
TensorConfig(data_gen=partial(generate_SectionsTensorList2))
}, {}]
ops_config = OpConfig(
type="split",
inputs=dics_intput[input_num],
outputs={"Out": Out},
attrs={"sections": sections,
"num": num,
"axis": input_axis})
program_config = ProgramConfig(
ops=[ops_config],
weights=dics_weight[input_num],
inputs={
"input_data": TensorConfig(data_gen=partial(generate_input))
},
outputs=Out)
return program_config
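    # Worked example (illustrative): with in_shape=[6, 24], axis=1 and
    # sections=[10, 14], the split op produces two outputs shaped [6, 10] and
    # [6, 14]; with num=2 instead, it produces two [6, 12] outputs.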
def sample_predictor_configs(self):
atol, rtol = 1e-5, 1e-5
config_lists = self.get_predictor_configs()
for config in config_lists:
if config.target() in [TargetType.Metal]:
atol, rtol = 1e-3, 1e-3
return self.get_predictor_configs(), ["split"], (atol, rtol)
def add_ignore_pass_case(self):
def teller1(program_config, predictor_config):
x_shape = list(program_config.inputs["input_data"].shape)
if predictor_config.target() == TargetType.Metal:
if len(x_shape) != 4:
return True
self.add_ignore_check_case(
teller1, IgnoreReasons.ACCURACY_ERROR,
"The op output has diff in a specific case. We need to fix it as soon as possible."
)
def teller2(program_config, predictor_config):
x_dtype = program_config.inputs["input_data"].dtype
x_shape = list(program_config.inputs["input_data"].shape)
out_shape = list(program_config.outputs)
axis = program_config.ops[0].attrs["axis"]
num = program_config.ops[0].attrs["num"]
if predictor_config.target() == TargetType.OpenCL:
if num != 2 or x_dtype != np.float32:
return True
if predictor_config.target() == TargetType.Metal:
if len(x_shape) == 2 or axis == 0 or axis == 1:
return True
if x_dtype != np.float32:
return True
self.add_ignore_check_case(
teller2, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Lite does not support this op in a specific case. We need to fix it as soon as possible."
)
def _teller3(program_config, predictor_config):
if "nvidia_tensorrt" in self.get_nnadapter_device_name():
in_shape = program_config.inputs["input_data"].shape
axis = program_config.ops[0].attrs["axis"]
in_dtype = program_config.inputs["input_data"].dtype
if len(in_shape) == 1 or axis == 0 or in_dtype != np.float32:
return True
self.add_ignore_check_case(
_teller3, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Lite does not support 'in_shape_size == 1' or 'axis == 0' or 'in_dtype != float32' on NvidiaTensorrt."
)
def test(self, *args, **kwargs):
target_str = self.get_target()
max_examples = 50
if target_str == "OpenCL":
# Make sure to generate enough valid cases for OpenCL
max_examples = 100
if target_str == "Metal":
            # Make sure to generate enough valid cases for Metal
max_examples = 500
if self.get_nnadapter_device_name() == "kunlunxin_xtcl":
max_examples = 500
self.run_and_statis(
quant=False, min_success_num=25, max_examples=max_examples)
if __name__ == "__main__":
unittest.main(argv=[''])
| [
"[email protected]"
] | |
7c382b65690bd0980fec8ae8ef8a10b026cdc09a | 098af073963b07bbf57156c7137fa874fed53e08 | /PCApp/appset/appset/modules/lunar/algorithm.py | 09dbc53183fc12743d2e50f1cec418324f2c09f8 | [] | no_license | mornhuang/openprj | 3917e9b9d4356ace62e23d8ec39c4e1960ac4a53 | 834f323c72c6c158d9eb9ebad439e6c7ed0dd297 | refs/heads/master | 2021-01-20T06:57:41.636542 | 2014-01-23T12:33:41 | 2014-01-23T12:33:41 | 16,172,121 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,996 | py | #!usr/bin/env python
#-*- coding:utf-8 -*-
"""
Date: 2013-11-24 18:21
Algorithms for converting solar dates to lunar dates.
"""
import math
import calendar
_BEGINNING_YEAR = -849  # records begin at 850 BC (astronomical year -849)
_MAX_YEAR = 2100  # last year covered by the tables
# One character per year encoding that year's leap month (0 = none), starting at year -849
_YEAR_LEAP = '0c0080050010a0070030c0080050010a0070030c0080050020a0070030c00800' \
'50020a0070030c0090050020a0070030c0090050020a0060030c0060030c0090' \
'0600c0c0060c00c00c00c0c000600c0c0006090303030006000c00c060c0006c' \
'00000c0c0c0060003030006c00009009c0090c00c009000300030906030030c0' \
'c00060c00090c0060600c0030060c00c003006009060030c0060060c0090900c' \
'00090c0090c00c006030006060003030c0c00030c0060030c0090060030c0090' \
'300c0080050020a0060030c0080050020b0070030c0090050010a0070030b009' \
'0060020a0070040c0080050020a0060030c0080050020b0070030c0090050010' \
'a0070030b0090060020a0070040c0080050020a0060030c0080050020b007003' \
'0c0090050000c009009090090090900900900909009009090090090090900900' \
'9090090090090900900909009009009090090090900900909009009009090090' \
'0909009009009090090090900900900909009009090060030c0090050010a007' \
'0030b008005001090070040c0080050020a0060030c0090040010a0060030c00' \
'90050010a0070030b0080050010a008005001090050020a0060030c008004001' \
'0a0060030c0090050010a0070030b0080050010a0070030b0080050010900700' \
'40c0080050020a0060030c0080040010a0060030c0090050010a0070030b0080' \
'05001090070040c0080050020a0060030c0080040010a0060030c0090050010a' \
'0060030c0090050010a0070030b008005001090070040c0080050020a0060030' \
'c0080040010a0070030b0080050010a0070040c0080050020a0060030c008004' \
'0010a0070030c0090050010a0070030b0080050020a0060030c0080040010a00' \
'60030c0090050050020a0060030c0090050010b0070030c0090050010a007004' \
'0c0080040020a0060030c0080050020a0060030c0090050010a0070030b00800' \
'40020a0060040c0090050020b0070030c00a0050010a0070030b0090050020a0' \
'070030c0080040020a0060030c0090050010a0070030c0090050030b00700500' \
'1090050020a007004001090060020c0070050c0090060030b0080040020a0060' \
'030b0080040010a0060030b0080050010a0050040c0080050010a0060030c008' \
'0050010b0070030c007005001090070030b0070040020a0060030c0080040020' \
'a0070030b0090050010a0060040c0080050020a0060040c0080050010b007003' \
'0c007005001090070030c0080050020a0070030c0090050020a0070030c00900' \
'50020a0060040c0090050020a0060040c0090050010b0070030c0080050030b0' \
'07004001090060020c008004002090060020a008004001090050030b00800400' \
'20a0060040b0080040c00a0060020b007005001090060030b0070050020a0060' \
'020c008004002090070030c008005002090070040c0080040020a0060040b009' \
'0050010a0060030b0080050020a0060040c0080050010b007003001080050010' \
'90070030c0080050020a007003001090050030a0070030b0090050020a006004' \
'0c0090050030b0070040c0090050010c0070040c0080060020b00700400a0900' \
'60020b007003002090060020a005004001090050030b007004001090050040c0' \
'080040c00a0060020c007005001090060030b0070050020a0060020c00800400' \
'2090060030b008004002090060030b0080040020a0060040b0080040010b0060' \
'030b0070050010a0060040020700500308006004003070050030700600400307' \
'0050030800600400307005004090060040030700500409006005002070050030' \
'a006005003070050040020600400206005003002060040030700500409006004' \
'0030700500408007005003080050040a00600500307005004002060050030800' \
'5004002060050020700500400206005003070060040020700500308006004003' \
'07005004080060040a0060050030800500400207005004090060040020600500' \
'30b0060050020700500308006004003070050040800600400307005004080060' \
'040020'
_WEEKDAY_EN = ("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday")
_WEEKDAY_CN = ("日", "一", "二", "三", "四", "五", "六")
_CONSTELLATION = ((321, 420, '白羊座'), # (Begin Date, End Date, Constellation)
(421, 521, '金牛座'),
(522, 621, '双子座'),
(622, 722, '巨蟹座'),
(723, 823, '狮子座'),
(824, 923, '处女座'),
(924, 1023, '天秤座'),
(1024, 1122, '天蝎座'),
(1123, 1221, '射手座'),
(1222, 120, '魔羯座'),
(121, 219, '水瓶座'),
(220, 320, '双鱼座'))
_GAN_CN = ("癸", "甲", "乙", "丙", "丁", "戊", "己", "庚", "辛", "壬",)
_ZHI_CN = ("亥", "子", "丑", "寅", "卯", "辰", "巳", "午", "未", "申", "酉", "戌")
_ZODIAC_CN = ("猪", "鼠", "牛", "虎", "兔", "龙", "蛇", "马", "羊", "猴", "鸡", "狗")
_ZODIAC_EN = ("Pig", "Mouse", "Ox", "Tiger", "Rabbit", "Dragon", "Snake", "Horse",
"Goat", "Monkey", "Rooster", "Dog")
_LUNAR_NUM_ = ('初', '一', '二', '三', '四', '五', '六', '七', '八', '九', '十', '十一', '十二')
_SOLAR_TERM = ('小寒', '大寒', '立春', '雨水', '惊蛰', '春分', '清明', '谷雨', '立夏', '小满',
'芒种', '夏至', '小暑', '大暑', '立秋', '处暑', '白露', '秋分', '寒露', '霜降',
'立冬', '小雪', '大雪', '冬至')
_CALENDAR_TYPE = ('不存在', '儒略历', '格里历')
def calendar_type(y, m, d, opt):
"""
    Decide whether a date falls in the Gregorian or the Julian calendar.
    Args: solar date y-m-d (m runs 1..12 throughout); opt=1/2/3 selects the
    standard calendar, forced Gregorian, or forced Julian respectively.
    Returns: 1 (Gregorian), 0 (Julian) or -1 (date does not exist).
>>> calendar_type(2013, 10, 11, 1)
1
>>> calendar_type(2013, 2, 50, 1)
-1
>>> calendar_type(2013, 2, -1, 1)
-1
>>> calendar_type(2000, 2, 29, 1)
1
>>> calendar_type(2013, 2, 29, 1)
-1
"""
    days_of_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]  # fixed: August has 31 days
if opt == 1:
        if (y > 1582) or (y == 1582 and m > 10) or (y == 1582 and m == 10 and d > 14):
if (y % 400 == 0) or (y % 4 == 0 and y % 100 != 0):
days_of_month[2] += 1
if 0 < m <= 12 and 0 < d <= days_of_month[m]:
return 1
else:
return -1
        elif y == 1582 and m == 10 and 5 <= d <= 14:
            return -1  # these ten days never existed (the 1582 Gregorian reform)
else:
if y % 4 == 0:
days_of_month[2] += 1
if 0 < m <= 12 and 0 < d <= days_of_month[m]:
return 0
else:
return -1
elif opt == 2:
return 1
else:
return 0
def date_to_days(y, m, d):
"""
    Return the day-of-year of y-m-d (days elapsed within year y).
>>> date_to_days(1998, 3, 1)
    60
>>> date_to_days(2013, 2, 5)
36
>>> date_to_days(2013, 11, 1)
305
"""
days = 0
typal = calendar_type(y, m, d, 1)
dm = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if typal != 0:
if (y % 100 != 0 and y % 4 == 0) or (y % 400 == 0):
dm[2] += 1
else:
if y % 4 == 0:
dm[2] += 1
for i in range(m):
days += dm[i]
days += d
if y == 1582:
if typal == 1:
days -= 10
if typal == -1:
days = -1
return days
def days_to_date(y, x):
"""
    Return 100*m + d for the date in solar year y whose day-of-year is x
    (e.g. y=2000, x=275 gives 1001, i.e. October 1st).
>>> days_to_date(2013, 36)
205
>>> days_to_date(2000, 275)
1001
>>> days_to_date(2013, 305)
1101
"""
m = 1
for i in range(1, 13):
d = date_to_days(y, i+1, 1) - date_to_days(y, i, 1)
if x <= d or i == 12:
m = i
break
else:
x -= d
return 100 * m + x
#def days_from_era(y):
# """
#    Return the era offset of year y (days from Jan 1 of year 1, same calendar, to Jan 1 of y).
# """
#    days = int((y-1)*365 + (y-1)/4)  # Julian era offset
#
# if y > 1582:
#    days += int(round(-((y-1)/100) + (y-1)/400))  # Gregorian era offset
#
# return days
def standard_days(y, m, d):
"""
    Return the equivalent standard day count of y-m-d: days since Jan 1 of
    year 1 on a scale that is consistent across Gregorian and Julian dates.
>>> standard_days(1991, 4, 2)
735173
"""
    days = int((y - 1) * 365 + (y - 1) / 4 + date_to_days(y, m, d) - 2)  # Julian equivalent days
if y > 1582:
        days += int(round(-((y - 1) / 100) + ((y - 1) / 400) + 2))  # Gregorian correction
return days
#def julian_day(y, m, d, h, minute, sec, zone):
# """
#    Return the Julian Day: days from noon GMT on Jan 1, 4713 BC (Julian
#    calendar) to y-m-d h:min:sec in time zone `zone`.
# """
# typal = calendar_type(y, m, d, 1)
# jt = (h + (minute + sec / 60.0) / 60.0) / 24.0 - 0.5 - zone / 24.0
# jd = standard_days(y, m, d) + 1721425 + jt if typal != 0 else standard_days(y, m, d) + 1721425 + jt
# return jd
def day_of_week_num(y, m, d):
"""
    Return the weekday number of y-m-d, with 0 meaning Sunday.
>>> day_of_week_num(2013, 11, 25)
1
>>> day_of_week_num(2012, 6, 10)
0
"""
    # same as date(y, m, d).weekday()? (not quite: here 0 = Sunday, not Monday)
return standard_days(y, m, d) % 7
def day_of_week_str(y, m, d):
"""
    Return the Chinese weekday name of y-m-d.
"""
return _WEEKDAY_CN[day_of_week_num(y, m, d)]
def is_weekday(y, m, d):
"""
    Return True if y-m-d is a Saturday or Sunday (i.e. a weekend day).
"""
weekday = day_of_week_num(y, m, d)
if weekday == 0 or weekday == 6:
return True
else:
return False
def solar_term(y, n, t):
"""
    Return the day-of-year of the n-th solar term of year y (1 = Minor Cold); t=0/1 selects mean or apparent terms.
"""
jd = y * (365.2423112 - 6.4e-14*(y-100)*(y-100) - 3.047e-8*(y-100)) \
        + 15.218427 * n + 1721050.71301  # Julian Day
    zeta = 3.0e-4 * y - 0.372781384 - 0.2617913325 * n  # angle argument
yd = (1.945 * math.sin(zeta) - 0.01206 * math.sin(2 * zeta)) \
        * (1.048994 - 2.583e-5 * y)  # annual-equation mean term
    sd = -18e-4 * math.sin(2.313908653 * y - 0.439822951 - 3.0443 * n)  # new-moon difference mean term
return jd + yd + sd - standard_days(y, 1, 0) - 1721425 if t == 1 \
else jd - standard_days(y, 1, 0) - 1721425
def solar_term_str(y, m, d):
"""
    Return the Chinese name of the solar term on y-m-d ("" if none).
"""
term = ""
for i in range(24):
r = days_to_date(y, int(solar_term(y, i + 1, 1)))
if r == m * 100 + d:
term = _SOLAR_TERM[i]
break
return term
def solar_term_str_year(y):
"""
    Return a dict mapping 'MMDD' strings to the Chinese solar-term names of year y.
    >>> terms = solar_term_str_year(2013)
"""
term_dic = {}
tmpl_str = "{:04d}"
for i in range(24):
r = days_to_date(y, int(solar_term(y, i + 1, 1)))
term = _SOLAR_TERM[i]
term_dic[tmpl_str.format(r)] = term
return term_dic
def tail_func(x):
"""
    Return the fractional part of x.
"""
return x - math.floor(x)
def rem_func(x, w):
"""
    Generalized remainder of x modulo w.
"""
return tail_func(x / w) * w
def round_func(x):
"""
    Round x to the nearest integer (half up).
"""
return math.floor(x + 0.5)
def lunar_gan(x):
"""
    Return the Heavenly Stem index for sexagenary-cycle number x (e.g. 33 -> 3).
>>> x = lunar_gan(lunar_year_ganzhi(2013, 11, 25, 19))
>>> x
0
>>> _GAN_CN[x]
'癸'
"""
return x % 10
def lunar_zhi(x):
"""
    Return the Earthly Branch index for sexagenary-cycle number x (e.g. 33 -> 9).
>>> x = lunar_zhi(lunar_year_ganzhi(2013, 11, 25, 19))
>>> x
6
>>> _ZHI_CN[x]
'巳'
"""
return x % 12
def lunar_ganzhi_str(y, m, d):
"""
    Return the Chinese stem-branch (ganzhi) name of the year containing y-m-d.
"""
year_gan = lunar_gan(lunar_year_ganzhi(y, m, d, 12))
year_zhi = lunar_zhi(lunar_year_ganzhi(y, m, d, 12))
return _GAN_CN[year_gan] + _ZHI_CN[year_zhi]
def angle_func(x, t, c1, t0, t2, t3):
"""
    Angle helper (intended as private).
"""
return tail_func(c1 * x) * 2 * math.pi + t0 - t2 * t * t - t3 * t * t * t
def leap_month(y):
"""
    Return the leap month of lunar year y, or 0 if there is none.
>>> leap_month(2013)
0
"""
start = y - _BEGINNING_YEAR
leap = ord(_YEAR_LEAP[start:start + 1])
if leap == ord('a'):
leap = ord('0') + 10
elif leap == ord('b'):
leap = ord('0') + 11
elif leap == ord('c'):
leap = ord('0') + 12
return leap - ord('0')
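# Note on the encoding: _YEAR_LEAP stores one character per year, with '1'..'9'
# naming the leap month and 'a'/'b'/'c' standing for months 10/11/12, which is
# exactly what leap_month() decodes above.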
def lunar_day_num(y, m, d):
"""
    Return the lunar day-of-month of y-m-d (-1 if it cannot be determined).
    >>> day = lunar_day_num(1991, 4, 2)
"""
date = -1
rpi = 180 / math.pi
zone = 8.0
t = (y - 1899.5) / 100.0
ms = math.floor((y - 1900) * 12.3685)
f0 = angle_func(ms, t, 0, 0.75933, 2.172e-4, 1.55e-7) + 0.53058868 * ms - 8.37e-4 * t + zone / 24.0 + 0.5
fc = 0.1734 - 3.93e-4 * t
j0 = 693595 + 29 * ms
aa0 = angle_func(ms, t, 0.08084821133, 359.2242 / rpi, 0.0000333 / rpi, 0.00000347 / rpi)
ab0 = angle_func(ms, t, 7.171366127999999e-2, 306.0253 / rpi, -0.0107306 / rpi, -0.00001236 / rpi)
ac0 = angle_func(ms, t, 0.08519585128, 21.2964 / rpi, 0.0016528 / rpi, 0.00000239 / rpi)
for i in range(-1, 14):
aa = aa0 + 0.507984293 * i
ab = ab0 + 6.73377553 * i
ac = ac0 + 6.818486628 * i
f1 = f0 + 1.53058868 * i + fc * math.sin(aa) - 0.4068 * math.sin(ab) \
+ 0.0021 * math.sin(2 * aa) + 0.0161 * math.sin(2 * ab) + 0.0104 * math.sin(2 * ac) \
- 0.0074 * math.sin(aa - ab) - 0.0051 * math.sin(aa + ab)
j = j0 + 28 * i + f1
diff = standard_days(y, m, d) - math.floor(j)
if 0 <= diff <= 29:
date = diff + 1
return date
def lunar_day_to_str(day):
"""
    Return the Chinese characters for the given lunar day-of-month.
>>> day = lunar_day_num(2013, 11, 27)
>>> lunar_day_to_str(day)
'廿五'
"""
    assert 0 < day <= 30, "day must be between 1 and 30"
if day <= 10:
return "初" + _LUNAR_NUM_[day]
elif day < 20:
return "十" + _LUNAR_NUM_[day % 10]
elif day == 20:
return "二十"
elif day < 30:
return "廿" + _LUNAR_NUM_[day % 10]
elif day == 30:
return "三十"
def lunar_day_str(y, m, d):
"""
    Return the Chinese characters for the lunar day-of-month of y-m-d.
"""
return lunar_day_to_str(lunar_day_num(y, m, d))
def lunar_month_num(y, m, d):
"""
    Return the lunar month of y-m-d; a leap month is returned as a negative number.
"""
lunar_date = lunar_day_num(y, m, d)
lunar_days = lunar_date - math.floor(lunar_date / 100) * 100
    leap_num = 0  # total leap months from -849 up to the current year
for i in range(-849, y + 1):
if leap_month(i) != 0:
leap_num += 1
non_leap = round_func((standard_days(y, m, d)
- standard_days(-849, 1, 21)
- lunar_days) / 29.530588) - leap_num
    # effective month count from -849 to the current year (leap months excluded)
if y <= 240:
non_leap += 1
if y <= 237:
non_leap -= 1
if y < 24:
non_leap += 1
if y < 9:
non_leap -= 1
if y <= -255:
non_leap += 1
if y <= -256:
non_leap += 2
if y <= -722:
        non_leap += 1  # historical adjustments to the month numbering
lunar_month = round_func(rem_func(non_leap - 3.0, 12.0) + 1.0)
if lunar_month == leap_month(y - 1) and m == 1 and d < lunar_days:
        lunar_month *= -1  # a leap month at the end of y-1 that spills into y makes the start of y leap too
elif lunar_month == leap_month(y):
lunar_month *= -1
elif lunar_month < leap_month(y) or (m < lunar_month and leap_month(y)):
        lunar_month += 1  # y has a leap month not yet reached, so one month too many was subtracted above; compensate
else:
lunar_month = round_func(rem_func(lunar_month - 1, 12) + 1)
if lunar_month == 13:
return 1
else:
return lunar_month
def lunar_month_str(y, m, d):
"""
    Return the Chinese name of the lunar month of y-m-d.
"""
return lunar_month_to_str(lunar_month_num(y, m, d))
def lunar_month_to_str(month):
"""
    Return the Chinese name of a lunar month number (negative means a leap month).
"""
if month == -12:
return "闰十二"
elif month == -11:
return "闰十一"
elif month == -1:
return "闰正"
elif month < 0:
return "闰" + _LUNAR_NUM_[-month]
elif month == 1:
return "正"
elif month == 12:
return "腊"
else:
return _LUNAR_NUM_[month]
def lunar_year_ganzhi(y, m, d, h):
"""
    Return the sexagenary (ganzhi) year number (1-60) for y-m-d at hour h.
"""
    # TODO: also compute the month and day stem-branch numbers
if (date_to_days(y, m, d) + h / 24.0) < (solar_term(y, 3, 1) - 1.0):
        y -= 1  # before lichun (start of spring), the date belongs to the previous ganzhi year
return round_func(rem_func(y - 3.0, 60.0))
def solar_to_lunar(y, m, d):
"""
    Convert solar date y-m-d to lunar-calendar information, returned as a dict.
>>> a = solar_to_lunar(2013, 12, 22)
"""
rtn_data = {}
str_tmpl = "{:02d}{:02d}"
str_md = str_tmpl.format(m, d)
int_md = int(str_md, 10)
if calendar_type(y, m, d, 1) == -1 or y >= _MAX_YEAR:
return rtn_data
week_day = day_of_week_num(y, m, d)
rtn_data["week_day_num"] = week_day
rtn_data["week_day_str"] = _WEEKDAY_CN[week_day]
rtn_data["constellation"] = [asterism for date_b, date_e, asterism in _CONSTELLATION
if int_md >= date_b >= 1222 or int_md <= date_e <= 120
or date_b <= int_md <= date_e][0]
year_gan = lunar_gan(lunar_year_ganzhi(y, m, d, 12))
year_zhi = lunar_zhi(lunar_year_ganzhi(y, m, d, 12))
rtn_data["year_ganzhi"] = _GAN_CN[year_gan] + _ZHI_CN[year_zhi]
rtn_data["zodiac"] = _ZODIAC_CN[year_zhi]
rtn_data["solar_term"] = solar_term_str(y, m, d)
lunar_day = lunar_day_num(y, m, d)
rtn_data["lunar_day"] = lunar_day
rtn_data["lunar_day_cn"] = lunar_day_to_str(lunar_day)
lunar_month = lunar_month_num(y, m, d)
rtn_data["lunar_month"] = lunar_month
rtn_data["lunar_month_cn"] = lunar_month_to_str(lunar_month)
return rtn_data
def show_lunar_month(y, m):
"""
    Print the calendar of month m of year y with lunar annotations.
"""
data = solar_to_lunar(y, m, 1)
print("{:^53}".format(str(y) + "年 " + str(m) + "月"))
print("{:^53}".format(data["year_ganzhi"] + "年 " + data["zodiac"] + "年 "))
print("")
calendar.setfirstweekday(0)
print("一\t\t二\t\t三\t\t四\t\t五\t\t六\t\t日")
print("-"*53)
leaps, days = calendar.monthrange(y, m)
day = 1
line1 = ""
line2 = ""
for wd in range(7):
if wd < leaps:
line1 += "\t\t"
line2 += "\t\t"
else:
line1 += "{:2d}".format(day) + "\t\t"
data = solar_to_lunar(y, m, day)
if data["solar_term"]:
line2 += data["solar_term"] + "\t\t"
else:
line2 += data["lunar_day_cn"] + "\t\t"
day += 1
print(line1)
print(line2)
print("-"*53)
wd = 0
line1 = ""
line2 = ""
for day in range((7 - leaps + 1), days + 1):
line1 += "{:2d}".format(day) + "\t\t"
data = solar_to_lunar(y, m, day)
if data["solar_term"]:
line2 += data["solar_term"] + "\t\t"
else:
line2 += data["lunar_day_cn"] + "\t\t"
wd += 1
if wd == 7:
print(line1)
print(line2)
print("-"*53)
wd = 0
line1 = ""
line2 = ""
if line1:
print(line1)
print(line2)
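# Minimal demo (an assumption: this module is normally imported elsewhere, so
# the guard below is only for illustration):
if __name__ == '__main__':
    info = solar_to_lunar(2013, 12, 22)  # the date used in the doctest above
    for key in sorted(info):
        print(key, info[key])
    show_lunar_month(2013, 12)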
| [
"[email protected]"
] | |
3d8f5d713d82e1349cb2ac1a43f58dec0dd32a96 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/core/analysis/AnalysisTask.pyi | 2450f5f4a1e96ac652add615aed51765563f62a3 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | pyi | import ghidra.framework.cmd
import ghidra.framework.model
import ghidra.util.task
import java.lang
class AnalysisTask(ghidra.framework.cmd.BackgroundCommand):
def __init__(self, __a0: ghidra.app.plugin.core.analysis.AnalysisScheduler, __a1: ghidra.app.util.importer.MessageLog): ...
@overload
def applyTo(self, __a0: ghidra.framework.model.DomainObject) -> bool: ...
@overload
def applyTo(self, __a0: ghidra.framework.model.DomainObject, __a1: ghidra.util.task.TaskMonitor) -> bool: ...
def canCancel(self) -> bool: ...
def dispose(self) -> None: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getName(self) -> unicode: ...
def getStatusMsg(self) -> unicode: ...
def hasProgress(self) -> bool: ...
def hashCode(self) -> int: ...
def isModal(self) -> bool: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def taskCompleted(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"[email protected]"
] | |
84e01bd5afe8578563e994e94102bc3484de4101 | 61f921e1ee1d2461ba420ef33b854a53f2169c6f | /tests/test_designer.py | db1e8f9269f74adb0a3615adadb84e9a73b768fe | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jsheppard95/typhon | 420ef80b8568dec76bb0551301354144d9bf18ea | 072e3cd821068f7f148d9ed7bffe58abc5a4d7d4 | refs/heads/master | 2020-04-18T09:48:18.368334 | 2019-01-23T21:44:21 | 2019-01-23T21:44:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from typhon.designer import TyphonSignalPanelPlugin
def test_typhon_panel_plugin_smoke():
tpp = TyphonSignalPanelPlugin()
| [
"[email protected]"
] | |
2327f182abd9ddab4db6d24bdc1284e162f57a56 | 8dae5a0fa4efcb3cdef596b58f0ebfcd7b73e315 | /skidl/__init__.py | 7ef3364ad1afd33243b357d244c0b49270544f98 | [
"MIT"
] | permissive | andrewjaykeller/skidl | 94c790b9ffaf3c6948b38569e890d29cfff73b12 | 89cb53dc142c51c620223a14164b158407685505 | refs/heads/master | 2020-05-17T21:50:03.570986 | 2019-04-19T19:55:39 | 2019-04-19T19:55:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | # -*- coding: utf-8 -*-
"""SKiDL: A Python-Based Schematic Design Language
This module extends Python with the ability to design electronic
circuits. It provides classes for working with:
* Electronic parts (``Part``),
* Collections of part terminals (``Pin``) connected via wires (``Net``), and
* Groups of related nets (``Bus``).
Using these classes, you can concisely describe the interconnection of
parts using a flat or hierarchical structure.
A resulting Python script outputs a netlist that can be
imported into a PCB layout tool or Spice simulator.
The script can also check the resulting circuitry for electrical rule violations.
"""
from .skidl import *
from .netlist_to_skidl import *
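# Hedged usage sketch (commented out so importing this package stays side-effect
# free; the part/library names are assumptions and may not exist locally):
#
#   from skidl import Part, Net, generate_netlist
#   r = Part('Device', 'R', value='1K')   # a resistor from a KiCad library
#   vin, gnd = Net('VIN'), Net('GND')
#   vin += r[1]
#   gnd += r[2]
#   generate_netlist()                    # emit the netlist for layout tools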
| [
"[email protected]"
] | |
cf80ca5edffa2b7ae75c87fed73d72279668bf58 | 8b926cf341d6294deac60949e19716a1bccf80e3 | /day3/functions/10.py | 0fd509940291d6ab4dc6bc2c350e549de0843374 | [] | no_license | shobhit-nigam/qberry | dc041ecb7468ef04545c761636cb605660105f8e | 54d8988ddf9c1301174b23c9c02c3b3b9b3473c0 | refs/heads/main | 2023-08-07T02:39:19.426236 | 2021-10-07T12:53:00 | 2021-10-07T12:53:00 | 413,307,799 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | def funca (la=55, lb=77, lc=66):
return la+lb, la+lc, "hey", [la, lb, lc]
u, v, w, x = funca(100, 22, 10)
print("u =", u)
print("v =", v)
print("w =", w)
print("x =", x)
# error: funca returns a 4-tuple, so unpacking into two names raises ValueError
u, v = funca(100, 22, 10)
| [
"[email protected]"
] | |
36fd3ec3e343ef9210b2a247491bed58499851a0 | 9463d85666453fd8e57a0ce9e515e4765ae2b60a | /cwpoliticl/cwpoliticl/scraped_websites.py | 1b5c36189affb30506c8ee8cbbc97f9b239d97d0 | [
"MIT"
] | permissive | trujunzhang/djzhang-targets | dc6c3086553a5450fb239cc1cef5330a51a02e1f | c2e327acde9d51f0455e7243f17d93d74b579501 | refs/heads/master | 2021-01-09T20:52:31.258826 | 2016-07-16T13:18:53 | 2016-07-16T13:18:53 | 60,747,429 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,909 | py | from enum import Enum
from cwpoliticl.extensions.dailyo_parser import DailyoParser
from cwpoliticl.extensions.deccanchronicle_parser import DeccanchronicleParser
from cwpoliticl.extensions.dnaindia_parser import DnaIndiaParser
from cwpoliticl.extensions.firstpost_parser import FirstPostParser
from cwpoliticl.extensions.hindustantimes_parser import HindustantimesParser
from cwpoliticl.extensions.indianexpress_parser import IndianExpressParser
from cwpoliticl.extensions.news18_parser import News18Parser
from cwpoliticl.extensions.theindianeconomist_parser import TheIndianEconomistParser
from cwpoliticl.extensions.theviewspaper_parser import TheViewsPaperParser
class WebsiteTypes(Enum):
def __str__(self):
return str(self.value)
dnaindia = "dnaindia"
indianexpress = "indianexpress"
theviewspaper = "theviewspaper"
dailyo = "dailyo"
deccanchronicle = "deccanchronicle"
firstpost = "firstpost"
# forbesindia = "forbesindia" # ???
hindustantimes = "hindustantimes"
news18 = "news18"
theindianeconomist = "theindianeconomist"
@classmethod
def get_pagination_url(cls, type):
return scraped_websites_pagination.keys()[scraped_websites_pagination.values().index(type)]
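# Illustrative reverse lookup: WebsiteTypes.get_pagination_url(WebsiteTypes.dailyo)
# returns 'http://www.dailyo.in/politics' from scraped_websites_pagination below.
# (Note: dict views are not indexable under Python 3, so that line assumes Python 2.)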
content_seperator = '\n' + '\n'
websites_allowed_domains = {
WebsiteTypes.dnaindia: "www.dnaindia.com",
WebsiteTypes.indianexpress: "www.indianexpress.com",
WebsiteTypes.theviewspaper: "theviewspaper.net",
WebsiteTypes.dailyo: 'www.dailyo.in',
WebsiteTypes.deccanchronicle: 'www.deccanchronicle.com',
WebsiteTypes.firstpost: 'www.firstpost.com',
# WebsiteTypes.forbesindia: 'forbesindia.com', # ???
WebsiteTypes.hindustantimes: 'www.hindustantimes.com',
WebsiteTypes.news18: 'www.news18.com',
WebsiteTypes.theindianeconomist: 'theindianeconomist.com',
}
scraped_websites_pagination = {
'http://www.dnaindia.com/analysis': WebsiteTypes.dnaindia,
'http://indianexpress.com/opinion/': WebsiteTypes.indianexpress,
'http://theviewspaper.net': WebsiteTypes.theviewspaper,
'http://www.dailyo.in/politics': WebsiteTypes.dailyo,
'http://www.deccanchronicle.com/opinion': WebsiteTypes.deccanchronicle,
'http://www.firstpost.com/category/politics': WebsiteTypes.firstpost,
# 'http://forbesindia.com/': WebsiteTypes.forbesindia, # ???
'http://www.hindustantimes.com/opinion/': WebsiteTypes.hindustantimes,
'http://www.news18.com/blogs/': WebsiteTypes.news18,
'http://theindianeconomist.com/': WebsiteTypes.theindianeconomist,
}
websites_parses = {
WebsiteTypes.dnaindia: DnaIndiaParser(),
WebsiteTypes.indianexpress: IndianExpressParser(),
WebsiteTypes.theviewspaper: TheViewsPaperParser(),
WebsiteTypes.dailyo: DailyoParser(),
WebsiteTypes.deccanchronicle: DeccanchronicleParser(),
WebsiteTypes.firstpost: FirstPostParser(),
# WebsiteTypes.forbesindia: FirstPostParser(), # ???
WebsiteTypes.hindustantimes: HindustantimesParser(),
WebsiteTypes.news18: News18Parser(),
WebsiteTypes.theindianeconomist: TheIndianEconomistParser()
}
# ===
# for debug
# ===
def get_crawler_name():
# Extensions
# is_pagination = True
is_pagination = False
# url_from = WebsiteTypes.dnaindia
# url_from = WebsiteTypes.indianexpress
# url_from = WebsiteTypes.theviewspaper
url_from = WebsiteTypes.dailyo
# url_from = WebsiteTypes.deccanchronicle
# url_from = WebsiteTypes.firstpost
# url_from = WebsiteTypes.forbesindia # ???
# url_from = WebsiteTypes.hindustantimes
# url_from = WebsiteTypes.news18
# url_from = WebsiteTypes.theindianeconomist
crawler_names = [
# "politicl",
# "politicl_watch",
"{}_debug".format(url_from.value)
]
return {
'name': crawler_names[0],
'is_pagination': is_pagination
}
is_pagination = get_crawler_name()['is_pagination']
| [
"[email protected]"
] | |
65356e5dbd131f3cc166ec4c8ccf3afb28c990af | daa3498147304fe617b8b77a38bc96bb9a8f6d6c | /archs/core/wideresnet.py | 75186cfbaa58b8ade7c484fc2bf077c0e9b12c14 | [
"MIT"
] | permissive | gatheluck/archs | 9a13050f0b217d8f24528d0e0743c63190fa3f0c | 23adfd71ae3b1f73416c59e82af80698cb2e593e | refs/heads/master | 2023-03-10T08:34:26.917271 | 2021-02-19T03:13:14 | 2021-02-19T03:13:14 | 333,693,669 | 0 | 0 | MIT | 2021-02-28T13:34:10 | 2021-01-28T08:27:24 | Python | UTF-8 | Python | false | false | 5,584 | py | import math
from typing import Type, Union
import torch
from torch import nn
from torch.nn import functional
__all__ = ["wideresnet16", "wideresnet28", "wideresnet40"]
class BasicBlock(nn.Module):
def __init__(
self, in_planes: int, out_planes: int, stride: int, droprate: float = 0.0
) -> None:
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(True)
self.conv1 = nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(True)
self.conv2 = nn.Conv2d(
out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False
)
self.droprate = droprate
self.equal_io = in_planes == out_planes
self.shortcut = (
nn.Conv2d(
in_planes,
out_planes,
kernel_size=1,
stride=stride,
padding=0,
bias=False,
)
if not self.equal_io
else None
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
o = self.relu1(self.bn1(x))
if not self.equal_io: # for shortcut
x = self.shortcut(o) # type: ignore
o = self.relu2(self.bn2(self.conv1(o)))
if self.droprate > 0:
o = functional.dropout(o, p=self.droprate, training=self.training)
o = self.conv2(o)
return x + o
class NetworkBlock(nn.Module):
def __init__(
self,
block: Type[nn.Module],
in_planes: int,
out_planes: int,
n: Union[int, float],
stride: int,
droprate: float = 0.0,
) -> None:
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, n, stride, droprate)
def _make_layer(
self,
block: Type[nn.Module],
in_planes: int,
out_planes: int,
n: Union[int, float],
stride: int,
droprate: float,
) -> nn.Module:
layers = []
for i in range(int(n)):
layers.append(
block( # type: ignore
in_planes if i == 0 else out_planes,
out_planes,
stride if i == 0 else 1,
droprate,
)
)
return nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.layer(x)
class WideResNet(nn.Module):
def __init__(
self, num_classes: int, depth: int, width: int = 1, droprate: float = 0.0
) -> None:
super(WideResNet, self).__init__()
nc = [16, 16 * width, 32 * width, 64 * width]
assert (depth - 4) % 6 == 0
n = (depth - 4) / 6
block = BasicBlock
self.depth = depth
self.width = width
self.conv1 = nn.Conv2d(3, nc[0], kernel_size=3, stride=1, padding=1, bias=False)
self.block1 = NetworkBlock(block, nc[0], nc[1], n, 1, droprate)
self.block2 = NetworkBlock(block, nc[1], nc[2], n, 2, droprate)
self.block3 = NetworkBlock(block, nc[2], nc[3], n, 2, droprate)
self.bn = nn.BatchNorm2d(nc[3])
self.relu = nn.ReLU(True)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(nc[3], num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv1(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.relu(self.bn(x))
x = self.avgpool(x)
x = x.view(-1, 64 * self.width)
return self.fc(x)
def wideresnet16(
num_classes: int = 1000, widening_factor: int = 8, droprate: float = 0.3
) -> nn.Module:
"""factory class of Wide ResNet-16.
Parameters
----------
num_classes : int
Number of output class.
widening_factor : int
This number decide width of model which is represented by k in original paper.
droprate : float
Probability of dropout.
"""
return WideResNet(num_classes, 16, widening_factor, droprate)
def wideresnet28(
num_classes: int = 1000, widening_factor: int = 10, droprate: float = 0.3
) -> nn.Module:
"""factory class of Wide ResNet-28.
Parameters
----------
num_classes : int
Number of output class.
widening_factor : int
This number decide width of model which is represented by k in original paper.
droprate : float
Probability of dropout.
"""
return WideResNet(num_classes, 28, widening_factor, droprate)
def wideresnet40(
num_classes: int = 1000, widening_factor: int = 2, droprate: float = 0.3
) -> nn.Module:
"""factory class of Wide ResNet-40.
Parameters
----------
num_classes : int
Number of output class.
widening_factor : int
This number decide width of model which is represented by k in original paper.
droprate : float
Probability of dropout.
"""
return WideResNet(num_classes, 40, widening_factor, droprate)
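# Hedged usage sketch (assumes CIFAR-style 3x32x32 inputs, though the adaptive
# average pool accepts other sizes too):
if __name__ == "__main__":
    model = wideresnet28(num_classes=10)
    logits = model(torch.randn(2, 3, 32, 32))
    print(logits.shape)  # expected: torch.Size([2, 10])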
| [
"[email protected]"
] | |
75948801b21c5fbd376936925ab4134c3dfaedec | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02397/s703752111.py | c76263628dfd6c936ed83609679a4b2b18dfa38d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | for i in range(3000):
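    # Read "x y" pairs (at most 3000) until the "0 0" sentinel, printing each
    # pair in ascending order.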
a = input().split()
x = int(a[0])
y = int(a[1])
if x + y == 0:
break
    elif x < y:
        print(x, y)
    else:  # y < x, or the tie, which prints identically either way
        print(y, x)
| [
"[email protected]"
] | |
7458d7f380e6667abd2fe6bb0b1909024baa0a30 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/40/usersdata/84/23549/submittedfiles/main.py | c42463e6d42fbe22f24ac4b3c428bb369aaa1b15 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | # -*- coding: utf-8 -*-
from __future__ import division
import funcoes
# START HERE
m = input('enter the number m of terms of the pi formula: ')
e = input('enter the epsilon for the golden-ratio computation: ')
print('%.15f'%(funcoes.calcula_pi(m)))
print('%.15f'%(funcoes.calcula_razao_aurea(m,e)))
| [
"[email protected]"
] | |
cedd6424f3d86009eb50c26c71ae2df8dd7d90cd | c1fdff5522b65fbff697d5445ef018f5a4c4d39f | /src/profiles_project/profiles_api/urls.py | 1570d40b697fc89f4ce5e8252b65c87fedc2fb79 | [] | no_license | jimpalowski/2nd-REST | 858f2ac4b38e59ec275cbcbb682a929bc8052a50 | e7e51da345332b5a5f854076b46776677f2f7571 | refs/heads/master | 2021-04-26T23:53:44.496698 | 2018-03-05T06:44:51 | 2018-03-05T06:44:51 | 123,876,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py |
from django.conf.urls import url
from django.conf.urls import include
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
router.register('profile', views.UserProfileViewSet)
router.register('login', views.LoginViewSet, base_name='login')
router.register('feed', views.UserProfileFeedViewSet)
urlpatterns = [
url(r'^hello-view/', views.HelloApiView.as_view()),
url(r'', include(router.urls))
]
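# For reference, a sketch of what DefaultRouter generates (paths illustrative):
#   /profile/       -> UserProfileViewSet list/create
#   /profile/<pk>/  -> UserProfileViewSet retrieve/update/destroy
# plus an API root at '' listing every registered route.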
| [
"[email protected]"
] | |
5a8205ea8bf4a13ab2b4978ff9aeb97f09467458 | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /gbd_2017/nonfatal_code/congenital/custom/submit_denominator.py | 07ac5a4861e1421a1304e8ff82527b617fbb3681 | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | from __future__ import division
import subprocess
import numpy as np
import pandas as pd
import os
import shutil
import glob
from db_queries import get_location_metadata
username = 'USERNAME'
root = "FILEPATH"
error_path = "FILEPATH"
output_path = "FILEPATH"
if not os.path.exists(error_path):
os.makedirs(error_path)
if not os.path.exists(output_path):
os.makedirs(output_path)
out_dir = "FILEPATH"
share_dir = "FILEPATH"
loc_meta = get_location_metadata(location_set_id=35, gbd_round_id=5)
loc_meta = loc_meta.loc[loc_meta.most_detailed==1, ['location_id', 'ihme_loc_id']]
if not os.path.exists(share_dir):
os.makedirs(share_dir)
else:
shutil.rmtree(share_dir)
os.makedirs(share_dir)
job_string = ""
for index, row in loc_meta.iterrows():
if row.location_id > -1:
job_name = 'denom_{}'.format(row.location_id)
job_string = job_string + ',' + job_name
call = ('qsub -hold_jid {hj} -l mem_free=4.0G -pe multi_slot 4'
' -cwd -P proj_custom_models'
' -o {o}'
' -e {e}'
' -N {jn}'
' cluster_shell.sh'
' calc_denominator.py'
' {arg1} {arg2} {arg3}'.format(hj='no_holds',
o=output_path, e=error_path, jn=job_name,
arg1=share_dir, arg2=row.location_id,
arg3=row.ihme_loc_id))
subprocess.call(call, shell=True)
hold = job_string
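# The comma-separated job names collected above feed -hold_jid, so the combine
# step below starts only after every per-location denominator job finishes.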
params = [share_dir, out_dir,
'--loc_list',
" ".join([str(x) for x in loc_meta.location_id.tolist()])]
call = ('qsub -hold_jid {hj} -l mem_free=10.0G -pe multi_slot 5'
' -cwd -P proj_custom_models'
' -o {o}'
' -e {e}'
' -N {jn}'
' cluster_shell.sh'
' combine_denominators.py'
' {arg1}'.format(hj=hold, o=output_path, e=error_path,
jn='combine_denominators', arg1=' '.join(params)))
subprocess.call(call, shell=True)
| [
"[email protected]"
] | |
0061ccf1ef26e200b24c73b71bbf35962c0f8378 | 77d4d5a1881297dce3003560e04a2e39a97d4465 | /code_chef/BFTT.py | cc455f05d204c719300571a84d14da9bfe603165 | [] | no_license | gomsterX/competitive_programming | c34820032c24532d62325a379590a22fa812159a | 72ac1fe61604e5a5e41f336bb40377fd7e4738d7 | refs/heads/master | 2023-07-19T21:28:16.205718 | 2021-09-02T14:18:44 | 2021-09-02T14:18:44 | 271,074,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | #Problem ID: BFTT
#Problem Name: Balsa For The Three
# Brute force: find the smallest number greater than n containing at least three '3' digits.
for _ in range(int(input())):
    n = int(input())
    n += 1
    while str(n).count('3') < 3:
        n += 1
    print(n)
| [
"[email protected]"
] |