Dataset schema (one row per source file):

| Column | Dtype | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | 0–112 entries |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes, nullable |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | 1 entry |
| author_id | string | length 1–132 |

Each raw record below lists these fields in the order given, separated by `|`, with the full file text inline in the `content` field.
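As a minimal sketch of how a split with this schema can be consumed (assuming the Hugging Face `datasets` library; the dataset id below is a placeholder, not the real repository name), the columns above map directly onto dictionary keys:

```python
from datasets import load_dataset

# Placeholder dataset id -- substitute the actual repository name.
ds = load_dataset("org/python-code-corpus", split="train", streaming=True)

# Keep permissively licensed, non-vendored, non-generated files.
filtered = ds.filter(
    lambda row: row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
)

for row in filtered.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the file
```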
edebe47b4cb17e60d34d08b33c6aff333614da02 | 105f8bb5f417248b2c56fec113746472cea94f5d | /slides/08_expressivity_optimization_generalization/images/make_relu_composition_figures.py | 5f44f0cb8d4a7b2711e798edd969717a4a5c6ed6 | [
"MIT",
"CC-BY-4.0"
]
| permissive | m2dsupsdlclass/lectures-labs | 659ce7d8f7a9eb94e776f16a10d0d1df3f037365 | a41bdfde52081eaa615d86c46fceeae1c4b1d0cd | refs/heads/master | 2023-06-01T20:30:17.669627 | 2022-12-07T11:20:05 | 2022-12-07T11:20:05 | 82,718,394 | 1,482 | 674 | MIT | 2022-03-10T21:34:29 | 2017-02-21T19:27:20 | Jupyter Notebook | UTF-8 | Python | false | false | 698 | py | import matplotlib.pyplot as plt
import numpy as np
import os
# Generate the figures in the same folder
os.chdir(os.path.dirname(__file__))
def relu(x):
return np.maximum(x, 0)
def tri(x):
return relu(relu(2 * x) - relu(4 * x - 2))
x = np.linspace(-.3, 1.3, 1000)
plt.figure()
plt.ylim(-0.1, 1.1)
plt.plot(x, tri(x))
plt.savefig('triangle_x.svg')
plt.figure()
plt.ylim(-0.1, 1.1)
plt.plot(x, tri(tri(x)))
plt.savefig('triangle_triangle_x.svg')
plt.figure()
plt.ylim(-0.1, 1.1)
plt.plot(x, tri(tri(tri(x))))
plt.savefig('triangle_triangle_triangle_x.svg')
plt.figure()
plt.ylim(-0.1, 1.1)
plt.plot(x, tri(tri(tri(tri(x)))))
plt.savefig('triangle_triangle_triangle_triangle_x.svg')
| [
"[email protected]"
]
| |
1ce7266e6731baf04cc90d054a413101bd13b9d3 | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/models/model_test.runfiles/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/gan/python/estimator/python/__init__.py | 4155546440516e74ce44604130f538caef5b8abe | [
"Apache-2.0"
]
| permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 199 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/gan/python/estimator/python/__init__.py | [
"[email protected]"
]
| |
db1ef123f5567a57b504517e803bcc932fdfd182 | a5b5c48cec36047b3db3dc29a1f1a311ac4b54fa | /tests/test_names.py | fb8e92f41ff9029b67eaf2cf58b2ffedd4311449 | [
"MIT"
]
| permissive | Josef-Friedrich/dyndns | feea95e94bdc08aa6b45e0163d012fceffe910c2 | a24e45d22ebff45eac04230992402d63ea289c8b | refs/heads/main | 2023-08-05T12:43:59.254785 | 2023-07-26T06:53:45 | 2023-07-26T06:53:45 | 140,106,325 | 8 | 3 | MIT | 2023-07-26T06:53:46 | 2018-07-07T17:58:43 | Python | UTF-8 | Python | false | false | 4,681 | py | import unittest
from _helper import zones
from dyndns.exceptions import NamesError
from dyndns.names import Names, Zone, Zones, validate_hostname, validate_tsig_key
class TestFunctionValidateHostname(unittest.TestCase):
def assertRaisesMsg(self, hostname, msg):
with self.assertRaises(NamesError) as cm:
validate_hostname(hostname)
self.assertEqual(str(cm.exception), msg)
def test_valid(self):
self.assertEqual(
validate_hostname("www.example.com"),
"www.example.com.",
)
def test_invalid_tld(self):
self.assertRaisesMsg(
"www.example.777",
'The TLD "777" of the hostname "www.example.777" must be not '
"all-numeric.",
)
def test_invalid_to_long(self):
self.assertRaisesMsg(
"a" * 300,
'The hostname "aaaaaaaaaa..." is longer than 253 characters.',
)
def test_invalid_characters(self):
self.assertRaisesMsg(
"www.exämple.com",
'The label "exämple" of the hostname "www.exämple.com" is ' "invalid.",
)
class TestFunctionValidateTsigKey(unittest.TestCase):
def assertRaisesMsg(self, tsig_key, msg):
with self.assertRaises(NamesError) as cm:
validate_tsig_key(tsig_key)
self.assertEqual(str(cm.exception), msg)
def test_valid(self):
self.assertEqual(validate_tsig_key("tPyvZA=="), "tPyvZA==")
def test_invalid_empty(self):
self.assertRaisesMsg("", 'Invalid tsig key: "".')
def test_invalid_string(self):
self.assertRaisesMsg("xxx", 'Invalid tsig key: "xxx".')
class TestClassZone(unittest.TestCase):
def test_init(self):
zone = Zone("example.com", "tPyvZA==")
self.assertEqual(zone.zone_name, "example.com.")
self.assertEqual(zone.tsig_key, "tPyvZA==")
def test_method_split_fqdn(self):
zone = Zone("example.com", "tPyvZA==")
record_name, zone_name = zone.split_fqdn("www.example.com")
self.assertEqual(record_name, "www.")
self.assertEqual(zone_name, "example.com.")
def test_method_build_fqdn(self):
zone = Zone("example.com", "tPyvZA==")
fqdn = zone.build_fqdn("www")
self.assertEqual(fqdn, "www.example.com.")
class TestClassZones(unittest.TestCase):
def test_init(self):
zone = zones.zones["example.org."]
self.assertEqual(zone.zone_name, "example.org.")
self.assertEqual(zone.tsig_key, "tPyvZA==")
def test_method_get_zone_by_name(self):
zone = zones.get_zone_by_name("example.org")
self.assertEqual(zone.zone_name, "example.org.")
self.assertEqual(zone.tsig_key, "tPyvZA==")
def test_method_get_zone_by_name_raises(self):
with self.assertRaises(NamesError) as cm:
zones.get_zone_by_name("lol.org")
self.assertEqual(str(cm.exception), 'Unkown zone "lol.org.".')
class TestClassZonesMethodSplitNames(unittest.TestCase):
def test_with_dot(self):
result = zones.split_fqdn("www.example.com")
self.assertEqual(result, ("www.", "example.com."))
def test_with_org(self):
result = zones.split_fqdn("www.example.org")
self.assertEqual(result, ("www.", "example.org."))
def test_unkown_zone(self):
result = zones.split_fqdn("www.xx.org")
self.assertEqual(result, False)
def test_subzones(self):
zones = Zones(
[
{"name": "example.com.", "tsig_key": "tPyvZA=="},
{"name": "dyndns.example.com", "tsig_key": "tPyvZA=="},
]
)
result = zones.split_fqdn("lol.dyndns.example.com")
self.assertEqual(result, ("lol.", "dyndns.example.com."))
class TestClassNames(unittest.TestCase):
def setUp(self):
self.names = Names(zones=zones, fqdn="www.example.com")
def test_attribute_fqdn(self):
self.assertEqual(self.names.fqdn, "www.example.com.")
def test_attribute_zone_name(self):
self.assertEqual(self.names.zone_name, "example.com.")
def test_attribute_record_name(self):
self.assertEqual(self.names.record_name, "www.")
def test_attribute_tsig_key(self):
self.assertEqual(self.names.tsig_key, "tPyvZA==")
# class TestClassNamesRaises(unittest.TestCase):
#
# def assertRaisesMsg(self, kwargs, msg):
# with self.assertRaises(JfErr) as cm:
# Names(zones, **kwargs)
# self.assertEqual(str(cm.exception), msg)
#
# def test_no_kwargs(self):
# self.assertRaisesMsg({'record_name', 'lol'}, '')
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
b72eb40ddf04009f6bc497d9343288127410c544 | 75cf6a9fd035883b64ca2309382e0178cf370b43 | /Empirical/python/sklearn/linear_model/plot_iris_logistic.py | 924f1346cf00691abcd213789bd478f944b4bf65 | []
| no_license | ygtfrdes/Program | 171b95b9f32a105185a7bf8ec6c8c1ca9d1eda9d | 1c1e30230f0df50733b160ca73510c41d777edb9 | refs/heads/master | 2022-10-08T13:13:17.861152 | 2019-11-06T04:53:27 | 2019-11-06T04:53:27 | 219,560,170 | 1 | 2 | null | 2022-09-30T19:51:17 | 2019-11-04T17:39:52 | HTML | UTF-8 | Python | false | false | 1,822 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
first two dimensions (sepal length and width) of the `iris
<https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The datapoints
are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
logreg = LogisticRegression(C=1e5, solver='lbfgs', multi_class='multinomial')
# Create an instance of Logistic Regression Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = .02 # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
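# A small follow-up sketch, not part of the original example: the fitted classifier
# can score new sepal measurements directly. The values below are illustrative only.
new_points = np.array([[5.0, 3.5], [6.5, 2.8]])  # [sepal length, sepal width] in cm
print(logreg.predict(new_points))        # predicted class indices (0, 1 or 2)
print(logreg.predict_proba(new_points))  # per-class probabilities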
| [
"[email protected]"
]
| |
a8636ce25a63341b2bda20acac4e2f0e1b086b68 | e70a17e8a37847a961f19b136f3bbe74393fa2af | /RPI/build/image_view/catkin_generated/generate_cached_setup.py | 2fe0a0bf295843f90693461632443faef230492a | [
"MIT"
]
| permissive | Mondiegus/ROS-4x4-CAR-AI | 1413ead6f46a8b16005abeea3e0b215caa45f27e | 124efe39168ce96eec13d57e644f4ddb6dfe2364 | refs/heads/Master | 2023-07-14T23:56:53.519082 | 2021-03-27T17:28:45 | 2021-03-27T17:28:45 | 334,233,839 | 0 | 0 | MIT | 2021-02-02T13:00:30 | 2021-01-29T18:46:16 | Makefile | UTF-8 | Python | false | false | 1,303 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/noetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/noetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/pi/catkin_ws/devel;/opt/ros/noetic'.split(';'):
python_path = os.path.join(workspace, 'lib/python3/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/pi/catkin_ws/devel/.private/image_view/env.sh')
output_filename = '/home/pi/catkin_ws/build/image_view/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
]
| |
3fb16f147771a0639471c589fa84a073965aed3d | fe3265b72e691c6df8ecd936c25b6d48ac33b59a | /homeassistant/components/devolo_home_network/entity.py | a26d8dce8f6fb23968697733741b152ad3eec79e | [
"Apache-2.0"
]
| permissive | bdraco/home-assistant | dcaf76c0967783a08eec30ce704e5e9603a2f0ca | bfa315be51371a1b63e04342a0b275a57ae148bd | refs/heads/dev | 2023-08-16T10:39:15.479821 | 2023-02-21T22:38:50 | 2023-02-21T22:38:50 | 218,684,806 | 13 | 7 | Apache-2.0 | 2023-02-21T23:40:57 | 2019-10-31T04:33:09 | Python | UTF-8 | Python | false | false | 1,595 | py | """Generic platform."""
from __future__ import annotations
from typing import TypeVar
from devolo_plc_api.device import Device
from devolo_plc_api.device_api import (
ConnectedStationInfo,
NeighborAPInfo,
WifiGuestAccessGet,
)
from devolo_plc_api.plcnet_api import LogicalNetwork
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import DOMAIN
_DataT = TypeVar(
"_DataT",
bound=(
LogicalNetwork
| list[ConnectedStationInfo]
| list[NeighborAPInfo]
| WifiGuestAccessGet
| bool
),
)
class DevoloEntity(CoordinatorEntity[DataUpdateCoordinator[_DataT]]):
"""Representation of a devolo home network device."""
_attr_has_entity_name = True
def __init__(
self,
entry: ConfigEntry,
coordinator: DataUpdateCoordinator[_DataT],
device: Device,
) -> None:
"""Initialize a devolo home network device."""
super().__init__(coordinator)
self.device = device
self.entry = entry
self._attr_device_info = DeviceInfo(
configuration_url=f"http://{device.ip}",
identifiers={(DOMAIN, str(device.serial_number))},
manufacturer="devolo",
model=device.product,
name=entry.title,
sw_version=device.firmware_version,
)
self._attr_unique_id = f"{device.serial_number}_{self.entity_description.key}"
| [
"[email protected]"
]
| |
bd105c34478ed631078af845e9908abee311a4d0 | c2281d55883a51b2698119e3aeb843df9c8c885b | /Thesis ch 2/ClusteringBuckets/GenericModels/LogisticRegression/LogisticRegressionClassifier.py | a525596ace3707e35cafd39f7b5bbb875dd205da | []
| no_license | akshitasawhney3008/Thesis-Final | 1c004ffc6c2dd6ec711b212f9a35e46ea067c9c7 | 10865bab16bcc2ca4a5d4af345ffb4f2f7222104 | refs/heads/master | 2023-02-01T20:56:43.763024 | 2020-12-10T09:28:45 | 2020-12-10T09:28:45 | 320,037,411 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,871 | py | import pickle
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, roc_auc_score, accuracy_score, matthews_corrcoef
import pandas as pd
# Configuration section
iter = 5
cvCount = 8
seed = 42
thresholdRange = np.linspace(start=0.40, stop=0.60, num=500)
# Load list of best parameters from Random Search
with open('ListOfBestParamsRS.pkl', 'rb') as f:
best_params = pickle.load(f)
def getPredictionsGivenThreshold(myMatrix, th):
myList = []
for i in range(myMatrix.shape[0]):
p1 = myMatrix[i, 1]
if p1 >= th:
myList.append(1)
else:
myList.append(0)
return np.asarray(myList)
path = "C://Users//Arushi//PycharmProjects//ThesisChap2//fixedBuckets(10)//"
thresholdList = []
precisionList = []
recallList = []
aucList = []
accuracyList = []
mcList = []
for threshold in thresholdRange:
print(threshold)
overallPrecision = 0
overallRecall = 0
overallAuauc = 0
overallAccuracy = 0
overallMc = 0
for i in range(iter):
X_train = np.load(path + 'final_train_binarydata_' + str(i) + '.npy').astype(float)
Y_train = np.load(path + 'final_train_labels_' + str(i) + '.npy').astype(float).astype(int)
X_test = np.load(path + 'final_test_binarydata_' + str(i) + '.npy').astype(float)
Y_test = np.load(path + 'final_test_labels_' + str(i) + '.npy').astype(float).astype(int)
bp = best_params[i]
clf = LogisticRegression(penalty=bp['penalty'], C=bp['C'],
solver=bp['solver'], max_iter=bp['max_iter'],
random_state=seed).fit(X_train, Y_train.ravel())
predictionsProb = clf.predict_proba(X_test)
np.savetxt('pp_lr' + str(i) + '.csv', predictionsProb, delimiter=',')
predictions = getPredictionsGivenThreshold(predictionsProb, threshold)
precision = precision_score(Y_test, predictions)
recall = recall_score(Y_test, predictions)
auroc = roc_auc_score(Y_test, predictionsProb[:, 1])
accuracy = accuracy_score(Y_test, predictions)
matthewsCoeff = matthews_corrcoef(Y_test, predictions)
overallPrecision += precision
overallRecall += recall
overallAuauc += auroc
overallAccuracy +=accuracy
overallMc += matthewsCoeff
thresholdList.append(threshold)
precisionList.append(overallPrecision / iter)
recallList.append(overallRecall / iter)
aucList.append(overallAuauc / iter)
accuracyList.append(overallAccuracy / iter)
mcList.append(overallMc / iter)
df = pd.DataFrame()
df['Threshold'] = thresholdList
df['Precision'] = precisionList
df['Recall'] = recallList
df['AUROC'] = aucList
df['Accuracy'] = accuracyList
df['MC'] = mcList
df.to_csv('Thresholding.csv', index=False)
print('Done') | [
"[email protected]"
]
| |
622a51454f770302f59c66c674d1b292207dda51 | 7aec3f10b07403b542e1c14a30a6e00bb479c3fe | /Codewars/7 kyu/Categorize New Member.py | 55f0cbec80222ea4e7d8b24e7e53967b8d50837a | []
| no_license | VictorMinsky/Algorithmic-Tasks | a5871749b377767176ba82308a6a0962e1b3e400 | 03a35b0541fe413eca68f7b5521eaa35d0e611eb | refs/heads/master | 2020-08-02T23:18:06.876712 | 2020-01-16T19:08:49 | 2020-01-16T19:08:49 | 211,541,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | """
The Western Suburbs Croquet Club has two categories of membership, Senior and Open. They would like your help with an application form that will tell prospective members which category they will be placed.
To be a senior, a member must be at least 55 years old and have a handicap greater than 7. In this croquet club, handicaps range from -2 to +26; the better the player the lower the handicap.
Input
Input will consist of a list of lists containing two items each. Each list contains information for a single potential member. Information consists of an integer for the person's age and an integer for the person's handicap.
Note for F#: The input will be of (int list list) which is a List<List>
Example Input
[[18, 20],[45, 2],[61, 12],[37, 6],[21, 21],[78, 9]]
Output
Output will consist of a list of string values (in Haskell: Open or Senior) stating whether the respective member is to be placed in the senior or open category.
Example Output
["Open", "Open", "Senior", "Open", "Open", "Senior"]
"""
def openOrSenior(data):
ans = []
for i in data:
ans.append('Open' if i[0] < 55 or i[1] <= 7 else 'Senior')
return ans
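# A minimal usage check, not part of the original kata solution, using the example
# input/output quoted in the docstring above:
if __name__ == "__main__":
    sample = [[18, 20], [45, 2], [61, 12], [37, 6], [21, 21], [78, 9]]
    assert openOrSenior(sample) == ["Open", "Open", "Senior", "Open", "Open", "Senior"]
    print(openOrSenior(sample))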
| [
"[email protected]"
]
| |
16cffc2b0d201eed1a0e413126a6c2b2f86965ef | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_393/ch16_2020_09_30_19_47_48_293495.py | f585b8d42d65293ca4ae98c3609e87aa3d1db3f0 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | x= float(input("valor da conta?"))
print("Valor da conta com 10%: R$ {0:.2f}".format(x*1.1)) | [
"[email protected]"
]
| |
4bd113348d198afab8b9cbb961f32431345be6c9 | 90ab150948d5bf8431548a99ce6230b59f2cdf3a | /invoice/migrations/0002_auto_20200619_1055.py | 5909905d230aaec1c5defb77743e0d61a19b4988 | []
| no_license | fadhlimulyana20/halo_bisnis_api | a180768086211d36ea3253fa7db3968818ac97b5 | 20dd00973c1def65b3290f1a640fa0218381e2f8 | refs/heads/master | 2022-11-22T03:57:27.691220 | 2020-07-11T10:24:16 | 2020-07-11T10:24:16 | 278,766,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | # Generated by Django 3.0.7 on 2020-06-19 03:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('invoice', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='InvoiceProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='invoiceporductitem',
name='invoice',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='invoice.InvoiceProduct'),
),
]
| [
"[email protected]"
]
| |
c4a373608ae84b88523bef5559abaef2eae5e1af | 336431aae640a29c22b723d4889e3a90bd62abb1 | /tests/demoproject/demo/sample/admin.py | 1df03211880776a0367394d714a88a32c7db733f | [
"Apache-2.0"
]
| permissive | csabadenes/unicef-snapshot | ef1a864e9f558d0fb18af5ea8d2a641192946592 | 2dcf6a8e15ff75566c168297f0f4194627bcb083 | refs/heads/master | 2020-04-25T18:02:30.771620 | 2018-08-08T16:36:09 | 2018-08-08T16:36:09 | 172,970,780 | 0 | 0 | null | 2019-02-27T18:48:55 | 2019-02-27T18:48:54 | null | UTF-8 | Python | false | false | 288 | py | from demo.sample.models import Author
from django.contrib import admin
from unicef_snapshot.admin import ActivityInline, SnapshotModelAdmin
class AuthorAdmin(SnapshotModelAdmin):
list_display = ('name', )
inlines = (ActivityInline, )
admin.site.register(Author, AuthorAdmin)
| [
"[email protected]"
]
| |
cee68a32de153c1ce0e9544ae443df9f22c119b9 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /ec2_write_1/launch-template-version_delete.py | 3d243ad877badd7d21340ef9b6563f7511c32dd3 | []
| no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/delete-launch-template-versions.html
if __name__ == '__main__':
"""
create-launch-template-version : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/create-launch-template-version.html
describe-launch-template-versions : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-launch-template-versions.html
"""
parameter_display_string = """
# versions : The version numbers of one or more launch template versions to delete.
(string)
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("ec2", "delete-launch-template-versions", "versions", add_option_dict)
| [
"[email protected]"
]
| |
dc91348fc701b6f59294d0f264888ff96417929c | 6733716dcdcacfcc739ae5c4af976db81ead852b | /ROOT/Project/soomin/N1_Twitter_basic_plot/TWITTER_TXT_generator_week3.py | 3b32adf857274629e8765734434c3acd6469adc2 | []
| no_license | StudyGroupPKU/fruit_team | 45202a058d59057081670db97b9229ee720fa77e | 9f9f673f5ce22ce6d25736871f3d7a5bd232c29d | refs/heads/master | 2021-01-24T08:15:37.909327 | 2018-05-11T08:53:06 | 2018-05-11T08:53:06 | 122,975,404 | 0 | 5 | null | 2018-04-05T02:37:14 | 2018-02-26T13:41:24 | Python | UTF-8 | Python | false | false | 16,171 | py | ####### This code is for calulating total words, total positive words, total negative words, tweet number :: txt file
import os
def read_file_name(filename): # returns [basename without .txt extension, basename, absolute path, directory part of the path]
f = open(filename,"r")
if(filename[0] == '/'): # 'filename' is already an absolute path
filename = filename
elif(filename[0] == '~'):
filename = filename.replace("~",os.environ['HOME'])
else:
filename = os.getcwd() + "/" + filename
loca = len(filename)
for i in range(1,len(filename)+1): # find the "/" location
if(filename[-i] == '/'):
loca = i-1
break
FILENAME = filename.replace(filename[:-loca],"")
FILE = FILENAME.replace(".txt","")
filename_NoRoot = filename.replace(filename[len(filename)-loca:len(filename)],"")
filelist = [FILE, FILENAME, filename, filename_NoRoot]
# print(filelist)
return(filelist)
def readLines(filename):
f = open(filename,'r')
T,P,N =0,0,0
List = []
it = 0
for line in f: # parse each 'TOTAL pos neg' line; malformed lines are skipped below
try:
# if(it==0):
# it = 1
# continue
lis = []
TOTAL, pos, neg = line.split()
lis.append(int(TOTAL))
lis.append(int(pos))
lis.append(int(neg))
List.append(lis)
except ValueError:
pass
# print("a time value error")
# print(filename)
for ii in range(len(List)):
T = T + List[ii][0]
P = P + List[ii][1]
N = N + List[ii][2]
LL = list()
LL = [T,P,N,len(List)]
f.close()
return LL
#print(T,P,N)
def main():
list_Filename =list()
# list_Filename=["newyork/beer_newyork/beer_0402Mon_NY.txt","newyork/beer_newyork/beer_0403Tue_NY.txt","newyork/beer_newyork/beer_0404Wed_NY.txt","newyork/beer_newyork/beer_0405Thu_NY.txt","newyork/beer_newyork/beer_0406Fri_NY.txt","newyork/beer_newyork/beer_0407Sat_NY.txt","newyork/beer_newyork/beer_0408Sun_NY.txt",
#"newyork/coffee_newyork/coffee_0402Mon_NY.txt","newyork/coffee_newyork/coffee_0403Tue_NY.txt","newyork/coffee_newyork/coffee_0404Wed_NY.txt","newyork/coffee_newyork/coffee_0405Thu_NY.txt","newyork/coffee_newyork/coffee_0406Fri_NY.txt","newyork/coffee_newyork/coffee_0407Sat_NY.txt","newyork/coffee_newyork/coffee_0408Sun_NY.txt",
#"newyork/tea_newyork/tea_0402Mon_NY.txt","newyork/tea_newyork/tea_0403Tue_NY.txt","newyork/tea_newyork/tea_0404Wed_NY.txt","newyork/tea_newyork/tea_0405Thu_NY.txt","newyork/tea_newyork/tea_0406Fri_NY.txt","newyork/tea_newyork/tea_0407Sat_NY.txt","newyork/tea_newyork/tea_0408Sun_NY.txt",
#"newyork/juice_newyork/juice_0402Mon_NY.txt","newyork/juice_newyork/juice_0403Tue_NY.txt","newyork/juice_newyork/juice_0404Wed_NY.txt","newyork/juice_newyork/juice_0405Thu_NY.txt","newyork/juice_newyork/juice_0406Fri_NY.txt","newyork/juice_newyork/juice_0407Sat_NY.txt","newyork/juice_newyork/juice_0408Sun_NY.txt",
#"newyork/coke_n_cola_newyork/COLA_COKE_0402Mon_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0403Tue_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0404Wed_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0405Thu_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0406Fri_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0407Sat_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0408Sun_NY.txt",
#"newyork/water_newyork/water_0402Mon_NY.txt","newyork/water_newyork/water_0403Tue_NY.txt","newyork/water_newyork/water_0404Wed_NY.txt","newyork/water_newyork/water_0405Thu_NY.txt","newyork/water_newyork/water_0406Fri_NY.txt","newyork/water_newyork/water_0407Sat_NY.txt","newyork/water_newyork/water_0408Sun_NY.txt",
#"newyork/wine_newyork/wine_0402Mon_NY.txt","newyork/wine_newyork/wine_0403Tue_NY.txt","newyork/wine_newyork/wine_0404Wed_NY.txt","newyork/wine_newyork/wine_0405Thu_NY.txt","newyork/wine_newyork/wine_0406Fri_NY.txt","newyork/wine_newyork/wine_0407Sat_NY.txt","newyork/wine_newyork/wine_0408Sun_NY.txt"
#]
list_Filename = ["LA/beer_LA/beer_0402Mon_LA.txt","LA/beer_LA/beer_0403Tue_LA.txt","LA/beer_LA/beer_0404Wed_LA.txt","LA/beer_LA/beer_0405Thu_LA.txt","LA/beer_LA/beer_0406Fri_LA.txt","LA/beer_LA/beer_0407Sat_LA.txt","LA/beer_LA/beer_0408Sun_LA.txt",
"LA/coffee_LA/coffee_0402Mon_LA.txt","LA/coffee_LA/coffee_0403Tue_LA.txt","LA/coffee_LA/coffee_0404Wed_LA.txt","LA/coffee_LA/coffee_0405Thu_LA.txt","LA/coffee_LA/coffee_0406Fri_LA.txt","LA/coffee_LA/coffee_0407Sat_LA.txt","LA/coffee_LA/coffee_0408Sun_LA.txt",
"LA/coke_n_cola_LA/COLA_COKE_0402Mon_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0403Tue_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0404Wed_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0405Thu_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0406Fri_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0407Sat_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0408Sun_LA.txt",
"LA/juice_LA/juice_0402Mon_LA.txt","LA/juice_LA/juice_0403Tue_LA.txt","LA/juice_LA/juice_0404Wed_LA.txt","LA/juice_LA/juice_0405Thu_LA.txt","LA/juice_LA/juice_0406Fri_LA.txt","LA/juice_LA/juice_0407Sat_LA.txt","LA/juice_LA/juice_0408Sun_LA.txt",
"LA/tea_LA/tea_0402Mon_LA.txt","LA/tea_LA/tea_0403Tue_LA.txt","LA/tea_LA/tea_0404Wed_LA.txt","LA/tea_LA/tea_0405Thu_LA.txt","LA/tea_LA/tea_0406Fri_LA.txt","LA/tea_LA/tea_0407Sat_LA.txt","LA/tea_LA/tea_0408Sun_LA.txt",
"LA/water_LA/water_0402Mon_LA.txt","LA/water_LA/water_0403Tue_LA.txt","LA/water_LA/water_0404Wed_LA.txt","LA/water_LA/water_0405Thu_LA.txt","LA/water_LA/water_0406Fri_LA.txt","LA/water_LA/water_0407Sat_LA.txt","LA/water_LA/water_0408Sun_LA.txt",
"LA/wine_LA/wine_0402Mon_LA.txt","LA/wine_LA/wine_0403Tue_LA.txt","LA/wine_LA/wine_0404Wed_LA.txt","LA/wine_LA/wine_0405Thu_LA.txt","LA/wine_LA/wine_0406Fri_LA.txt","LA/wine_LA/wine_0407Sat_LA.txt","LA/wine_LA/wine_0408Sun_LA.txt"
]
# list_Filename = ["LA/beer_LA/beer_0326Mon_LA.txt","LA/beer_LA/beer_0327Tue_LA.txt","LA/beer_LA/beer_0328Wed_LA.txt","LA/beer_LA/beer_0329Thu_LA.txt","LA/beer_LA/beer_0330Fri_LA.txt","LA/beer_LA/beer_0331Sat_LA.txt","LA/beer_LA/beer_0401Sun_LA.txt",
#"LA/coffee_LA/coffee_0326Mon_LA.txt","LA/coffee_LA/coffee_0327Tue_LA.txt","LA/coffee_LA/coffee_0328Wed_LA.txt","LA/coffee_LA/coffee_0329Thu_LA.txt","LA/coffee_LA/coffee_0330Fri_LA.txt","LA/coffee_LA/coffee_0331Sat_LA.txt","LA/coffee_LA/coffee_0401Sun_LA.txt",
#"LA/coke_n_cola_LA/COLA_COKE_0327Mon_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0328Tue_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0328Wed_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0329Thu_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0330Fri_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0331Sat_LA.txt","LA/coke_n_cola_LA/COLA_COKE_0401Sun_LA.txt",
#"LA/juice_LA/juice_0326Mon_LA.txt","LA/juice_LA/juice_0327Tue_LA.txt","LA/juice_LA/juice_0328Wed_LA.txt","LA/juice_LA/juice_0329Thu_LA.txt","LA/juice_LA/juice_0330Fri_LA.txt","LA/juice_LA/juice_0331Sat_LA.txt","LA/juice_LA/juice_0401Sun_LA.txt",
#"LA/tea_LA/tea_0326Mon_LA.txt","LA/tea_LA/tea_0327Tue_LA.txt","LA/tea_LA/tea_0328Wed_LA.txt","LA/tea_LA/tea_0329Thu_LA.txt","LA/tea_LA/tea_0330Fri_LA.txt","LA/tea_LA/tea_0331Sat_LA.txt","LA/tea_LA/tea_0401Sun_LA.txt",
#"LA/water_LA/water_0326Mon_LA.txt","LA/water_LA/water_0327Tue_LA.txt","LA/water_LA/water_0328Wed_LA.txt","LA/water_LA/water_0329Thu_LA.txt","LA/water_LA/water_0330Fri_LA.txt","LA/water_LA/water_0331Sat_LA.txt","LA/water_LA/water_0401Sun_LA.txt",
#"LA/wine_LA/wine_0326Mon_LA.txt","LA/wine_LA/wine_0327Tue_LA.txt","LA/wine_LA/wine_0328Wed_LA.txt","LA/wine_LA/wine_0329Thu_LA.txt","LA/wine_LA/wine_0330Fri_LA.txt","LA/wine_LA/wine_0331Sat_LA.txt","LA/wine_LA/wine_0401Sun_LA.txt"
#]
# list_Filename=["newyork/beer_newyork/beer_0326Mon_newyork.txt","newyork/beer_newyork/beer_0327Tue_newyork.txt","newyork/beer_newyork/beer_0328Wed_NY.txt","newyork/beer_newyork/beer_0329Thu_NY.txt","newyork/beer_newyork/beer_0330Fri_NY.txt","newyork/beer_newyork/beer_0331Sat_NY.txt","newyork/beer_newyork/beer_0401Sun_NY.txt",
#"newyork/coffee_newyork/coffee_0326Mon_newyork.txt","newyork/coffee_newyork/coffee_0327Tue_newyork.txt","newyork/coffee_newyork/coffee_0328Wed_NY.txt","newyork/coffee_newyork/coffee_0329Thu_NY.txt","newyork/coffee_newyork/coffee_0330Fri_NY.txt","newyork/coffee_newyork/coffee_0331Sat_NY.txt","newyork/coffee_newyork/coffee_0401Sun_NY.txt",
#"newyork/tea_newyork/tea_0326Mon_newyork.txt","newyork/tea_newyork/tea_0327Thu_newyork.txt","newyork/tea_newyork/tea_0328Wed_NY.txt","newyork/tea_newyork/tea_0329Thu_NY.txt","newyork/tea_newyork/tea_0330Fri_NY.txt","newyork/tea_newyork/tea_0331Sat_NY.txt","newyork/tea_newyork/tea_0401Sun_NY.txt",
#"newyork/juice_newyork/juice_0326Mon_newyork.txt","newyork/juice_newyork/juice_0327Tue_newyork.txt","newyork/juice_newyork/juice_0328Wed_NY.txt","newyork/juice_newyork/juice_0329Thu_NY.txt","newyork/juice_newyork/juice_0330Fri_NY.txt","newyork/juice_newyork/juice_0331Sat_NY.txt","newyork/juice_newyork/juice_0401Sun_NY.txt",
#"newyork/coke_n_cola_newyork/ COLA_COKE_0326Mon_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0327Tue_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0328Wed_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0329Thu_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0330Fri_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0331Sat_NY.txt","newyork/coke_n_cola_newyork/COLA_COKE_0401Sun_NY.txt",
#"newyork/water_newyork/water_0326Mon_newyork.txt","newyork/water_newyork/water_0327Tue_newyork.txt","newyork/water_newyork/water_0328Wed_NY.txt","newyork/water_newyork/water_0329Thu_NY.txt","newyork/water_newyork/water_0330Fri_NY.txt","newyork/water_newyork/water_0331Sat_NY.txt","newyork/water_newyork/water_0401Sun_NY.txt",
#"newyork/wine_newyork/wine_0326Mon_newyork.txt","newyork/wine_newyork/wine_0327Tue_newyork.txt","newyork/wine_newyork/wine_0328Wed_NY.txt","newyork/wine_newyork/wine_0329Thu_NY.txt","newyork/wine_newyork/wine_0330Fri_NY.txt","newyork/wine_newyork/wine_0331Sat_NY.txt","newyork/wine_newyork/wine_0401Sun_NY.txt"
#]
# list_Filename = ["LA/beer_LA/beer_0319Mon_LA.txt","LA/beer_LA/beer_0320Tue_LA.txt","LA/beer_LA/beer_0321Wed_LA.txt","LA/beer_LA/beer_0322Thu_LA.txt","LA/beer_LA/beer_0323Fri_LA.txt","LA/beer_LA/beer_0324Sat_LA.txt","LA/beer_LA/beer_0325Sun_LA.txt","LA/beer_LA/beer_0326Mon_LA.txt","LA/beer_LA/beer_0327Tue_LA.txt",
#"LA/coffee_LA/coffee_0319Mon_LA.txt" ,"LA/coffee_LA/coffee_0320Tue_LA.txt" ,"LA/coffee_LA/coffee_0321Wed_LA.txt" ,"LA/coffee_LA/coffee_0322Thu_LA.txt" ,"LA/coffee_LA/coffee_0323Fri_LA.txt" ,"LA/coffee_LA/coffee_0324Sat_LA.txt" ,"LA/coffee_LA/coffee_0325Sun_LA.txt" ,"LA/coffee_LA/coffee_0326Mon_LA.txt" ,"LA/coffee_LA/coffee_0327Tue_LA.txt" ,
#"LA/coke_n_cola_LA/COLA_COKE_0319Mon_LA.txt" ,"LA/coke_n_cola_LA/COLA_COKE_0320Tue_LA.txt" ,"LA/coke_n_cola_LA/COLA_COKE_0321Wed_LA.txt" ,"LA/coke_n_cola_LA/COLA_COKE_0322Thu_LA.txt" ,"LA/coke_n_cola_LA/COLA_COKE_0323Fri_LA.txt" ,"LA/coke_n_cola_LA/COLA_COKE_0324Sat_LA.txt" ,"LA/coke_n_cola_LA/COLA_COKE_0325Sun_LA.txt" ,"LA/coke_n_cola_LA/COLA_COKE_0326Mon_LA.txt" ,"LA/coke_n_cola_LA/COLA_COKE_0327Tue_LA.txt" ,
#"LA/juice_LA/juice_0319Mon_LA.txt", "LA/juice_LA/juice_0320Tue_LA.txt" ,"LA/juice_LA/juice_0321Wed_LA.txt" ,"LA/juice_LA/juice_0322Thu_LA.txt" ,"LA/juice_LA/juice_0323Fri_LA.txt" ,"LA/juice_LA/juice_0324Sat_LA.txt" ,"LA/juice_LA/juice_0325Sun_LA.txt" ,"LA/juice_LA/juice_0326Mon_LA.txt" ,"LA/juice_LA/juice_0327Tue_LA.txt",
#"LA/tea_LA/tea_0319Mon_LA.txt", "LA/tea_LA/tea_0320Tue_LA.txt", "LA/tea_LA/tea_0321Wed_LA.txt", "LA/tea_LA/tea_0322Thu_LA.txt", "LA/tea_LA/tea_0323Fri_LA.txt", "LA/tea_LA/tea_0324Sat_LA.txt", "LA/tea_LA/tea_0325Sun_LA.txt", "LA/tea_LA/tea_0326Mon_LA.txt", "LA/tea_LA/tea_0327Tue_LA.txt",
#"LA/water_LA/water_0319Mon_LA.txt", "LA/water_LA/water_0320Tue_LA.txt", "LA/water_LA/water_0321Wed_LA.txt", "LA/water_LA/water_0322Thu_LA.txt", "LA/water_LA/water_0323Fri_LA.txt", "LA/water_LA/water_0324Sat_LA.txt", "LA/water_LA/water_0325Sun_LA.txt", "LA/water_LA/water_0326Mon_LA.txt", "LA/water_LA/water_0327Tue_LA.txt",
#"LA/wine_LA/wine_0319Mon_LA.txt", "LA/wine_LA/wine_0320Tue_LA.txt", "LA/wine_LA/wine_0321Wed_LA.txt","LA/wine_LA/wine_0322Thu_LA.txt","LA/wine_LA/wine_0323Fri_LA.txt","LA/wine_LA/wine_0324Sat_LA.txt","LA/wine_LA/wine_0325Sun_LA.txt","LA/wine_LA/wine_0326Mon_LA.txt","LA/wine_LA/wine_0327Tue_LA.txt"]
# list_Filename = ["newyork/beer_newyork/beer_0319Mon_newyork.txt", "newyork/beer_newyork/beer_0320Tue_newyork.txt", "newyork/beer_newyork/beer_0321Wed_newyork.txt", "newyork/beer_newyork/beer_0322Thu_newyork.txt", "newyork/beer_newyork/beer_0323Fri_newyork.txt", "newyork/beer_newyork/beer_0324Sat_newyork.txt", "newyork/beer_newyork/beer_0325Sun_newyork.txt", "newyork/beer_newyork/beer_0326Mon_newyork.txt", "newyork/beer_newyork/beer_0327Tue_newyork.txt",
#"newyork/coffee_newyork/coffee_0319Mon_newyork.txt", "newyork/coffee_newyork/coffee_0320Thu_newyork.txt", "newyork/coffee_newyork/coffee_0321Wed_newyork.txt", "newyork/coffee_newyork/coffee_0322Thu_newyork.txt", "newyork/coffee_newyork/coffee_0323Fri_newyork.txt", "newyork/coffee_newyork/coffee_0324Sat_newyork.txt", "newyork/coffee_newyork/coffee_0325Sun_newyork.txt", "newyork/coffee_newyork/coffee_0326Mon_newyork.txt", "newyork/coffee_newyork/coffee_0327Tue_newyork.txt",
# "newyork/coke_n_cola_newyork/COLA_COKE_0319Mon_newyork.txt", "newyork/coke_n_cola_newyork/COLA_COKE_0320Tue_newyork.txt", "newyork/coke_n_cola_newyork/COLA_COKE_0321Wed_newyork.txt", "newyork/coke_n_cola_newyork/COLA_COKE_0322Thu_newyork.txt", "newyork/coke_n_cola_newyork/COLA_COKE_0323Fri_newyork.txt", "newyork/coke_n_cola_newyork/COLA_COKE_0324Sat_newyork.txt", "newyork/coke_n_cola_newyork/COLA_COKE_0325Sun_newyork.txt", "newyork/coke_n_cola_newyork/COLA_COKE_0326Mon_newyork.txt", "newyork/coke_n_cola_newyork/COLA_COKE_0327Tue_newyork.txt",
# "newyork/juice_newyork/juice_0319Mon_newyork.txt", "newyork/juice_newyork/juice_0320Tue_newyork.txt", "newyork/juice_newyork/juice_0321Wed_newyork.txt", "newyork/juice_newyork/juice_0322Thu_newyork.txt", "newyork/juice_newyork/juice_0323Fri_newyork.txt", "newyork/juice_newyork/juice_0324Sat_newyork.txt", "newyork/juice_newyork/juice_0325Sun_newyork.txt", "newyork/juice_newyork/juice_0326Mon_newyork.txt", "newyork/juice_newyork/juice_0327Tue_newyork.txt",
# "newyork/tea_newyork/tea_0319Mon_newyork.txt", "newyork/tea_newyork/tea_0320Tue_newyork.txt", "newyork/tea_newyork/tea_0321Wed_newyork.txt", "newyork/tea_newyork/tea_0322Thu_newyork.txt", "newyork/tea_newyork/tea_0323Fri_newyork.txt", "newyork/tea_newyork/tea_0324Sat_newyork.txt", "newyork/tea_newyork/tea_0325Sun_newyork.txt", "newyork/tea_newyork/tea_0326Mon_newyork.txt", "newyork/tea_newyork/tea_0327Thu_newyork.txt",
# "newyork/water_newyork/water_0319TMon_newyork.txt", "newyork/water_newyork/water_0320Tue_newyork.txt", "newyork/water_newyork/water_0321Wed_newyork.txt", "newyork/water_newyork/water_0322Thu_newyork.txt", "newyork/water_newyork/water_0323Fri_newyork.txt", "newyork/water_newyork/water_0324Sat_newyork.txt", "newyork/water_newyork/water_0325Sun_newyork.txt", "newyork/water_newyork/water_0326Mon_newyork.txt", "newyork/water_newyork/water_0327Tue_newyork.txt",
#"newyork/wine_newyork/wine_0319Mon_newyork.txt", "newyork/wine_newyork/wine_0320Tue_newyork.txt","newyork/wine_newyork/wine_0321Wed_newyork.txt","newyork/wine_newyork/wine_0322Thu_newyork.txt","newyork/wine_newyork/wine_0323Fri_newyork.txt","newyork/wine_newyork/wine_0324Sat_newyork.txt","newyork/wine_newyork/wine_0325Sun_newyork.txt","newyork/wine_newyork/wine_0326Mon_newyork.txt","newyork/wine_newyork/wine_0327Tue_newyork.txt"]
wf = open("LA_OUTPUT_week3.txt","w+")
SName = []
indicator = 0
for fina in range(len(list_Filename)):
indicator = indicator + 1
fff = read_file_name(list_Filename[fina])
TPN = readLines(fff[2])
if indicator == 1:
wf.write("Title total_words total_positive_words total_negative_words Tweets_num\n")
wf.write("%s %i %i %i %i\n" %(fff[0], TPN[0], TPN[1], TPN[2], TPN[3]))
# print(fff[2])
SName.append(fff[0])
# print(SName)
wf.close()
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
ef613d5f2cfe3b42bbeb213dd2f5ccdfec452315 | 17ca5bae91148b5e155e18e6d758f77ab402046d | /analysis_SWarp/CID1174/analysis_PSF_smaller/mcmc_QSO_example.py | 8cd84f1d159d472091840fdd7edbbd6ec8799eba | []
| no_license | dartoon/QSO_decomposition | 5b645c298825091c072778addfaab5d3fb0b5916 | a514b9a0ad6ba45dc9c3f83abf569688b9cf3a15 | refs/heads/master | 2021-12-22T19:15:53.937019 | 2021-12-16T02:07:18 | 2021-12-16T02:07:18 | 123,425,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,663 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 08:56:38 2018
@author: Dartoon
MCMC for CID1174
"""
import sys
sys.path.insert(0,'../../../py_tools')
import numpy as np
import matplotlib.pylab as plt
from matplotlib.colors import LogNorm
import astropy.io.fits as pyfits
from psfs_average import psf_ave
from flux_profile import QSO_psfs_compare, flux_profile
import glob
ID = 'CID1174'
psf_NO=7 # The number of the psf.
for i in range(psf_NO):
fitsFile = pyfits.open('PSF{0}.fits'.format(i))
PSF = fitsFile[0].data
if i == 0 :
psf_list = np.empty([psf_NO, PSF.shape[0], PSF.shape[1]])
psf_list[0] = PSF
else:
psf_list[i] = PSF
# PSFs= PSFs.append(PSF)
mask_list = glob.glob("PSF*.reg") # Read *.reg files in a list.
psf_final, psf_std=psf_ave(psf_list,mode = 'CI', not_count=(1,4,5),
mask_list=mask_list)
QSO_im = pyfits.getdata('{0}_cutout.fits'.format(ID))
# data specifics need to set up based on the data situation
background_rms = 0.04 # background noise per pixel (Gaussian)
exp_time = 2400. # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
numPix = len(QSO_im) # cutout pixel size
deltaPix = 0.13 # pixel size in arcsec (area per pixel = deltaPix**2)
fwhm = 0.1 # full width half max of PSF (only valid when psf_type='gaussian')
psf_type = 'PIXEL' # 'gaussian', 'pixel', 'NONE'
kernel_size = len(psf_final)
kernel = psf_final
#plt.matshow(psf_final, origin= 'low', norm=LogNorm())
#plt.colorbar()
#plt.show()
flux_profile(psf_final, center = (kernel_size/2, kernel_size/2), radius= kernel_size/2, ifplot=True, fits_plot=True)
kwargs_numerics = {'subgrid_res': 1, 'psf_subgrid': False}
kwargs_numerics.get('psf_error_map', False) #Turn on the PSF error map
## lens model choicers (lenstronomy requires the instances of them, but we can keep them empty)
#fixed_lens = [{}]
#kwargs_lens_init = [{}]
#kwargs_lens_sigma = [{}]
#kwargs_lower_lens = [{}]
#kwargs_upper_lens = [{}]
#
## lens light model choices (lenstronomy requires the instances of them, but we can keep them empty)
#fixed_lens_light = [{}]
#kwargs_lens_light_init = [{}]
#kwargs_lens_light_sigma = [{}]
#kwargs_lower_lens_light = [{}]
#kwargs_upper_lens_light = [{}]
# here are the options for the host galaxy fitting
fixed_source = []
kwargs_source_init = []
kwargs_source_sigma = []
kwargs_lower_source = []
kwargs_upper_source = []
# Disk component, as modelled by an elliptical Sersic profile
fixed_source.append({}) # we fix the Sersic index to n=1 (exponential)
kwargs_source_init.append({'R_sersic': 1., 'n_sersic': 1, 'e1': 0, 'e2': 0, 'center_x': 0, 'center_y': 0})
kwargs_source_sigma.append({'n_sersic_sigma': 0.5, 'R_sersic_sigma': 0.5, 'ellipse_sigma': 0.1, 'center_x_sigma': 0.1, 'center_y_sigma': 0.1})
kwargs_lower_source.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': 0.001, 'n_sersic': .5, 'center_x': -10, 'center_y': -10})
kwargs_upper_source.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': 10, 'n_sersic': 5., 'center_x': 10, 'center_y': 10})
## Buldge component, as modelled by a spherical Sersic profile
#fixed_source.append({'n_sersic': 4}) # we fix the Sersic index to n=4 (buldgy)
#kwargs_source_init.append({'R_sersic': .5, 'n_sersic': 4, 'center_x': 0, 'center_y': 0})
#kwargs_source_sigma.append({'n_sersic_sigma': 0.5, 'R_sersic_sigma': 0.3, 'center_x_sigma': 0.1, 'center_y_sigma': 0.1})
#kwargs_lower_source.append({'R_sersic': 0.001, 'n_sersic': .5, 'center_x': -10, 'center_y': -10})
#kwargs_upper_source.append({'R_sersic': 10, 'n_sersic': 5., 'center_x': 10, 'center_y': 10})
source_params = [kwargs_source_init, kwargs_source_sigma, fixed_source, kwargs_lower_source, kwargs_upper_source]
center_x = 0.0
center_y = 0.0
point_amp = 334.
fixed_ps = [{}]
kwargs_ps = [{'ra_image': [center_x], 'dec_image': [center_y], 'point_amp': [point_amp]}]
kwargs_ps_init = kwargs_ps
kwargs_ps_sigma = [{'pos_sigma': 0.01, 'pos_sigma': 0.01}]
kwargs_lower_ps = [{'ra_image': [-10], 'dec_image': [-10]}]
kwargs_upper_ps = [{'ra_image': [10], 'dec_image': [10]}]
ps_param = [kwargs_ps_init, kwargs_ps_sigma, fixed_ps, kwargs_lower_ps, kwargs_upper_ps]
kwargs_params = {'source_model': source_params,
'point_source_model': ps_param}
#==============================================================================
#Doing the QSO fitting
#==============================================================================
from lenstronomy.SimulationAPI.simulations import Simulation
SimAPI = Simulation()
data_class = SimAPI.data_configure(numPix, deltaPix, exp_time, background_rms)
psf_class = SimAPI.psf_configure(psf_type=psf_type, fwhm=fwhm, kernelsize=kernel_size, deltaPix=deltaPix, kernel=kernel)
data_class.update_data(QSO_im)
from lenstronomy.LightModel.light_model import LightModel
light_model_list = ['SERSIC_ELLIPSE']
lightModel = LightModel(light_model_list=light_model_list)
from lenstronomy.PointSource.point_source import PointSource
point_source_list = ['UNLENSED']
pointSource = PointSource(point_source_type_list=point_source_list)
### Make simulation:
from lenstronomy.ImSim.image_model import ImageModel
kwargs_numerics = {'subgrid_res': 1, 'psf_subgrid': False}
imageModel = ImageModel(data_class, psf_class, source_model_class=lightModel,
point_source_class=pointSource, kwargs_numerics=kwargs_numerics)
kwargs_model = { 'source_light_model_list': light_model_list,
'point_source_model_list': point_source_list
}
# numerical options and fitting sequences
kwargs_constraints = {'joint_center_source_light': True, # if set to True, all the components in the host galaxy will have a shared center
'fix_to_point_source_list': [True, True], # this results in a shared center of the host galaxy with the point source (quasar)
'num_point_source_list': [1]
}
kwargs_likelihood = {'check_bounds': True, #Set the bonds, if exceed, reutrn "penalty"
'source_marg': False, #In likelihood_module.LikelihoodModule -- whether to fully invert the covariance matrix for marginalization
}
kwargs_data = data_class.constructor_kwargs() # The "dec_at_xy_0" means the dec at the (0,0) point.
kwargs_psf = psf_class.constructor_kwargs()
kwargs_psf['psf_error_map'] = psf_std
image_band = [kwargs_data, kwargs_psf, kwargs_numerics]
multi_band_list = [image_band]
from lenstronomy.Workflow.fitting_sequence import FittingSequence
mpi = False # MPI possible, but not supported through that notebook.
# The Params for the fitting. kwargs_init: initial input. kwargs_sigma: The parameter uncertainty. kwargs_fixed: fixed parameters;
#kwargs_lower,kwargs_upper: Lower and upper limits.
fitting_seq = FittingSequence(multi_band_list, kwargs_model, kwargs_constraints, kwargs_likelihood, kwargs_params)
fitting_kwargs_list = [
{'fitting_routine': 'PSO', 'mpi': False, 'sigma_scale': 1., 'n_particles': 50,
'n_iterations': 50},
{'fitting_routine': 'MCMC', 'n_burn': 10, 'n_run': 20, 'walkerRatio': 50, 'mpi': False, ##Inputs to CosmoHammer:
#n_particles - particleCount; n_burn - burninIterations; n_run: sampleIterations (n_burn and n_run usually the same.); walkerRatio: walkersRatio.
'sigma_scale': .1}
]
import time
start_time = time.time()
lens_result, source_result, lens_light_result, ps_result, chain_list, param_list, samples_mcmc, param_mcmc, dist_mcmc = fitting_seq.fit_sequence(fitting_kwargs_list)
end_time = time.time()
print(end_time - start_time, 'total time needed for computation')
print('============ CONGRATULATION, YOUR JOB WAS SUCCESSFUL ================ ')
# let's plot the output of the PSO minimizer
from lenstronomy.Plots.output_plots import LensModelPlot
lensPlot = LensModelPlot(kwargs_data, kwargs_psf, kwargs_numerics, kwargs_model, lens_result, source_result,
lens_light_result, ps_result, arrow_size=0.02, cmap_string="gist_heat", high_res=5)
f, axes = plt.subplots(3, 3, figsize=(16, 16), sharex=False, sharey=False)
lensPlot.data_plot(ax=axes[0,0])
lensPlot.model_plot(ax=axes[0,1])
lensPlot.normalized_residual_plot(ax=axes[0,2], v_min=-6, v_max=6)
lensPlot.decomposition_plot(ax=axes[1,0], text='Source light', source_add=True, unconvolved=True)
lensPlot.decomposition_plot(ax=axes[1,1], text='Source light convolved', source_add=True)
lensPlot.decomposition_plot(ax=axes[1,2], text='All components convolved', source_add=True, lens_light_add=True, point_source_add=True)
lensPlot.subtract_from_data_plot(ax=axes[2,0], text='Data - Point Source', point_source_add=True)
lensPlot.subtract_from_data_plot(ax=axes[2,1], text='Data - host galaxy', source_add=True)
lensPlot.subtract_from_data_plot(ax=axes[2,2], text='Data - host galaxy - Point Source', source_add=True, point_source_add=True)
f.tight_layout()
#f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)
plt.show()
import corner
# here the (non-converged) MCMC chain of the non-linear parameters
if not samples_mcmc == []:
n, num_param = np.shape(samples_mcmc)
plot = corner.corner(samples_mcmc, labels=param_mcmc, show_titles=True)
# this is the linear inversion. The kwargs will be updated afterwards
image_reconstructed, _, _, _ = imageModel.image_linear_solve(kwargs_source=source_result, kwargs_ps=ps_result)
# flux count in point source
image_ps = imageModel.point_source(ps_result)
print np.sum(image_ps)
print ps_result
# for point sources, the fluxes in 'point_amp' are equivalent to the flux counts in the image.
# The only difference is the smaller cutout size in the image
# flux count in host galaxy
image_host = imageModel.source_surface_brightness(source_result)
print np.sum(image_host)
# if we only want the first component (disk in our case), we can do that
#image_disk = imageModel.source_surface_brightness(source_result, k=0) # Don't need k=0
#print np.sum(image_disk)
## and if we only want the second component (buldge in our case)
#image_buldge = imageModel.source_surface_brightness(source_result, k=1)
#print np.sum(image_buldge)
# to summarize
print("quasar-to-host galaxy ratio: ", np.sum(image_ps)/np.sum(image_host))
#print("buldge-to-disk ratio:", np.sum(image_buldge)/np.sum(image_disk))
from lenstronomy.Workflow.parameters import Param
param = Param(kwargs_model, kwargs_constraints, kwargs_fixed_source=fixed_source, kwargs_fixed_ps=fixed_ps)
mcmc_new_list = []
labels_new = [r"Quasar flux", r"host_flux", r"source_x", r"source_y"]
for i in range(len(samples_mcmc)/10):
# transform the parameter position of the MCMC chain in a lenstronomy convention with keyword arguments #
kwargs_lens_out, kwargs_light_source_out, kwargs_light_lens_out, kwargs_ps_out = param.getParams(samples_mcmc[i+ len(samples_mcmc)/10*9])
image_reconstructed, _, _, _ = imageModel.image_linear_solve(kwargs_source=kwargs_light_source_out, kwargs_ps=kwargs_ps_out)
image_ps = imageModel.point_source(kwargs_ps_out)
flux_quasar = np.sum(image_ps)
image_disk = imageModel.source_surface_brightness(kwargs_light_source_out, k=0)
flux_disk = np.sum(image_disk)
source_x = kwargs_ps_out[0]['ra_image']
source_y = kwargs_ps_out[0]['dec_image']
# image_buldge = imageModel.source_surface_brightness(kwargs_light_source_out, k=1)
# flux_buldge = np.sum(image_buldge)
kwargs_ps_out
mcmc_new_list.append([flux_quasar, flux_disk, source_x, source_y])
plot = corner.corner(mcmc_new_list, labels=labels_new, show_titles=True) | [
"[email protected]"
]
| |
9326fc2d0a4f51d1f43a77f1d261bb416676505a | d4f9a423353fe79cf8824a8407690655fc1379fe | /django/virtualenv/django/lib/python2.7/site-packages/django/core/mail/message.py | fb15a8c29fda6c85db5c629660fd124015ca6d5e | []
| no_license | 007root/python | 9ab62d433d17c8bb57622fd1d24a3b17cb3d13ad | 16bf729e5824555eab0c9de61ce6b8b055551bd1 | refs/heads/master | 2020-06-23T09:43:05.308328 | 2020-06-09T08:31:20 | 2020-06-09T08:31:20 | 74,656,519 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,657 | py | from __future__ import unicode_literals
import mimetypes
import os
import random
import sys
import time
from email import (charset as Charset, encoders as Encoders,
message_from_string, generator)
from email.message import Message
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.header import Header
from email.utils import formatdate, getaddresses, formataddr, parseaddr
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_text
from django.utils import six
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
pass
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<[email protected]>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
try:
pid = os.getpid()
except AttributeError:
# No getpid() in Jython, for example.
pid = 1
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
idhost = DNS_NAME
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
return msgid
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = set([
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
])
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_text(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding)
for addr in getaddresses((val,)))
else:
val = Header(val, encoding).encode()
else:
if name.lower() == 'subject':
val = Header(val).encode()
return str(name), val
def sanitize_address(addr, encoding):
if isinstance(addr, six.string_types):
addr = parseaddr(force_text(addr))
nm, addr = addr
# This try-except clause is needed on Python 3 < 3.2.4
# http://bugs.python.org/issue14291
try:
nm = Header(nm, encoding).encode()
except UnicodeEncodeError:
nm = Header(nm, 'utf-8').encode()
try:
addr.encode('ascii')
except UnicodeEncodeError: # IDN
if '@' in addr:
localpart, domain = addr.split('@', 1)
localpart = str(Header(localpart, encoding))
domain = domain.encode('idna').decode('ascii')
addr = '@'.join([localpart, domain])
else:
addr = Header(addr, encoding).encode()
return formataddr((nm, addr))
class MIMEMixin():
def as_string(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = six.StringIO()
g = generator.Generator(fp, mangle_from_=False)
if six.PY2:
g.flatten(self, unixfrom=unixfrom)
else:
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
if six.PY2:
as_bytes = as_string
else:
def as_bytes(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as bytes.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_bytes() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = six.BytesIO()
g = generator.BytesGenerator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
def __setitem__(self, name, val):
# message/rfc822 attachments must be ASCII
name, val = forbid_multi_line_headers(name, val, 'ascii')
MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
def __init__(self, text, subtype, charset):
self.encoding = charset
if charset == 'utf-8':
# Unfortunately, Python doesn't support setting a Charset instance
# as MIMEText init parameter (http://bugs.python.org/issue16324).
# We do it manually and trigger re-encoding of the payload.
MIMEText.__init__(self, text, subtype, None)
del self['Content-Transfer-Encoding']
# Workaround for versions without http://bugs.python.org/issue19063
if (3, 2) < sys.version_info < (3, 3, 4):
payload = text.encode(utf8_charset.output_charset)
self._payload = payload.decode('ascii', 'surrogateescape')
self.set_charset(utf8_charset)
else:
self.set_payload(text, utf8_charset)
self.replace_header('Content-Type', 'text/%s; charset="%s"' % (subtype, charset))
else:
MIMEText.__init__(self, text, subtype, charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
assert not isinstance(to, six.string_types), '"to" argument must be a list or tuple'
self.to = list(to)
else:
self.to = []
if cc:
assert not isinstance(cc, six.string_types), '"cc" argument must be a list or tuple'
self.cc = list(cc)
else:
self.cc = []
if bcc:
assert not isinstance(bcc, six.string_types), '"bcc" argument must be a list or tuple'
self.bcc = list(bcc)
else:
self.bcc = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(self.to))
if self.cc:
msg['Cc'] = ', '.join(self.cc)
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
msg['Message-ID'] = make_msgid()
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return self.to + self.cc + self.bcc
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content is None
assert mimetype is None
self.attachments.append(filename)
else:
assert content is not None
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
with open(path, 'rb') as f:
content = f.read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
If the mimetype is message/rfc822, content may be an
email.Message or EmailMessage object, as well as a str.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(content, subtype, encoding)
elif basetype == 'message' and subtype == 'rfc822':
# Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
# must not be base64 encoded.
if isinstance(content, EmailMessage):
# convert content into an email.Message first
content = content.message()
elif not isinstance(content, Message):
# For compatibility with existing code, parse the message
# into an email.Message object if it is not one already.
content = message_from_string(content)
attachment = SafeMIMEMessage(content, subtype)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode('ascii')
except UnicodeEncodeError:
if six.PY2:
filename = filename.encode('utf-8')
filename = ('utf-8', '', filename)
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None,
cc=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers, cc)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
| [
"[email protected]"
]
| |
57bd1d895445c2982d60ae8f2bcdf3f21b6857bb | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /database/exec_command_vo.py | dc9926f6c0697fd34e623775fb3e01dffcfd0a23 | []
| no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,508 | py | #!/usr/bin/python
class ExecCommandVo:
    """Value object wrapping one result row selected with get_select_query()."""
id = 0
name = ""
command_name = ""
parameter_num = 0
jmespath = ""
is_output = False
type = ""
execService_id = 0
description = ""
require_parameters = ""
def __init__(self, row):
self.id = int(row[0])
self.name = row[1]
self.command_name = row[2]
self.parameter_num = int(row[3])
self.jmespath = row[4]
self.is_output = bool(row[5])
self.type = row[6]
self.execService_id = int(row[7])
self.description = row[8]
self.require_parameters = row[9]
@classmethod
def get_select_query(cls):
return "id, name, command_name, parameter_num, jmespath, is_output, type, execService_id, description, require_parameters"
def getName(self):
return self.name
def getId(self):
return self.id
def getExecServiceId(self):
return self.execService_id
def getType(self):
return self.type
def getCommandName(self):
return self.command_name
def getParameterNum(self):
return self.parameter_num
def getJmespath(self):
return self.jmespath
def getDescription(self):
return self.description
def getRequireParameters(self):
return self.require_parameters
def __str__(self):
return "{} => {} {} {} {} {} {}".format(self.id, self.name, self.command_name, self.parameter_num, self.is_output, self.type, self.execService_id)
| [
"[email protected]"
]
| |
79489b9bd967a1add2a9b89db6a08cda65f6fa10 | 9889e7fd73314382fb2f9e8f63d92cf3254b75fb | /ThirdParty/Peridigm/test/verification/Compression_3x1x1_InfluenceFunction/np3/Compression_3x1x1_InfluenceFunction.py | d5676e6cfe92fcf1d1b4d2eb8cd6139f5825624c | []
| no_license | bbanerjee/ParSim | 0b05f43cff8e878658dc179b4a604eabd873f594 | 87f87816b146f40013a5e6648dfe20f6d2d002bb | refs/heads/master | 2023-04-27T11:30:36.252023 | 2023-04-13T22:04:50 | 2023-04-13T22:04:50 | 13,608,512 | 16 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,903 | py | #! /usr/bin/env python
import sys
import os
import re
from subprocess import Popen
test_dir = "Compression_3x1x1_InfluenceFunction/np3"
base_name = "Compression_3x1x1_InfluenceFunction"
if __name__ == "__main__":
result = 0
# log file will be dumped if verbose option is given
verbose = False
if "-verbose" in sys.argv:
verbose = True
# change to the specified test directory
os.chdir(test_dir)
# open log file
log_file_name = base_name + ".log"
if os.path.exists(log_file_name):
os.remove(log_file_name)
logfile = open(log_file_name, 'w')
# remove old output files, if any
# use regular expression module since differentiating
# between gold files and old output files can be tricky
files_to_remove = base_name + ".e"
for file in os.listdir(os.getcwd()):
if file in files_to_remove:
os.remove(file)
# run Peridigm
command = ["mpiexec", "-np", "3", "../../../../src/Peridigm", "../"+base_name+".xml"]
p = Popen(command, stdout=logfile, stderr=logfile)
return_code = p.wait()
if return_code != 0:
result = return_code
# compare output files against gold files
command = ["../../../../scripts/epu", "-p", "3", base_name]
p = Popen(command, stdout=logfile, stderr=logfile)
return_code = p.wait()
if return_code != 0:
result = return_code
command = ["../../../../scripts/exodiff", \
"-stat", \
"-f", \
"../"+base_name+".comp", \
base_name+".e", \
"../"+base_name+"_gold.e"]
p = Popen(command, stdout=logfile, stderr=logfile)
return_code = p.wait()
if return_code != 0:
result = return_code
logfile.close()
# dump the output if the user requested verbose
if verbose == True:
os.system("cat " + log_file_name)
sys.exit(result)
| [
"[email protected]"
]
| |
b1e479091df62ec24625058db05cbe6459f76496 | a222e2999251ba7f0d62c428ba8cc170b6d0b3b7 | /Company_Con_past/DDCC_2016_Qualifing/C.py | bcf410cbd0806e1ff63d727adc1bd31342b6811b | [
"MIT"
]
| permissive | yosho-18/AtCoder | 3e1f3070c5eb44f154c8104fbd5449f47446ce14 | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | refs/heads/master | 2020-06-02T10:21:29.458365 | 2020-05-29T12:40:48 | 2020-05-29T12:40:48 | 188,795,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,609 | py | import sys
# import math, string, itertools, fractions, heapq, collections, re, array, bisect, copy, functools, random
# from collections import deque, defaultdict, Counter; from heapq import heappush, heappop
# from itertools import permutations, combinations, product, accumulate, groupby
# from bisect import bisect_left, bisect_right, insort_left, insort_right
# from operator import itemgetter as ig
sys.setrecursionlimit(10 ** 7)
inf = 10 ** 20; INF = float("INF"); ans = 0; tmp = 0; ansli = []; tmpli = []; candili = []; mod = 10 ** 9 + 7
# dd = [(-1, 0), (0, 1), (1, 0), (0, -1)]; ddn = dd + [(-1, 1), (1, 1), (1, -1), (-1, -1)]; ddn9 = ddn + [(0, 0)]
"""for dx, dy in dd:
nx = j + dx; ny = i + dy
if 0 <= nx < w and 0 <= ny < h:"""
def wi(): return list(map(int, sys.stdin.readline().split()))
def wip(): return [int(x) - 1 for x in sys.stdin.readline().split()]#WideIntPoint
def ws(): return sys.stdin.readline().split()
def i(): return int(sys.stdin.readline())
def s(): return input()
def hi(n): return [i() for _ in range(n)]
def hs(n): return [s() for _ in range(n)]#HeightString
def mi(n): return [wi() for _ in range(n)]#MatrixInt
def mip(n): return [wip() for _ in range(n)]
def ms(n): return [ws() for _ in range(n)]
n, c = wi()
L = hi(n)
L.sort()
K = sorted(L)
k = 0
while True:
for l in range(len(L) - 1, -1, -1):
if k == l:
ans += 1
print(ans)
exit()
if k > l:
print(ans)
exit()
if L[l] + K[k] + 1 <= c:
ans += 1
k += 1
else:
ans += 1
| [
"[email protected]"
]
| |
8ecd323ed8eab418c79c9efc15c63141b501ee8c | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/Zprime/ZprimeToBBbar_M_500_TuneCUETP8M1_13TeV_pythia8_cfi.py | b784115f04b91be013aae77247be97e4c644b5bc | []
| no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,333 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(5.511e-5),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'NewGaugeBoson:ffbar2gmZZprime = on',
'Zprime:gmZmode = 3', # only pure Z' contribution
'32:m0 = 500',
'32:onMode = off', # switch off all of the Z' decay
'32:onIfAny = 5', # switch on the Z'->BBbar
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
]
| |
15951dadb81a218712916a676485e8860d3655e9 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2635/60636/251230.py | 272bd556cb478f51d8723a20d1005270dd20cb0d | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | def f(x):
count_2=0
count_5=0
for i in range(1,x+1):
a=i
while(a%2==0):
a=a/2
count_2=count_2+1
b=i
while(b%5==0):
b=b/5
count_5=count_5+1
return min(count_2,count_5)
res=[]
k=int(input())
i=0
while(True):
if(f(i)==k):
        res.append(i)  # record x with f(x) == k; printing the (still empty) list here was a bug
elif(f(i)>k):
break
i=i+1
print(len(res)) | [
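# Example: f(x) is the number of trailing zeroes of x!, e.g. f(5) == 1 because 5! == 120,
# so for k == 1 the loop collects x in {5, 6, 7, 8, 9} and the final line prints 5.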
"[email protected]"
]
| |
05a420e0ca727d9c90a5299c4100aa1377a32dac | 35b6013c1943f37d1428afd2663c8aba0a02628d | /monitoring/opencensus/main.py | 0330f1fd136e2e2ce6687901f2b6708829c58ee9 | [
"Apache-2.0"
]
| permissive | GoogleCloudPlatform/python-docs-samples | d2a251805fbeab15d76ed995cf200727f63f887d | 44e819e713c3885e38c99c16dc73b7d7478acfe8 | refs/heads/main | 2023-08-28T12:52:01.712293 | 2023-08-28T11:18:28 | 2023-08-28T11:18:28 | 35,065,876 | 7,035 | 7,593 | Apache-2.0 | 2023-09-14T20:20:56 | 2015-05-04T23:26:13 | Jupyter Notebook | UTF-8 | Python | false | false | 4,753 | py | """
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import random
import time
# [START monitoring_sli_metrics_opencensus_setup]
from flask import Flask
from opencensus.ext.prometheus import stats_exporter as prometheus
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module
from prometheus_flask_exporter import PrometheusMetrics
# [END monitoring_sli_metrics_opencensus_setup]
# set up measures
# [START monitoring_sli_metrics_opencensus_measure]
m_request_count = measure_module.MeasureInt(
"python_request_count", "total requests", "requests"
)
m_failed_request_count = measure_module.MeasureInt(
"python_failed_request_count", "failed requests", "requests"
)
m_response_latency = measure_module.MeasureFloat(
"python_response_latency", "response latency", "s"
)
# [END monitoring_sli_metrics_opencensus_measure]
# set up stats recorder
stats_recorder = stats_module.stats.stats_recorder
# [START monitoring_sli_metrics_opencensus_view]
# set up views
latency_view = view_module.View(
"python_response_latency",
"The distribution of the latencies",
[],
m_response_latency,
aggregation_module.DistributionAggregation(
[0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]
),
)
request_count_view = view_module.View(
"python_request_count",
"total requests",
[],
m_request_count,
aggregation_module.CountAggregation(),
)
failed_request_count_view = view_module.View(
"python_failed_request_count",
"failed requests",
[],
m_failed_request_count,
aggregation_module.CountAggregation(),
)
# register views
def register_all_views(view_manager: stats_module.stats.view_manager) -> None:
view_manager.register_view(latency_view)
view_manager.register_view(request_count_view)
view_manager.register_view(failed_request_count_view)
# [END monitoring_sli_metrics_opencensus_view]
# set up exporter
# [START monitoring_sli_metrics_opencensus_exporter]
def setup_openCensus_and_prometheus_exporter() -> None:
stats = stats_module.stats
view_manager = stats.view_manager
exporter = prometheus.new_stats_exporter(prometheus.Options(namespace="oc_python"))
view_manager.register_exporter(exporter)
register_all_views(view_manager)
# [END monitoring_sli_metrics_opencensus_exporter]
app = Flask(__name__)
metrics = PrometheusMetrics(app)
@app.route("/")
def homePage() -> (str, int):
# start timer
# [START monitoring_sli_metrics_opencensus_latency]
start_time = time.perf_counter()
# [START monitoring_sli_metrics_opencensus_counts]
mmap = stats_recorder.new_measurement_map()
# [END monitoring_sli_metrics_opencensus_latency]
# count request
mmap.measure_int_put(m_request_count, 1)
# fail 10% of the time
# [START monitoring_sli_metrics_opencensus_latency]
if random.randint(0, 100) > 90:
# [END monitoring_sli_metrics_opencensus_latency]
mmap.measure_int_put(m_failed_request_count, 1)
# [END monitoring_sli_metrics_opencensus_counts]
# [START monitoring_sli_metrics_opencensus_latency]
response_latency = time.perf_counter() - start_time
mmap.measure_float_put(m_response_latency, response_latency)
# [START monitoring_sli_metrics_opencensus_counts]
tmap = tag_map_module.TagMap()
mmap.record(tmap)
# [END monitoring_sli_metrics_opencensus_latency]
return ("error!", 500)
# [END monitoring_sli_metrics_opencensus_counts]
else:
random_delay = random.randint(0, 5000) / 1000
# delay for a bit to vary latency measurement
time.sleep(random_delay)
# record latency
response_latency = time.perf_counter() - start_time
mmap.measure_float_put(m_response_latency, response_latency)
tmap = tag_map_module.TagMap()
mmap.record(tmap)
return ("home page", 200)
if __name__ == "__main__":
setup_openCensus_and_prometheus_exporter()
app.run(port=8080)
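# Local usage sketch (relies on the defaults of the libraries imported above): run this
# module, generate traffic with `curl localhost:8080/`, and scrape the request count,
# failure count and latency distribution from the Prometheus exporter registered in
# setup_openCensus_and_prometheus_exporter().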
| [
"[email protected]"
]
| |
37aa35cf0f329661e957df6318327be084e1a169 | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/gui/shared/fortifications/fort_listener.py | 32a43db5d6dcb68cebf92ebad77403b2096c27e1 | []
| no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,594 | py | # 2016.11.19 19:52:40 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/fortifications/fort_listener.py
from gui.shared.ClanCache import g_clanCache
from gui.shared.fortifications import interfaces
class fortProviderProperty(property):
def __get__(self, obj, objType = None):
return g_clanCache.fortProvider
class fortCtrlProperty(property):
def __get__(self, obj, objType = None):
provider = g_clanCache.fortProvider
ctrl = None
if provider:
ctrl = provider.getController()
return ctrl
class fortStateProperty(property):
def __get__(self, obj, objType = None):
provider = g_clanCache.fortProvider
state = None
if provider:
state = provider.getState()
return state
class FortListener(interfaces.IFortListener):
@fortProviderProperty
def fortProvider(self):
return None
@fortCtrlProperty
def fortCtrl(self):
return interfaces.IFortController()
@fortStateProperty
def fortState(self):
return None
def startFortListening(self):
provider = self.fortProvider
if provider:
provider.addListener(self)
def stopFortListening(self):
provider = self.fortProvider
if provider:
provider.removeListener(self)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\fortifications\fort_listener.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:52:40 Střední Evropa (běžný čas)
| [
"[email protected]"
]
| |
13659523e15ba66adc96db6742268b1e2ea4bd47 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/OpenGL/GLX/OML/sync_control.py | 904b62014908c17354efdbca4c660238438fba59 | [
"MIT"
]
| permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 748 | py | '''OpenGL extension OML.sync_control
This module customises the behaviour of the
OpenGL.raw.GLX.OML.sync_control to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OML/sync_control.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.OML.sync_control import *
from OpenGL.raw.GLX.OML.sync_control import _EXTENSION_NAME
def glInitSyncControlOML():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | [
"[email protected]"
]
| |
93a7e272213ba1df834a42e3302f2ce4cc6c579b | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/programa-stic_barf-project/barf-project-master/barf/core/dbg/gdbdebugger.py | 911aad2625d2131efb29b070c98b6fbfe34d3b30 | []
| no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 3,420 | py | # Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""GDB Debugger Interface.
"""
import gdb
from pybfd.bfd import Bfd
from barf.core.bi import Memory
from barf.core.dbg.debugger import Debugger
# TODO: See how to get this information from gdb.
def get_section_text_limits(filename):
"""Get setion .text limits.
"""
bfd = Bfd(filename)
section_name = ".text"
section = bfd.sections.get(section_name)
section_start = section.vma
section_end = section.vma + len(section.content) - 1
bfd.close()
return section_start, section_end
class GDBDebugger(Debugger):
"""GDB Debugger Interface.
"""
def __init__(self):
super(GDBDebugger, self).__init__()
def get_memory(self):
"""Get memory.
"""
inf = gdb.selected_inferior()
return Memory(inf.read_memory, inf.write_memory)
def get_architecture(self):
"""Get architecture.
"""
return "x86"
def get_registers(self):
"""Get registers.
"""
registers32 = ["eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
"eip"]
regs = {}
for reg in registers32:
regs[reg] = int(long(gdb.parse_and_eval("$" + reg)) & 0xffffffff)
return regs
def get_flags(self):
"""Get flags.
"""
flags32 = ["af", "cf", "of", "pf", "sf", "zf"]
eflags = str(gdb.parse_and_eval("$eflags"))[2:-2].lower().split(" ")
flags = {}
for flag in flags32:
if flag in eflags:
flags[flag] = 0x1
else:
flags[flag] = 0x0
return flags
def get_current_file(self):
"""Get current file name.
"""
return gdb.current_progspace().filename
def get_section_text_limits(self):
"""Get section .text limits.
"""
        # The module-level get_section_text_limits() above returns (start, end);
        # get_section_text() is not defined in this module.
        start, end = get_section_text_limits(self.get_current_file())
        self._section_text_start = start
        self._section_text_end = end
return self._section_text_start, self._section_text_end
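# Usage sketch (only meaningful inside a gdb session, where the `gdb` module exists):
#   (gdb) python from barf.core.dbg.gdbdebugger import GDBDebugger
#   (gdb) python dbg = GDBDebugger(); print(dbg.get_registers()["eip"])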
| [
"[email protected]"
]
| |
a59cece9c7251f5a52b3709ee9e785dceec4b697 | 2923b9f58e6a143a3e070169612165585c301def | /LA/gp_rupture_test/LA/gp_rupture_test/gp_021219_Scott_7.35_noplas_GPU_2hz/plot_pgv.py | 75c4de52fe39fe8ad4f2230c474f0b4f9aea77a9 | []
| no_license | hzfmer/summit_work_021421 | 16536dd716519bc9244da60007b9061ef5403429 | 6981b359fefb2af22e0bea6c47511de16cad22bd | refs/heads/master | 2023-03-11T15:34:36.418971 | 2021-02-05T23:22:10 | 2021-02-05T23:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,962 | py | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import os
import re
def discrete_cmap(N, base_cmap=None):
"""Create an N-bin discrete colormap from the specified input map"""
# Note that if base_cmap is a string or None, you can simply do
# return plt.cm.get_cmap(base_cmap, N)
# The following works for string, None, or a colormap instance:
base = plt.cm.get_cmap(base_cmap)
color_list = base(np.linspace(0, 1, N))
cmap_name = base.name + str(N)
return base.from_list(cmap_name, color_list, N)
M = re.findall("\d+\.\d+", os.getcwd().split('/')[-1])[0]
params = {'6.35': (354, 212), '7.35': (980, 240), '8.45': (5116, 220)}
nx, ny = 6320, 4200
nxf, nzf = params[M]
dx = 0.1
with open('cities_name.txt', 'r') as f_name:
cities_name = f_name.readlines()
cities_idx = dx * np.loadtxt('cities.idx', dtype='int')
vx = np.fromfile('./peak_velocity_H_01.0Hz.bin', dtype='float32').reshape((ny, nx))
vy = np.fromfile('./peak_velocity_Z_01.0Hz.bin', dtype='float32').reshape((ny, nx))
vx = np.flip(vx, 0)
vy = np.flip(vy, 0)
trace = np.fromfile('./fault_idx.bin', dtype='int32').reshape((nzf, nxf, 2))
fig, ax = plt.subplots(figsize=(6,6))
c = ax.imshow(vx, extent=[0, nx * dx, 0, ny * dx], cmap=discrete_cmap(20, 'RdBu_r'),
norm=LogNorm(vmin=0.01, vmax=3))
# c2 = ax[1].imshow(vy, extent=[0, nx * dx, 0, ny * dx], cmap='RdBu_r', vmax=10)
ax.scatter(trace[0, :, 0] * dx, trace[0, :, 1] * dx, 50, 'g', marker='*')
cb = plt.colorbar(c, ax=ax, format='%.2f', label='PGV (m/s)', orientation='horizontal')
ax.scatter(cities_idx[:, 0], cities_idx[:, 1], 20, 'w', marker='^')
for i in range(len(cities_name)):
ax.text(cities_idx[i, 0] - 45, cities_idx[i, 1] - 20, cities_name[i].strip('\n'), color='b')
plt.tight_layout()
ax.set_xlabel('X (km)')
ax.set_ylabel('Y (km)')
fig.savefig(f"PGV_with_trace_{M}.png", dpi=600, bbox_inches='tight', pad_inches=0.05)
| [
"[email protected]"
]
| |
18f2373d574ce875cbf52e046f87fe869a7d1f80 | ea1d88d99e854ceb7f5620bd371349acb51f6607 | /SimulationFramework/ClassFiles/genesisBeamFile.py | 4a88c143b9af2e2863297c6d9ff314de21f1aeef | []
| no_license | VELA-CLARA-software/SimFramed | 7dee4efc86531662495eed1bf2a3e9ec8287b640 | b647590f8cb87ea2bffed6733012b6e9141032bb | refs/heads/master | 2022-12-07T05:13:46.486342 | 2020-08-10T13:52:13 | 2020-08-10T13:52:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,394 | py | import os, errno, sys, socket
import glob
import numpy as np
import random
from scipy.constants import c
sys.path.append(os.path.abspath(__file__+'../../../../../OCELOT/'))
sys.path.append(os.path.abspath(__file__+'../../../'))
# print sys.path
from ocelot.S2E_STFC import FEL_simulation_block
from ocelot.adaptors.genesis import generate_input, get_genesis_launcher, run_genesis, rematch_edist, edist2beam
from ocelot.gui.genesis_plot import fwhm3
from ocelot.common.math_op import *
from ocelot.cpbd.beam import Twiss
import SimulationFramework.Modules.read_beam_file as rbf
from SimulationFramework.Modules.optimiser import optimiser
opt = optimiser()
from SimulationFramework.Modules.constraints import constraintsClass
import matplotlib.pyplot as plt
import time
import csv
import multiprocessing
from scoop import futures
from deap import base, creator, tools, algorithms
from copy import copy
import SimulationFramework.ClassFiles.runElegant as runEle
from shutil import copyfile
import argparse
# from genesis_launcher_client import genesis_client
# gclient = genesis_client()
parser = argparse.ArgumentParser(description='Run Elegant + Genesis')
parser.add_argument('-g', '--gaussian', default=False)
parser.add_argument('-s', '--set', default=False)
parser.add_argument('-v', '--vbc', default=False)
parser.add_argument('-p', '--postinjector', default=True)
class FEL_sim(FEL_simulation_block.FEL_simulation_block):
def __init__(self, initial_data, alphax=-0.189, betax=3.76, alphay=0, betay=1.44, **kwargs):
print ('initial_data = ', (initial_data))
super(FEL_sim,self).__init__((initial_data),**kwargs)
self.alphax = alphax
self.betax = betax
self.alphay = alphay
self.betay = betay
def convert_HDF5_edist(self, inp, filename='CLA-FMS-APER-02.hdf5', center=True):
from ocelot.adaptors.genesis import GenesisElectronDist
inp_out = inp
beam = rbf.beam()
beam.read_HDF5_beam_file(filename)
# print 'beta_x = ', beam.beta_x, ' alpha_x = ', beam.alpha_x
# print 'beta_y = ', beam.beta_y, ' alpha_y = ', beam.alpha_y
beam.rematchXPlane(beta=self.betax, alpha=self.alphax)
beam.rematchYPlane(beta=self.betay, alpha=self.alphay)
# print 'beta_y = ', beam.beta_y, ' alpha_y = ', beam.alpha_y
# print 'beta_x = ', beam.beta_x, ' alpha_x = ', beam.alpha_x
edist = GenesisElectronDist()
edist.x = beam.x
edist.y = beam.y
# edist.t = -(adist[:, 2] - np.mean(adist[:, 2])) / speed_of_light # long position normalized to 0 and converted to time #HMCC add -
edist.t = beam.t
edist.t = edist.t - np.mean(edist.t)# - 1.80e-13
edist.xp = beam.xp
edist.yp = beam.yp
edist.cp = beam.cp
edist.g = beam.gamma - np.mean(beam.gamma) + 1000/0.511
edist.part_charge = abs(self.startcharge)*1e-12 / len(beam.x)
print ('self.startcharge = ', self.startcharge)
self.bunch_length = np.std(beam.t)
if center:
edist.x -= np.mean(edist.x)
edist.y -= np.mean(edist.y)
edist.xp -= np.mean(edist.xp)
edist.yp -= np.mean(edist.yp)
setattr(edist,'filePath',getattr(self,'file_pout')+'read_edist_astra')
setattr(inp_out,'edist',edist)
return inp_out, beam
def GEN_simul_preproc(self, A_input, i_aft=0, dirno=1, startcharge=250):
self.startcharge = startcharge
if not self.file_pout.endswith('/'):
self.file_pout=self.file_pout+'/'
#print('\n\n\nis it working?\n\n')
inp_arr = []
A_bbeam = ['rxbeam','rybeam','emitx','emity','alphax','alphay','xbeam','ybeam','pxbeam','pybeam']
A_simul = ['alignradf','npart','ncar','zsep','delz','dmpfld','fbess0','dgrid','rmax0','xkx','xky','iwityp',
'nptr','lbc','zrayl','zstop','zwaist','delgam','xlamd','nscr','nscz','curpeak',
'iphsty','nharm','curlen','nbins','gamma0','isravg','isrsig','eloss','version',
'multconv','imagl','convharm','idril','ffspec','ndcut','ibfield','nslice','ntail',
'ippart','ispart','ipradi','isradi']
A_td = ['itdp','prad0','shotnoise']
A_und = ['quadd', 'quadf','fl','dl','drl','nsec','nwig','aw0', 'awd']
# print('++++ Output Path {0} ++++++'.format(self.file_pout))
# Setting the number of noise realisations and scan (over quads or different parameters)
if (self.i_scan ==0):
s_scan = range(1)
num = self.stat_run
run_ids = range(0,num)
# print('++++++++ No scan ++++++++++')
elif (self.i_scan !=0):
if (self.parameter in A_und):
run_ids= range(1)
if self.parameter !='aw0':
s_scan = range(int(self.init),int(self.end),int(np.ceil((self.end-self.init)/(self.n_scan))))
else:
s_scan = np.linspace(self.init,self.end,self.n_scan)
# print('++++ Quad scan, parameter {0} ++++++'.format(self.parameter))
elif (self.parameter=='xlamds'):
run_ids= range(1)
s_scan = np.linspace(self.init,self.end,self.n_scan)
# print('++++ Quad scan, parameter {0} ++++++'.format(self.parameter))
else:
s_scan = np.linspace(self.init,self.end,self.n_scan)
num = self.stat_run
run_ids = range(0,num)
# print('++++ Number of noise realisations {0} ++++++'.format(num))
# setting the undulator design( Magnetic Lattice)
A_undulator = self.undulator_design(A_input)
# Fill in the beam object
A_beam = self.BeamDefinition(A_input)
if (getattr(A_input,'itdp')==0):
# print('++++ Steady State run +++++')
i_tdp = False
elif (getattr(A_input,'itdp')==1):
i_tdp = True
# Generate input object
inp = generate_input(A_undulator['Undulator Parameters'],A_beam,itdp=i_tdp)
# Overwrite the simulation attributes of the input object with the ones defined in the input file
for key in A_input.__dict__:
if (key in A_simul) or (key in A_und) or (key in A_td) or (key =='xlamds') or (key == 'f1st') or (key == 'nslice') or (key == 'ntail'):
setattr(inp,key, getattr(A_input,key))
for key in ['edist','beam','dfl']:
if getattr(A_input,key)!=None:
setattr(inp,key,getattr(A_input,key))
# Set up some input parameters
if getattr(inp,'itdp')==0:
setattr(inp,'type','steady')
else:
setattr(inp,'type','tdp')
setattr(inp, 'awd', float(getattr(inp, 'aw0')))
# idump attribute
if (getattr(self,'idump')) == 1:
setattr(inp,'idump',1)
setattr(inp,'idmpfld',1)
# Existent dist or beam file (if exists)
if (getattr(self,'i_edist') == 1) and (hasattr(self,'file_edist')):
inp=self.GEN_existent_beam_dist_dpa_rad(inp,'edist')
elif (getattr(self,'i_beam') == 1) and (hasattr(self,'file_beam')):
#print inp.__dict__.keys()
inp=self.GEN_existent_beam_dist_dpa_rad(inp,'beam')
#print inp.__dict__.keys()
elif (getattr(self,'i_rad') == 1) and (hasattr(self,'file_rad')):
inp=self.GEN_existent_beam_dist_dpa_rad(inp,'rad')
elif (getattr(self,'i_dpa') == 1) and (hasattr(self,'file_dpa')):
inp=self.GEN_existent_beam_dist_dpa_rad(inp,'dpa')
elif (getattr(self,'i_dfl') == 1) and (hasattr(self,'file_dfl')):
inp=self.GEN_existent_beam_dist_dpa_rad(inp,'dfl')
else:
pass
# print('++++ No edist or beam or dpa or rad file available ++++++')
# print inp.beam
# Read HDF5 file.
if hasattr(self,'i_HDF5') and getattr(self,'i_HDF5')==1 and hasattr(self,'HDF5_file'):
inp, beam = self.convert_HDF5_edist(inp, getattr(self,'HDF5_file'))
setattr(inp,'beam',None)
gamma = np.median(inp.edist.g)
# setattr(inp,'xlamds',float(inp.xlamd*(1.0+np.square(inp.aw0))/(2.0*np.square(gamma))))
# setattr(inp,'xlamds', float(0.022058051560136/(gamma**2))) ## DJD 15/02/2019 - for 250MeV/c
# setattr(inp,'xlamds', float(0.01685042566473/(gamma**2))) ## JKJ 04/06/2019 - for 1000MeV/c
setattr(inp,'xlamds', 4.4E-09) ## JKJ 18/06/2019 - Fixed for 1GeV/c
print( 'aw0 = ', inp.aw0)
print( 'awd = ', inp.awd)
print( 'xlamds = ', inp.xlamds)
elif (hasattr(self,'i_HDF5') and getattr(self,'i_HDF5')==1) and not (hasattr(self,'HDF5_file')):
# print('Path of the HDF5 file not provided')
return
else:
pass
# print('No need to HDF5 ASTRA file')
# if (getattr(self,'i_edist')==1) or (getattr(inp,'edist')!=None) or (getattr(inp,'beam')!=None) :
# setattr(inp,'ntail',0)
# else:
# if (getattr(self,'i_edist')==0) and getattr(A_input,'ntail')!=0 :
# setattr(inp,'ntail',int(getattr(A_input,'ntail')))
# else:
# setattr(inp,'ntail',-int(np.floor(getattr(inp,'nslice')/2)))
# Overwrite the simulation attributes if the user has new values for them defined in the input data structure
if (hasattr(self, 'i_rewrite')) and (hasattr(self, 'par_rew')) and (getattr(self, 'i_rewrite') == 1):
inp = self.GEN_rewrite_par(inp)
else:
pass
# Running over noise realisations and/or scan parameters
for n_par in s_scan:
for run_id in run_ids:
inp.runid = run_id
inp.lout = [1,1,1,1,1,0,1,1,1,1,1,1,1,1,0,0,0,0,0]
inp.run_dir = getattr(self,'file_pout')
try:
os.makedirs(inp.run_dir)
except:
pass
if self.i_scan==1 and inp.f1st==1:
inp= self.GEN_scan(n_par ,A_input,A_undulator,inp)
elif self.i_scan==0 and inp.f1st==1:
inp.lat = A_undulator['Magnetic Lattice']
setattr(inp,'magin',1)
else:
inp.lat =None
setattr(inp,'magin',0)
# DJD 16/10/18 temporary hack to randomise seed
ipseed = 56#np.random.randint(9999)
setattr(inp,'ipseed', ipseed)
inp_arr.append(inp)
launcher=get_genesis_launcher(self.gen_launch)
g = run_genesis(inp,launcher,i_aft=i_aft)
setattr(g,'filePath',str(inp.run_dir))
# g.Lsat = g.z[np.argmax(g.bunching[np.argmax(g.p_int[:,-1])])]
g.bunch_length = self.bunch_length
g.Lsat = g.z[np.argmax(g.bunching[np.argmax(g.I)])]
g.momentum = inp.edist.cp
# print( 'g.momentum = ', g.momentum)
return g
########################################################
def find_saturation(power, z, n_smooth=20):
p = np.diff(np.log10(power))
u = np.convolve(p, np.ones(n_smooth) / float(n_smooth), mode='same')
um = np.max(u)
ii = 0
for i in range(len(u)):
if u[i] < 0.1 * um and z[i] > 5:
ii = i
print( 'break! i = ', ii)
break
#plt.plot(g.z[1:], u, lw=3)
#plt.plot(g.z[ii+1], p[ii], 'rd')
#plt.plot(g.z, power, lw=3)
#plt.plot(z[ii+1], np.log10(power[ii]), 'rd')
return z[ii+1], ii+1
def evalBeamWithGenesis(dir, run=True, startcharge=250, genesis_file='NEW_SHORT_NOM_TD_v7.in', beam_file='CLA-FMS-APER-02.hdf5', ncpu=22):
# Genesis run:
data={'gen_file': genesis_file,
'file_pout': dir,
'file_beam':'beamfile.txt',
'i_scan':0,
'gen_launch':'',
'stat_run':1,
'idump':0,
'i_edist':0,
'i_beam':0,
'i_rad':0,
'i_dpa':0,
'i_dfl':0,
'i_HDF5':1,
'HDF5_file': dir+'/'+beam_file,
'i_match': 0}
if run:
hostname = socket.gethostname()
if 'apclara1' in hostname:
data['gen_launch'] = '/opt/OpenMPI-3.1.3/bin/mpiexec --timeout 600 -np 12 /opt/Genesis/bin/genesis2 < tmp.cmd 2>&1 > /dev/null'
elif 'apclara3' in hostname:
data['gen_launch'] = 'source /opt/MPI/mpich2.sh && MPIEXEC_TIMEOUT=1500 /usr/lib64/mpich/bin/mpiexec -np '+str(ncpu)+' /opt/Genesis/bin/genesis2mpich < tmp.cmd 2>&1 > /dev/null'
# data['gen_launch'] = '/opt/Genesis/bin/genesis2 < tmp.cmd 2>&1 > /dev/null'
else:
data['gen_launch'] = ''
f = FEL_sim(data)
A_inp = f.read_GEN_input_file()
g = f.GEN_simul_preproc(A_inp, dirno=dir, startcharge=startcharge)
p = map(np.mean, zip(*g.p_int))
g.Lsat, g.Lsatindex = find_saturation(p, g.z)
# this section is copied from genesis_plot.py - to calculate bandwidth
spectrum_lamdwidth_fwhm = np.zeros_like(g.z)
spectrum_lamdwidth_std = np.zeros_like(g.z)
for zz in range(g.nZ):
spectrum_lamdwidth_fwhm[zz] = None
spectrum_lamdwidth_std[zz] = None
if np.sum(g.spec[:,zz])!=0:
pos, width, arr = fwhm3(g.spec[:, zz])
if width != None:
if arr[0] == arr[-1]:
dlambda = abs(g.freq_lamd[pos] - g.freq_lamd[pos-1])
else:
dlambda = abs( (g.freq_lamd[arr[0]] - g.freq_lamd[arr[-1]]) / (arr[0] - arr[-1]) )
spectrum_lamdwidth_fwhm[zz] = dlambda * width / g.freq_lamd[pos]
# spectrum_lamdwidth_fwhm[zz] = abs(g.freq_lamd[arr[0]] - g.freq_lamd[arr[-1]]) / g.freq_lamd[pos] # the FWHM of spectral line (error when peakpos is at the edge of lamdscale)
spectrum_lamdwidth_std[zz] = std_moment(g.freq_lamd, g.spec[:, zz]) / n_moment(g.freq_lamd, g.spec[:, zz], 0, 1)
######### end of section copied from genesis_plot.py
brightness = g.energy / spectrum_lamdwidth_std
for i in range(len(g.z)):
if g.z[i] < 5:
brightness[i] = 0
max = np.argmax(brightness)
max = len([l for l in g.z if l <= 12.5])
maxe = np.argmax(g.energy)
g.spectrum_lamdwidth_std = spectrum_lamdwidth_std
return g.energy[max], g.spectrum_lamdwidth_std[max], g.z[max], g.energy[maxe], g.spectrum_lamdwidth_std[maxe], g.z[maxe], g.bunch_length, g
csv_out = ''
def saveState(args, id, *values):
global csv_out
args=list(args)
for v in values:
args.append(v)
args.append(id)
csv_out.writerow(args)
# csv_out.flush()
class genesisSimulation(runEle.fitnessFunc):
parameter_names = []
def __init__(self):
super(genesisSimulation, self).__init__()
self.lattice = 'Lattices/clara400_v12_v3_elegant_jkj.def'
self.optdir = None
self.runElegant = True
self.runGenesis = True
self.savestate = False
self.startcharge = 250
self.genesis_file = 'NEW_SHORT_NOM_TD_v7.in'
self.beam_file = 'CLA-FMS-APER-02.hdf5'
self.delete_most_output = False
def optfunc(self, inputargs, **kwargs):
if not self.post_injector:
parameternames = self.injector_parameter_names + self.parameter_names
else:
parameternames = copy(self.parameter_names)
self.inputlist = map(lambda a: a[0]+[a[1]], zip(parameternames, inputargs))
        if 'dir' in kwargs and kwargs['dir'] is not None:
tmpdir = kwargs['dir']
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
self.tmpdir = os.path.relpath(tmpdir)
return self.run_simulation(self.inputlist, self.tmpdir)
elif self.optdir is not None:
tmpdir = self.optdir
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
self.tmpdir = os.path.relpath(tmpdir)
return self.run_simulation(self.inputlist, self.tmpdir)
else:
if 'subdir' in kwargs:
self.subdir = kwargs['subdir']
# del kwargs['subdir']
else:
self.subdir = ''
            print( 'subdir = ', self.subdir)
            with runEle.TemporaryDirectory(dir=os.getcwd()+'/'+self.subdir+'/') as tmpdir:
self.tmpdir = os.path.relpath(tmpdir)
return self.run_simulation(self.inputlist, self.tmpdir)
def delete_files(self, glob_pattern):
# get a recursive list of file paths that matches pattern including sub directories
fileList = glob.glob(glob_pattern)
# Iterate over the list of filepaths & remove each file.
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
def run_simulation(self, inputargs, dir):
sys.stdout = open(dir+'/'+'std.out', 'w', buffering=1)
sys.stderr = open(dir+'/'+'std.err', 'w', buffering=1)
# print('self.start_lattice = ', self.start_lattice)
if self.runElegant:
self.setup_lattice(inputargs, dir)
self.before_tracking()
fitvalue = self.track()
e, b, l, ee, be, le, bunchlength, g = evalBeamWithGenesis(dir, run=self.runGenesis, startcharge=self.startcharge, genesis_file=self.genesis_file, beam_file=self.beam_file, ncpu=self.genesis_ncpu)
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
if self.verbose:
print( 'bandwidth = ', 1e2*b, ' pulse energy =', 1e6*e, ' Sat. Length =', l)
print( 'bandwidth E = ', 1e2*be, ' max pulse energy =', 1e6*ee, ' Sat. Length E =', le)
if self.delete_most_output:
self.delete_files(dir+'/'+'*.sdds')
self.delete_files(dir+'/'+'*.SDDS')
self.delete_files(dir+'/'+'CLA-S02*.hdf5')
self.delete_files(dir+'/'+'CLA-S03*.hdf5')
self.delete_files(dir+'/'+'CLA-S04*.hdf5')
self.delete_files(dir+'/'+'CLA-S05*.hdf5')
self.delete_files(dir+'/'+'CLA-S06*.hdf5')
self.delete_files(dir+'/'+'CLA-VBC*.hdf5')
self.delete_files(dir+'/'+'*.edist')
if self.savestate:
try:
saveState(inputargs, idNumber, e, b, l, ee, be, le, bunchlength)
except:
pass
# except Exception as error:
# print 'genesisBeamFile.run_simulation Error! ', error
# e, b, ee, be, l, g = [0, 1, 0, 1, 0, {}]
return 1e4*e, 1e2*b, 1e4*ee, 1e2*be, l, g
if __name__ == "__main__":
args = parser.parse_args()
injector_startingvalues = [-8.906156010951616,0.3420474160090586,2.0515744815221354e7,-16.281405933324855,0.05036027437405955,-0.0502414403704962]
startingvalues = best = [2.018490050471744e7,-23.04340196585034,2.934266187158792e7,
-1.7771024303105327,1.7144513765057914e7,167.20031122662812,3.185636245553371e7,41.97162063476029,-0.14363986757360986, 1]
if args.postinjector == False or args.postinjector == 'False' or args.postinjector == 0:
best = injector_startingvalues + startingvalues
POST_INJECTOR = False
else:
best = startingvalues
POST_INJECTOR = True
print( 'Post Injector = ', POST_INJECTOR, ' [', args.postinjector,']')
global_best = 0
from SimulationFramework.Framework import Framework
framework = Framework('longitudinal_best', overwrite=False)
framework.loadSettings('Lattices/clara400_v12_v3_elegant_jkj.def')
parameters = []
parameters.append(framework.getElement('CLA-L02-CAV', 'field_amplitude'))
parameters.append(framework.getElement('CLA-L02-CAV', 'phase'))
parameters.append(framework.getElement('CLA-L03-CAV', 'field_amplitude'))
parameters.append(framework.getElement('CLA-L03-CAV', 'phase'))
parameters.append(framework.getElement('CLA-L4H-CAV', 'field_amplitude'))
parameters.append(framework.getElement('CLA-L4H-CAV', 'phase'))
parameters.append(framework.getElement('CLA-L04-CAV', 'field_amplitude'))
parameters.append(framework.getElement('CLA-L04-CAV', 'phase'))
parameters.append(framework['bunch_compressor']['angle'])
def gaussian_beam_test():
## This is for the Gaussian Beam Test!
master_subdir = 'gaussianBeam'
beam = rbf.beam()
elegantbeamfilename = 'CLA-FMS-APER-02.sdds'
beam.read_SDDS_beam_file(master_subdir + '/' + elegantbeamfilename, charge=250e-12)
beam.beam['total_charge'] = 250e-12
HDF5filename = elegantbeamfilename.replace('.sdds','.hdf5').replace('.SDDS','.hdf5').strip('\"')
beam.write_HDF5_beam_file(master_subdir + '/' + HDF5filename, centered=False, sourcefilename=elegantbeamfilename)
optfunc(best, dir=os.path.abspath('gaussianBeam'), scaling=6, post_injector=POST_INJECTOR, verbose=True, savestate=False, runGenesis=True, runElegant=False)
# 1351 == 50uJ
set253 = [30000000.0, -23, 27000000.0, -8, 24000000.0, 184, 32000000.0, 45, -0.1185, 0]
def npart_test():
## This is for the set Beam Test!
for n in [253]:
best = eval('set'+str(n))
print( n,' = ', best)
optfunc(best, dir='test/set'+str(n)+'_32k', scaling=5, post_injector=True, verbose=True, savestate=False)
# optfunc(best, dir=os.path.abspath('set'+str(n)+'_262k'), scaling=6, post_injector=True, verbose=True, savestate=False)
def scan_vbc_angle():
for vbc in np.arange(startingvalues[-1]-0.003, startingvalues[-1]+0.003,0.001):
newbest = best
newbest[-1] = vbc
print( 'VBC angle = ', vbc)
optfunc(newbest, dir='de/vbc_angle_'+str(vbc)+'_32k', scaling=5, post_injector=True, verbose=True, savestate=False)
if args.gaussian or args.gaussian == 'True' or args.gaussian > 0:
print( 'Gaussian Beam')
gaussian_beam_test()
elif args.set or args.set == 'True' or args.set > 0:
print( 'Set Test')
npart_test()
elif args.vbc or args.vbc == 'True' or args.vbc > 0:
print( 'VBC Scan')
scan_vbc_angle()
exit()
| [
"[email protected]"
]
| |
60b05258a7af383e9669549c72215b9efe7aec78 | aaa762ce46fa0347cdff67464f56678ea932066d | /AppServer/lib/django-1.5/tests/regressiontests/model_formsets_regress/models.py | f94ad51929420a9df6641b04d954802d88f6f8ba | [
"BSD-3-Clause",
"LGPL-2.1-or-later",
"Apache-2.0",
"MIT",
"GPL-2.0-or-later",
"MPL-1.1"
]
| permissive | obino/appscale | 3c8a9d8b45a6c889f7f44ef307a627c9a79794f8 | be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f | refs/heads/master | 2022-10-01T05:23:00.836840 | 2019-10-15T18:19:38 | 2019-10-15T18:19:38 | 16,622,826 | 1 | 0 | Apache-2.0 | 2022-09-23T22:56:17 | 2014-02-07T18:04:12 | Python | UTF-8 | Python | false | false | 829 | py | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class User(models.Model):
username = models.CharField(max_length=12, unique=True)
serial = models.IntegerField()
class UserSite(models.Model):
user = models.ForeignKey(User, to_field="username")
data = models.IntegerField()
class Place(models.Model):
name = models.CharField(max_length=50)
class Restaurant(Place):
pass
class Manager(models.Model):
retaurant = models.ForeignKey(Restaurant)
name = models.CharField(max_length=50)
class Network(models.Model):
name = models.CharField(max_length=15)
@python_2_unicode_compatible
class Host(models.Model):
network = models.ForeignKey(Network)
hostname = models.CharField(max_length=25)
def __str__(self):
return self.hostname
| [
"[email protected]"
]
| |
7a3116c0966d70d7ce49d8c5048be8540ad7cea9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_Class2098.py | 6d4cc286b7cbe8fb8b857dd6c11383572739cef7 | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,072 | py | # qubit number=4
# total number=34
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=20
prog.cz(input_qubit[0],input_qubit[3]) # number=21
prog.h(input_qubit[3]) # number=22
prog.cx(input_qubit[0],input_qubit[3]) # number=31
prog.x(input_qubit[3]) # number=32
prog.cx(input_qubit[0],input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=23
prog.cz(input_qubit[0],input_qubit[3]) # number=24
prog.h(input_qubit[3]) # number=25
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
prog.y(input_qubit[2]) # number=18
prog.z(input_qubit[3]) # number=28
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[2],input_qubit[0]) # number=10
prog.h(input_qubit[1]) # number=19
prog.h(input_qubit[0]) # number=15
prog.cz(input_qubit[2],input_qubit[0]) # number=16
prog.h(input_qubit[0]) # number=17
prog.y(input_qubit[1]) # number=26
prog.y(input_qubit[1]) # number=27
prog.swap(input_qubit[1],input_qubit[0]) # number=29
prog.swap(input_qubit[1],input_qubit[0]) # number=30
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2098.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
]
| |
b8ee7d84333a2089a26b2d3ee0588cefa285e7f4 | 5989e503a733e8b29f4c502008446a75c2b43ff8 | /src/geofr/forms/forms.py | 3c0372717e90babdab4621efae6ea4bf114d1570 | []
| no_license | samuelpath/aides-territoires | 399a6a7b0607ef5a8d2b327247446b239f5b1a42 | 5793bd49d7157a34e08c29e56a46e1e3ead0651f | refs/heads/master | 2022-12-20T14:35:18.671563 | 2020-08-21T08:00:33 | 2020-08-21T08:00:33 | 288,424,578 | 0 | 0 | null | 2020-08-18T10:27:17 | 2020-08-18T10:27:16 | null | UTF-8 | Python | false | false | 1,069 | py | from django import forms
from django.contrib import admin
from django.contrib.admin.widgets import AutocompleteSelectMultiple
from django.utils.translation import ugettext_lazy as _
# from core.forms import AutocompleteSelectMultiple
from geofr.models import Perimeter
class PerimeterUploadForm(forms.Form):
city_list = forms.FileField(
label=_('City list'),
required=True)
class PerimeterCombineForm(forms.Form):
add_perimeters = forms.ModelMultipleChoiceField(
label=_('Perimeters to add'),
queryset=Perimeter.objects.all(),
widget=AutocompleteSelectMultiple(Perimeter._meta, admin.AdminSite()),
help_text=_('Select a list of perimeters to combines'))
rm_perimeters = forms.ModelMultipleChoiceField(
label=_('Perimeters to substract'),
required=False,
queryset=Perimeter.objects.all(),
widget=AutocompleteSelectMultiple(Perimeter._meta, admin.AdminSite()),
help_text=_('Those perimeters will be substracted from the '
'combined perimeters'))
| [
"[email protected]"
]
| |
5f0bfc67ef67c011e059a43bcfd8dc2ead2c0091 | 6d233ad2059a941e4ce4c5b5ee3857b8a4a0d212 | /Everyday_alg/2022/2022-01/01/convert-1d-array-into-2d-array.py | 8ece2df0c75c81071c9834f30b59c191f075b716 | []
| no_license | Alexanderklau/Algorithm | 7c38af7debbe850dfc7b99cdadbf0f8f89141fc6 | eac05f637a55bfcc342fa9fc4af4e2dd4156ea43 | refs/heads/master | 2022-06-12T21:07:23.635224 | 2022-06-12T08:12:07 | 2022-06-12T08:12:07 | 83,501,915 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | # coding: utf-8
__author__ = 'Yemilice_lau'
"""
You are given a 0-indexed one-dimensional integer array original and two integers m and n.
You need to create an m-row, n-column two-dimensional array using all the elements of original.
The elements of original from index 0 to n - 1 (inclusive) form the first row of the 2D array,
the elements from index n to 2 * n - 1 (inclusive) form the second row, and so on.
Return the m x n 2D array built this way; if it cannot be built, return an empty 2D array.
Input: original = [1,2,3,4], m = 2, n = 2
Output: [[1,2],[3,4]]
Explanation:
The constructed 2D array should have 2 rows and 2 columns.
The first n = 2 elements of original, [1,2], form the first row.
The next n = 2 elements of original, [3,4], form the second row.
Input: original = [3], m = 1, n = 2
Output: []
Explanation:
original contains only 1 element.
It is impossible to fill a 1x2 2D array with a single element, so return an empty 2D array.
class Solution(object):
def construct2DArray(self, original, m, n):
"""
:type original: List[int]
:type m: int
:type n: int
:rtype: List[List[int]]
"""
if len(original) != (m * n):
return []
res = []
for i in range(0, len(original), n):
res.append(original[i:i+n])
return res
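# Usage sketch (illustrative; expected outputs come from the examples above):
# Solution().construct2DArray([1, 2, 3, 4], 2, 2) -> [[1, 2], [3, 4]]
# Solution().construct2DArray([3], 1, 2) -> []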
| [
"[email protected]"
]
| |
a326515df2fc5497a6a153866ca631b50686fee8 | 64188fd40699e5f542c12190f0892b082d5d643b | /src/problem_63/solution.py | 31314c0f53cc7a557a6306dacf386548b0d7dd4f | []
| no_license | tyronedamasceno/Daily-Coding-Problem | 9b7cd7a0f19762854986f5ab858a110353e81410 | a34bebbe635c4a7c8fb7400ea11fd03e4c2dea4b | refs/heads/master | 2020-04-30T00:51:18.020367 | 2019-06-11T18:47:11 | 2019-06-11T18:47:11 | 176,513,062 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | """
Given a 2D matrix of characters and a target word, write a function that
returns whether the word can be found in the matrix by going left-to-right,
or up-to-down.
For example, given the following matrix:
[['F', 'A', 'C', 'I'],
['O', 'B', 'Q', 'P'],
['A', 'N', 'O', 'B'],
['M', 'A', 'S', 'S']]
and the target word 'FOAM', you should return true, since it's the leftmost
column. Similarly, given the target word 'MASS', you should return true,
since it's the last row.
"""
import unittest
from copy import deepcopy
def solve(matrix, word):
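    # Build the transpose (the loop below assumes a square matrix, as in the examples)
    # so that columns can be scanned for the word the same way as rows.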
transp = deepcopy(matrix)
for i in range(len(matrix)):
for j in range(len(matrix)):
transp[i][j] = matrix[j][i]
for line in matrix:
if word in ''.join(line):
return True
for line in transp:
if word in ''.join(line):
return True
return False
class Tests(unittest.TestCase):
def test_example1(self):
matrix = [
['F', 'A', 'C', 'I'],
['O', 'B', 'Q', 'P'],
['A', 'N', 'O', 'B'],
['M', 'A', 'S', 'S']
]
self.assertTrue(solve(matrix, 'FOAM'))
def test_example2(self):
matrix = [
['F', 'A', 'C', 'I'],
['O', 'B', 'Q', 'P'],
['A', 'N', 'O', 'B'],
['M', 'A', 'S', 'S']
]
self.assertTrue(solve(matrix, 'MASS'))
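# Hypothetical runner (assumes the tests are meant to be executed directly):
if __name__ == '__main__':
    unittest.main()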
| [
"[email protected]"
]
| |
21fe9fffee018e8624644005eda4caea17cd4fd1 | 471b8043bedd60f73b532b5bd23aa19a32cb7d44 | /where/models/site/nt_atm_loading.py | 9637c4a6106b723c07da794f4b2e619a8b47a87a | [
"MIT"
]
| permissive | gahjelle/where | 8be56137001eed20c55413549bbf43a5978b918c | f7d025c4fb36adb3e511c3a61b244263bbfd000d | refs/heads/master | 2021-06-04T05:02:16.596468 | 2020-06-24T08:52:27 | 2020-06-24T08:52:27 | 136,427,535 | 0 | 0 | MIT | 2018-06-07T05:45:24 | 2018-06-07T05:45:23 | null | UTF-8 | Python | false | false | 2,208 | py | """Apply non tidal atmospheric loading displacements
Description:
------------
"""
# External library imports
import numpy as np
# Midgard imports
from midgard.dev import plugins
from midgard.dev import log  # assumed home of the log helper used by log.fatal below
# Where imports
from where import apriori
from where.data import position
@plugins.register
def non_tidal_atmospheric_loading(dset):
"""Apply non tidal atmospheric loading displacements at all stations.
Corrections are returned in meters in the Geocentric Celestial Reference System for each
observation. A Numpy array with 6 columns is returned, the first three columns are \f$ x, y, z \f$ for station 1,
while the last three columns are \f$ x, y, z \f$ for station 2.
Args:
dset: A Dataset containing model data.
Returns:
Numpy array: GCRS corrections in meters.
"""
ntapl = apriori.get("non_tidal_atmospheric_loading", time=dset.time)
data_out = list()
for _ in dset.for_each_suffix("station"):
data_out.append(non_tidal_atmospheric_loading_station(ntapl, dset))
return data_out
def non_tidal_atmospheric_loading_station(ntapl, dset):
"""Apply non tidal atmospheric loading displacements for a station field.
Corrections are returned in meters in the Geocentric
Celestial Reference System for each observation.
Args:
dset: A Dataset containing model data
Returns:
Numpy array: GCRS corrections in meters.
"""
lat, lon, _ = dset.site_pos.pos.llh.T
dup = ntapl["up"](dset.time, lon, lat)
deast = ntapl["east"](dset.time, lon, lat)
dnorth = ntapl["north"](dset.time, lon, lat)
denu = np.stack((deast, dnorth, dup), axis=1)
if position.is_position(dset.site_pos):
pos_correction = position.PositionDelta(denu, system="enu", ref_pos=dset.site_pos, time=dset.time)
elif position.is_posvel(dset.site_pos):
# set velocity to zero
denu = np.concatenate((denu, np.zeros(denu.shape)), axis=1)
pos_correction = position.PosVelDelta(denu, system="enu", ref_pos=dset.site_pos, time=dset.time)
else:
log.fatal(f"dset.site_pos{dset.default_field_suffix} is not a PositionArray or PosVelArray.")
return pos_correction.gcrs
| [
"[email protected]"
]
| |
f84a1959d9738890de81d54ba34b897e6d9da60f | efe05b0ea0a11b50a42b81795b22b89724177180 | /stratlib/sample_SMA_Live.py | b4e6ba3c014d13472410b233eb4a030b598b6027 | [
"Apache-2.0",
"MIT"
]
| permissive | dongtianqi1125/mooquant | b0fc5018f3aaf93ed69d8c249cccde7d8c98b8cb | 244a87d4cd8b4d918eec4f16905e0921c3b39f50 | refs/heads/master | 2023-01-01T00:34:09.722476 | 2018-09-25T18:57:47 | 2018-09-25T18:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,161 | py | from mooquant import bar, strategy
from mooquant.barfeed.tusharefeed import Feed
from mooquant.broker.backtesting import TradePercentage
from mooquant.broker.fillstrategy import DefaultStrategy
from mooquant.technical import cross, ma
class thrSMA(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, short_l, mid_l, long_l, up_cum):
strategy.BacktestingStrategy.__init__(self, feed)
self.getBroker().setFillStrategy(DefaultStrategy(None))
self.getBroker().setCommission(TradePercentage(0.001))
self.__position = None
self.__instrument = instrument
self.__prices = feed[instrument].getPriceDataSeries()
self.__malength1 = int(short_l)
self.__malength2 = int(mid_l)
self.__malength3 = int(long_l)
self.__circ = int(up_cum)
self.__ma1 = ma.SMA(self.__prices, self.__malength1)
self.__ma2 = ma.SMA(self.__prices, self.__malength2)
self.__ma3 = ma.SMA(self.__prices, self.__malength3)
def getPrice(self):
return self.__prices
def getSMA(self):
return self.__ma1, self.__ma2, self.__ma3
def onEnterCanceled(self, position):
self.__position = None
    def onEnterOk(self, position):
        pass
def onExitOk(self, position):
self.__position = None
def onExitCanceled(self, position):
self.__position.exitMarket()
def buyCon1(self):
if cross.cross_above(self.__ma1, self.__ma2) > 0:
return True
def buyCon2(self):
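        # Entry filter: over each of the last `up_cum` bars, both the short and mid SMAs
        # must have stayed above the long SMA (used together with the cross from buyCon1).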
m1 = 0
m2 = 0
for i in range(self.__circ):
if self.__ma1[-i - 1] > self.__ma3[-i - 1]:
m1 += 1
if self.__ma2[-i - 1] > self.__ma3[-i - 1]:
m2 += 1
if m1 >= self.__circ and m2 >= self.__circ:
return True
def sellCon1(self):
if cross.cross_below(self.__ma1, self.__ma2) > 0:
return True
def onBars(self, bars):
# If a position was not opened, check if we should enter a long
# position.
if self.__ma2[-1] is None:
return
if self.__position is not None:
if not self.__position.exitActive() and cross.cross_below(self.__ma1, self.__ma2) > 0:
self.__position.exitMarket()
if self.__position is None:
if self.buyCon1() and self.buyCon2():
shares = int(self.getBroker().getCash() * 0.2 / bars[self.__instrument].getPrice())
self.__position = self.enterLong(self.__instrument, shares)
print(bars[self.__instrument].getDateTime(),
bars[self.__instrument].getPrice())
# self.info("buy %s" % (bars.getDateTime()))
def runStratOnTushare(strat, paras, security_id, market, frequency):
liveFeed = Feed([security_id], frequency, 1024, 5)
strat = strat(liveFeed, security_id, *paras)
strat.run()
if __name__ == "__main__":
strat = thrSMA
security_id = '600848'
market = 'SH'
frequency = bar.Frequency.MINUTE
paras = [2, 20, 60, 10]
runStratOnTushare(strat, paras, security_id, market, frequency)
| [
"[email protected]"
]
| |
14eefa052f00c116a9fc1c131e9f3b820d88dca3 | 94e06376dc265c7bf1a2e51acb9714d02b21503a | /scrapy项目/lianjia/lianjia/spiders/lianjia.py | d6bd34d3bf933a776b66ff1409338e227c37e0fd | []
| no_license | zhangquanliang/python | 4b2db32bed4e4746c8c49c309563f456dc41c6be | f45ef96e385b1cd6c5dfb53bf81042d953a9ec46 | refs/heads/master | 2021-04-26T23:30:12.217397 | 2019-03-20T06:18:14 | 2019-03-20T06:18:14 | 124,005,916 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,808 | py | # -*- coding: utf-8 -*-
"""
Title = Lianjia second-hand homes in the Shenzhen area
Date = 20180511
"""
import scrapy
import re
class LianjiaSpider(scrapy.Spider):
name = 'lianjia'
allowed_domains = ['sz.lianjia.com']
start_urls = ['https://sz.lianjia.com/ershoufang/']
def parse(self, response):
page_text = response.xpath("//div[@class='page-box house-lst-page-box']").extract_first()
page = re.findall('{"totalPage":(.*?),"curPage":1}', page_text, re.I | re.S)[0]
for i in range(1, int(page)+1):
url = 'https://sz.lianjia.com/ershoufang/pg{}/'.format(i)
yield scrapy.Request(url, callback=self.get_html)
def get_html(self, response):
from ..items import LianjiaItem
item = LianjiaItem()
title_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[1]/a/text()").extract()
houseIcon__ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[2]/div[1]/a/text()").extract()
houseIcon_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[2]/div[1]/text()").extract()
positionInfo__ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[3]/div[1]/text()").extract()
positionInfo_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[3]/div[1]/a/text()").extract()
totalPrice__ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[6]/div[1]/span/text()").extract()
totalPrice_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[6]/div[1]/text()").extract()
unitPrice_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[6]/div[2]/span/text()").extract()
tag_ = response.xpath("//ul[@class='sellListContent']/li/div[@class='info clear']/div[1]").extract()
house_url_ = response.xpath("//ul[@class='sellListContent']//li/a/@href").extract()
for i in range(len(house_url_)):
title = title_[i]
houseIcon = houseIcon__[i] + houseIcon_[i]
positionInfo = positionInfo__[i] + positionInfo_[i]
totalPrice = totalPrice__[i] + totalPrice_[i]
unitPrice = unitPrice_[i]
tag = ""
reg = re.findall('<span class=".*?">(.*?)</span>', str(tag_[i]))
for j in range(len(reg)):
tag += reg[j] + '-'
house_url = house_url_[i]
item['title'] = title
item['houseIcon'] = houseIcon
item['positionInfo'] = positionInfo
item['totalPrice'] = totalPrice
item['unitPrice'] = unitPrice
item['tag'] = tag
item['house_url'] = house_url
yield item
| [
"[email protected]"
]
| |
586e4336d107436cf72073569215a306a9734851 | ba7be04fa897785fb9255df3ece0c1ffbead6acc | /part4_project/apps/model/serializers.py | 2f62d0a55a38aefe870cbd68391f031e2e8ee4c9 | []
| no_license | backupalisher/part4_project | e1f402553502d010ffe974ecce73e313f90b8174 | 09ca16e3021aeac609fe6594e5c4f6c72832d112 | refs/heads/master | 2022-12-10T13:22:15.899332 | 2020-09-21T16:22:02 | 2020-09-21T16:22:02 | 233,295,277 | 0 | 0 | null | 2022-12-08T10:47:45 | 2020-01-11T20:49:10 | Python | UTF-8 | Python | false | false | 347 | py | from rest_framework import serializers
from db_model.models import Models
class ModelsCreateSerialize(serializers.ModelSerializer):
class Meta:
model = Models
depth = 1
fields = '__all__'
class ModelsListSerialize(serializers.ModelSerializer):
class Meta:
model = Models
fields = ('id', 'name')
| [
"[email protected]"
]
| |
6d692836bd0bfc346bada188bc4423d0b09d1ba8 | f5b5a6e3f844d849a05ff56c497638e607f940e0 | /capitulo 08/08.20 - Programa 8.9 Validacao de inteiro usando funcao.py | 0f9bb820f9ba6138690f0e16549b43e71a910b59 | []
| no_license | alexrogeriodj/Caixa-Eletronico-em-Python | 9237fa2f7f8fab5f17b7dd008af215fb0aaed29f | 96b5238437c88e89aed7a7b9c34b303e1e7d61e5 | refs/heads/master | 2020-09-06T21:47:36.169855 | 2019-11-09T00:22:14 | 2019-11-09T00:22:14 | 220,563,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | ##############################################################################
# Part of the book Introdução à Programação com Python
# Author: Nilo Ney Coutinho Menezes
# Publisher: Editora Novatec (c) 2010-2019
# First edition - November/2010 - ISBN 978-85-7522-250-8
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Third edition - January/2019 - ISBN 978-85-7522-718-3
# Site: http://python.nilo.pro.br/
#
# File: listagem3\capítulo 08\08.20 - Programa 8.9 – Validação de inteiro usando função.py
# Description: Program 8.9 – Integer validation using a function
##############################################################################
# Program 8.9 – Integer validation using a function
def faixa_int(pergunta, mínimo, máximo):
while True:
v = int(input(pergunta))
if v < mínimo or v > máximo:
print(f"Valor inválido. Digite um valor entre {mínimo} e {máximo}")
else:
return v
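# Illustrative call (hypothetical, not from the book's listing):
# hora = faixa_int("Digite a hora (0-23): ", 0, 23)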
| [
"[email protected]"
]
| |
cd45d516fa4909d89c82ab685f869e71e01a120e | 0a6b950b3022dc1afbc1243be572e3dbe5b8b619 | /src/dirbs/api/v1/schemas/__init__.py | c87fca545e3c94cf892eb650a9697be31308d4f3 | []
| no_license | yasirz/DIRBS-Core | dee56bb350b9d511bbdca87afa41bc1f022e534a | ac26dc97c57216dc3c1fed1e1b17aac27d3a1a2d | refs/heads/master | 2023-03-19T01:27:14.390299 | 2021-02-24T11:41:34 | 2021-02-24T11:41:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | py | """
DIRBS REST-ful API-V1 schemas package.
Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
- The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Altered source versions must be plainly marked as such, and must not be misrepresented as being the original
software.
- This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
| [
"[email protected]"
]
| |
246931098fc852c729f302b71d959e6f3abb2e0e | fa6fa9e154a205d575eda6615e8b62f4cce77a3d | /office365/sharepoint/sitedesigns/site_design_creation_info.py | 771a9e417fdf8852a99a0970c90030df1eed3637 | [
"MIT"
]
| permissive | beliaev-maksim/Office365-REST-Python-Client | 7f94b7b40227de1192bfc0cb325107482caf443c | b2fd54701d83cc91eb5ba3a0ec352a93ded24885 | refs/heads/master | 2023-08-14T20:47:51.972883 | 2021-09-05T12:44:47 | 2021-09-05T12:44:47 | 283,984,055 | 0 | 0 | MIT | 2020-07-31T08:30:48 | 2020-07-31T08:30:48 | null | UTF-8 | Python | false | false | 109 | py | from office365.runtime.client_value import ClientValue
class SiteDesignCreationInfo(ClientValue):
pass
| [
"Ajilon80!"
]
| Ajilon80! |
4e4d46192a0364c4540cd99fa5cf08106f7a70ac | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /gift/tasks/all_tasks.py | 54650bf160cc8d90cd98fdb31716b5acf8b42044 | [
"CC-BY-4.0",
"Apache-2.0"
]
| permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,120 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapping of all defined tasks in the project.
task name --> task class.
"""
from gift.tasks import task
ALL_SINGLE_ENV_TASKS = {
'cls': task.ClassificationTask,
}
# TODO(samiraabnar): Refactor the desgin of the task and dataset classes.
ALL_MULTI_ENV_TASKS = {
'multi_env_cls': task.MultiEnvClassificationTask,
'multi_env_irm_cls': task.MultiEnvIRMClassificationTask,
'multi_env_vrex_cls': task.MultiEnvVRexClassificationTask,
}
ALL_MULTI_ENV_WITH_REPS = {
'multi_env_dm_cls':
task.MultiEnvLinearDomainMappingClassification,
'multi_env_nl_dm_cls':
task.MultiEnvNonLinearDomainMappingClassification,
'multi_env_hungarian_dm_cls':
task.MultiEnvHungarianDomainMappingClassification,
'multi_env_identity_dm_cls':
task.MultiEnvIdentityDomainMappingClassification,
'multi_env_sinkhorn_dm_cls':
task.MultiEnvSinkhornDomainMappingClassification,
}
ALL_MULTI_ENV_DOMAIN_ADVERSARIALS = {
'multi_env_dann_cls': task.MultiEnvDannClassification
}
ALL_TASKS = {}
ALL_TASKS.update(ALL_SINGLE_ENV_TASKS)
ALL_TASKS.update(ALL_MULTI_ENV_TASKS)
ALL_TASKS.update(ALL_MULTI_ENV_WITH_REPS)
ALL_TASKS.update(ALL_MULTI_ENV_DOMAIN_ADVERSARIALS)
def get_task_class(task_name):
"""Maps dataset name to a dataset_builder.
Args:
task_name: string; Name of the task.
Returns:
A dataset builder.
"""
if task_name not in ALL_TASKS.keys():
raise ValueError('Unrecognized task: {}'.format(task_name))
return ALL_TASKS[task_name]
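# For example, get_task_class('multi_env_cls') resolves to task.MultiEnvClassificationTask
# via the ALL_TASKS mapping above (illustrative note).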
| [
"[email protected]"
]
| |
2efe1894bcc709d14a8cc830715ba99376d18104 | 5b5ed7ca898a3c236f3ec5101e5610f2ddd6d871 | /api/__init__.py | 8b23f6d71e8be98e6d9fd5fa449f3a69ff577144 | []
| no_license | Y4phets/restraunt_menu | 5ee05ed53fb453241517e2c6d8feda03906cb7d6 | 9e7b3084c152c27b44e2571139597f165c9185ed | refs/heads/master | 2023-02-26T11:38:33.707041 | 2021-01-25T10:10:28 | 2021-01-25T10:10:28 | 332,703,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | from .tests import *
| [
"[email protected]"
]
| |
cb9a2a0fca8de5d2ae056c8b91bac26518e8342a | caa7a39055c3451db43b39ffc5e70dc560749334 | /manage.py | 8abd1f05eae2546d4d038931d6197c57df4827c0 | []
| no_license | OneStage-NITW/website | da2438e3857c03a0c38fa6db6a33619b330a3e0d | af86e38560f16f70a0b74bcf2aeab4d855fbdc74 | refs/heads/master | 2016-08-12T15:17:14.577895 | 2015-05-31T18:10:52 | 2015-05-31T18:10:52 | 36,546,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "onestage_website.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
]
| |
1a982b4c74818e20fa2025d49f804eda3fcc693d | 70d39e4ee19154a62e8c82467ef75b601e584738 | /devops/convert_pdf_to_csv.py | 3c4fc08657b4068bd7a77779b52fc17cd0125452 | [
"Apache-2.0"
]
| permissive | babywyrm/sysadmin | 6f2724be13ae7e5b9372278856a8c072073beffb | 2a5f3d29c7529bc917d4ff9be03af30ec23948a5 | refs/heads/master | 2023-08-16T03:50:38.717442 | 2023-08-16T03:05:55 | 2023-08-16T03:05:55 | 210,228,940 | 10 | 5 | null | 2023-05-01T23:15:31 | 2019-09-22T23:42:50 | PowerShell | UTF-8 | Python | false | false | 494 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
######################
########################
##
##
######################
##
"""
Created on Wed Apr 2 11:20:13 2019
@author: Ganesh
"""
from tabula import read_pdf
import pandas as pd
FILE_NAME = ""  # input PDF file path
dest = ""  # destination CSV file name
df = read_pdf(FILE_NAME)
x=df.values
data=pd.DataFrame(data=x[1:,1:],columns=x[0,1:])
data.to_csv(dest,sep=',',encoding="utf-8")
#########################################################
| [
"[email protected]"
]
| |
47857b06bfc66ba41572248fc6902862e04111c6 | 9d82f8f47c0f95fda1bda4edac9eeee52843cc58 | /unet/unet_model.py | 16b7cd50a40a050254b4ee1151174d0137685263 | []
| no_license | dodler/cvpr-autonomous-drivng | 7e2df21e367efc2467a36827ebbafc6799ee56a3 | 3b1eb786ebe93809ffb46dda3b36f213c83c6e4b | refs/heads/master | 2020-03-10T02:01:58.104341 | 2018-05-14T10:09:55 | 2018-05-14T10:09:55 | 129,125,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | #!/usr/bin/python
# full assembly of the sub-parts to form the complete net
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import RESIZE_TO
# python 3 confusing imports :(
from .unet_parts import *
class UNet(nn.Module):
def __init__(self, n_channels, n_classes):
super(UNet, self).__init__()
self.inc = inconv(n_channels, 64)
self.down1 = down(64, 128)
self.down2 = down(128, 256)
self.down3 = down(256, 512)
self.down4 = down(512, 512)
self.up1 = up(1024, 256)
self.up2 = up(512, 128)
self.up3 = up(256, 64)
self.up4 = up(128, 64)
self.outc = outconv(64, n_classes)
# self.upsample = torch.nn.Upsample(size=(RESIZE_TO, RESIZE_TO))
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
# x = self.upsample(x)
x = self.sigmoid(x)
return x
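# Minimal shape-check sketch (illustrative; the input size is an assumption, not taken from the repo):
# net = UNet(n_channels=3, n_classes=1)
# out = net(torch.randn(1, 3, 256, 256))  # expected output shape: (1, 1, 256, 256)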
| [
"[email protected]"
]
| |
3960991bfd0455bb46d1ce6e9144abc9e03ce1af | e02dbefe9f362c3e9b2849c1e22c0ab27e010164 | /NTS/4번.py | 13825ed1f8bffb80f677b8c96b0c938451e4afda | []
| no_license | hoyeoon/CodingTest | ac77574539a7a96cbdb64eb1768ba20ab6ad3b4f | 4d34b422f0dc85f3d506a6c997f3fa883b7162ab | refs/heads/master | 2023-06-05T17:43:38.348537 | 2021-06-28T10:05:22 | 2021-06-28T10:05:22 | 378,081,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | # [2,2,2,2,2,2,3,3,3,3,6,6]
# Problem: decide how to split the weights so that both pans of a balance scale hold equal totals | [
"[email protected]"
]
| |
7563d88374abff5479905e76854c5bd67ca6684d | 53c4460e8cce123276932b4ddf2fe00fdee75b65 | /list01.py | a62881597c3786dcf0b170d5d4673825cd0ad52d | []
| no_license | Yush1nk1m/Study_Python | 5ba8a6eeb73184ea7f1e892daae182b78d265e06 | 516f0ba6d9411453fa0d2df00314e383e3f8cabb | refs/heads/master | 2023-07-09T16:22:22.663219 | 2021-08-22T15:22:22 | 2021-08-22T15:22:22 | 398,831,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # 리스트를 선언합니다.
list_a = [1, 2, 3]
list_b = [4, 5, 6]
# 출력합니다.
print('# 리스트')
print('list_a =', list_a)
print('list_b =', list_b)
print()
# 기본 연산자
print('# 리스트 기본 연산자')
print('list_a + list_b =', list_a + list_b)
print('list_a * 3 =', list_a * 3)
print()
#함수
print('# 길이 구하기')
print('len(list_a) =', len(list_a))
| [
"[email protected]"
]
| |
ff35b942362d9ab3ae11cf91ad3d8c1b2aedaa61 | 1af611cc68a47bb81521d8517a9dbc71777bd401 | /exchanges/hotbit/page_objects/exchange_page.py | 9402c08f8171bc532ace1039b19d6c04baf17a3d | []
| no_license | BTCDIRECT/crypto_exchanges_connectors | d4e3f879bedfd4f0a7b238d2696df8bf261c8868 | 19b852c19ddd197959d30a69ffb773b238e19a49 | refs/heads/master | 2022-04-12T21:04:01.580622 | 2020-03-10T16:22:01 | 2020-03-10T16:22:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | import os
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from exchanges.hotbit.page_objects.login_page import LoginPage
class ExchangePage(object):
def __init__(self, base, quote, driver=None):
self.url = 'https://www.hotbit.io/exchange?symbol=' + base + '/' + quote
if driver is not None:
self.driver = driver
else:
self.driver = webdriver.Chrome("/usr/lib/chromium-browser/chromedriver")
def open(self):
self.driver.get(self.url)
def click_login_button(self):
login_button = self.driver.find_element_by_css_selector('a[href="/login"]')
if login_button is not None:
login_button.click()
return LoginPage()
if __name__ == '__main__':
ep = ExchangePage('UBT', 'BTC')
ep.open()
    ep.click_login_button() | [
"[email protected]"
]
| |
9b176b1706f27bc8f34005e2eaac0b741f9c37c0 | 18d51ac0a6ca14c8221c26f0dacd8d3721ca28e9 | /120hun.py | 00a1fb5b064d0784c178ad4c9c9bda3831921983 | []
| no_license | mahakalai/mahak | 05f96d52880ed7b2e5eb70dd1dbf14fc533236e8 | 613be9df7743ef59b1f0e07b7df987d29bb23ec7 | refs/heads/master | 2020-04-15T05:01:58.541930 | 2019-07-15T16:28:32 | 2019-07-15T16:28:32 | 164,406,486 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | n=int(input())
l=list(map(int,input().split()))
c=0
for i in range(0,len(l)):
for j in range(i+1,len(l)):
l1=l[j+1::]
s=l[i]+l[j]
if s in l1:
c=c+1
print(c)
| [
"[email protected]"
]
| |
875e6987c32bb08c0ca9d0065e78ee5f2038ec49 | 92237641f61e9b35ff6af6294153a75074757bec | /Algorithm/programmers/lv2/lv2_정렬_가장 큰 수.py | 3f2b9046ee8b6b04b7555edf6157aed221f10806 | []
| no_license | taepd/study | 8ded115765c4f804813e255d9272b727bf41ec80 | 846d3f2a5a4100225b750f00f992a640e9287d9c | refs/heads/master | 2023-03-08T13:56:57.366577 | 2022-05-08T15:24:35 | 2022-05-08T15:24:35 | 245,838,600 | 0 | 1 | null | 2023-03-05T23:54:41 | 2020-03-08T15:25:15 | JavaScript | UTF-8 | Python | false | false | 1,681 | py | """
Problem description
Given zero or positive integers, find the largest number that can be made by concatenating them.
For example, given the integers [6, 10, 2], you can form [6102, 6210, 1062, 1026, 2610, 2106], and the largest of these is 6210.
Given an array numbers of zero or positive integers as a parameter, write a solution function that returns, as a string, the largest number that can be made by rearranging their order.
Constraints
The length of numbers is between 1 and 100,000.
Each element of numbers is between 0 and 1,000.
The answer can be very large, so return it as a string.
Example input/output
numbers return
[6, 10, 2] 6210
[3, 30, 34, 5, 9] 9534330
[40, 403] 40403
[0, 0, 0, 0, 0] 0
[21, 212] 21221
"""
"""
Test cases 1-6 fail and I could not find a counterexample.
Strategy: pad each number with its own first digit up to the length of the largest number, pair this
padded key with the original number in a list, sort with multiple priorities, then extract the originals.
"""
def solution(numbers):
list = [[str(n)] for i, n in enumerate(numbers)]
m = len(str(max(numbers)))
for s in list:
s.append(s[0].ljust(m, s[0][0]))
print(list)
sorted_list = sorted(list, key=lambda x: (x[1], x[0]), reverse=True)
return str(int(''.join([s[0] for s in sorted_list])))
"""
A concise, clear version of what I had in mind..
"""
def solution(numbers):
numbers = list(map(str, numbers))
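    # The repeated-string sort key orders numbers by which concatenation is larger; three
    # repeats are enough here because each value is at most 1,000 (four characters).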
numbers.sort(key = lambda x: x*3, reverse=True)
return str(int(''.join(numbers))) | [
"[email protected]"
]
| |
e26085bdb8d7faee4611a6600b4dbc4c569bd91a | 08f89d88585fee98118fa1d42b59519d13ecbf17 | /tests/hwsim/vm/parallel-vm.py | bb79c44e69d9dcc78e36a4cbb6e42b2cefa3b277 | [
"BSD-3-Clause"
]
| permissive | rzr/wpa_supplicant | eb982f7b7d192999332c712734f8b33df04657c6 | 3f7ac05878ba965e941f2b5b80b8cb744e63f506 | refs/heads/master | 2021-01-18T10:37:36.701639 | 2014-10-13T07:40:08 | 2014-10-13T09:24:17 | 31,964,766 | 0 | 0 | NOASSERTION | 2019-08-07T21:56:49 | 2015-03-10T15:19:59 | C | UTF-8 | Python | false | false | 3,655 | py | #!/usr/bin/env python2
#
# Parallel VM test case executor
# Copyright (c) 2014, Jouni Malinen <[email protected]>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import fcntl
import os
import subprocess
import sys
import time
def main():
if len(sys.argv) < 2:
sys.exit("Usage: %s <number of VMs> [params..]" % sys.argv[0])
num_servers = int(sys.argv[1])
if num_servers < 1:
sys.exit("Too small number of VMs")
timestamp = int(time.time())
vm = {}
for i in range(0, num_servers):
print("\rStarting virtual machine {}/{}".format(i + 1, num_servers)),
cmd = ['./vm-run.sh', '--ext', 'srv.%d' % (i + 1),
'--split', '%d/%d' % (i + 1, num_servers)] + sys.argv[2:]
vm[i] = {}
vm[i]['proc'] = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
vm[i]['out'] = ""
vm[i]['err'] = ""
vm[i]['pos'] = ""
for stream in [ vm[i]['proc'].stdout, vm[i]['proc'].stderr ]:
fd = stream.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
print
while True:
running = False
updated = False
for i in range(0, num_servers):
if not vm[i]['proc']:
continue
if vm[i]['proc'].poll() is not None:
vm[i]['proc'] = None
continue
running = True
try:
err = vm[i]['proc'].stderr.read()
vm[i]['err'] += err
except:
pass
try:
out = vm[i]['proc'].stdout.read()
except:
continue
#print("VM {}: '{}'".format(i, out))
vm[i]['out'] += out
lines = vm[i]['out'].splitlines()
last = [ l for l in lines if l.startswith('START ') ]
if len(last) > 0:
try:
pos = last[-1].split(' ')[2]
vm[i]['pos'] = pos
updated = True
except:
pass
else:
vm[i]['pos'] = ''
if not running:
print("All VMs completed")
break
if updated:
status = {}
for i in range(0, num_servers):
if not vm[i]['proc']:
continue
status[i] = vm[i]['pos']
print status
time.sleep(1)
dir = '/tmp/hwsim-test-logs'
try:
os.mkdir(dir)
except:
pass
with open('{}/{}-parallel.log'.format(dir, timestamp), 'w') as f:
for i in range(0, num_servers):
f.write('VM {}\n{}\n{}\n'.format(i, vm[i]['out'], vm[i]['err']))
started = []
passed = []
failed = []
skipped = []
for i in range(0, num_servers):
lines = vm[i]['out'].splitlines()
started += [ l for l in lines if l.startswith('START ') ]
passed += [ l for l in lines if l.startswith('PASS ') ]
failed += [ l for l in lines if l.startswith('FAIL ') ]
skipped += [ l for l in lines if l.startswith('SKIP ') ]
if len(failed) > 0:
print "Failed test cases:"
for f in failed:
print f.split(' ')[1],
print
print("TOTAL={} PASS={} FAIL={} SKIP={}".format(len(started), len(passed), len(failed), len(skipped)))
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
bedb52ec0cc3f7b1914128941bd3c85e12bc1e6e | 222e8a170ea2ebd348c65285479a41692e617cd8 | /pymining/itemmining.py | fc8c680519f20a03964f68baa9018512af543b5d | [
"BSD-3-Clause"
]
| permissive | bufordtaylor/private_relim | 9c86eaf5f064879994057e50b543c67a7de9a282 | 0493a7f64fa042afc732d78fad88ea563e933198 | refs/heads/master | 2016-09-06T00:17:35.487834 | 2012-11-10T03:42:17 | 2012-11-10T03:42:17 | 6,606,715 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,591 | py | from collections import defaultdict, deque, OrderedDict
def _sort_transactions_by_freq(transactions, key_func, reverse_int=False,
reverse_ext=False, sort_ext=True):
key_seqs = [{key_func(i) for i in sequence} for sequence in transactions]
frequencies = get_frequencies(key_seqs)
asorted_seqs = []
for key_seq in key_seqs:
if not key_seq:
continue
# Sort each transaction (infrequent key first)
l = [(frequencies[i], i) for i in key_seq]
l.sort(reverse=reverse_int)
asorted_seqs.append(tuple(l))
# Sort all transactions. Those with infrequent key first, first
if sort_ext:
asorted_seqs.sort(reverse=reverse_ext)
return (asorted_seqs, frequencies)
def get_frequencies(transactions):
'''Computes a dictionary, {key:frequencies} containing the frequency of
each key in all transactions. Duplicate keys in a transaction are
counted twice.
:param transactions: a sequence of sequences. [ [transaction items...]]
'''
frequencies = defaultdict(int)
for transaction in transactions:
for item in transaction:
frequencies[item] += 1
return frequencies
def get_sam_input(transactions, key_func=None):
'''Given a list of transactions and a key function, returns a data
structure used as the input of the sam algorithm.
:param transactions: a sequence of sequences. [ [transaction items...]]
:param key_func: a function that returns a comparable key for a
transaction item.
'''
if key_func is None:
key_func = lambda e: e
(asorted_seqs, _) = _sort_transactions_by_freq(transactions, key_func)
# Group same transactions together
sam_input = deque()
visited = {}
current = 0
for seq in asorted_seqs:
if seq not in visited:
sam_input.append((1, seq))
visited[seq] = current
current += 1
else:
i = visited[seq]
(count, oldseq) = sam_input[i]
sam_input[i] = (count + 1, oldseq)
return sam_input
def sam(sam_input, min_support=2):
'''Finds frequent item sets of items appearing in a list of transactions
based on the Split and Merge algorithm by Christian Borgelt.
:param sam_input: The input of the algorithm. Must come from
`get_sam_input`.
:param min_support: The minimal support of a set to be included.
    :rtype: A dict mapping each frequent item set (frozenset) to its support.
'''
fis = set()
report = {}
_sam(sam_input, fis, report, min_support)
return report
def _sam(sam_input, fis, report, min_support):
n = 0
a = deque(sam_input)
while len(a) > 0 and len(a[0][1]) > 0:
b = deque()
s = 0
i = a[0][1][0]
while len(a) > 0 and len(a[0][1]) > 0 and a[0][1][0] == i:
s = s + a[0][0]
a[0] = (a[0][0], a[0][1][1:])
if len(a[0][1]) > 0:
b.append(a.popleft())
else:
a.popleft()
c = deque(b)
d = deque()
while len(a) > 0 and len(b) > 0:
if a[0][1] > b[0][1]:
d.append(b.popleft())
elif a[0][1] < b[0][1]:
d.append(a.popleft())
else:
b[0] = (b[0][0] + a[0][0], b[0][1])
d.append(b.popleft())
a.popleft()
while len(a) > 0:
d.append(a.popleft())
while len(b) > 0:
d.append(b.popleft())
a = d
if s >= min_support:
fis.add(i[1])
report[frozenset(fis)] = s
#print('{0} with support {1}'.format(fis, s))
n = n + 1 + _sam(c, fis, report, min_support)
fis.remove(i[1])
return n
def _new_relim_input(size, key_map):
i = 0
l = []
for key in key_map:
if i >= size:
break
l.append(((0, key), []))
i = i + 1
return l
def _get_key_map(frequencies):
l = [(frequencies[k], k) for k in frequencies]
l.sort(reverse=True)
key_map = OrderedDict()
for i, v in enumerate(l):
key_map[v] = i
return key_map
def get_relim_input(transactions, key_func=None):
'''Given a list of transactions and a key function, returns a data
structure used as the input of the relim algorithm.
:param transactions: a sequence of sequences. [ [transaction items...]]
:param key_func: a function that returns a comparable key for a
transaction item.
'''
# Data Structure
# relim_input[x][0] = (count, key_freq)
# relim_input[x][1] = [(count, (key_freq, )]
#
# in other words:
# relim_input[x][0][0] = count of trans with prefix key_freq
# relim_input[x][0][1] = prefix key_freq
# relim_input[x][1] = lists of transaction rests
# relim_input[x][1][x][0] = number of times a rest of transaction appears
# relim_input[x][1][x][1] = rest of transaction prefixed by key_freq
if key_func is None:
key_func = lambda e: e
(asorted_seqs, frequencies) = _sort_transactions_by_freq(transactions,
key_func)
key_map = _get_key_map(frequencies)
relim_input = _new_relim_input(len(key_map), key_map)
for seq in asorted_seqs:
if not seq:
continue
index = key_map[seq[0]]
((count, char), lists) = relim_input[index]
rest = seq[1:]
found = False
for i, (rest_count, rest_seq) in enumerate(lists):
if rest_seq == rest:
lists[i] = (rest_count + 1, rest_seq)
found = True
break
if not found:
lists.append((1, rest))
relim_input[index] = ((count + 1, char), lists)
return (relim_input, key_map)
def relim(rinput, min_support=2):
'''Finds frequent item sets of items appearing in a list of transactions
based on Recursive Elimination algorithm by Christian Borgelt.
In my synthetic tests, Relim outperforms other algorithms by a large
margin. This is unexpected as FP-Growth is supposed to be superior, but
this may be due to my implementation of these algorithms.
:param rinput: The input of the algorithm. Must come from
`get_relim_input`.
:param min_support: The minimal support of a set to be included.
    :rtype: A dict mapping each frequent item set (frozenset) to its support.
'''
fis = set()
report = {}
_relim(rinput, fis, report, min_support)
return report
def _relim(rinput, fis, report, min_support):
(relim_input, key_map) = rinput
n = 0
# Maybe this one isn't necessary
#a = deque(relim_input)
a = relim_input
while len(a) > 0:
item = a[-1][0][1]
s = a[-1][0][0]
if s >= min_support:
fis.add(item[1])
#print('Report {0} with support {1}'.format(fis, s))
report[frozenset(fis)] = s
b = _new_relim_input(len(a) - 1, key_map)
rest_lists = a[-1][1]
for (count, rest) in rest_lists:
if not rest:
continue
k = rest[0]
index = key_map[k]
new_rest = rest[1:]
# Only add this rest if it's not empty!
((k_count, k), lists) = b[index]
if len(new_rest) > 0:
lists.append((count, new_rest))
b[index] = ((k_count + count, k), lists)
n = n + 1 + _relim((b, key_map), fis, report, min_support)
fis.remove(item[1])
rest_lists = a[-1][1]
for (count, rest) in rest_lists:
if not rest:
continue
k = rest[0]
index = key_map[k]
new_rest = rest[1:]
((k_count, k), lists) = a[index]
if len(new_rest) > 0:
lists.append((count, new_rest))
a[index] = ((k_count + count, k), lists)
a.pop()
return n
class FPNode(object):
root_key = object()
def __init__(self, key, parent):
self.children = {}
self.parent = parent
self.key = key
self.count = 0
self.next_node = None
def add_path(self, path, index, length, heads, last_insert):
if index >= length:
return
child_key = path[index]
index += 1
try:
child = self.children[child_key]
except Exception:
child = self._create_child(child_key, heads, last_insert)
child.count += 1
heads[child_key][1] += 1
child.add_path(path, index, length, heads, last_insert)
def _create_child(self, child_key, heads, last_insert):
child = FPNode(child_key, self)
self.children[child_key] = child
try:
last_child = last_insert[child_key]
last_child.next_node = child
except Exception:
heads[child_key] = [child, 0]
last_insert[child_key] = child
return child
def get_cond_tree(self, child, count, visited, heads, last_insert,
dont_create=False):
key = self.key
if dont_create:
# This is a head, we don't want to copy it.
cond_node = None
else:
try:
cond_node = visited[self]
except Exception:
cond_node = self._create_cond_child(visited, heads,
last_insert)
if self.parent is not None:
# Recursion
parent_node = self.parent.get_cond_tree(cond_node, count, visited,
heads, last_insert, False)
if cond_node is not None:
cond_node.count += count
heads[key][1] += count
cond_node.parent = parent_node
return cond_node
def _create_cond_child(self, visited, heads, last_insert):
key = self.key
cond_node = FPNode(key, None)
visited[self] = cond_node
try:
last_cond_node = last_insert[key]
last_cond_node.next_node = cond_node
except Exception:
# Don't add root!
if self.parent is not None:
heads[key] = [cond_node, 0]
last_insert[key] = cond_node
return cond_node
def _find_ancestor(self, heads, min_support):
ancestor = self.parent
while ancestor.key != FPNode.root_key:
support = heads[ancestor.key][1]
if support >= min_support:
break
else:
ancestor = ancestor.parent
return ancestor
def prune_me(self, from_head_list, visited_parents, merged_before,
merged_now, heads, min_support):
try:
# Parent was merged
new_parent = merged_before[self.parent]
self.parent = new_parent
except KeyError:
# Ok, no need to change parent
pass
ancestor = self._find_ancestor(heads, min_support)
self.parent = ancestor
try:
# Oh, we visited another child of this parent!
other_node = visited_parents[ancestor]
merged_now[self] = other_node
other_node.count += self.count
# Remove yourself from the list
if from_head_list is not None:
from_head_list.next_node = self.next_node
self.next_node = None
except KeyError:
# We are a new child!
visited_parents[ancestor] = self
def __str__(self):
child_str = ','.join([str(key) for key in self.children])
return '{0} ({1}) [{2}] {3}'.format(self.key, self.count, child_str,
self.next_node is not None)
def __repr__(self):
return self.__str__()
def get_fptree(transactions, key_func=None, min_support=2):
'''Given a list of transactions and a key function, returns a data
structure used as the input of the relim algorithm.
:param transactions: a sequence of sequences. [ [transaction items...]]
:param key_func: a function that returns a comparable key for a
transaction item.
:param min_support: minimum support.
'''
if key_func is None:
key_func = lambda e: e
asorted_seqs, frequencies = _sort_transactions_by_freq(transactions,
key_func, True, False, False)
transactions = [[item[1] for item in aseq if item[0] >= min_support] for
aseq in asorted_seqs]
root = FPNode(FPNode.root_key, None)
heads = {}
last_insert = {}
for transaction in transactions:
root.add_path(transaction, 0, len(transaction), heads, last_insert)
# Here, v[1] is = to the frequency
sorted_heads = sorted(heads.values(), key=lambda v: (v[1], v[0].key))
new_heads = OrderedDict()
for (head, head_support) in sorted_heads:
new_heads[head.key] = (head, head_support)
#new_heads = tuple(heads.values())
return (root, new_heads)
def _init_heads(orig_heads):
new_heads = OrderedDict()
for key in orig_heads:
new_heads[key] = (None, 0)
return new_heads
def _create_cond_tree(head_node, new_heads, pruning):
visited = {}
last_insert = {}
while head_node is not None:
head_node.get_cond_tree(None, head_node.count, visited, new_heads,
last_insert, True)
head_node = head_node.next_node
return new_heads
def _prune_cond_tree(heads, min_support):
merged_before = {}
merged_now = {}
for key in reversed(heads):
(node, head_support) = heads[key]
if head_support > 0:
visited_parents = {}
previous_node = None
while node is not None:
# If the node is merged, we lose the next_node
next_node = node.next_node
node.prune_me(previous_node, visited_parents, merged_before,
merged_now, heads, min_support)
if node.next_node is not None:
# Only change the previous node if it wasn't merged.
previous_node = node
node = next_node
merged_before = merged_now
merged_now = {}
def fpgrowth(fptree, min_support=2, pruning=False):
'''Finds frequent item sets of items appearing in a list of transactions
based on FP-Growth by Han et al.
:param fptree: The input of the algorithm. Must come from
`get_fptree`.
:param min_support: The minimal support of a set.
    :param pruning: Perform a pruning operation. Defaults to False.
    :rtype: A dict mapping each frequent item set (frozenset) to its support.
'''
fis = set()
report = {}
_fpgrowth(fptree, fis, report, min_support, pruning)
return report
def _fpgrowth(fptree, fis, report, min_support=2, pruning=True):
(_, heads) = fptree
n = 0
for (head_node, head_support) in heads.values():
if head_support < min_support:
continue
fis.add(head_node.key)
#print('Report {0} with support {1}'.format(fis, head_support))
report[frozenset(fis)] = head_support
new_heads = _init_heads(heads)
_create_cond_tree(head_node, new_heads, pruning)
if pruning:
_prune_cond_tree(new_heads, min_support)
n = n + 1 + _fpgrowth((None, new_heads), fis, report, min_support,
pruning)
fis.remove(head_node.key)
return n
| [
"[email protected]"
]
| |
4c09f85367aa3c9c504ea9f4d897a6d69a2e2cf1 | 09f0505f3ac1dccaf301c1e363423f38768cc3cc | /r_DailyProgrammer/Intermediate/C252/unittests/unittest.py | b51b46abe99ba600d2ab0f0d513db9d91e533c14 | []
| no_license | Awesome-Austin/PythonPractice | 02212292b92814016d062f0fec1c990ebde21fe7 | 9a717f91d41122be6393f9fcd1a648c5e62314b3 | refs/heads/master | 2023-06-21T11:43:59.366064 | 2021-07-29T23:33:00 | 2021-07-29T23:33:00 | 270,854,302 | 0 | 0 | null | 2020-08-11T20:47:10 | 2020-06-08T23:24:09 | Python | UTF-8 | Python | false | false | 267 | py | #! python3
import unittest
from r_DailyProgrammer.Intermediate.C252.unittests.test_values import TEST_VALUES
class MyTestCase(unittest.TestCase):
def test_something(self):
self.assertEqual(True, False)
if __name__ == '__main__':
unittest.main()
| [
"{ID}+{username}@users.noreply.github.com"
]
| {ID}+{username}@users.noreply.github.com |
35f3320e82f072e78322d8ce608a900689ed71ac | 3be8b5d0334de1f3521dd5dfd8a58704fb8347f9 | /web/app/djrq/admin/mistags.py | a50df1551d6208dca29018445d8fbadbb2266e09 | [
"MIT"
]
| permissive | bmillham/djrq2 | 21a8cbc3087d7ad46087cd816892883cd276db7d | 5f357b3951600a9aecbe6c50727891b1485df210 | refs/heads/master | 2023-07-07T01:07:35.093669 | 2023-06-26T05:21:33 | 2023-06-26T05:21:33 | 72,969,773 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # encoding: utf-8
from ..templates.admin.mistags import mistagstemplate
from web.ext.acl import when
class Mistags:
__dispatch__ = 'resource'
__resource__ = 'mistags'
def __init__(self, context, name, *arg, **args):
self._ctx = context
self.queries = context.queries
def get(self, *arg, **args):
if 'delete' in args:
self.queries.delete_mistag(args['delete'])
mistaglist = self._ctx.queries.get_mistags()
for r in mistaglist:
if r.title == r.song.title and \
r.artist == r.song.artist.fullname and \
r.album == r.song.album.fullname:
self._ctx.db.delete(r)
self._ctx.db.commit()
return mistagstemplate("Mistags", self._ctx, mistaglist)
| [
"[email protected]"
]
| |
34bd2374972707c758f836f39b7f58724ea233bf | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /cGqjxKhNqZPZ76zac_24.py | de7e6b8845e790236633048386370486dccaae13 | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | """
Remember the game Battleship? Ships are floating in a matrix. You have to fire
torpedos at their suspected coordinates, to try and hit them.
Create a function that takes a list of lists (matrix) and a coordinate as a
string. If the coordinate contains only water `"."`, return `"splash"` and if
the coordinate contains a ship `"*"`, return `"BOOM"`.
### Examples
[
[".", ".", ".", "*", "*"],
[".", "*", ".", ".", "."],
[".", "*", ".", ".", "."],
[".", "*", ".", ".", "."],
[".", ".", "*", "*", "."],
]
fire(matrix, "A1") ➞ "splash"
fire(matrix, "A4") ➞ "BOOM"
fire(matrix, "D2") ➞ "BOOM"
### Notes
* The provided matrix is always a square.
* The provided matrix will not be larger than 5 * 5 ( A1 * E5).
"""
def fire(m, c):
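    # Row letter A-E maps to index 0-4 via ord(...) - 65; column digit 1-5 maps to 0-4.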
return "splash" if m[ord(c[0]) - 65][int(c[1]) - 1] == "." else "BOOM"
| [
"[email protected]"
]
| |
306dde8ff8b6f8377634fd8dbb46bb6cf5fd85e6 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/assembler/sleigh/__init__.pyi | 06aa25039d317ada39fe4fc7a2ebed9a6d64d14f | [
"MIT"
]
| permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 350 | pyi | from . import expr as expr
from . import grammars as grammars
from . import parse as parse
from . import sem as sem
from . import symbol as symbol
from . import tree as tree
from . import util as util
from .SleighAssembler import SleighAssembler as SleighAssembler
from .SleighAssemblerBuilder import SleighAssemblerBuilder as SleighAssemblerBuilder
| [
"[email protected]"
]
| |
911de7881b47cc1852f47d65dbed33605ea1dae8 | 07504838d12c6328da093dce3726e8ed096cecdb | /pylon/resources/properties/maxRcvTime.py | 2953f5d58661222f607dd076848ff88ea03fa2ea | []
| no_license | lcoppa/fiat-lux | 9caaa7f3105e692a149fdd384ec590676f06bf00 | 7c166bcc08768da67c241078b397570de159e240 | refs/heads/master | 2020-04-04T02:47:19.917668 | 2013-10-10T10:22:51 | 2013-10-10T10:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | py | """maxRcvTime standard property type, originally defined in resource file set
standard 00:00:00:00:00:00:00:00-0."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:14.
import pylon.resources.datapoints.time_sec
from pylon.resources.standard import standard
class maxRcvTime(pylon.resources.datapoints.time_sec.time_sec):
"""maxRcvTime standard property type. Maximum receive time. The maximum
period of time that may expire with no updates on the associated input
network variables before the object goes into heartbeat failure mode. A
zero value disables."""
def __init__(self):
super().__init__(
)
self._default_bytes = b'\x00\x00'
self._original_name = 'SCPTmaxRcvTime'
self._property_scope, self._property_key = 0, 48
self._definition = standard.add(self)
if __name__ == '__main__':
# unit test code.
item = maxRcvTime()
pass
| [
"[email protected]"
]
| |
ecc1d7540dc02c0544f633704a1a9bc3a5f5e92d | b66ec155356c11cabe2350a583f03dd6fad7f105 | /scripts/generate_sample_file.py | 2f4e67bb29a8e0027ed2c6d731c82d62d6540d7f | [
"MIT",
"LicenseRef-scancode-proprietary-license"
]
| permissive | ONSdigital/response-operations-ui | 823344b88d71ddadb36ccc1e7ca2fbd556456e92 | c0e37ac87ca8ba8ae0433d0222b3d7b4ff1c2cbd | refs/heads/main | 2023-08-18T08:18:44.118075 | 2023-08-02T10:43:41 | 2023-08-02T10:43:41 | 112,603,785 | 4 | 2 | MIT | 2023-09-14T10:35:46 | 2017-11-30T11:31:09 | Python | UTF-8 | Python | false | false | 287 | py | f = open("sample.csv", "a")
for i in range(5):
f.write(
f"499300{i:05}:F:50300:50300:45320:45320:8478:801325:9900000576:1:E:FE:01/09/1993:ENTNAME1_COMPANY1:"
f"ENTNAME2_COMPANY1::RUNAME1_COMPANY1:RUNNAME2_COMPANY1::TOTAL UK ACTIVITY:::C:D:7:0001:S\n"
)
f.close()
| [
"[email protected]"
]
| |
539286ddf101e0ad566fe240db4b2cfb93f3f621 | 2821676d816fa69fbb070494691a40fdf465910c | /neptunecontrib/monitoring/sklearn.py | c1e4ca0fd8e3e843a928b916d2ab44243c81fb53 | [
"MIT"
]
| permissive | stjordanis/neptune-contrib | 6837f787766cfd71a6a9d55d4796e9ba0fb9f563 | e15e401bfa959b8a92a268586bdd17f06e65b4e2 | refs/heads/master | 2023-03-19T01:24:46.820515 | 2021-02-19T08:58:22 | 2021-02-19T08:58:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,726 | py | #
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import matplotlib.pyplot as plt
import neptune
import pandas as pd
from scikitplot.estimators import plot_learning_curve
from scikitplot.metrics import plot_precision_recall
from sklearn.base import is_regressor, is_classifier
from sklearn.cluster import KMeans
from sklearn.metrics import explained_variance_score, max_error, mean_absolute_error, r2_score, \
precision_recall_fscore_support
from yellowbrick.classifier import ClassificationReport, ConfusionMatrix, ROCAUC, ClassPredictionError
from yellowbrick.cluster import SilhouetteVisualizer, KElbowVisualizer
from yellowbrick.model_selection import FeatureImportances
from yellowbrick.regressor import ResidualsPlot, PredictionError, CooksDistance
from neptunecontrib.api.table import log_csv
from neptunecontrib.api.utils import log_pickle
def log_regressor_summary(regressor, X_train, X_test, y_train, y_test,
model_name=None, nrows=1000, experiment=None, log_charts=True):
"""Log sklearn regressor summary.
This method automatically logs all regressor parameters, pickled estimator (model),
test predictions as table, model performance visualizations and test metrics.
Regressor should be fitted before calling this function.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The regression target for training
y_test (:obj:`ndarray`):
| The regression target for testing
model_name (`str`, optional, default is ``None``):
            | If logging a pickled model, define a name of the file to be logged to `model/<model_name>`
| If ``None`` - `model/estimator.skl` is used.
nrows (`int`, optional, default is 1000):
| Log first ``nrows`` rows of test predictions.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
log_charts (:bool:, optional, default is ``True``):
| If True, calculate and send chart visualizations.
|
| NOTE: calculating visualizations is potentially expensive depending on input data and regressor, and
| may take some time to finish.
|
| This is equivalent to calling log_learning_curve_chart, log_feature_importance_chart,
| log_residuals_chart, log_prediction_error_chart, log_cooks_distance_chart functions from this module.
|
| If not all visualizations are needed, it's recommended to set this parameter to ``False`` and call
| only the desired log functions
Returns:
``None``
Examples:
Log random forest regressor summary
.. code:: python3
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_regressor_summary(rfr, X_train, X_test, y_train, y_test)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
exp = _validate_experiment(experiment)
log_estimator_params(regressor, exp)
log_pickled_model(regressor, model_name, exp)
y_pred = regressor.predict(X_test)
log_test_predictions(regressor, X_test, y_test, y_pred=y_pred, nrows=nrows, experiment=exp)
log_scores(regressor, X_test, y_test, y_pred=y_pred, name='test', experiment=exp)
# visualizations
if log_charts:
log_learning_curve_chart(regressor, X_train, y_train, experiment=exp)
log_feature_importance_chart(regressor, X_train, y_train, experiment=exp)
log_residuals_chart(regressor, X_train, X_test, y_train, y_test, experiment=exp)
log_prediction_error_chart(regressor, X_train, X_test, y_train, y_test, experiment=exp)
log_cooks_distance_chart(regressor, X_train, y_train, experiment=exp)
def log_classifier_summary(classifier, X_train, X_test, y_train, y_test,
model_name=None, nrows=1000, experiment=None, log_charts=True):
"""Log sklearn classifier summary.
This method automatically logs all classifier parameters, pickled estimator (model),
    test predictions, prediction probabilities as table, model performance visualizations and test metrics.
Classifier should be fitted before calling this function.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The classification target for training
y_test (:obj:`ndarray`):
| The classification target for testing
model_name (`str`, optional, default is ``None``):
            | If logging a pickled model, define a name of the file to be logged to `model/<model_name>`
            | If ``None`` - `model/estimator.skl` is used.
nrows (`int`, optional, default is 1000):
| Log first ``nrows`` rows of test predictions and predictions probabilities.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
log_charts (:bool:, optional, default is ``True``):
| If True, calculate and send chart visualizations.
|
| NOTE: calculating visualizations is potentially expensive depending on input data and classifier, and
| may take some time to finish.
|
| This is equivalent to calling log_classification_report_chart, log_confusion_matrix_chart,
| log_roc_auc_chart, log_precision_recall_chart, log_class_prediction_error_chart functions from this
| module.
|
| If not all visualizations are needed, it's recommended to set this parameter to ``False`` and call
| only the desired log functions
Returns:
``None``
Examples:
Log random forest classifier summary
.. code:: python3
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_classifier_summary(rfc, X_train, X_test, y_train, y_test)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
exp = _validate_experiment(experiment)
log_estimator_params(classifier, exp)
log_pickled_model(classifier, model_name, exp)
log_test_preds_proba(classifier, X_test, nrows=nrows, experiment=exp)
y_pred = classifier.predict(X_test)
log_test_predictions(classifier, X_test, y_test, y_pred=y_pred, nrows=nrows, experiment=exp)
log_scores(classifier, X_test, y_test, y_pred=y_pred, name='test', experiment=exp)
# visualizations
if log_charts:
log_classification_report_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp)
log_confusion_matrix_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp)
log_roc_auc_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp)
log_precision_recall_chart(classifier, X_test, y_test, experiment=exp)
log_class_prediction_error_chart(classifier, X_train, X_test, y_train, y_test, experiment=exp)
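# The same selective pattern works for classifiers (sketch only; `rfc` is a placeholder for a
# fitted sklearn classifier, and an active Neptune experiment is assumed):
#
#     log_classifier_summary(rfc, X_train, X_test, y_train, y_test, log_charts=False)
#     log_confusion_matrix_chart(rfc, X_train, X_test, y_train, y_test)
#     log_roc_auc_chart(rfc, X_train, X_test, y_train, y_test)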
def log_estimator_params(estimator, experiment=None):
"""Log estimator parameters.
Log all estimator parameters as experiment properties.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
estimator (:obj:`estimator`):
| Scikit-learn estimator from which to log parameters.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfr = RandomForestRegressor()
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_estimator_params(rfr)
"""
assert is_regressor(estimator) or is_classifier(estimator) or isinstance(estimator, KMeans),\
'Estimator should be sklearn regressor, classifier or kmeans clusterer.'
exp = _validate_experiment(experiment)
for param, value in estimator.get_params().items():
exp.set_property(param, value)
def log_pickled_model(estimator, model_name=None, experiment=None):
"""Log pickled estimator.
Log estimator as pickled file to Neptune artifacts.
Estimator should be fitted before calling this function.
Path to file in the Neptune artifacts is ``model/<model_name>``.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
estimator (:obj:`estimator`):
| Scikit-learn estimator to log.
model_name (`str`, optional, default is ``None``):
| Name of the file.
| If ``None`` - ``estimator.skl`` is used.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_pickled_model(rfr, 'my_model')
"""
assert is_regressor(estimator) or is_classifier(estimator),\
'Estimator should be sklearn regressor or classifier.'
assert isinstance(model_name, str) or model_name is None, 'model_name should be str,' \
' {} was passed instead.'.format(type(model_name))
exp = _validate_experiment(experiment)
if model_name:
model_name = 'model/{}'.format(model_name)
else:
model_name = 'model/estimator.skl'
log_pickle(model_name, estimator, exp)
def log_test_predictions(estimator, X_test, y_test, y_pred=None, nrows=1000, experiment=None):
"""Log test predictions.
Calculate and log test predictions and have them as csv file in the Neptune artifacts.
If you pass ``y_pred``, then predictions are logged without computing from ``X_test`` data.
Estimator should be fitted before calling this function.
Path to predictions in the Neptune artifacts is 'csv/test_predictions.csv'.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
estimator (:obj:`estimator`):
| Scikit-learn estimator to compute predictions.
X_test (:obj:`ndarray`):
| Testing data matrix.
y_test (:obj:`ndarray`):
| Target for testing.
y_pred (:obj:`ndarray`, optional, default is ``None``):
| Estimator predictions on test data.
nrows (`int`, optional, default is 1000):
| Number of rows to log.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_test_predictions(rfr, X_test, y_test)
"""
assert is_regressor(estimator) or is_classifier(estimator),\
'Estimator should be sklearn regressor or classifier.'
assert isinstance(nrows, int), 'nrows should be integer, {} was passed'.format(type(nrows))
exp = _validate_experiment(experiment)
if y_pred is None:
y_pred = estimator.predict(X_test)
# single output
if len(y_pred.shape) == 1:
df = pd.DataFrame(data={'y_true': y_test, 'y_pred': y_pred})
log_csv('test_predictions', df.head(nrows), exp)
# multi output
if len(y_pred.shape) == 2:
df = pd.DataFrame()
for j in range(y_pred.shape[1]):
df['y_test_output_{}'.format(j)] = y_test[:, j]
df['y_pred_output_{}'.format(j)] = y_pred[:, j]
log_csv('test_predictions', df.head(nrows), exp)
def log_test_preds_proba(classifier, X_test, y_pred_proba=None, nrows=1000, experiment=None):
"""Log test predictions probabilities.
Calculate and log test predictions probabilities and have them as csv file in the Neptune artifacts.
If you pass ``y_pred_proba``, then predictions probabilities are logged without computing from ``X_test`` data.
Estimator should be fitted before calling this function.
Path to predictions probabilities in the Neptune artifacts is 'csv/test_preds_proba.csv'.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
classifier (:obj:`classifier`):
| Scikit-learn classifier to compute predictions probabilities.
X_test (:obj:`ndarray`):
| Testing data matrix.
y_pred_proba (:obj:`ndarray`, optional, default is ``None``):
| Classifier predictions probabilities on test data.
nrows (`int`, optional, default is 1000):
| Number of rows to log.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_test_preds_proba(rfc, X_test, y_test)
"""
assert is_classifier(classifier), 'Classifier should be sklearn classifier.'
assert isinstance(nrows, int), 'nrows should be integer, {} was passed'.format(type(nrows))
exp = _validate_experiment(experiment)
if y_pred_proba is None:
try:
y_pred_proba = classifier.predict_proba(X_test)
except Exception as e:
print('This classifier does not provide predictions probabilities. Error: {}'.format(e))
return
df = pd.DataFrame(data=y_pred_proba, columns=classifier.classes_)
log_csv('test_preds_proba', df.head(nrows), exp)
def log_scores(estimator, X, y, y_pred=None, name=None, experiment=None):
"""Log estimator scores on ``X``.
Calculate and log scores on data and have them as metrics in Neptune.
If you pass ``y_pred``, then predictions are not computed from ``X`` data.
Estimator should be fitted before calling this function.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
**Regressor**
    For regressors that output a single value, the following scores are logged:
* explained variance
* max error
* mean absolute error
* r2
For multi-output regressor:
* r2
**Classifier**
    For classifiers, the following scores are logged:
* precision
* recall
* f beta score
* support
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
estimator (:obj:`estimator`):
| Scikit-learn estimator to compute scores.
X (:obj:`ndarray`):
| Data matrix.
y (:obj:`ndarray`):
| Target for testing.
y_pred (:obj:`ndarray`, optional, default is ``None``):
| Estimator predictions on data.
name (`str`, optional, default is ``None``):
            | Use 'train', 'valid', or 'test' to indicate which data the scores are computed on.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
            exp = neptune.create_experiment()
log_scores(rfc, X, y, name='test', experiment=exp)
"""
assert is_regressor(estimator) or is_classifier(estimator),\
'Estimator should be sklearn regressor or classifier.'
assert isinstance(name, str), 'name should be str. {} was passed.'.format(type(name))
exp = _validate_experiment(experiment)
if y_pred is None:
y_pred = estimator.predict(X)
if is_regressor(estimator):
# single output
if len(y_pred.shape) == 1:
evs = explained_variance_score(y, y_pred)
me = max_error(y, y_pred)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
exp.log_metric('evs_{}_sklearn'.format(name), evs)
exp.log_metric('me_{}_sklearn'.format(name), me)
exp.log_metric('mae_{}_sklearn'.format(name), mae)
exp.log_metric('r2_{}_sklearn'.format(name), r2)
# multi output
if len(y_pred.shape) == 2:
r2 = estimator.score(X, y)
exp.log_metric('r2_{}_sklearn'.format(name), r2)
elif is_classifier(estimator):
for metric_name, values in zip(['precision', 'recall', 'fbeta_score', 'support'],
precision_recall_fscore_support(y, y_pred)):
for i, value in enumerate(values):
exp.log_metric('{}_class_{}_{}_sklearn'.format(metric_name, i, name), value)
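# For reference, the calls above produce metric channels named '<score>_<name>_sklearn'.
# A sketch of the names logged for name='test' (assuming a single-output regressor and a
# binary classifier respectively):
#
#     regressor:  evs_test_sklearn, me_test_sklearn, mae_test_sklearn, r2_test_sklearn
#     classifier: precision_class_0_test_sklearn, recall_class_1_test_sklearn, ...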
def log_learning_curve_chart(regressor, X_train, y_train, experiment=None):
"""Log learning curve chart.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
y_train (:obj:`ndarray`):
| The regression target for training
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_learning_curve_chart(rfr, X_train, y_train)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
exp = _validate_experiment(experiment)
try:
fig, ax = plt.subplots()
plot_learning_curve(regressor, X_train, y_train, ax=ax)
exp.log_image('charts_sklearn', fig, image_name='Learning Curve')
plt.close(fig)
except Exception as e:
print('Did not log learning curve chart. Error: {}'.format(e))
def log_feature_importance_chart(regressor, X_train, y_train, experiment=None):
"""Log feature importance chart.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
y_train (:obj:`ndarray`):
| The regression target for training
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_feature_importance_chart(rfr, X_train, y_train)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
exp = _validate_experiment(experiment)
try:
fig, ax = plt.subplots()
visualizer = FeatureImportances(regressor, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.finalize()
exp.log_image('charts_sklearn', fig, image_name='Feature Importance')
plt.close(fig)
except Exception as e:
print('Did not log feature importance chart. Error: {}'.format(e))
def log_residuals_chart(regressor, X_train, X_test, y_train, y_test, experiment=None):
"""Log residuals chart.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The regression target for training
y_test (:obj:`ndarray`):
| The regression target for testing
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
exp = neptune.create_experiment()
log_residuals_chart(rfr, X_train, X_test, y_train, y_test, experiment=exp)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
exp = _validate_experiment(experiment)
try:
fig, ax = plt.subplots()
visualizer = ResidualsPlot(regressor, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
exp.log_image('charts_sklearn', fig, image_name='Residuals Plot')
plt.close(fig)
except Exception as e:
print('Did not log residuals chart. Error: {}'.format(e))
def log_prediction_error_chart(regressor, X_train, X_test, y_train, y_test, experiment=None):
"""Log prediction error chart.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The regression target for training
y_test (:obj:`ndarray`):
| The regression target for testing
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_prediction_error_chart(rfr, X_train, X_test, y_train, y_test)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
exp = _validate_experiment(experiment)
try:
fig, ax = plt.subplots()
visualizer = PredictionError(regressor, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
exp.log_image('charts_sklearn', fig, image_name='Prediction Error')
plt.close(fig)
except Exception as e:
print('Did not log prediction error chart. Error: {}'.format(e))
def log_cooks_distance_chart(regressor, X_train, y_train, experiment=None):
    """Log Cooks distance chart.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
y_train (:obj:`ndarray`):
| The regression target for training
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_cooks_distance_chart(rfr, X_train, y_train)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
exp = _validate_experiment(experiment)
try:
fig, ax = plt.subplots()
visualizer = CooksDistance(ax=ax)
visualizer.fit(X_train, y_train)
visualizer.finalize()
exp.log_image('charts_sklearn', fig, image_name='Cooks Distance')
plt.close(fig)
except Exception as e:
print('Did not log cooks distance chart. Error: {}'.format(e))
def log_classification_report_chart(classifier, X_train, X_test, y_train, y_test, experiment=None):
"""Log classification report chart.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The classification target for training
y_test (:obj:`ndarray`):
| The classification target for testing
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
exp = neptune.create_experiment()
log_classification_report_chart(rfc, X_train, X_test, y_train, y_test, experiment=exp)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
exp = _validate_experiment(experiment)
try:
fig, ax = plt.subplots()
visualizer = ClassificationReport(classifier, support=True, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
exp.log_image('charts_sklearn', fig, image_name='Classification Report')
plt.close(fig)
except Exception as e:
print('Did not log Classification Report chart. Error: {}'.format(e))
def log_confusion_matrix_chart(classifier, X_train, X_test, y_train, y_test, experiment=None):
"""Log confusion matrix.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The classification target for training
y_test (:obj:`ndarray`):
| The classification target for testing
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_confusion_matrix_chart(rfc, X_train, X_test, y_train, y_test)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
exp = _validate_experiment(experiment)
try:
fig, ax = plt.subplots()
visualizer = ConfusionMatrix(classifier, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
exp.log_image('charts_sklearn', fig, image_name='Confusion Matrix')
plt.close(fig)
except Exception as e:
print('Did not log Confusion Matrix chart. Error: {}'.format(e))
def log_roc_auc_chart(classifier, X_train, X_test, y_train, y_test, experiment=None):
"""Log ROC-AUC chart.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The classification target for training
y_test (:obj:`ndarray`):
| The classification target for testing
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
exp = neptune.create_experiment()
log_roc_auc_chart(rfc, X_train, X_test, y_train, y_test, experiment=exp)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
exp = _validate_experiment(experiment)
try:
fig, ax = plt.subplots()
visualizer = ROCAUC(classifier, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
exp.log_image('charts_sklearn', fig, image_name='ROC-AUC')
plt.close(fig)
except Exception as e:
print('Did not log ROC-AUC chart. Error {}'.format(e))
def log_precision_recall_chart(classifier, X_test, y_test, y_pred_proba=None, experiment=None):
"""Log precision recall chart.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_test (:obj:`ndarray`):
| Testing data matrix
y_test (:obj:`ndarray`):
| The classification target for testing
y_pred_proba (:obj:`ndarray`, optional, default is ``None``):
| Classifier predictions probabilities on test data.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_precision_recall_chart(rfc, X_test, y_test)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
exp = _validate_experiment(experiment)
if y_pred_proba is None:
try:
y_pred_proba = classifier.predict_proba(X_test)
except Exception as e:
            print('Did not log Precision-Recall chart: this classifier does not provide prediction probabilities. '
                  'Error: {}'.format(e))
return
try:
fig, ax = plt.subplots()
plot_precision_recall(y_test, y_pred_proba, ax=ax)
exp.log_image('charts_sklearn', fig, image_name='Precision Recall Curve')
plt.close(fig)
except Exception as e:
print('Did not log Precision-Recall chart. Error {}'.format(e))
def log_class_prediction_error_chart(classifier, X_train, X_test, y_train, y_test, experiment=None):
"""Log class prediction error chart.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The classification target for training
y_test (:obj:`ndarray`):
| The classification target for testing
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
Returns:
``None``
Examples:
.. code:: python3
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
neptune.init('my_workspace/my_project')
exp = neptune.create_experiment()
log_class_prediction_error_chart(rfc, X_train, X_test, y_train, y_test, experiment=exp)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
exp = _validate_experiment(experiment)
try:
fig, ax = plt.subplots()
visualizer = ClassPredictionError(classifier, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
exp.log_image('charts_sklearn', fig, image_name='Class Prediction Error')
plt.close(fig)
except Exception as e:
print('Did not log Class Prediction Error chart. Error {}'.format(e))
def log_kmeans_clustering_summary(model, X,
nrows=1000, experiment=None, **kwargs):
"""Log sklearn kmeans summary.
This method fit KMeans model to data and logs cluster labels, all kmeans parameters
and clustering visualizations: KMeans elbow chart and silhouette coefficients chart.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
model (:obj:`KMeans`):
| KMeans object.
X (:obj:`ndarray`):
| Training instances to cluster.
nrows (`int`, optional, default is 1000):
| Number of rows to log in the cluster labels
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
kwargs:
KMeans parameters.
Returns:
``None``
Examples:
.. code:: python3
km = KMeans(n_init=11, max_iter=270)
X, y = make_blobs(n_samples=579, n_features=17, centers=7, random_state=28743)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_kmeans_clustering_summary(km, X=X)
"""
assert isinstance(model, KMeans), 'model should be sklearn KMeans instance'
exp = _validate_experiment(experiment)
model.set_params(**kwargs)
log_estimator_params(model, exp)
log_cluster_labels(model, X, nrows=nrows, experiment=exp, **kwargs)
# visualizations
log_kelbow_chart(model, X, experiment=exp, **kwargs)
log_silhouette_chart(model, X, experiment=exp, **kwargs)
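# Sketch of a direct call with overridden KMeans parameters (hypothetical data); the extra
# keyword arguments are forwarded to model.set_params() and reach the chart helpers below:
#
#     log_kmeans_clustering_summary(km, X=X, n_clusters=5)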
def log_cluster_labels(model, X, nrows=1000, experiment=None, **kwargs):
"""Log index of the cluster label each sample belongs to.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
model (:obj:`KMeans`):
| KMeans object.
X (:obj:`ndarray`):
| Training instances to cluster.
nrows (`int`, optional, default is 1000):
| Number of rows to log.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
kwargs:
KMeans parameters.
Returns:
``None``
Examples:
.. code:: python3
km = KMeans(n_init=11, max_iter=270)
X, y = make_blobs(n_samples=579, n_features=17, centers=7, random_state=28743)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_cluster_labels(km, X=X)
"""
assert isinstance(model, KMeans), 'Model should be sklearn KMeans instance.'
assert isinstance(nrows, int), 'nrows should be integer, {} was passed'.format(type(nrows))
exp = _validate_experiment(experiment)
model.set_params(**kwargs)
labels = model.fit_predict(X)
df = pd.DataFrame(data={'cluster_labels': labels})
log_csv('cluster_labels', df.head(nrows), exp)
def log_kelbow_chart(model, X, experiment=None, **kwargs):
"""Log K-elbow chart for KMeans clusterer.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
model (:obj:`KMeans`):
| KMeans object.
X (:obj:`ndarray`):
| Training instances to cluster.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
kwargs:
KMeans parameters.
Returns:
``None``
Examples:
.. code:: python3
km = KMeans(n_init=11, max_iter=270)
X, y = make_blobs(n_samples=579, n_features=17, centers=7, random_state=28743)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_kelbow_chart(km, X=X)
"""
assert isinstance(model, KMeans), 'Model should be sklearn KMeans instance.'
exp = _validate_experiment(experiment)
model.set_params(**kwargs)
if 'n_clusters' in kwargs:
k = kwargs['n_clusters']
else:
k = 10
try:
fig, ax = plt.subplots()
visualizer = KElbowVisualizer(model, k=k, ax=ax)
visualizer.fit(X)
visualizer.finalize()
exp.log_image('charts_sklearn', fig, image_name='KMeans elbow chart')
plt.close(fig)
except Exception as e:
print('Did not log KMeans elbow chart. Error {}'.format(e))
def log_silhouette_chart(model, X, experiment=None, **kwargs):
"""Log Silhouette Coefficients charts for KMeans clusterer.
Charts are computed for j = 2, 3, ..., n_clusters.
Make sure you created an experiment by using ``neptune.create_experiment()`` before you use this method.
Tip:
Check `Neptune documentation <https://docs.neptune.ai/integrations/scikit_learn.html>`_ for the full example.
Args:
model (:obj:`KMeans`):
| KMeans object.
X (:obj:`ndarray`):
| Training instances to cluster.
experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
| Neptune ``Experiment`` object to control to which experiment you log the data.
| If ``None``, log to currently active, and most recent experiment.
kwargs:
KMeans parameters.
Returns:
``None``
Examples:
.. code:: python3
km = KMeans(n_init=11, max_iter=270)
X, y = make_blobs(n_samples=579, n_features=17, centers=7, random_state=28743)
neptune.init('my_workspace/my_project')
neptune.create_experiment()
log_silhouette_chart(km, X=X, n_clusters=12)
"""
assert isinstance(model, KMeans), 'Model should be sklearn KMeans instance.'
exp = _validate_experiment(experiment)
model.set_params(**kwargs)
n_clusters = model.get_params()['n_clusters']
for j in range(2, n_clusters+1):
model.set_params(**{'n_clusters': j})
model.fit(X)
try:
fig, ax = plt.subplots()
visualizer = SilhouetteVisualizer(model, is_fitted=True, ax=ax)
visualizer.fit(X)
visualizer.finalize()
exp.log_image('charts_sklearn', fig, image_name='Silhouette Coefficients for k={}'.format(j))
plt.close(fig)
except Exception as e:
print('Did not log Silhouette Coefficients chart. Error {}'.format(e))
def _validate_experiment(experiment):
if experiment is not None:
if not isinstance(experiment, neptune.experiments.Experiment):
            raise ValueError('Passed experiment is not a Neptune experiment. Create one by using "create_experiment()"')
else:
try:
experiment = neptune.get_experiment()
except neptune.exceptions.NeptuneNoExperimentContextException:
raise neptune.exceptions.NeptuneNoExperimentContextException()
return experiment
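# End-to-end sketch of how the helpers above compose (names such as `rfc` are placeholders
# for a fitted sklearn estimator; Neptune credentials are assumed to be configured):
#
#     neptune.init('my_workspace/my_project')
#     exp = neptune.create_experiment()
#     log_estimator_params(rfc, experiment=exp)
#     log_pickled_model(rfc, model_name='rfc.skl', experiment=exp)
#     log_scores(rfc, X_test, y_test, name='test', experiment=exp)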
| [
"[email protected]"
]
| |
5acce732d50f502803e97c8c160c43ebfa86612f | bfbe642d689b5595fc7a8e8ae97462c863ba267a | /bin/Python27/Lib/site-packages/pylint/test/input/func_format.py | a4eed23cce1b18826301e55835ef0c93e6815fcb | [
"LicenseRef-scancode-other-permissive",
"MIT"
]
| permissive | mcanthony/meta-core | 0c0a8cde1669f749a4880aca6f816d28742a9c68 | 3844cce391c1e6be053572810bad2b8405a9839b | refs/heads/master | 2020-12-26T03:11:11.338182 | 2015-11-04T22:58:13 | 2015-11-04T22:58:13 | 45,806,011 | 1 | 0 | null | 2015-11-09T00:34:22 | 2015-11-09T00:34:22 | null | UTF-8 | Python | false | false | 1,731 | py | # pylint:disable=C0103,W0104,W0105
"""Check format
"""
__revision__ = ''
notpreceded= 1
notfollowed =1
notfollowed <=1
correct = 1
correct >= 1
def func(arg, arg2):
"""test named argument
"""
func(arg=arg+1,
arg2=arg2-arg)
aaaa,bbbb = 1,2
aaaa |= bbbb
aaaa &= bbbb
if aaaa: pass
else:
aaaa,bbbb = 1,2
aaaa,bbbb = bbbb,aaaa
bbbb = (1,2,3)
aaaa = bbbb[1:]
aaaa = bbbb[:1]
aaaa = bbbb[:]
aaaa = {aaaa:bbbb}
# allclose(x,y) uses |x-y|<ATOL+RTOL*|y|
"""docstring,should not match
isn't it:yes!
a=b
"""
aaaa = 'multiple lines\
string,hehehe'
boo = 2 # allclose(x,y) uses |x-y|<ATOL+RTOL*|y|
def other(funky):
"""yo, test formatted result with indentation"""
funky= funky+2
html = """<option value="=">ist genau gleich</option>
yo+=4
"""
html2 = """<option value='='>ist genau gleich</option>
yo+=4
"""
func('''<body>Hello
</body>''', 0)
assert boo <= 10, "Note is %.2f. Either you cheated, or pylint's \
broken!" % boo
def _gc_debug(gcc):
"""bad format undetected w/ py2.5"""
ocount = {}
for obj in gcc.get_objects():
try:
ocount[obj.__class__]+= 1
except KeyError:
ocount[obj.__class__]=1
except AttributeError:
pass
def hop(context):
"""multi-lines string"""
return ['''<a id="sendbutton" href="javascript: $('%(domid)s').submit()">
<img src="%(sendimgpath)s" alt="%(send)s"/>%(send)s</a>''' % context,
'''<a id="cancelbutton" href="javascript: history.back()">
<img src="%(cancelimgpath)s" alt="%(cancel)s"/>%(cancel)s</a>''' % context,
]
titreprojet = '<tr><td colspan="10">\
<img src="images/drapeau_vert.png" alt="Drapeau vert" />\
<strong>%s</strong></td></tr>' % aaaa
| [
"[email protected]"
]
| |
39c38cd5e8854236c9e077276699086fb28a72dc | 7bc54bae28eec4b735c05ac7bc40b1a8711bb381 | /src/dataset_specific/mnli/parsing_jobs/read_data_fn.py | da7fb65fc85bf09e27a79a9a17f912738e950ea6 | []
| no_license | clover3/Chair | 755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e | a2102ebf826a58efbc479181f1ebb5de21d1e49f | refs/heads/master | 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 | Python | UTF-8 | Python | false | false | 314 | py | from typing import Iterator
from dataset_specific.mnli.parsing_jobs.partition_specs import get_mnli_spacy_ps
from dataset_specific.mnli.parsing_jobs.run_spacy import NLIPairDataSpacy
def read_spacy_nli(split) -> Iterator[NLIPairDataSpacy]:
pds = get_mnli_spacy_ps(split)
return pds.read_pickles_as_itr() | [
"[email protected]"
]
| |
cdd1cda3afaf7c3e8dae492fadf1f64db86326fb | 651d77155bcb104d76fba163060b1d62fc4923f9 | /ScholarConfig/__init__.py | aa3569dfd7e31e192d3a6856e79b878aa1f0cca6 | []
| no_license | leisun123/scholar-private | 908f0ae5c95b6644bb157587007dfbb5e42d5928 | 81b93f6f16d2233419894cf45bdf34883f8b0cd2 | refs/heads/master | 2020-03-22T18:31:49.072513 | 2019-02-18T07:50:34 | 2019-02-18T07:50:34 | 140,465,515 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | #coding:utf-8
"""
@file: __init__.py
@author: IsolationWyn
@contact: [email protected]
@python: 3.5.2
@editor: PyCharm
@create: 2017/7/1 5:40
@description:
--
""" | [
"[email protected]"
]
| |
038d3954ec78ca6127f92666a6ba6b7a18a07af7 | 527e3ca2c316f7713ecf92faac58cd33cfaacb81 | /data_structures/sqlite_dict.py | 9c4f405aeb82b931153db26dce22023b357f4f91 | [
"LicenseRef-scancode-public-domain"
]
| permissive | aaw/yaupon | 1bade3ecd61f11468cb7ff7cd131ecdb1031aee7 | 2608e780abec654ff2d02e76d9ddd528d7fa69fa | refs/heads/master | 2021-01-10T21:27:21.727342 | 2011-04-24T15:42:54 | 2011-04-24T15:42:54 | 832,110 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,069 | py | import sqlite3
import yaupon.backend
from yaupon.data_structures.sqlite_tools import to_db, from_db
class SQLiteDictIterator(object):
def __init__(self, cursor):
self.cursor = cursor
def __iter__(self):
return self
def next(self):
result = self.cursor.fetchone()
if result is None:
raise StopIteration
else:
return self.transform(result)
class SQLiteDictScalarIterator(SQLiteDictIterator):
def __init__(self, cursor):
SQLiteDictIterator.__init__(self, cursor)
def transform(self, result):
return from_db(result[0])
class SQLiteDictRowIterator(SQLiteDictIterator):
def __init__(self, cursor):
SQLiteDictIterator.__init__(self, cursor)
def transform(self, result):
return tuple(map(from_db, result))
class SQLiteDict(object):
def __init__(self,
backend=None,
id=None,
pickle_protocol=2,
dict_args=None,
dict_kwargs=None):
if backend is None:
backend = yaupon.backend.BackendSQLite()
self.backend = yaupon.backend.getbackend(backend)
self.conn = backend.conn
self.pickle_protocol = pickle_protocol
self.conn.execute("""CREATE TABLE IF NOT EXISTS dict_instances
(id INTEGER PRIMARY KEY AUTOINCREMENT)
""")
if id is not None:
self.id = id
else:
self.id = self.conn.execute("""INSERT INTO dict_instances
VALUES (NULL)""").lastrowid
self.conn.execute("""CREATE TABLE IF NOT EXISTS dict_%s
(key BLOB UNIQUE,
value BLOB)
""" % self.id)
self.conn.execute("""CREATE INDEX IF NOT EXISTS dict_%s_key_index
ON dict_%s(key)
""" % (self.id, self.id))
self.conn.commit()
self.__get_STMT = 'SELECT value FROM dict_%s WHERE key = ?' % \
self.id
self.__set_STMT = """REPLACE INTO dict_%s (key, value)
VALUES (?,?)""" % self.id
self.__delete_STMT = 'DELETE FROM dict_%s WHERE key = ?' % self.id
if dict_args is None:
dict_args = []
if dict_kwargs is None:
dict_kwargs = {}
initial_dict = dict(*dict_args, **dict_kwargs)
self.update(initial_dict)
def __getstate__(self):
state = self.__dict__.copy()
del state['backend']
del state['conn']
state['backend_id'] = self.backend.id
return state
def __setstate__(self, state):
backend_id = state.pop('backend_id')
self.__dict__.update(state)
self.backend = yaupon.backend.get_cached_sqlite_backend(backend_id)
self.conn = self.backend.conn
def __backend__(self):
return self.backend
def __get_helper(self, key):
cursor = self.conn.execute(self.__get_STMT, (to_db(key),))
return cursor.fetchone()
def __getitem__(self, key):
result = self.__get_helper(key)
if result is None:
raise KeyError(key)
return from_db(result[0])
def __setitem__(self, key, value):
self.conn.execute(self.__set_STMT, (to_db(key), to_db(value)))
self.conn.commit()
def __delitem__(self, key):
result = self.__get_helper(key)
if result is None:
raise KeyError(key)
self.conn.execute(self.__delete_STMT, (to_db(key),))
self.conn.commit()
def has_key(self, key):
return self.__get_helper(key) is not None
def __contains__(self, key):
return self.has_key(key)
def get(self, key, defaultval=None):
result = self.__get_helper(key)
if result is None:
return defaultval
return from_db(result[0])
def clear(self):
self.conn.execute('DELETE FROM dict_%s' % self.id)
self.conn.commit()
def update(self, d):
for k,v in d.iteritems():
self.__setitem__(k,v)
def setdefault(self, key, value=None):
real_value = self.__get_helper(key)
if real_value is None:
self.__setitem__(key, value)
return value
else:
return from_db(real_value[0])
def iteritems(self):
cursor = self.conn.execute("""SELECT key,value
FROM dict_%s""" % self.id)
return SQLiteDictRowIterator(cursor)
def iterkeys(self):
cursor = self.conn.execute('SELECT key FROM dict_%s' % self.id)
return SQLiteDictScalarIterator(cursor)
def itervalues(self):
cursor = self.conn.execute('SELECT value FROM dict_%s' % self.id)
return SQLiteDictScalarIterator(cursor)
def __len__(self):
cursor = self.conn.execute('SELECT COUNT(*) FROM dict_%s' % self.id)
return cursor.fetchone()[0]
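# Rough usage sketch (hypothetical; assumes a yaupon SQLite backend can be constructed):
#
#     d = SQLiteDict(dict_kwargs={'a': 1, 'b': 2})
#     d['c'] = [3, 4]               # keys and values round-trip through to_db/from_db
#     assert 'c' in d and len(d) == 3
#     for key, value in d.iteritems():
#         ...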
| [
"[email protected]"
]
| |
9efb152f8faa20bf3b3821830425fbcf69e62ea4 | 1625edfe28b4b0979fd32b4a3c5e55249a993fd5 | /baekjoon11048.py | 172ca066d44cfb4bc64b7723fa299d1e33e4c9e9 | []
| no_license | beOk91/baekjoon2 | b8bf504c506c6278899d4107ecfe51974ef13f5e | 39569f8effb8e32405a7d74d98bdabcab783ec56 | refs/heads/master | 2023-05-11T20:11:19.015113 | 2020-09-14T23:58:49 | 2020-09-14T23:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | n,m=map(int,input().strip().split())
candy=[[0]*m for _ in range(n)]
dp=[[0]*m for _ in range(n)]
for i in range(n):
candy[i]=list(map(int,input().strip().split()))
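# Bottom-up DP: dp[i][j] = candy[i][j] + max(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]),
# i.e. the most candy collectable at cell (i, j) when moving only right, down, or
# diagonally down-right from the top-left corner.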
for i in range(n):
for j in range(m):
dp[i][j]+=candy[i][j]
if i>=1 and j>=1:
dp[i][j]=candy[i][j]+max(dp[i-1][j],dp[i][j-1],dp[i-1][j-1])
elif j==0 and i>=1:
dp[i][j]+=dp[i-1][j]
elif i==0 and j>=1:
dp[i][j]+=dp[i][j-1]
print(dp[n-1][m-1])
| [
"[email protected]"
]
| |
68b9bcd30296da62fa2be7cdced124d9dbd95fc2 | 7f7efb509c9647bd66546499ae1049e11fe78277 | /DbUsingMysql/manage.py | f19445becdac73935e8d1c545f371d90584c7ea1 | []
| no_license | janardhanvissa/Django_prac | fb5a967eba311b0703ebd0128acaf47cb93ae543 | 00d8ce4141eef8bfa12553b5c8e180d45224d906 | refs/heads/master | 2022-09-08T14:26:02.317817 | 2020-05-21T18:30:27 | 2020-05-21T18:30:27 | 265,869,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DbUsingMysql.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
23cbad3a18804ebdd91da88659a373dc225c8d96 | dd573ed68682fd07da08143dd09f6d2324f51345 | /baekjoon/my_study/1022_다른풀이2.py | 7bf4d84e6ea0cc27ffb55dfd6dcd11b4b3472466 | []
| no_license | chelseashin/My-Algorithm | 0f9fb37ea5c6475e8ff6943a5fdaa46f0cd8be61 | db692e158ebed2d607855c8e554fd291c18acb42 | refs/heads/master | 2021-08-06T12:05:23.155679 | 2021-07-04T05:07:43 | 2021-07-04T05:07:43 | 204,362,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | import sys
sys.stdin = open('1022_input.txt')
input = sys.stdin.readline
# right, up, left, down
d = [(1, 0), (0, -1), (-1, 0), (0, 1)]
r1, c1, r2, c2 = map(int, input().split())
arr = [[0] * (c2-c1+1) for _ in range(r2-r1+1)]
max_level = max(abs(r1), abs(c1), abs(r2), abs(c2))
r, c = 0, 0
cnt = 1
max_cnt = (max_level*2 + 1) ** 2
dist = 0
max_value = 0
i = 0
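# Walk the spiral outward from (0, 0): the segment length grows by one each time the
# direction switches to a horizontal move, and only cells that fall inside the requested
# window [r1..r2] x [c1..c2] are stored in arr.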
while cnt <= max_cnt:
if d[i][0]: dist += 1
for _ in range(dist):
if r1 <= r <= r2 and c1 <= c <= c2:
arr[r-r1][c-c1] = cnt
max_value = cnt
cnt += 1
c += d[i][0]
r += d[i][1]
i = (i + 1) % 4
max_length = len(str(max_value))
for i in arr:
for j in i:
print(str(j).rjust(max_length), end=" ")
print() | [
"[email protected]"
]
| |
ec9c280d082ce9f0c02a0f00672f761fdaa2fba3 | f33fff1f392d19c46a38c5a053bd206186a5c9d4 | /contrib/python/scikit-learn/py2/sklearn/tree/tests/test_tree.py | b8e4a8f4eb428b60483e0cc1fc5a46a17480bc8b | [
"Apache-2.0"
]
| permissive | david-waterworth/catboost | 6d276d284dc0dee4908d88aa6c3b3cdd19918a63 | 134c66156105cb8f7f6e4478dacf727ea53d0d29 | refs/heads/master | 2022-03-10T11:31:31.795631 | 2022-03-07T01:20:08 | 2022-03-07T01:20:08 | 175,712,691 | 0 | 1 | Apache-2.0 | 2019-07-30T23:35:34 | 2019-03-14T22:59:46 | C++ | UTF-8 | Python | false | false | 60,674 | py | """
Testing for the tree module (sklearn.tree).
"""
import copy
import pickle
from functools import partial
from itertools import product
import struct
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
from sklearn.tree.tree import CRITERIA_CLF
from sklearn.tree.tree import CRITERIA_REG
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae", "friedman_mse")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
@skip_if_32bit
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=3.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=2.5).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_impurity_split=-1.0).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
# test case with no weights passed in
total_weight = X.shape[0]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def check_min_weight_fraction_leaf_with_min_samples_leaf(name, datasets,
sparse=False):
"""Test the interaction between min_weight_fraction_leaf and min_samples_leaf
when sample_weights is not provided in fit."""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
total_weight = X.shape[0]
TreeEstimator = ALL_TREES[name]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test integer min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=5,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
max((total_weight *
est.min_weight_fraction_leaf), 5),
"Failed with {0} "
"min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(name,
est.min_weight_fraction_leaf,
est.min_samples_leaf))
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test float min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=.1,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
max((total_weight * est.min_weight_fraction_leaf),
(total_weight * est.min_samples_leaf)),
"Failed with {0} "
"min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(name,
est.min_weight_fraction_leaf,
est.min_samples_leaf))
def test_min_weight_fraction_leaf_with_min_samples_leaf():
# Check on dense input
for name in ALL_TREES:
yield (check_min_weight_fraction_leaf_with_min_samples_leaf,
name, "iris")
# Check on sparse input
for name in SPARSE_TREES:
yield (check_min_weight_fraction_leaf_with_min_samples_leaf,
name, "multilabel", True)
def test_min_impurity_split():
# test if min_impurity_split creates leaves with impurity
# [0, min_impurity_split) when min_samples_leaf = 1 and
# min_samples_split = 2.
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
min_impurity_split = .5
# verify leaf nodes without min_impurity_split less than
# impurity 1e-7
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
random_state=0)
assert_less_equal(est.min_impurity_split, 1e-7,
"Failed, min_impurity_split = {0} > 1e-7".format(
est.min_impurity_split))
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_equal(est.tree_.impurity[node], 0.,
"Failed with {0} "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
# verify leaf nodes have impurity [0,min_impurity_split] when using min_impurity_split
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=0)
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_greater_equal(est.tree_.impurity[node], 0,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
assert_less_equal(est.tree_.impurity[node], min_impurity_split,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = 8 * struct.calcsize("P")
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Save testing time by subsampling the larger datasets
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
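    # Draw a random number of entries per feature (some of them explicit
    # zeros) and record them in csc-style data/indices/indptr arrays.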
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
#@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that the leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure there is only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
def test_mae():
# check MAE criterion produces correct results
# on small toy dataset
dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae",
max_leaf_nodes=2)
    dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3])
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0/3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
    dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3], [0.6, 0.3, 0.1, 1.0, 0.3])
assert_array_equal(dt_mae.tree_.impurity, [7.0/2.3, 3.0/0.7, 4.0/1.6])
assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
def test_criterion_copy():
# Let's check whether copy of our criterion has the same type
# and properties as original
n_outputs = 3
n_classes = np.arange(3, dtype=np.intp)
n_samples = 100
def _pickle_copy(obj):
return pickle.loads(pickle.dumps(obj))
for copy_func in [copy.copy, copy.deepcopy, _pickle_copy]:
for _, typename in CRITERIA_CLF.items():
criteria = typename(n_outputs, n_classes)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_classes_), _ = result
assert_equal(typename, typename_)
assert_equal(n_outputs, n_outputs_)
assert_array_equal(n_classes, n_classes_)
for _, typename in CRITERIA_REG.items():
criteria = typename(n_outputs, n_samples)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_samples_), _ = result
assert_equal(typename, typename_)
assert_equal(n_outputs, n_outputs_)
assert_equal(n_samples, n_samples_)
| [
"[email protected]"
]
| |
ba8981626612bcea6b8a6291d19cb5fff90415c4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02714/s449498366.py | 6b369949d8b8c4f43a2335477399ed7b459eacfd | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from collections import *
N=int(input())
S=input()
c=Counter(S)
ans=c['R']*c['G']*c['B']
for i in range(1,N//2+1):
for j in range(i,N-i):
if S[j-i]!=S[j] and S[j]!=S[j+i] and S[j+i]!=S[j-i]:
ans-=1
print(ans)
| [
"[email protected]"
]
| |
4f9889c051ab3c9bec42a8c062228eb2f4078940 | ad963dc590fe3ee16fe70674ffa9a77a3462a2d2 | /taskManager/migrations/0021_auto_20200819_0632.py | 2efe17d7eef88bf142301e0361778a5ec6d7ef21 | []
| no_license | ShuheiKuriki/task_manager | 564dc1a646efdd288ff31bc9044981aecbd6db78 | f5d4a53a758c64615f22c69baae59b36dd5dab1f | refs/heads/master | 2023-05-12T11:06:11.388036 | 2023-01-15T09:12:37 | 2023-01-15T09:12:37 | 234,110,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | # Generated by Django 3.0.2 on 2020-08-18 21:32
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('taskManager', '0020_auto_20200425_1624'),
]
operations = [
migrations.AlterField(
model_name='task',
name='deadline',
field=models.DateField(default=django.utils.timezone.now, verbose_name='期限'),
),
migrations.AlterField(
model_name='task',
name='done_date',
field=models.DateField(default=django.utils.timezone.now, verbose_name='完了した日'),
),
migrations.AlterField(
model_name='task',
name='when',
field=models.DateField(default=django.utils.timezone.now, verbose_name='実行予定日'),
),
]
| [
"[email protected]"
]
| |
543728b72f006a8af964401c5f1de19a34155aaa | 1775a5522f465cb74a1e02393d32c363bb7ef215 | /tests/i18n/contenttypes/tests.py | 894ae0a3c737dff6674660450c1c9bdfd1e3d4c2 | [
"BSD-3-Clause"
]
| permissive | trught007/django | b280eaff7706e72a6fc0f298c68e3c065daa448b | d55d21dbb8b307941c2d26b95be46bf83015d868 | refs/heads/master | 2022-12-21T04:23:49.786811 | 2020-10-01T08:24:33 | 2020-10-01T08:24:33 | 300,203,187 | 0 | 0 | NOASSERTION | 2020-10-01T08:23:34 | 2020-10-01T08:23:33 | null | UTF-8 | Python | false | false | 1,164 | py | # coding: utf-8
from __future__ import unicode_literals
import os
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings
from django.test.utils import TransRealMixin
from django.utils._os import upath
from django.utils import six
from django.utils import translation
@override_settings(
USE_I18N=True,
LOCALE_PATHS=(
os.path.join(os.path.dirname(upath(__file__)), 'locale'),
),
LANGUAGE_CODE='en',
LANGUAGES=(
('en', 'English'),
('fr', 'French'),
),
)
class ContentTypeTests(TransRealMixin, TestCase):
def test_verbose_name(self):
company_type = ContentType.objects.get(app_label='i18n', model='company')
with translation.override('en'):
self.assertEqual(six.text_type(company_type), 'Company')
with translation.override('fr'):
self.assertEqual(six.text_type(company_type), 'Société')
def test_field_override(self):
company_type = ContentType.objects.get(app_label='i18n', model='company')
company_type.name = 'Other'
self.assertEqual(six.text_type(company_type), 'Other')
| [
"[email protected]"
]
| |
92491a80807b1a8457d04db0b1caa1a5bc99bfc8 | ff738b3ec7e5c8c414f6d3c7d74310d8fab69368 | /Top Interview Questions/236. Lowest Common Ancestor of a Binary Tree/solution.py | a8394b1efd2afa040587dddb0b3ee1a3f7fe33e2 | []
| no_license | jw3329/leetcode-problem-solving | a0684ef13bd60e81bd54b91e1b54827aaac9bf16 | 0cc7ad64891a23e348c8214f806a2820ac8c9e0a | refs/heads/main | 2023-08-17T20:36:51.624415 | 2023-08-17T07:09:56 | 2023-08-17T07:09:56 | 170,944,191 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        # A null node, or a node equal to p or q, is propagated upward as found.
        if not root or root == p or root == q:
            return root
        left = self.lowestCommonAncestor(root.left, p, q)
        right = self.lowestCommonAncestor(root.right, p, q)
        # If only one subtree contains p or q, the answer lies in that subtree.
        if not left:
            return right
        if not right:
            return left
        # p and q were found in different subtrees, so root is their LCA.
        return root
| [
"[email protected]"
]
| |
bee64d41a2fb444ea06a7502a6646b1ac1631dd2 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17s_1_02/routing_system/interface/ve/vrrpe/track/__init__.py | 6440e57171e0b3f190ed4027b25e11945d81c59b | [
"Apache-2.0"
]
| permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,828 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import network
import interface
class track(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/interface/ve/vrrpe/track. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Interface to be tracked
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__network','__interface',)
_yang_name = 'track'
_rest_name = 'track'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__interface = YANGDynClass(base=YANGListType("interface_type interface_name",interface.interface, yang_name="interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-type interface-name', extensions={u'tailf-common': {u'callpoint': u'vrrpe_track_vlan', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'cli-full-no': None}}), is_container='list', yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'vrrpe_track_vlan', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='list', is_config=True)
self.__network = YANGDynClass(base=YANGListType("network_address",network.network, yang_name="network", rest_name="network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='network-address', extensions={u'tailf-common': {u'info': u'IPv4 network', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-no-match-completion': None, u'callpoint': u'vrrp_track_network_vlan'}}), is_container='list', yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv4 network', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-no-match-completion': None, u'callpoint': u'vrrp_track_network_vlan'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'interface', u've', u'vrrpe', u'track']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Ve', u'vrrp-extended-group', u'track']
def _get_network(self):
"""
Getter method for network, mapped from YANG variable /routing_system/interface/ve/vrrpe/track/network (list)
"""
return self.__network
def _set_network(self, v, load=False):
"""
Setter method for network, mapped from YANG variable /routing_system/interface/ve/vrrpe/track/network (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_network is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_network() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("network_address",network.network, yang_name="network", rest_name="network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='network-address', extensions={u'tailf-common': {u'info': u'IPv4 network', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-no-match-completion': None, u'callpoint': u'vrrp_track_network_vlan'}}), is_container='list', yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv4 network', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-no-match-completion': None, u'callpoint': u'vrrp_track_network_vlan'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """network must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("network_address",network.network, yang_name="network", rest_name="network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='network-address', extensions={u'tailf-common': {u'info': u'IPv4 network', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-no-match-completion': None, u'callpoint': u'vrrp_track_network_vlan'}}), is_container='list', yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv4 network', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-no-match-completion': None, u'callpoint': u'vrrp_track_network_vlan'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='list', is_config=True)""",
})
self.__network = t
if hasattr(self, '_set'):
self._set()
def _unset_network(self):
self.__network = YANGDynClass(base=YANGListType("network_address",network.network, yang_name="network", rest_name="network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='network-address', extensions={u'tailf-common': {u'info': u'IPv4 network', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-no-match-completion': None, u'callpoint': u'vrrp_track_network_vlan'}}), is_container='list', yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv4 network', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-no-match-completion': None, u'callpoint': u'vrrp_track_network_vlan'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='list', is_config=True)
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /routing_system/interface/ve/vrrpe/track/interface (list)
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /routing_system/interface/ve/vrrpe/track/interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("interface_type interface_name",interface.interface, yang_name="interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-type interface-name', extensions={u'tailf-common': {u'callpoint': u'vrrpe_track_vlan', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'cli-full-no': None}}), is_container='list', yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'vrrpe_track_vlan', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("interface_type interface_name",interface.interface, yang_name="interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-type interface-name', extensions={u'tailf-common': {u'callpoint': u'vrrpe_track_vlan', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'cli-full-no': None}}), is_container='list', yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'vrrpe_track_vlan', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='list', is_config=True)""",
})
self.__interface = t
if hasattr(self, '_set'):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(base=YANGListType("interface_type interface_name",interface.interface, yang_name="interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-type interface-name', extensions={u'tailf-common': {u'callpoint': u'vrrpe_track_vlan', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'cli-full-no': None}}), is_container='list', yang_name="interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'vrrpe_track_vlan', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='list', is_config=True)
network = __builtin__.property(_get_network, _set_network)
interface = __builtin__.property(_get_interface, _set_interface)
_pyangbind_elements = {'network': network, 'interface': interface, }
| [
"[email protected]"
]
| |
7bd223eed55c60a6bbc672d3eead4cec2eaa85da | 9e87897c988af634c3fddc42113992a65ec006f4 | /sims/publish/Timer.py | 55983e3ee337f2acbf16df75fcc7534d8c10ee4c | [
"MIT"
]
| permissive | luiarthur/cytof5 | 152eb06030785fdff90220f0d0a244a02204c2e9 | 6b4df5e9fd94bfd586e96579b8c618fdf6f913ed | refs/heads/master | 2021-07-20T13:39:45.821597 | 2021-03-02T23:27:35 | 2021-03-02T23:27:35 | 145,253,611 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | import time
class Timer(object):
"""
Usage:
with Timer('Model training'):
time.sleep(2)
x = 1
"""
def __init__(self, name=None, ndigits=3):
self.name = name
self.ndigits = ndigits
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
if self.name:
print(self.name, end=' ')
elapsed = time.time() - self.tstart
print('time: {}s'.format(round(elapsed, self.ndigits)))
| [
"[email protected]"
]
| |
c235e510251267965a2a2495d688e9f3a662d539 | 6d143b66abdc9d871468e0576de09c63ca71a630 | /A98_Guess_Bin_Dec/QA.py | 69aa9552f8b49c96a9173f8178b01b531fccdb8b | []
| no_license | ChristerNilsson/KosmosTeacher | 98acf9bf35ed13930a05641652e68c7e0f3d2d7c | f47d2e8dba21f2ea6f19d6c7b766f36ccbcbeda4 | refs/heads/master | 2021-03-19T13:45:51.307065 | 2016-10-13T01:25:34 | 2016-10-13T01:25:34 | 68,541,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | import random
class QA:
def __init__(self,width,height):
self.level = 1
self.width = width
self.height = height
self.longList = self.makeLongList()
self.shortList = self.makeShortList()
def makeShortList(self):
self.n = self.level+1
self.index = random.randint(0, self.n-1)
self.guess_index = -1
return random.sample(self.longList, self.n)
def mousePressed(self):
if mouseX < width/2: return
self.guess_index = mouseY/(height/self.n)
self.level += 1 if self.guess_index == self.index else -1
self.level = constrain(self.level, 1, 10)
self.longList = self.makeLongList()
self.shortList = self.makeShortList()
def draw(self):
if self.index >= len(self.shortList): return
self.displayQuestion(self.shortList[self.index], 0, 0, width/2, height)
h = height/self.n
for i in range(len(self.shortList)):
self.displayAnswer(self.shortList[i], width/2, i*h, width/2, h) | [
"[email protected]"
]
| |
7ee95bf97026d05a27e1e151a7c260a48f2df776 | e5b8a5d93989dd53933c5cd417afa8b2a39ad307 | /ultracart/models/cart_settings_payment_credit_card.py | 5035a14959094b67696de38b8d2b473edf852597 | [
"Apache-2.0"
]
| permissive | gstingy/uc_python_api | f3586bfce9c962af2e8c1bc266ff25e0f1971278 | 9a0bd3f6e63f616586681518e44fe37c6bae2bba | refs/heads/master | 2020-03-28T11:13:22.537641 | 2018-09-10T17:07:59 | 2018-09-10T17:07:59 | 148,190,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,222 | py | # coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CartSettingsPaymentCreditCard(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'collect_credit_card_verification_number': 'bool',
'credit_card_types': 'list[str]',
'hosted_fields_shopping_cart_token': 'str'
}
attribute_map = {
'collect_credit_card_verification_number': 'collect_credit_card_verification_number',
'credit_card_types': 'credit_card_types',
'hosted_fields_shopping_cart_token': 'hosted_fields_shopping_cart_token'
}
def __init__(self, collect_credit_card_verification_number=None, credit_card_types=None, hosted_fields_shopping_cart_token=None):
"""
CartSettingsPaymentCreditCard - a model defined in Swagger
"""
self._collect_credit_card_verification_number = None
self._credit_card_types = None
self._hosted_fields_shopping_cart_token = None
self.discriminator = None
if collect_credit_card_verification_number is not None:
self.collect_credit_card_verification_number = collect_credit_card_verification_number
if credit_card_types is not None:
self.credit_card_types = credit_card_types
if hosted_fields_shopping_cart_token is not None:
self.hosted_fields_shopping_cart_token = hosted_fields_shopping_cart_token
@property
def collect_credit_card_verification_number(self):
"""
Gets the collect_credit_card_verification_number of this CartSettingsPaymentCreditCard.
True if the credit card verification number should be collected
:return: The collect_credit_card_verification_number of this CartSettingsPaymentCreditCard.
:rtype: bool
"""
return self._collect_credit_card_verification_number
@collect_credit_card_verification_number.setter
def collect_credit_card_verification_number(self, collect_credit_card_verification_number):
"""
Sets the collect_credit_card_verification_number of this CartSettingsPaymentCreditCard.
True if the credit card verification number should be collected
:param collect_credit_card_verification_number: The collect_credit_card_verification_number of this CartSettingsPaymentCreditCard.
:type: bool
"""
self._collect_credit_card_verification_number = collect_credit_card_verification_number
@property
def credit_card_types(self):
"""
Gets the credit_card_types of this CartSettingsPaymentCreditCard.
Available credit card types
:return: The credit_card_types of this CartSettingsPaymentCreditCard.
:rtype: list[str]
"""
return self._credit_card_types
@credit_card_types.setter
def credit_card_types(self, credit_card_types):
"""
Sets the credit_card_types of this CartSettingsPaymentCreditCard.
Available credit card types
:param credit_card_types: The credit_card_types of this CartSettingsPaymentCreditCard.
:type: list[str]
"""
self._credit_card_types = credit_card_types
@property
def hosted_fields_shopping_cart_token(self):
"""
Gets the hosted_fields_shopping_cart_token of this CartSettingsPaymentCreditCard.
The shoppingCartToken needed for proper initialization of hosted fields collection
:return: The hosted_fields_shopping_cart_token of this CartSettingsPaymentCreditCard.
:rtype: str
"""
return self._hosted_fields_shopping_cart_token
@hosted_fields_shopping_cart_token.setter
def hosted_fields_shopping_cart_token(self, hosted_fields_shopping_cart_token):
"""
Sets the hosted_fields_shopping_cart_token of this CartSettingsPaymentCreditCard.
The shoppingCartToken needed for proper initialization of hosted fields collection
:param hosted_fields_shopping_cart_token: The hosted_fields_shopping_cart_token of this CartSettingsPaymentCreditCard.
:type: str
"""
self._hosted_fields_shopping_cart_token = hosted_fields_shopping_cart_token
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CartSettingsPaymentCreditCard):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
]
| |
e5605dc11600c8d220f96a2cd4525bcd1afc3fbe | e164fd9dce5fef093f85ca009f78570ec2b1c492 | /134. Gas Station.py | 460d1262ff314afb450941395616bf4f0bf76202 | []
| no_license | havenshi/leetcode | 58fde93a1f1cbdd3c2faa9566c00383e5812f3a7 | bcb79f329bcb133e6421db8fc1f4780a4eedec39 | refs/heads/master | 2021-01-22T04:15:23.748793 | 2019-11-30T04:25:54 | 2019-11-30T04:25:54 | 92,447,327 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | class Solution(object):
def canCompleteCircuit(self, gas, cost):
"""
:type gas: List[int]
:type cost: List[int]
:rtype: int
"""
sum = 0 # total remained gas
subsum = 0 # remained gas for each period
index = 0
for i in range(len(gas)):
if subsum + gas[i] - cost[i] >= 0: # can come to next station
subsum += gas[i] - cost[i]
sum += gas[i] - cost[i]
else:
subsum = 0
index = i + 1 # recount from next station
sum += gas[i] - cost[i] # still the total
if sum<0: # total gas can not cover a circle
return -1
else:
return index | [
"[email protected]"
]
| |
2d61a2d61a5c1cfb5a5512320d6a0d0b8f917e40 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/pointcloud/_stream.py | e428b82d9afa0bbe1a71fbaabb76554bb63cde83 | [
"MIT"
]
| permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 857 | py | import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name='stream', parent_name='pointcloud', **kwargs
):
super(StreamValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str='Stream',
data_docs="""
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to *50*, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.""",
**kwargs
)
| [
"[email protected]"
]
| |
0b51853a0df25836be8b7e03c904f6965d6477c7 | 4bd555bc662b8182a2e7644976bfdb00ed5e1ebe | /PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/testing/jpl_units/UnitDblFormatter.py | c9fcc82b7b8eb9beca0e1bd519b556506a6c9140 | []
| no_license | fhelmli/homeNOWG2 | a103df1ef97194dec9501dbda87ec1f7c111fb4a | e794fd87b296544542fd9dc7ac94c981c6312419 | refs/heads/master | 2020-04-04T13:40:20.417769 | 2019-01-30T21:41:04 | 2019-01-30T21:41:04 | 155,970,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,906 | py | #import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#import pythonista
#===========================================================================
#
# UnitDblFormatter
#
#===========================================================================
"""UnitDblFormatter module containing class UnitDblFormatter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import print_function
import matplotlib.ticker as ticker
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'UnitDblFormatter' ]
#===========================================================================
class UnitDblFormatter( ticker.ScalarFormatter ):
"""The formatter for UnitDbl data types. This allows for formatting
with the unit string.
"""
def __init__( self, *args, **kwargs ):
'The arguments are identical to matplotlib.ticker.ScalarFormatter.'
ticker.ScalarFormatter.__init__( self, *args, **kwargs )
def __call__( self, x, pos = None ):
'Return the format for tick val x at position pos'
if len(self.locs) == 0:
return ''
else:
return str(x)
def format_data_short( self, value ):
"Return the value formatted in 'short' format."
return str(value)
def format_data( self, value ):
"Return the value formatted into a string."
return str(value)
| [
"[email protected]"
]
| |
2435df37568acac6639a123d883dec6a89e6d42a | a03eba726a432d8ef133f2dc55894ba85cdc4a08 | /events/contrib/plugins/form_elements/fields/email/__init__.py | dcfddf62d769375628ada36fea952633354c581d | [
"MIT"
]
| permissive | mansonul/events | 2546c9cfe076eb59fbfdb7b4ec8bcd708817d59b | 4f6ca37bc600dcba3f74400d299826882d53b7d2 | refs/heads/master | 2021-01-15T08:53:22.442929 | 2018-01-30T16:14:20 | 2018-01-30T16:14:20 | 99,572,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | __title__ = 'fobi.contrib.plugins.form_elements.fields.email'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('default_app_config', 'UID',)
default_app_config = 'fobi.contrib.plugins.form_elements.fields.' \
'email.apps.Config'
UID = 'email'
| [
"[email protected]"
]
| |
46da629c2f47e5979d62a6729f7c6c481856589d | f323771a686df2b934597f40334f168fa5d8915e | /interface_manager/backend/impi_project/interface_app/views/task/task_detail_results_views.py | e538efda5a6adfbdc8bc2239e5165fc01ec335b2 | []
| no_license | luochun3731/test-dev | 4ac560b5cc2935986be7f3d29f445f6c0d59f60f | bab018464d26de5a493e5e5a60b382e612b6ca92 | refs/heads/master | 2022-02-04T13:20:23.748457 | 2019-05-19T03:32:41 | 2019-05-19T03:32:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | py | import datetime
from django.forms.models import model_to_dict
from interface_app import common
from interface_app.models.task import Task, TaskInterface
from interface_app.models.result import TaskResult, InterfaceResult
from django.views.generic import View
from interface_app.my_exception import MyException
# Create your views here.
# CRUD operations for a task's interfaces and results
class TaskDetailVersionViews(View):
def get(self, request, pk, *args, **kwargs):
"""
        Get the list of result versions for a single task
:param request:
        :param pk: the task id
:param args:
:param kwargs:
:return:
"""
results = TaskResult.objects.filter(task_id=pk).order_by('-version')
ret = []
for i in results:
tmp = dict()
tmp["version"] = i.version
tmp["task_id"] = i.task_id
tmp['created'] = i.created.strftime("%Y-%m-%d %H:%M")
tmp["id"] = i.id
ret.append(tmp)
return common.response_success(ret)
class TaskDetailVersionResultsViews(View):
def get(self, request, pk, *args, **kwargs):
"""
        Get the list of interface results for one version
:param request:
        :param pk: the task result (version) id
:param args:
:param kwargs:
:return:
"""
results = InterfaceResult.objects.filter(task_result_id=pk)
ret = [model_to_dict(i) for i in results]
return common.response_success(ret)
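# Editor's sketch (illustrative only): these views would typically be wired up
# with URL patterns along these lines; the paths and import below are
# assumptions, not taken from this project's actual urls.py.
# from django.urls import path
# urlpatterns = [
#     path('tasks/<int:pk>/versions/', TaskDetailVersionViews.as_view()),
#     path('task_results/<int:pk>/results/', TaskDetailVersionResultsViews.as_view()),
# ]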
| [
"[email protected]"
]
| |
9f0405164e1aa388241db459f753662741dad1dd | 3003a8663135aa10f5a152a8642bc6ab270995b9 | /ggCloudSDK/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/cloudfunctions/v1beta1/cloudfunctions_v1beta1_messages.py | b261bf7b1ecec2f876fe5d06063637dc4a0eae10 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | bopopescu/packmybot | 1b4d199b36d196e5e769a781b520019bb4d0bdbc | 92de1e72cfc51b41447366ffc81a9dcd9a5e7870 | refs/heads/master | 2022-11-25T23:46:06.946645 | 2015-10-22T08:22:04 | 2015-10-22T08:22:04 | 282,313,675 | 0 | 0 | null | 2020-07-24T20:50:10 | 2020-07-24T20:50:10 | null | UTF-8 | Python | false | false | 21,374 | py | """Generated message classes for cloudfunctions version v1beta1.
API for managing lightweight user-provided functions executed in response to
events.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from protorpc import messages as _messages
from googlecloudsdk.third_party.apitools.base.py import encoding
package = 'cloudfunctions'
class CallFunctionResponse(_messages.Message):
"""Response of CallFunction method.
Fields:
error: Either system or user-function generated error. Set if execution
was not successful.
executionId: Execution id of function invocation.
result: Result populated for successful execution of synchronous function.
Will not be populated if function does not return a result through
context.
"""
error = _messages.StringField(1)
executionId = _messages.StringField(2)
result = _messages.BytesField(3)
class CloudfunctionsOperationsGetRequest(_messages.Message):
"""A CloudfunctionsOperationsGetRequest object.
Fields:
name: The name of the operation resource.
"""
name = _messages.StringField(1, required=True)
class CloudfunctionsProjectsRegionsFunctionsCallRequest(_messages.Message):
"""A CloudfunctionsProjectsRegionsFunctionsCallRequest object.
Fields:
data: Input to be passed to the function.
name: The name of the function to be called.
"""
data = _messages.BytesField(1)
name = _messages.StringField(2, required=True)
class CloudfunctionsProjectsRegionsFunctionsCreateRequest(_messages.Message):
"""A CloudfunctionsProjectsRegionsFunctionsCreateRequest object.
Fields:
hostedFunction: A HostedFunction resource to be passed as the request
body.
location: The project and region in which the function should be created,
specified in the format: projects/*/regions/*
"""
hostedFunction = _messages.MessageField('HostedFunction', 1)
location = _messages.StringField(2, required=True)
class CloudfunctionsProjectsRegionsFunctionsDeleteRequest(_messages.Message):
"""A CloudfunctionsProjectsRegionsFunctionsDeleteRequest object.
Fields:
name: The name of the function which should be deleted.
"""
name = _messages.StringField(1, required=True)
class CloudfunctionsProjectsRegionsFunctionsGetRequest(_messages.Message):
"""A CloudfunctionsProjectsRegionsFunctionsGetRequest object.
Fields:
name: The name of the function which details should be obtained.
"""
name = _messages.StringField(1, required=True)
class CloudfunctionsProjectsRegionsFunctionsListRequest(_messages.Message):
"""A CloudfunctionsProjectsRegionsFunctionsListRequest object.
Fields:
location: The project and region in which the function should be created,
specified in the format: projects/*/regions/*
pageSize: Maximum number of functions to return.
pageToken: The value returned by the last ListFunctionsResponse; indicates
that this is a continuation of a prior ListFunctions call, and that the
system should return the next page of data.
"""
location = _messages.StringField(1, required=True)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
class FunctionTrigger(_messages.Message):
"""Describes binding of computation to the event source.
Fields:
gsUri: Google Cloud Storage resource whose changes trigger the events.
Currently, it must have the form gs://<bucket>/ (that is, it must refer
to a bucket, rather than an object).
pubsubTopic: A pub/sub type of source.
"""
gsUri = _messages.StringField(1)
pubsubTopic = _messages.StringField(2)
class HostedFunction(_messages.Message):
"""Describes a cloud function that contains user computation executed in
response to an event. It encapsulate function and triggers configurations.
Enums:
StatusValueValuesEnum: [Output only] Status of the function deployment.
Fields:
entryPoint: The name of the function (as defined in source code) that will
be executed. Defaults to the resource name suffix, if not specified. For
backward compatibility, if function with given name is not found, then
the system will try to use function named 'function'. For Node.js this
is name of a function exported by the module specified in
source_location.
gcsUrl: GCS URL pointing to the zip archive which contains the function.
latestOperation: [Output only] Name of the most recent operation modifying
the function. If the function status is DEPLOYING or DELETING, then it
points to the active operation.
name: A user-defined name of the function. Function names must be unique
globally and match pattern: projects/*/regions/*/functions/*
oauthScopes: The set of Google API scopes to be made available to the
function while it is being executed. Values should be in the format of
scope developer codes, for example:
"https://www.googleapis.com/auth/compute".
sourceRepository: The hosted repository where the function is defined.
status: [Output only] Status of the function deployment.
triggers: List of triggers.
"""
class StatusValueValuesEnum(_messages.Enum):
"""[Output only] Status of the function deployment.
Values:
STATUS_UNSPECIFIED: Status not specified.
READY: Successfully deployed.
FAILED: Not deployed correctly - behavior is undefined. The item should
be updated or deleted to move it out of this state.
DEPLOYING: Creation or update in progress.
DELETING: Deletion in progress.
"""
STATUS_UNSPECIFIED = 0
READY = 1
FAILED = 2
DEPLOYING = 3
DELETING = 4
entryPoint = _messages.StringField(1)
gcsUrl = _messages.StringField(2)
latestOperation = _messages.StringField(3)
name = _messages.StringField(4)
oauthScopes = _messages.StringField(5, repeated=True)
sourceRepository = _messages.MessageField('SourceRepository', 6)
status = _messages.EnumField('StatusValueValuesEnum', 7)
triggers = _messages.MessageField('FunctionTrigger', 8, repeated=True)
class ListFunctionsResponse(_messages.Message):
"""Response for the ListFunctions method.
Fields:
functions: The functions that match the request.
nextPageToken: If not empty, indicates that there may be more functions
that match the request; this value should be passed in a new
ListFunctionsRequest to get more functions.
"""
functions = _messages.MessageField('HostedFunction', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class Operation(_messages.Message):
"""This resource represents a long-running operation that is the result of a
network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation.
It typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success.
If the original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Fields:
done: If the value is `false`, it means the operation is still in
progress. If true, the operation is completed, and either `error` or
`response` is available.
error: The error result of the operation in case of failure.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping
above, the `name` should have the format of
`operations/some/unique/name`.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
"""Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
      additionalProperties: Properties of the object. Contains field @type with
type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ResponseValue(_messages.Message):
"""The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the response
is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Messages:
AdditionalProperty: An additional property for a ResponseValue object.
Fields:
      additionalProperties: Properties of the object. Contains field @type with
type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ResponseValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
class OperationMetadata(_messages.Message):
"""Metadata describing an Operation
Enums:
TypeValueValuesEnum: Type of operation.
Messages:
RequestValue: The original request that started the operation.
Fields:
request: The original request that started the operation.
target: Target of the operation - for example
projects/project-1/regions/region-1/functions/function-1
type: Type of operation.
"""
class TypeValueValuesEnum(_messages.Enum):
"""Type of operation.
Values:
OPERATION_UNSPECIFIED: Unknown operation type.
CREATE_FUNCTION: Triggered by CreateFunction call
UPDATE_FUNCTION: Triggered by UpdateFunction call
DELETE_FUNCTION: Triggered by DeleteFunction call.
"""
OPERATION_UNSPECIFIED = 0
CREATE_FUNCTION = 1
UPDATE_FUNCTION = 2
DELETE_FUNCTION = 3
@encoding.MapUnrecognizedFields('additionalProperties')
class RequestValue(_messages.Message):
"""The original request that started the operation.
Messages:
AdditionalProperty: An additional property for a RequestValue object.
Fields:
      additionalProperties: Properties of the object. Contains field @type with
type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a RequestValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
request = _messages.MessageField('RequestValue', 1)
target = _messages.StringField(2)
type = _messages.EnumField('TypeValueValuesEnum', 3)
class SourceRepository(_messages.Message):
"""Describes the location of the function source in a remote repository.
Fields:
branch: The name of the branch from which the function should be fetched.
deployedRevision: [Output only] The id of the revision that was resolved
at the moment of function creation or update. For example when a user
deployed from a branch, it will be the revision id of the latest change
on this branch at that time. If user deployed from revision then this
value will be always equal to the revision specified by the user.
revision: The id of the revision that captures the state of the repository
from which the function should be fetched.
sourceUrl: URL to the hosted repository where the function is defined. The
URL should include the path to the directory within the repository where
the function is located. Only paths in
https://source.developers.google.com domain are supported.
tag: The name of the tag that captures the state of the repository from
which the function should be fetched.
"""
branch = _messages.StringField(1)
deployedRevision = _messages.StringField(2)
revision = _messages.StringField(3)
sourceUrl = _messages.StringField(4)
tag = _messages.StringField(5)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" or "email:<ldap>" to
include in api requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class Status(_messages.Message):
"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). The error model is designed to be:
- Simple to use and understand for most users - Flexible enough to meet
unexpected needs # Overview The `Status` message contains three pieces of
data: error code, error message, and error details. The error code should be
an enum value of google.rpc.Code, but it may accept additional error codes
if needed. The error message should be a developer-facing English message
that helps developers *understand* and *resolve* the error. If a localized
user-facing error message is needed, put the localized message in the error
details or localize it in the client. The optional error details may contain
arbitrary information about the error. There is a predefined set of error
detail types in the package `google.rpc` which can be used for common error
conditions. # Language mapping The `Status` message is the logical
representation of the error model, but it is not necessarily the actual wire
format. When the `Status` message is exposed in different client libraries
and different wire protocols, it can be mapped differently. For example, it
will likely be mapped to some exceptions in Java, but more likely mapped to
some error codes in C. # Other uses The error model and the `Status`
message can be used in a variety of environments, either with or without
APIs, to provide a consistent developer experience across different
environments. Example uses of this error model include: - Partial errors.
If a service needs to return partial errors to the client, it may embed
the `Status` in the normal response to indicate the partial errors. -
Workflow errors. A typical workflow has multiple steps. Each step may
have a `Status` message for error reporting purpose. - Batch operations. If
a client uses batch request and batch response, the `Status` message
should be used directly inside batch response, one for each error sub-
response. - Asynchronous operations. If an API call embeds asynchronous
operation results in its response, the status of those operations should
be represented directly using the `Status` message. - Logging. If some
API errors are stored in logs, the message `Status` could be used
directly after any stripping needed for security/privacy reasons.
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There will be a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
      additionalProperties: Properties of the object. Contains field @type with
type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
package=u'cloudfunctions')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
package=u'cloudfunctions')
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv',
package=u'cloudfunctions')
| [
"[email protected]"
]
| |
00c8fe4031a06644da8c84e78b0266f96322e245 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_domains.py | e47a8adb75f53f5e40efaa4d4d78e4e9ac48034d | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.nouns._domain import _DOMAIN
# class header
class _DOMAINS(_DOMAIN):
    def __init__(self):
        _DOMAIN.__init__(self)
        self.name = "DOMAINS"
        self.specie = 'nouns'
        self.basic = "domain"
        self.jsondata = {}
| [
"[email protected]"
]
| |
03ea1db815ea69be1500fd4fa0602c4e3cb62694 | 25439cf61037818a6c78a4d6db7edfddd7dc9b51 | /src/python/zquantum/core/history/save_conditions.py | 9a731f048d410c40324188401e6837557d0fd8f2 | [
"Apache-2.0"
]
| permissive | antalszava/z-quantum-core | fa1f6b79709256e690194de306ac84df409f6950 | 8137ecb5833ab7996815c24eefa685317b5e2e13 | refs/heads/master | 2023-01-08T08:45:20.277025 | 2020-11-02T15:23:24 | 2020-11-02T15:23:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | """Save conditions possible to use with recorder."""
from typing import Any
from typing_extensions import Protocol
class SaveCondition(Protocol):
"""Protocol of a function determining if given call should should be saved in the history."""
def __call__(self, value: Any, params: Any, call_number: int) -> bool:
"""Determine whether current call should be saved in the history.
Suppose the recorder is constructed for a function `f`, and the params
`x` are passed to `f` such that `y`=`f(x)`. Then, if this is `n-th`
evaluation of the function, the value of __call__(y, x, n) determines
if current call should be saved to the history.
:param value: current value of the function.
:param params: parameters passed to the function.
:param call_number: a natural number determining how many times the target
function has been called.
:return: A boolean indicating whether the call being processed should be saved to
history.
"""
pass
def always(value: Any, params: Any, call_number: int) -> bool:
"""Default save condition: save always."""
return True
def every_nth(n: int) -> SaveCondition:
"""Save condition: every n-th step, counting from zero-th one.
    Note: this is a factory function, i.e. it returns the actual save condition for a given n.
"""
def _save_condition(value: Any, params: Any, call_number: int) -> bool:
return call_number % n == 0
return _save_condition
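# Illustrative usage (editor's sketch, not part of the original module):
# every_nth(3) yields a SaveCondition that is True for call numbers 0, 3, 6, ...
# This particular condition ignores the value and params arguments.
if __name__ == "__main__":
    condition = every_nth(3)
    print([condition(None, None, i) for i in range(7)])
    # expected: [True, False, False, True, False, False, True]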
| [
"[email protected]"
]
| |
ed1dae2731b95b2fb16c3f98fb739df9a348662a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_360/ch149_2020_04_13_19_40_50_377558.py | 5a4ab204d93e91a583e1bd2feb89d2c0154352fc | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | #inputs
salario = float(input("Qual o valor do seu salário?"))
dependentes = int(input("Qual o número de dependentes que você possui?"))
# cpi -> INSS (social security) contribution, chosen by salary bracket
if salario<=1045.00:
cpi = salario*0.075
elif 1045.01<=salario<=2089.60:
cpi = salario*0.09
elif 2089.61<=salario<=3134.40:
cpi = salario*0.12
elif 3134.41<=salario<=6101.06:
cpi = salario*0.15
else:
cpi = 671.12
# bdc -> tax base: salary minus the INSS contribution and R$ 189.59 per dependent
bdc = salario - cpi - (dependentes*189.59)
# alq -> tax rate / ddc -> deduction for the matching IRRF bracket
if bdc<=1903.98:
alq = 0.0
ddc = 0.0
elif 1903.99<=bdc<=2826.65:
alq = 0.075
ddc = 142.8
elif 2826.66<=bdc<=3751.05:
alq = 0.15
ddc = 354.8
elif 3751.06<=bdc<=4664.68:
alq = 0.225
ddc = 636.13
else:
alq = 0.275
ddc = 869.36
# irf -> IRRF (withholding income tax) owed
irf = (bdc*alq) - ddc
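# Worked example (editor's illustration): for salario = 3000.00 and
# dependentes = 0, cpi = 3000 * 0.12 = 360.00 and bdc = 2640.00, which falls in
# the 7.5% bracket, so irf = 2640 * 0.075 - 142.80 = 55.20.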
print("O IRRF simplificado é: {0}".format(irf)) | [
"[email protected]"
]
| |
a7c35533e3ca5b39277826fd3ced2b3e6d144ed4 | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /home/pi/GrovePi/Software/Python/others/temboo/Library/CorpWatch/Search/__init__.py | d4a4dbc01fbbb5c4e9e067ff9c8408ca1f330110 | [
"Apache-2.0",
"MIT"
]
| permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from temboo.Library.CorpWatch.Search.CompoundSearch import CompoundSearch, CompoundSearchInputSet, CompoundSearchResultSet, CompoundSearchChoreographyExecution
| [
"[email protected]"
]
| |
faac4a6cd4ec646fc0024bc28f81558a4cd81c2c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_246/ch43_2020_03_30_19_26_24_928655.py | f0b9458eee10919d0689f4465806a7c9930a7dd9 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | i=int(input('Qual é o número do mês:'))
meses = ['Janeiro', 'Fevereiro', 'Março', 'Abril', 'Maio', 'Junho', 'Julho', 'Agosto', 'Setembro', 'Outubro', 'Novembro', 'Dezembro']  # month names must be quoted strings, otherwise this raises NameError
print (meses[i-1]) | [
"[email protected]"
]
| |
6170faf7a4373ceea1b8454264dad210b318c3be | 0b25dc3f9b4ef736e739aadddec33b96dd65a0c8 | /面向对象/hello1.py | 777efad3932ed74d0f7ab912c6e50d9975306ee6 | []
| no_license | ttp55/LearnPy | b123f44a74e4364771491c572705742c15eb33ff | 1530e158bde152e5c585f496dd1e5ffcffdb73bc | refs/heads/master | 2023-05-11T13:02:30.157285 | 2023-05-08T07:13:57 | 2023-05-08T07:13:57 | 196,953,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'WZG'
from Hello import Hello
h = Hello()
print(type(Hello), type(h), h.hello())
| [
"[email protected]"
]
| |
cf8f8d73405b71160adac91f36ff89bc04192843 | d98e4ce0a2d5064c38e7445b47d094661cdc2d54 | /lib/bango/constants.py | c7e17a6117303d94734a55dfb791d0c15a0f7293 | []
| no_license | wraithan/solitude | 549f007b4ac294439788f903110719bb6b404844 | c46508929f510513a9ffc91017a1dc7983630ea3 | refs/heads/master | 2021-01-16T19:51:34.925247 | 2013-01-11T17:29:22 | 2013-01-11T17:29:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | import re
ACCESS_DENIED = 'ACCESS_DENIED'
BANGO_ALREADY_PREMIUM_ENABLED = 'BANGO_ALREADY_PREMIUM_ENABLED'
BANK_DETAILS_EXIST = 'BANK_DETAILS_EXIST'
INTERNAL_ERROR = 'INTERNAL_ERROR'
# There is one of these for every field.
INVALID = re.compile(r'^INVALID_\w+$')
NO_BANGO_EXISTS = 'NO_BANGO_EXISTS'
OK = 'OK'
REQUIRED_CONFIGURATION_MISSING = 'REQUIRED_CONFIGURATION_MISSING'
SERVICE_UNAVAILABLE = 'SERVICE_UNAVAILABLE'
HEADERS_SERVICE = 'x-solitude-service'
HEADERS_SERVICE_GET = 'HTTP_X_SOLITUDE_SERVICE'
CURRENCIES = {
'AUD': 'Australian Dollars',
'CAD': 'Canadian Dollars',
'CHF': 'Swiss Francs',
'DKK': 'Danish Krone',
'EUR': 'Euro',
'GBP': 'Pounds Sterling',
'MYR': 'Malaysian Ringgit',
'NOK': 'Norwegian Krone',
'NZD': 'New Zealand Dollars',
'SEK': 'Swedish Krone',
    'SGD': 'Singapore Dollar',  # ISO 4217 code for the Singapore dollar is SGD
'THB': 'Thai Baht',
'USD': 'US Dollars',
'ZAR': 'South African Rand',
}
# TODO: Expand this list of categories (see bug 814492).
CATEGORIES = {
1: 'Games'
}
# List of valid country codes: http://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
COUNTRIES = [
'BRA',
'ESP'
]
RATINGS = ['GLOBAL', 'UNIVERSAL', 'RESTRICTED']
RATINGS_SCHEME = ['GLOBAL', 'USA']
PAYMENT_TYPES = ('OPERATOR', 'PSMS', 'CARD', 'INTERNET')
def match(status, constant):
# There's going to be an INVALID_ something for every field in every form
# adding them all to this is boring. Let's make a regex to map them.
if isinstance(constant, basestring):
        return status == constant  # plain string constants are compared for equality
return constant.match(status)
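# Illustrative behaviour (editor's note): plain string constants compare
# directly, while the INVALID regex covers any INVALID_<FIELD> code, e.g.
#   match('OK', OK)                    -> True
#   match('INVALID_POSTCODE', INVALID) -> a truthy re match object
#   match('ACCESS_DENIED', OK)         -> False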
| [
"[email protected]"
]
| |
7f9bbff19c960a1447601077fbce7f295141b375 | 15977cb6d59853e3b1c6ce6ef5ed33993b838a93 | /rmgpy/rmg/react.py | d68d358807f917833e00fb769972d8d90a1c38d9 | [
"MIT"
]
| permissive | nateharms/RMG-Py | 43cf7637005829a0ae743763a70bf27b6d29e5ac | 80deaebddcbb14b7c41e232b67e1c973e0b18324 | refs/heads/master | 2021-01-18T18:29:49.279983 | 2016-08-23T14:05:24 | 2016-08-24T18:51:18 | 62,814,768 | 1 | 0 | NOASSERTION | 2020-06-02T21:33:44 | 2016-07-07T14:49:18 | Python | UTF-8 | Python | false | false | 6,546 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green ([email protected]) and the
# RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
Contains functions for generating reactions.
"""
import logging
import itertools
from rmgpy.molecule.molecule import Molecule
from rmgpy.data.rmg import getDB
from rmgpy.scoop_framework.util import map_
from rmgpy.species import Species
def react(*spcTuples):
"""
Generate reactions between the species in the
list of species tuples for all the reaction families available.
For each tuple of one or more Species objects [(spc1,), (spc2, spc3), ...]
the following is done:
A list of tuples is created for each resonance isomer of the species.
Each tuple consists of (Molecule, index) with the index the species index of the Species object.
    Possible combinations between the first species in the tuple and the second species in the tuple
    are obtained by taking the combinatorial (Cartesian) product of the two generated [(Molecule, index)] lists.
Returns a flat generator object containing the generated Reaction objects.
"""
combos = []
for t in spcTuples:
t = tuple([spc.copy(deep=True) for spc in t])
if len(t) == 1:#unimolecular reaction
spc, = t
mols = [(mol, spc.index) for mol in spc.molecule]
combos.extend([(combo,) for combo in mols])
elif len(t) == 2:#bimolecular reaction
spcA, spcB = t
molsA = [(mol, spcA.index) for mol in spcA.molecule]
molsB = [(mol, spcB.index) for mol in spcB.molecule]
combos.extend(itertools.product(molsA, molsB))
results = map_(
reactMolecules,
combos
)
reactionList = itertools.chain.from_iterable(results)
return reactionList
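# Editor's sketch (illustrative, not from the RMG source): a unimolecular call
# looks roughly like react((spcA,)) and a bimolecular one like react((spcA, spcB)),
# with spcA/spcB being rmgpy.species.Species objects. The return value is a lazy
# itertools.chain, so wrap it in list() to force reaction generation, as
# reactAll() does below.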
def reactMolecules(moleculeTuples):
"""
Performs a reaction between
the resonance isomers.
The parameter contains a list of tuples with each tuple:
(Molecule, index of the core species it belongs to)
"""
families = getDB('kinetics').families
molecules, reactantIndices = zip(*moleculeTuples)
reactionList = []
for _, family in families.iteritems():
rxns = family.generateReactions(molecules)
reactionList.extend(rxns)
for reactant in molecules:
reactant.clearLabeledAtoms()
deflate(reactionList, molecules, reactantIndices)
return reactionList
def deflate(rxns, molecules, reactantIndices):
"""
The purpose of this function is to replace the reactants and
products of a reaction, stored as Molecule objects by
integer indices, corresponding to the species core index.
Creates a dictionary with Molecule objects as keys and newly
created Species objects as values.
It iterates over the reactantIndices array, with elements in this array
corresponding to the indices of the core species. It creates a
Molecule -> index entry in the previously created dictionary.
It iterates over the reaction list, and iteratively updates the
created dictionary as more reactions are processed.
"""
molDict = {}
for i, coreIndex in enumerate(reactantIndices):
if coreIndex != -1:
molDict[molecules[i]] = coreIndex
for rxn in rxns:
deflateReaction(rxn, molDict)
try:
deflateReaction(rxn.reverse, molDict)
except AttributeError, e:
pass
def reactAll(coreSpcList, numOldCoreSpecies, unimolecularReact, bimolecularReact):
"""
Reacts the core species list via uni- and bimolecular reactions.
"""
# Select reactive species that can undergo unimolecular reactions:
spcTuples = [(coreSpcList[i],)
for i in xrange(numOldCoreSpecies) if (unimolecularReact[i] and coreSpcList[i].reactive)]
for i in xrange(numOldCoreSpecies):
for j in xrange(i, numOldCoreSpecies):
# Find reactions involving the species that are bimolecular
# This includes a species reacting with itself (if its own concentration is high enough)
if bimolecularReact[i,j]:
if coreSpcList[i].reactive and coreSpcList[j].reactive:
spcTuples.append((coreSpcList[i], coreSpcList[j]))
rxns = list(react(*spcTuples))
return rxns
def deflateReaction(rxn, molDict):
"""
This function deflates a single reaction, and uses the provided
dictionary to populate reactants/products/pairs with integer indices,
if possible.
If the Molecule object could not be found in the dictionary, a new
dictionary entry is created, creating a new Species object as the value
for the entry.
The reactants/products/pairs of both the forward and reverse reaction
object are populated with the value of the dictionary, either an
integer index, or either a Species object.
"""
for mol in itertools.chain(rxn.reactants, rxn.products):
if not mol in molDict:
molDict[mol] = Species(molecule=[mol])
rxn.reactants = [molDict[mol] for mol in rxn.reactants]
rxn.products = [molDict[mol] for mol in rxn.products]
rxn.pairs = [(molDict[reactant], molDict[product]) for reactant, product in rxn.pairs] | [
"[email protected]"
]
| |
c388bf079c74e41a5740d641fb36ad2b962ab964 | cf58c2c216f6c76c71b5a04f72d79fb1d58e4b64 | /tests/components/unifiprotect/test_sensor.py | 2daa44699b4cfd474478617c46bb5d359e89238a | [
"Apache-2.0"
]
| permissive | whtsky/home-assistant | c301a7a0c2f8e94806d411b705c8f7b5939355d2 | 2ea5811e3a34e228908802e18c29af1c2fc249c5 | refs/heads/dev | 2023-08-19T07:37:29.365289 | 2023-02-17T22:21:28 | 2023-02-17T22:21:28 | 204,410,639 | 1 | 0 | Apache-2.0 | 2023-02-22T06:14:25 | 2019-08-26T06:30:12 | Python | UTF-8 | Python | false | false | 19,110 | py | """Test the UniFi Protect sensor platform."""
from __future__ import annotations
from datetime import datetime, timedelta
from unittest.mock import AsyncMock, Mock
from pyunifiprotect.data import (
NVR,
Camera,
Event,
EventType,
Sensor,
SmartDetectObjectType,
)
from pyunifiprotect.data.nvr import EventMetadata, LicensePlateMetadata
from homeassistant.components.unifiprotect.const import (
ATTR_EVENT_SCORE,
DEFAULT_ATTRIBUTION,
)
from homeassistant.components.unifiprotect.sensor import (
ALL_DEVICES_SENSORS,
CAMERA_DISABLED_SENSORS,
CAMERA_SENSORS,
EVENT_SENSORS,
MOTION_TRIP_SENSORS,
NVR_DISABLED_SENSORS,
NVR_SENSORS,
OBJECT_TYPE_NONE,
SENSE_SENSORS,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from .utils import (
MockUFPFixture,
adopt_devices,
assert_entity_counts,
enable_entity,
ids_from_device_description,
init_entry,
remove_entities,
reset_objects,
time_changed,
)
CAMERA_SENSORS_WRITE = CAMERA_SENSORS[:5]
SENSE_SENSORS_WRITE = SENSE_SENSORS[:8]
async def test_sensor_camera_remove(
hass: HomeAssistant, ufp: MockUFPFixture, doorbell: Camera, unadopted_camera: Camera
) -> None:
"""Test removing and re-adding a camera device."""
ufp.api.bootstrap.nvr.system_info.ustorage = None
await init_entry(hass, ufp, [doorbell, unadopted_camera])
assert_entity_counts(hass, Platform.SENSOR, 25, 12)
await remove_entities(hass, ufp, [doorbell, unadopted_camera])
assert_entity_counts(hass, Platform.SENSOR, 12, 9)
await adopt_devices(hass, ufp, [doorbell, unadopted_camera])
assert_entity_counts(hass, Platform.SENSOR, 25, 12)
async def test_sensor_sensor_remove(
hass: HomeAssistant, ufp: MockUFPFixture, sensor_all: Sensor
) -> None:
"""Test removing and re-adding a light device."""
ufp.api.bootstrap.nvr.system_info.ustorage = None
await init_entry(hass, ufp, [sensor_all])
assert_entity_counts(hass, Platform.SENSOR, 22, 14)
await remove_entities(hass, ufp, [sensor_all])
assert_entity_counts(hass, Platform.SENSOR, 12, 9)
await adopt_devices(hass, ufp, [sensor_all])
assert_entity_counts(hass, Platform.SENSOR, 22, 14)
async def test_sensor_setup_sensor(
hass: HomeAssistant, ufp: MockUFPFixture, sensor_all: Sensor
) -> None:
"""Test sensor entity setup for sensor devices."""
await init_entry(hass, ufp, [sensor_all])
assert_entity_counts(hass, Platform.SENSOR, 22, 14)
entity_registry = er.async_get(hass)
expected_values = (
"10",
"10.0",
"10.0",
"10.0",
"none",
)
for index, description in enumerate(SENSE_SENSORS_WRITE):
if not description.entity_registry_enabled_default:
continue
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, sensor_all, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
# BLE signal
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, sensor_all, ALL_DEVICES_SENSORS[1]
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, ufp.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == "-50"
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_sensor_setup_sensor_none(
hass: HomeAssistant, ufp: MockUFPFixture, sensor: Sensor
) -> None:
"""Test sensor entity setup for sensor devices with no sensors enabled."""
await init_entry(hass, ufp, [sensor])
assert_entity_counts(hass, Platform.SENSOR, 22, 14)
entity_registry = er.async_get(hass)
expected_values = (
"10",
STATE_UNAVAILABLE,
STATE_UNAVAILABLE,
STATE_UNAVAILABLE,
STATE_UNAVAILABLE,
)
for index, description in enumerate(SENSE_SENSORS_WRITE):
if not description.entity_registry_enabled_default:
continue
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, sensor, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_sensor_setup_nvr(
hass: HomeAssistant, ufp: MockUFPFixture, fixed_now: datetime
) -> None:
"""Test sensor entity setup for NVR device."""
reset_objects(ufp.api.bootstrap)
nvr: NVR = ufp.api.bootstrap.nvr
nvr.up_since = fixed_now
nvr.system_info.cpu.average_load = 50.0
nvr.system_info.cpu.temperature = 50.0
nvr.storage_stats.utilization = 50.0
nvr.system_info.memory.available = 50.0
nvr.system_info.memory.total = 100.0
nvr.storage_stats.storage_distribution.timelapse_recordings.percentage = 50.0
nvr.storage_stats.storage_distribution.continuous_recordings.percentage = 50.0
nvr.storage_stats.storage_distribution.detections_recordings.percentage = 50.0
nvr.storage_stats.storage_distribution.hd_usage.percentage = 50.0
nvr.storage_stats.storage_distribution.uhd_usage.percentage = 50.0
nvr.storage_stats.storage_distribution.free.percentage = 50.0
nvr.storage_stats.capacity = 50.0
await hass.config_entries.async_setup(ufp.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SENSOR, 12, 9)
entity_registry = er.async_get(hass)
expected_values = (
fixed_now.replace(second=0, microsecond=0).isoformat(),
"50.0",
"50.0",
"50.0",
"50.0",
"50.0",
"50.0",
"50.0",
"50",
)
for index, description in enumerate(NVR_SENSORS):
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, nvr, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is not description.entity_registry_enabled_default
assert entity.unique_id == unique_id
if not description.entity_registry_enabled_default:
await enable_entity(hass, ufp.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
expected_values = ("50.0", "50.0", "50.0")
for index, description in enumerate(NVR_DISABLED_SENSORS):
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, nvr, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is not description.entity_registry_enabled_default
assert entity.unique_id == unique_id
await enable_entity(hass, ufp.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_sensor_nvr_missing_values(
hass: HomeAssistant, ufp: MockUFPFixture
) -> None:
"""Test NVR sensor sensors if no data available."""
reset_objects(ufp.api.bootstrap)
nvr: NVR = ufp.api.bootstrap.nvr
nvr.system_info.memory.available = None
nvr.system_info.memory.total = None
nvr.up_since = None
nvr.storage_stats.capacity = None
await hass.config_entries.async_setup(ufp.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SENSOR, 12, 9)
entity_registry = er.async_get(hass)
# Uptime
description = NVR_SENSORS[0]
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, nvr, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
await enable_entity(hass, ufp.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_UNKNOWN
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
# Memory
description = NVR_SENSORS[8]
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, nvr, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == "0"
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
# Memory
description = NVR_DISABLED_SENSORS[2]
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, nvr, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, ufp.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_UNKNOWN
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_sensor_setup_camera(
hass: HomeAssistant, ufp: MockUFPFixture, doorbell: Camera, fixed_now: datetime
) -> None:
"""Test sensor entity setup for camera devices."""
await init_entry(hass, ufp, [doorbell])
assert_entity_counts(hass, Platform.SENSOR, 25, 12)
entity_registry = er.async_get(hass)
expected_values = (
fixed_now.replace(microsecond=0).isoformat(),
"100",
"100.0",
"20.0",
)
for index, description in enumerate(CAMERA_SENSORS_WRITE):
if not description.entity_registry_enabled_default:
continue
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, doorbell, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is not description.entity_registry_enabled_default
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
expected_values = ("100", "100")
for index, description in enumerate(CAMERA_DISABLED_SENSORS):
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, doorbell, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is not description.entity_registry_enabled_default
assert entity.unique_id == unique_id
await enable_entity(hass, ufp.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == expected_values[index]
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
# Wired signal
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, doorbell, ALL_DEVICES_SENSORS[2]
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, ufp.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == "1000"
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
# WiFi signal
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, doorbell, ALL_DEVICES_SENSORS[3]
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, ufp.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == "-50"
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
# Detected Object
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, doorbell, EVENT_SENSORS[0]
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
await enable_entity(hass, ufp.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == OBJECT_TYPE_NONE
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_sensor_setup_camera_with_last_trip_time(
hass: HomeAssistant,
entity_registry_enabled_by_default: AsyncMock,
ufp: MockUFPFixture,
doorbell: Camera,
fixed_now: datetime,
) -> None:
"""Test sensor entity setup for camera devices with last trip time."""
await init_entry(hass, ufp, [doorbell])
assert_entity_counts(hass, Platform.SENSOR, 25, 25)
entity_registry = er.async_get(hass)
# Last Trip Time
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, doorbell, MOTION_TRIP_SENSORS[0]
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert (
state.state
== (fixed_now - timedelta(hours=1)).replace(microsecond=0).isoformat()
)
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_sensor_update_motion(
hass: HomeAssistant, ufp: MockUFPFixture, doorbell: Camera, fixed_now: datetime
) -> None:
"""Test sensor motion entity."""
await init_entry(hass, ufp, [doorbell])
assert_entity_counts(hass, Platform.SENSOR, 25, 12)
_, entity_id = ids_from_device_description(
Platform.SENSOR, doorbell, EVENT_SENSORS[0]
)
await enable_entity(hass, ufp.entry.entry_id, entity_id)
event = Event(
id="test_event_id",
type=EventType.SMART_DETECT,
start=fixed_now - timedelta(seconds=1),
end=None,
score=100,
smart_detect_types=[SmartDetectObjectType.PERSON],
smart_detect_event_ids=[],
camera_id=doorbell.id,
api=ufp.api,
)
new_camera = doorbell.copy()
new_camera.is_smart_detected = True
new_camera.last_smart_detect_event_id = event.id
mock_msg = Mock()
mock_msg.changed_data = {}
mock_msg.new_obj = event
ufp.api.bootstrap.cameras = {new_camera.id: new_camera}
ufp.api.bootstrap.events = {event.id: event}
ufp.ws_msg(mock_msg)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert state.state == SmartDetectObjectType.PERSON.value
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
assert state.attributes[ATTR_EVENT_SCORE] == 100
async def test_sensor_update_alarm(
hass: HomeAssistant, ufp: MockUFPFixture, sensor_all: Sensor, fixed_now: datetime
) -> None:
"""Test sensor motion entity."""
await init_entry(hass, ufp, [sensor_all])
assert_entity_counts(hass, Platform.SENSOR, 22, 14)
_, entity_id = ids_from_device_description(
Platform.SENSOR, sensor_all, SENSE_SENSORS_WRITE[4]
)
event_metadata = EventMetadata(sensor_id=sensor_all.id, alarm_type="smoke")
event = Event(
id="test_event_id",
type=EventType.SENSOR_ALARM,
start=fixed_now - timedelta(seconds=1),
end=None,
score=100,
smart_detect_types=[],
smart_detect_event_ids=[],
metadata=event_metadata,
api=ufp.api,
)
new_sensor = sensor_all.copy()
new_sensor.set_alarm_timeout()
new_sensor.last_alarm_event_id = event.id
mock_msg = Mock()
mock_msg.changed_data = {}
mock_msg.new_obj = event
ufp.api.bootstrap.sensors = {new_sensor.id: new_sensor}
ufp.api.bootstrap.events = {event.id: event}
ufp.ws_msg(mock_msg)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert state.state == "smoke"
await time_changed(hass, 10)
async def test_sensor_update_alarm_with_last_trip_time(
hass: HomeAssistant,
entity_registry_enabled_by_default: AsyncMock,
ufp: MockUFPFixture,
sensor_all: Sensor,
fixed_now: datetime,
) -> None:
"""Test sensor motion entity with last trip time."""
await init_entry(hass, ufp, [sensor_all])
assert_entity_counts(hass, Platform.SENSOR, 22, 22)
# Last Trip Time
unique_id, entity_id = ids_from_device_description(
Platform.SENSOR, sensor_all, SENSE_SENSORS_WRITE[-3]
)
entity_registry = er.async_get(hass)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert (
state.state
== (fixed_now - timedelta(hours=1)).replace(microsecond=0).isoformat()
)
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_camera_update_licenseplate(
hass: HomeAssistant, ufp: MockUFPFixture, camera: Camera, fixed_now: datetime
) -> None:
"""Test sensor motion entity."""
camera.feature_flags.smart_detect_types.append(SmartDetectObjectType.LICENSE_PLATE)
camera.feature_flags.has_smart_detect = True
camera.smart_detect_settings.object_types.append(
SmartDetectObjectType.LICENSE_PLATE
)
await init_entry(hass, ufp, [camera])
assert_entity_counts(hass, Platform.SENSOR, 24, 13)
_, entity_id = ids_from_device_description(
Platform.SENSOR, camera, EVENT_SENSORS[1]
)
event_metadata = EventMetadata(
license_plate=LicensePlateMetadata(name="ABCD1234", confidence_level=95)
)
event = Event(
id="test_event_id",
type=EventType.SMART_DETECT,
start=fixed_now - timedelta(seconds=1),
end=None,
score=100,
smart_detect_types=[SmartDetectObjectType.LICENSE_PLATE],
smart_detect_event_ids=[],
metadata=event_metadata,
api=ufp.api,
)
new_camera = camera.copy()
new_camera.is_smart_detected = True
new_camera.last_smart_detect_event_id = event.id
mock_msg = Mock()
mock_msg.changed_data = {}
mock_msg.new_obj = new_camera
ufp.api.bootstrap.cameras = {new_camera.id: new_camera}
ufp.api.bootstrap.events = {event.id: event}
ufp.ws_msg(mock_msg)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert state.state == "ABCD1234"
| [
"[email protected]"
]
| |
02e0da6b388d781ffe02391d8ca227943d545ccf | 3efca607aefbd6cf558517bae689ccdacb7b383e | /test/functional/feature_dersig.py | ee176b713d41056f61d283b0c330bfe6a8bfe006 | [
"MIT"
]
| permissive | MicroBitcoinOrg/MicroBitcoin | f761b2ff04bdcb650d7c0ddbef431ef95cd69541 | 0119e8eff44ec4d94313eaa30022a97692b71143 | refs/heads/snapshot | 2022-12-27T10:04:21.040945 | 2021-02-09T05:51:45 | 2021-02-09T05:51:45 | 132,959,214 | 21 | 33 | MIT | 2020-06-12T04:38:45 | 2018-05-10T22:07:51 | C++ | UTF-8 | Python | false | false | 6,145 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).
Test that the DERSIG soft-fork activates at (regtest) height 1251.
"""
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import msg_block
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, bytes_to_hex_str, wait_until
DERSIG_HEIGHT = 1251
# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
"""
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
"""
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
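# --- Illustrative note (added sketch; not part of the original test) ---
# Byte-level effect of unDERify on the first signature push of the scriptSig
# (layout shown for a hypothetical 0x44-byte DER body followed by the
# SIGHASH_ALL byte 0x01):
#
#   before: 30 44 02 20 <R...> 02 20 <S...> 01
#   after : 30 44 02 20 <R...> 02 20 <S...> 00 01
#
# The extra 0x00 pad byte sits between the S value and the sighash-type byte,
# so the push is still a single data element, but the signature no longer
# satisfies the strict DER rules enforced once BIP66 is active.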
class BIP66Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1']]
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)]
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that a transaction with non-DER signature can still appear in a block")
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
self.nodeaddress, amount=1.0)
unDERify(spendtx)
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time)
block.nVersion = 2
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 3")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
block.nVersion = 2
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)')
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
del self.nodes[0].p2p.last_message["reject"]
self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
block.nVersion = 3
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
self.nodeaddress, amount=1.0)
unDERify(spendtx)
spendtx.rehash()
# First we show that this tx is valid except for DERSIG by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{'txid': spendtx.hash, 'allowed': False, 'reject-reason': '64: non-mandatory-script-verify-flag (Non-canonical DER signature)'}],
self.nodes[0].testmempoolaccept(rawtxs=[bytes_to_hex_str(spendtx.serialize())], allowhighfees=True)
)
# Now we verify that a block with this transaction is also invalid.
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
# We can receive different reject messages depending on whether
# microd is running with multiple script check threads. If script
# check threads are not in use, then transaction script validation
# happens sequentially, and microd produces more specific reject
# reasons.
assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
# Generic rejection when a block is invalid
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
else:
assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason
self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=1.0)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP66Test().main()
| [
"[email protected]"
]
| |
52d73a48b5c2f302869cc8f0c3a901ca166808fa | bc437dc74647765b51996f64b35fda3d047daf93 | /3_Intermediate+/day32_Automated_Birthday_Wisher/day32_start/day32-start.py | 850642f1ced183de5325ea62b4df0dabbf9f2a78 | []
| no_license | macosta-42/100_days_of_code | e06720d57b6ed870a3dd4fa4e6d019296206a08f | 5b527dc18bae2ef556c26f653ef3c4badf94bb82 | refs/heads/main | 2023-05-22T03:26:02.422275 | 2021-06-10T10:31:26 | 2021-06-10T10:31:26 | 328,963,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | import smtplib
import datetime as dt
import random
# Change the receiver email here
EMAIL_TO = ""
# Change your email and password here
EMAIL_FROM = ""
PASSWORD = ""
now = dt.datetime.now()
day_of_week = now.weekday()
if day_of_week == 0:
with open("quotes.txt", "r") as f:
quotes = f.read().splitlines()
rand_quote = random.choice(quotes)
with smtplib.SMTP("smtp.gmail.com") as connection:
connection.starttls()
connection.login(user=EMAIL_FROM, password=PASSWORD)
connection.sendmail(
from_addr=EMAIL_FROM,
to_addrs=EMAIL_TO,
msg=f"Subject:Monday Motivation\n\n{rand_quote}".encode('ascii', errors='ignore')
)
# import smtplib
# # Change the receiver email here
# email_to = ""
#
# # Change your email and password here
# email_from = ""
# password = ""
#
# with smtplib.SMTP("smtp.gmail.com") as connection:
# connection.starttls()
# connection.login(user=email_from, password=password)
# connection.sendmail(
# from_addr=email_from,
# to_addrs=email_to,
# msg="Subject:Hello\n\nThis is the body of my email."
# )
# # Change the receiver email here
# email_to = ""
#
# # Change your email and password here
# email_from = ""
# password = ""
#
# with smtplib.SMTP("smtp.mail.yahoo.com", port=587) as connection:
# connection.starttls()
# connection.login(user=email_from, password=password)
# connection.sendmail(
# from_addr=email_from,
# to_addrs=email_to,
# msg="Subject:Hello\n\nThis is the body of my email."
# )
# import datetime as dt
#
# now = dt.datetime.now()
# year = now.year
# month = now.month
# day_of_week = now.weekday()
# print(now)
# print(year)
# print(month)
# print(day_of_week)
#
# date_of_birth = dt.datetime(year=1985, month=11, day=2)
# print(date_of_birth)
| [
"[email protected]"
]
| |
fee6254c30e679ddc4ff6e8e89e717e9f6d126bd | e55d4b60cb3c77cf48420f6a9d2af374dd7967e4 | /awkward/persist.py | 4224715bc3883fa1a5cde27f5b7ab675da92ccd1 | [
"BSD-3-Clause"
]
| permissive | henryiii/awkward-array | 07bca5c17b65f1fc5c580026a8164165c101c55d | 175d88341a3e1e97408b088b7d91c67f72e34653 | refs/heads/master | 2020-04-03T06:41:21.749643 | 2018-10-26T19:38:33 | 2018-10-26T19:38:33 | 155,081,532 | 0 | 0 | null | 2018-10-28T14:44:33 | 2018-10-28T14:44:33 | null | UTF-8 | Python | false | false | 23,895 | py | #!/usr/bin/env python
# Copyright (c) 2018, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import fnmatch
import importlib
import json
import numbers
import os
import pickle
import zipfile
import zlib
try:
from collections.abc import Mapping, MutableMapping
except ImportError:
from collections import Mapping, MutableMapping
import awkward.type
import awkward.util
import awkward.version
compression = [
{"minsize": 8192, "types": [awkward.util.numpy.bool_, awkward.util.numpy.bool, awkward.util.numpy.integer], "contexts": "*", "pair": (zlib.compress, ("zlib", "decompress"))},
]
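# --- Illustrative note (added sketch; not part of the original module) ---
# Each policy entry pairs a compression callable with an importable
# (module, function) spec that is resolved again at read time. For example, to
# also compress floating-point buffers of any size with zlib, one could append:
#
#     compression.append({
#         "minsize": 0,
#         "types": [awkward.util.numpy.floating],
#         "contexts": "*",
#         "pair": (zlib.compress, ("zlib", "decompress")),
#     })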
partner = {
zlib.compress: ("zlib", "decompress"),
}
whitelist = [["numpy", "frombuffer"],
["zlib", "decompress"],
["awkward", "*Array"],
["awkward", "Table"],
["awkward.persist", "*"]]
def frompython(obj):
return base64.b64encode(pickle.dumps(obj)).decode("ascii")
def topython(string):
return pickle.loads(base64.b64decode(string.encode("ascii")))
def spec2function(obj, whitelist=whitelist):
for white in whitelist:
for n, p in zip(obj, white):
if not fnmatch.fnmatchcase(n, p):
break
else:
gen, genname = importlib.import_module(obj[0]), obj[1:]
while len(genname) > 0:
gen, genname = getattr(gen, genname[0]), genname[1:]
break
else:
raise RuntimeError("callable not in whitelist; add it by passing a whitelist argument:\n\n whitelist = awkward.persist.whitelist + [{0}]".format(obj))
return gen
def dtype2json(obj):
if obj.subdtype is not None:
dt, sh = obj.subdtype
return (dtype2json(dt), sh)
elif obj.names is not None:
return [(n, dtype2json(obj[n])) for n in obj.names]
else:
return str(obj)
def json2dtype(obj):
def recurse(obj):
if isinstance(obj, (list, tuple)) and len(obj) > 0 and (isinstance(obj[-1], numbers.Integral) or isinstance(obj[0], str) or (isinstance(obj[-1], (list, tuple)) and all(isinstance(x, numbers.Integral) for x in obj[-1]))):
return tuple(recurse(x) for x in obj)
elif isinstance(obj, (list, tuple)):
return [recurse(x) for x in obj]
else:
return obj
return awkward.util.numpy.dtype(recurse(obj))
def type2json(obj):
if isinstance(obj, awkward.type.Type):
labeled = obj._labeled()
else:
labeled = []
seen = set()
def takes(n):
if n == float("inf"):
return "inf"
else:
return int(n)
def recurse(obj):
if isinstance(obj, awkward.type.Type):
if id(obj) in seen:
for i, x in enumerate(labeled):
if obj is x:
return {"ref": "T{0}".format(i)}
else:
seen.add(id(obj))
if isinstance(obj, awkward.type.ArrayType):
out = {"takes": takes(obj._takes), "to": recurse(obj._to)}
elif isinstance(obj, awkward.type.TableType):
out = {"fields": [[n, recurse(x)] for n, x in obj._fields.items()]}
elif isinstance(obj, awkward.type.UnionType):
out = {"possibilities": [recurse(x) for x in obj._possibilities]}
elif isinstance(obj, awkward.type.OptionType):
out = {"type": recurse(obj._type)}
for i, x in enumerate(labeled):
if obj is x:
return {"set": "T{0}".format(i), "as": out}
else:
return out
elif isinstance(obj, awkward.util.numpy.dtype):
return {"dtype": dtype2json(obj)}
elif callable(obj):
if obj.__module__ == "__main__":
raise TypeError("cannot persist object type: its generator is defined in __main__, which won't be available in a subsequent session")
if hasattr(obj, "__qualname__"):
spec = [obj.__module__] + obj.__qualname__.split(".")
else:
spec = [obj.__module__, obj.__name__]
gen, genname = importlib.import_module(spec[0]), spec[1:]
while len(genname) > 0:
gen, genname = getattr(gen, genname[0]), genname[1:]
if gen is not obj:
raise TypeError("cannot persist object type: its generator cannot be found via its __name__ (Python 2) or __qualname__ (Python 3)")
return {"function": spec}
else:
raise TypeError("only awkward.type.Type, numpy.dtype, and callables are types")
return recurse(obj)
def json2type(obj, whitelist=whitelist):
labels = {}
def takes(n):
if n == "inf":
return float("inf")
else:
return n
def recurse(obj):
if not isinstance(obj, dict):
raise TypeError("json2type is expecting a JSON object, found: {0}".format(repr(obj)))
if "set" in obj:
placeholder = labels[obj["set"]] = awkward.type.Placeholder()
placeholder.value = recurse(obj["as"])
return placeholder
elif "ref" in obj:
return labels[obj["ref"]]
elif "takes" in obj and "to" in obj:
return awkward.type.ArrayType(takes(obj["takes"]), recurse(obj["to"]))
elif "fields" in obj:
out = awkward.type.TableType()
for n, x in obj["fields"]:
out[n] = recurse(x)
return out
elif "possibilities" in obj:
return awkward.type.UnionType(*[recurse(x) for x in obj["possibilities"]])
elif "type" in obj:
return awkward.type.OptionType(recurse(obj["type"]))
elif "dtype" in obj:
return json2dtype(obj["dtype"])
elif "function" in obj:
return spec2function(obj["function"], whitelist=whitelist)
else:
raise ValueError("unexpected set of keys in JSON: {0}".format(", ".join(repr(x) for x in obj)))
return awkward.type._resolve(recurse(obj), {})
def jsonable(obj):
if obj is None:
return obj
elif isinstance(obj, dict) and all(isinstance(n, str) for n in obj):
return {n: jsonable(x) for n, x in obj.items()}
elif isinstance(obj, list):
return [jsonable(x) for x in obj]
elif isinstance(obj, str):
return str(obj)
elif isinstance(obj, (bool, awkward.util.numpy.bool_, awkward.util.numpy.bool)):
return bool(obj) # policy: eliminate Numpy types
elif isinstance(obj, (numbers.Integral, awkward.util.numpy.integer)):
return int(obj) # policy: eliminate Numpy types
    elif isinstance(obj, (numbers.Real, awkward.util.numpy.floating)) and awkward.util.numpy.isfinite(obj):
return float(obj) # policy: eliminate Numpy types
else:
raise TypeError("object cannot be losslessly serialized as JSON")
def serialize(obj, storage, name=None, delimiter="-", suffix=None, schemasuffix=None, compression=compression, **kwargs):
import awkward.array.base
import awkward.array.virtual
for n in kwargs:
if n not in ():
raise TypeError("unrecognized serialization option: {0}".format(repr(n)))
if name is None or name == "":
name = ""
prefix = ""
elif delimiter is None:
prefix = name
else:
prefix = name + delimiter
if suffix is None:
suffix = ""
if schemasuffix is None:
schemasuffix = ""
if compression is None:
compression = []
if isinstance(compression, dict) or callable(compression) or (len(compression) == 2 and callable(compression[0])):
compression = [compression]
normalized = []
for x in compression:
if isinstance(x, dict):
pass
elif callable(x):
if not x in partner:
raise ValueError("decompression partner for {0} not known".format(x))
x = {"pair": (x, partner[x])}
elif len(x) == 2 and callable(x[0]):
x = {"pair": x}
minsize = x.get("minsize", 0)
tpes = x.get("types", (object,))
if not isinstance(tpes, tuple):
tpes = (tpes,)
contexts = x.get("contexts", "*")
pair = x["pair"]
normalized.append({"minsize": minsize, "types": tpes, "contexts": contexts, "pair": pair})
seen = {}
def fill(obj, context, prefix, suffix, schemasuffix, storage, compression, **kwargs):
if id(obj) in seen:
return {"ref": seen[id(obj)]}
ident = len(seen)
seen[id(obj)] = ident
if type(obj) is awkward.util.numpy.dtype:
return {"dtype": dtype2json(obj)}
elif type(obj) is awkward.util.numpy.ndarray and len(obj.shape) != 0:
if len(obj.shape) > 1:
dtype = awkward.util.numpy.dtype((obj.dtype, obj.shape[1:]))
else:
dtype = obj.dtype
for policy in normalized:
minsize, tpes, contexts, pair = policy["minsize"], policy["types"], policy["contexts"], policy["pair"]
if obj.nbytes >= minsize and issubclass(obj.dtype.type, tuple(tpes)) and any(fnmatch.fnmatchcase(context, p) for p in contexts):
compress, decompress = pair
storage[prefix + str(ident) + suffix] = compress(obj)
return {"id": ident,
"call": ["numpy", "frombuffer"],
"args": [{"call": decompress, "args": [{"read": str(ident) + suffix}]},
{"dtype": dtype2json(dtype)},
{"json": len(obj)}]}
else:
storage[prefix + str(ident) + suffix] = obj.tostring()
return {"id": ident,
"call": ["numpy", "frombuffer"],
"args": [{"read": str(ident) + suffix},
{"dtype": dtype2json(dtype)},
{"json": len(obj)}]}
elif hasattr(obj, "__awkward_persist__"):
return obj.__awkward_persist__(ident, fill, prefix, suffix, schemasuffix, storage, compression, **kwargs)
else:
if hasattr(obj, "__module__") and (hasattr(obj, "__qualname__") or hasattr(obj, "__name__")) and obj.__module__ != "__main__":
if hasattr(obj, "__qualname__"):
spec = [obj.__module__] + obj.__qualname__.split(".")
else:
spec = [obj.__module__, obj.__name__]
gen, genname = importlib.import_module(spec[0]), spec[1:]
while len(genname) > 0:
gen, genname = getattr(gen, genname[0]), genname[1:]
if gen is obj:
return {"id": ident, "function": spec}
try:
obj = jsonable(obj)
except TypeError:
try:
return {"id": ident, "python": awkward.persist.frompython(obj)}
except Exception as err:
raise TypeError("could not persist component as an array, awkward-array, importable function/class, JSON, or pickle; pickle error is\n\n {0}: {1}".format(err.__class__.__name__, str(err)))
else:
return {"id": ident, "json": obj}
schema = {"awkward": awkward.version.__version__,
"schema": fill(obj, "", prefix, suffix, schemasuffix, storage, compression, **kwargs)}
if prefix != "":
schema["prefix"] = prefix
storage[name + schemasuffix] = json.dumps(schema).encode("ascii")
return schema
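# --- Illustrative usage (added sketch; not part of the original module) ---
# Any mapping that supports __setitem__/__getitem__ can serve as `storage`;
# a plain dict is enough to round-trip a small numpy array:
#
#     >>> storage = {}
#     >>> schema = serialize(awkward.util.numpy.arange(10), storage, name="demo")
#     >>> deserialize(storage, name="demo")
#     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])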
def deserialize(storage, name="", whitelist=whitelist, cache=None):
import awkward.array.virtual
schema = storage[name]
if isinstance(schema, awkward.util.numpy.ndarray):
schema = schema.tostring()
if isinstance(schema, bytes):
schema = schema.decode("ascii")
schema = json.loads(schema)
prefix = schema.get("prefix", "")
seen = {}
if isinstance(whitelist, str):
whitelist = [whitelist]
elif len(whitelist) > 0 and isinstance(whitelist[0], str):
whitelist = [whitelist]
def unfill(schema):
if isinstance(schema, dict):
if "call" in schema and isinstance(schema["call"], list) and len(schema["call"]) > 0:
gen = spec2function(schema["call"], whitelist=whitelist)
args = [unfill(x) for x in schema.get("args", [])]
kwargs = {}
if schema.get("cacheable", False):
kwargs["cache"] = cache
if schema.get("whitelistable", False):
kwargs["whitelist"] = whitelist
if "kwargs" in schema:
kwargs.update({n: unfill(x) for n, x in schema["kwargs"].items()})
if "*" in schema:
args = args + [unfill(x) for x in schema["*"]]
if "**" in schema:
kwargs.update({n: unfill(x) for n, x in schema["**"].items()})
out = gen(*args, **kwargs)
elif "read" in schema:
if schema.get("absolute", False):
out = storage[schema["read"]]
else:
out = storage[prefix + schema["read"]]
elif "list" in schema:
out = [unfill(x) for x in schema["list"]]
elif "tuple" in schema:
out = tuple(unfill(x) for x in schema["tuple"])
elif "pairs" in schema:
out = [(n, unfill(x)) for n, x in schema["pairs"]]
elif "dict" in schema:
out = {n: unfill(x) for n, x in schema["dict"].items()}
elif "dtype" in schema:
out = json2dtype(schema["dtype"])
elif "function" in schema:
out = spec2function(schema["function"], whitelist=whitelist)
elif "json" in schema:
out = schema["json"]
elif "python" in schema:
out = topython(schema["python"])
elif "ref" in schema:
if schema["ref"] in seen:
out = seen[schema["ref"]]
else:
out = awkward.array.virtual.VirtualArray(lambda: seen[schema["ref"]])
else:
raise ValueError("unrecognized JSON object with fields {0}".format(", ".join(repr(x) for x in schema)))
if "id" in schema:
seen[schema["id"]] = out
return out
elif isinstance(schema, list):
raise ValueError("unrecognized JSON list with length {0}".format(len(schema)))
else:
raise ValueError("unrecognized JSON object: {0}".format(repr(schema)))
return unfill(schema["schema"])
def keys(storage, name="", subschemas=True):
schema = storage[name]
if isinstance(schema, awkward.util.numpy.ndarray):
schema = schema.tostring()
if isinstance(schema, bytes):
schema = schema.decode("ascii")
schema = json.loads(schema)
prefix = schema.get("prefix", "")
def recurse(schema):
if isinstance(schema, dict):
if "call" in schema and isinstance(schema["call"], list) and len(schema["call"]) > 0:
for x in schema.get("args", []):
for y in recurse(x):
yield y
for x in schema.get("kwargs", {}).values():
for y in recurse(x):
yield y
for x in schema.get("*", []):
for y in recurse(x):
yield y
for x in schema.get("**", {}).values():
for y in recurse(x):
yield y
elif "read" in schema:
if schema.get("absolute", False):
yield schema["read"]
else:
yield prefix + schema["read"]
elif "list" in schema:
for x in schema["list"]:
for y in recurse(x):
yield y
elif "tuple" in schema:
for x in schema["tuple"]:
for y in recurse(x):
yield y
elif "pairs" in schema:
for n, x in schema["pairs"]:
for y in recurse(x):
yield y
elif "dict" in schema:
for x in schema["dict"].values():
for y in recurse(x):
yield y
elif "dtype" in schema:
pass
elif "function" in schema:
pass
elif "json" in schema:
pass
elif "python" in schema:
pass
elif "ref" in schema:
pass
yield name
for x in recurse(schema["schema"]):
yield x
def save(file, array, name=None, mode="a", **options):
if isinstance(array, dict):
arrays = array
else:
arrays = {"": array}
if name is not None:
arrays = {name + n: x for n, x in arrays.items()}
arraynames = list(arrays)
for i in range(len(arraynames)):
for j in range(i + 1, len(arraynames)):
if arraynames[i].startswith(arraynames[j]) or arraynames[j].startswith(arraynames[i]):
raise KeyError("cannot write both {0} and {1} to zipfile because one is a prefix of the other", repr(arraynames[i]), repr(arraynames[j]))
if isinstance(file, getattr(os, "PathLike", ())):
file = os.fspath(file)
elif hasattr(file, "__fspath__"):
file = file.__fspath__()
elif file.__class__.__module__ == "pathlib":
import pathlib
if isinstance(file, pathlib.Path):
file = str(file)
if isinstance(file, str) and not file.endswith(".akd"):
file = file + ".akd"
alloptions = {"delimiter": "-", "suffix": ".raw", "schemasuffix": ".json", "compression": compression}
alloptions.update(options)
options = alloptions
class Wrap(object):
def __init__(self, f):
self.f = f
def __setitem__(self, where, what):
self.f.writestr(where, what, compress_type=zipfile.ZIP_STORED)
with zipfile.ZipFile(file, mode=mode, compression=zipfile.ZIP_STORED) as f:
namelist = f.namelist()
for name in arraynames:
if any(n.startswith(name) for n in namelist):
raise KeyError("cannot add {0} to zipfile because the following already exist: {1}".format(repr(name), ", ".join(repr(n) for n in namelist if n.startswith(name))))
wrapped = Wrap(f)
for name, array in arrays.items():
serialize(array, wrapped, name=name, **options)
def load(file, **options):
f = Load(file, **options)
if list(f) == [""]:
out = f[""]
f.close()
return out
else:
return f
class Load(Mapping):
def __init__(self, file, **options):
class Wrap(object):
def __init__(self):
self.f = zipfile.ZipFile(file, mode="r")
def __getitem__(self, where):
return self.f.read(where)
self._file = Wrap()
alloptions = {"schemasuffix": ".json", "whitelist": whitelist, "cache": None}
alloptions.update(options)
self.schemasuffix = alloptions.pop("schemasuffix")
self.options = alloptions
def __getitem__(self, where):
return deserialize(self._file, name=where + self.schemasuffix, whitelist=self.options["whitelist"], cache=self.options["cache"])
def __iter__(self):
for n in self._file.f.namelist():
if n.endswith(".json"):
yield n[:-5]
def __len__(self):
count = 0
for n in self._file.f.namelist():
if n.endswith(".json"):
count += 1
return count
def __repr__(self):
return "<awkward.load ({0} members)>".format(len(self))
def close(self):
self._file.f.close()
def __del__(self):
self.close()
def __enter__(self, *args, **kwds):
return self
def __exit__(self, *args, **kwds):
self.close()
class hdf5(MutableMapping):
def __init__(self, group, **options):
alloptions = {"compression": compression, "whitelist": whitelist, "cache": None}
alloptions.update(options)
self.options = alloptions
self.options["delimiter"] = "/"
self.options["schemasuffix"] = "/schema.json"
class Wrap(object):
def __init__(self):
self.g = group
def __getitem__(self, where):
return self.g[where].value
def __setitem__(self, where, what):
self.g[where] = awkward.util.numpy.frombuffer(what, dtype=awkward.util.numpy.uint8)
self._group = Wrap()
def __getitem__(self, where):
return deserialize(self._group, name=where + self.options["schemasuffix"], whitelist=self.options["whitelist"], cache=self.options["cache"])
def __setitem__(self, where, what):
options = dict(self.options)
if "whitelist" in options:
del options["whitelist"]
if "cache" in options:
del options["cache"]
self._group.g.create_group(where)
serialize(what, self._group, name=where, **options)
def __delitem__(self, where):
for subname in keys(self._group, name=where + self.options["schemasuffix"]):
del self._group.g[subname]
del self._group.g[where]
def __iter__(self):
schemaname = self.options["schemasuffix"].split("/")[-1]
for subname in self._group.g:
if schemaname in self._group.g[subname]:
yield subname
def __len__(self):
schemaname = self.options["schemasuffix"].split("/")[-1]
count = 0
for subname in self._group.g:
if schemaname in self._group.g[subname]:
count += 1
return count
def __repr__(self):
return "<awkward.hdf5 {0} ({1} members)>".format(repr(self._group.g.name), len(self))
| [
"[email protected]"
]
| |
878270727d02763ab73a0b3dbe4846b3a1b33115 | f72c9e46af5ce5ac738693daf65e67a0962a229a | /sdk/lusid/models/entity_identifier.py | 9729ad91a9e7ee0e804c5cd8bde01f77b68ab38b | [
"MIT"
]
| permissive | finbourne/lusid-sdk-python | db8ce602f8408169f6583783c80ebbef83c77807 | 32fedc00ce5a37a6fe3bd9b9962570a8a9348e48 | refs/heads/master | 2023-08-29T18:22:49.488811 | 2023-08-29T15:57:26 | 2023-08-29T15:57:26 | 125,082,278 | 11 | 11 | NOASSERTION | 2023-04-28T07:16:48 | 2018-03-13T16:31:54 | Python | UTF-8 | Python | false | false | 6,715 | py | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 1.0.463
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class EntityIdentifier(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'identifier_scope': 'str',
'identifier_type': 'str',
'identifier_value': 'str'
}
attribute_map = {
'identifier_scope': 'identifierScope',
'identifier_type': 'identifierType',
'identifier_value': 'identifierValue'
}
required_map = {
'identifier_scope': 'optional',
'identifier_type': 'required',
'identifier_value': 'required'
}
def __init__(self, identifier_scope=None, identifier_type=None, identifier_value=None, local_vars_configuration=None): # noqa: E501
"""EntityIdentifier - a model defined in OpenAPI"
:param identifier_scope: The scope of the identifier
:type identifier_scope: str
:param identifier_type: The type of the identifier (required)
:type identifier_type: str
:param identifier_value: The value of the identifier (required)
:type identifier_value: str
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._identifier_scope = None
self._identifier_type = None
self._identifier_value = None
self.discriminator = None
self.identifier_scope = identifier_scope
self.identifier_type = identifier_type
self.identifier_value = identifier_value
@property
def identifier_scope(self):
"""Gets the identifier_scope of this EntityIdentifier. # noqa: E501
The scope of the identifier # noqa: E501
:return: The identifier_scope of this EntityIdentifier. # noqa: E501
:rtype: str
"""
return self._identifier_scope
@identifier_scope.setter
def identifier_scope(self, identifier_scope):
"""Sets the identifier_scope of this EntityIdentifier.
The scope of the identifier # noqa: E501
:param identifier_scope: The identifier_scope of this EntityIdentifier. # noqa: E501
:type identifier_scope: str
"""
self._identifier_scope = identifier_scope
@property
def identifier_type(self):
"""Gets the identifier_type of this EntityIdentifier. # noqa: E501
The type of the identifier # noqa: E501
:return: The identifier_type of this EntityIdentifier. # noqa: E501
:rtype: str
"""
return self._identifier_type
@identifier_type.setter
def identifier_type(self, identifier_type):
"""Sets the identifier_type of this EntityIdentifier.
The type of the identifier # noqa: E501
:param identifier_type: The identifier_type of this EntityIdentifier. # noqa: E501
:type identifier_type: str
"""
if self.local_vars_configuration.client_side_validation and identifier_type is None: # noqa: E501
raise ValueError("Invalid value for `identifier_type`, must not be `None`") # noqa: E501
self._identifier_type = identifier_type
@property
def identifier_value(self):
"""Gets the identifier_value of this EntityIdentifier. # noqa: E501
The value of the identifier # noqa: E501
:return: The identifier_value of this EntityIdentifier. # noqa: E501
:rtype: str
"""
return self._identifier_value
@identifier_value.setter
def identifier_value(self, identifier_value):
"""Sets the identifier_value of this EntityIdentifier.
The value of the identifier # noqa: E501
:param identifier_value: The identifier_value of this EntityIdentifier. # noqa: E501
:type identifier_value: str
"""
if self.local_vars_configuration.client_side_validation and identifier_value is None: # noqa: E501
raise ValueError("Invalid value for `identifier_value`, must not be `None`") # noqa: E501
self._identifier_value = identifier_value
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EntityIdentifier):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, EntityIdentifier):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
]
| |
d1e45646f104e957c2362515a6b771fbac434c43 | 397e125e94f4f139f2bf5055824d81f24b8b1757 | /企業コン/M-SOLUTIONS プロコンオープン/D.py | 4be09b400bf8143978a04bfb0cf22cda607a01e7 | []
| no_license | tails1434/Atcoder | ecbab6ee238e3f225551297db961b1b502841fa4 | e7c7fed36be46bbaaf020a70997842240ba98d62 | refs/heads/master | 2021-07-07T00:31:49.235625 | 2020-09-30T01:42:01 | 2020-09-30T01:42:01 | 189,009,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | from collections import deque
def main():
N = int(input())
edge = [[] for _ in range(N)]
for _ in range(N-1):
a, b = map(int, input().split())
a -= 1
b -= 1
edge[a].append(b)
edge[b].append(a)
C = list(map(int, input().split()))
cnt = [0] * N
visited = [False] * N
Q = deque([0])
while Q:
q = Q.popleft()
visited[q] = True
for v in edge[q]:
if q == v:
continue
if visited[v]:
continue
cnt[q] += 1
Q.append(v)
tmp = []
for i in range(N):
tmp.append((cnt[i],i))
tmp.sort()
C.sort()
d = [0] * N
for i in range(N):
d[tmp[i][1]] = C[i]
print(sum(C) - max(C))
print(*d)
if __name__ == "__main__":
main() | [
"[email protected]"
]
| |
06b3e54127019eed2360e8eabf390410d6381fe5 | 58c03f5db2e969dd0a982cd95f28b2ad0ecd5651 | /setup.py | 685cb7ed33ff9949436f3716c5180e5da4c59c4d | []
| no_license | rajsid1/django-th | 6bccaa25652d0713f893b9ba533f496cc6fe67f8 | 8060df3803ea0f3d1c965c3b9fdf85628633975b | refs/heads/master | 2021-01-15T09:29:14.177440 | 2014-11-30T21:26:40 | 2014-11-30T21:26:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | from setuptools import setup, find_packages
from django_th import __version__ as version
import os
def strip_comments(l):
return l.split('#', 1)[0].strip()
def reqs(*f):
return list(filter(None, [strip_comments(l) for l in open(
os.path.join(os.getcwd(), *f)).readlines()]))
install_requires = reqs('requirements.txt')
setup(
name='django_th',
version=version,
description='Trigger Happy - micro enterprise service bus (ESB) - a bridge between your internet services',
author='Olivier Demah',
author_email='[email protected]',
url='https://github.com/foxmask/django-th',
download_url="https://github.com/foxmask/django-th/archive/trigger-happy-"
+ version + ".zip",
packages=find_packages(exclude=['django_th/local_settings']),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
],
install_requires=install_requires,
include_package_data=True,
)
| [
"[email protected]"
]
| |
13306b3ec4618698ff1c402f46e0766579943674 | 9ef7e97ea83ec9dc1f792ef143aaa86c6e626ea6 | /Laplaceequation/.svn/pristine/cd/cd0a97d4dd9b4a0d2ab02c13a884e55158c95ed1.svn-base | 79c9fe07487cfe58b407e081b80e52478090c10a | []
| no_license | leiradom/CFDhomework | b57b0644ad76f61458c5d54e3c023615b64da146 | b59eae622e68f1127e65f3846cf8075ba82849f3 | refs/heads/main | 2023-05-30T19:35:30.088666 | 2021-06-04T14:47:47 | 2021-06-04T14:47:47 | 373,870,890 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,847 | import math
import numpy as np
import matplotlib.pyplot as plt
# n1 = int(input("请输入翼型表面离散点数"))
# n2 = int(input("请输入近场到远场离散点数"))
#NACA0012表面坐标
#y1 = 0.6 * (-0.1015 * x**4 + 0.2843 * x**3 - 0.3576 * x**2 - 0.1221 * x + 0.2969 * x ** 0.5)
#y2 = -0.6 * (-0.1015 * x**4 + 0.2843 * x**3 - 0.3576 * x**2 - 0.1221 * x + 0.2969 * x ** 0.5)
# Discretize the airfoil surface at equal x spacing; this gives a poor point distribution
'''
for i in range (0,101):
x = 0.01 * i
y1 = 0.6 * (-0.1015 * x**4 + 0.2843 * x**3 - 0.3576 * x**2 - 0.1221 * x + 0.2969 * x ** 0.5)
y2 = -0.6 * (-0.1015 * x**4 + 0.2843 * x**3 - 0.3576 * x**2 - 0.1221 * x + 0.2969 * x ** 0.5)
X[i] = x
Y1[i] = y1
Y2[i] = y2
plt.plot(X,Y1,'-o')
plt.plot(X,Y2,'-o')
plt.show()
'''
# Define constants
n1 = 40 # number of points around the airfoil
n2 = 80 # number of discrete points from the wall to the far field
e1 = 0.1
e2 = 0.1
e0 = 1e-4 # convergence tolerance on the residual
dcx = 1 # computational-domain step in the xi (i) direction
dcy = 1 # computational-domain step in the eta (j) direction
a = 0
b = 0
g = 0
# use linspace here
Theta = np.linspace(0,2*math.pi,num = n1)
X0 = np.ones(n1)
Y0 = np.ones(n1)
X1 = np.ones(n1)
Y1 = np.ones(n1)
X = np.ones([n1,n2]) # x coordinates of all grid points
Y = np.ones([n1,n2]) # y coordinates of all grid points
x2 = np.ones([n1,n2]) # work array for the iteration
y2 = np.ones([n1,n2]) # work array for the iteration
#----------------------- generate the initial grid -------------------#
for i in range(0,n1):
X0[i] = 0.5 * (1 + math.cos(Theta[i]))
if(Theta[i]<math.pi):
y = 0.6 * (-0.1015 * X0[i]**4 + 0.2843 * X0[i]**3 - 0.3576 * X0[i]**2 - 0.1221 * X0[i] + 0.2969 * X0[i] ** 0.5)
else:
y = -0.6 * (-0.1015 * X0[i]**4 + 0.2843 * X0[i]**3 - 0.3576 * X0[i]**2 - 0.1221 * X0[i] + 0.2969 * X0[i] ** 0.5)
Y0[i] = y
X1[i] = 30 * math.cos(Theta[i]) + 0.5
Y1[i] = 30 * math.sin(Theta[i])
#print(X0[i],Y0[i])
for j in range(0,n2):
dx = (X1[i] - X0[i]) / (n2 - 1)
dy = (Y1[i] - Y0[i]) / (n2 - 1)
X[i][j] = X0[i] + j * dx
Y[i][j] = Y0[i] + j * dy
'''
#Display the initial grid
for i in range(0,n1):
plt.plot(X[i,:],Y[i,:],'-o',c='b')
for j in range(0,n2):
plt.plot(X[:,j],Y[:,j],'-o',c='b')
plt.axis('square') # equal aspect ratio
plt.show() # placed outside the loop so only one figure is drawn
'''
#----------------------- initial grid generated -------------------#
# Iteratively solve the Laplace equation to generate the grid
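# --- Added note (not part of the original script) ---
# The update below is a point Gauss-Seidel sweep for the Winslow elliptic grid
# equations  alpha*x_xixi - 2*beta*x_xieta + gamma*x_etaeta = 0  (and the same
# for y), with unit computational spacing, where
#   alpha = x_eta**2 + y_eta**2
#   beta  = x_xi*x_eta + y_xi*y_eta
#   gamma = x_xi**2 + y_xi**2
# xi runs around the airfoil (index i, periodic) and eta runs from the wall to
# the far field (index j), which is why the i == 0 branch wraps to i = n1-1
# for the xi derivatives.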
while((e1 > e0) or (e2 > e0)):
for i in range(0,n1):
for j in range(1,n2-1):
if(i == 0):
a = ((X[i][j+1]-X[i][j-1]) / (2*dcy))**2 + ((Y[i][j+1]-Y[i][j-1]) / (2*dcy))**2 #alpha
b = ((X[i+1][j]-X[n1-1][j]) / (2*dcx)) * ((X[i][j+1] - X[i][j-1]) / (2*dcy)) \
+ ((Y[i+1][j]-Y[n1-1][j]) / (2*dcx))*((Y[i][j+1] - Y[i][j-1]) / (2*dcy)) #beta
g = ((X[i+1][j]-X[n1-1][j]) / (2*dcx))**2 + ((Y[i+1][j]-Y[n1-1][j]) / (2*dcx))**2 #gamma
                # start the iterative update
x2[i][j] = 0.5 * (a * (X[i+1][j] + X[n1-1][j]) + g * (X[i][j+1] + X[i][j-1]) \
- 0.5 * b * (X[i+1][j+1]+X[n1-1][j-1]-X[i+1][j-1]-X[n1-1][j+1])) / (a + g)
y2[i][j] = 0.5 * (a * (Y[i+1][j] + Y[n1-1][j]) + g * (Y[i][j+1] + Y[i][j-1]) \
- 0.5 * b * (Y[i+1][j+1]+ Y[n1-1][j-1]- Y[i+1][j-1]- Y[n1-1][j+1])) / (a + g)
e1 = abs(X[i][j] - x2[i][j])
e2 = abs(Y[i][j] - y2[i][j])
                X[i][j] = x2[i][j] # update the iterated x coordinate
                Y[i][j] = y2[i][j] # update the iterated y coordinate
            # the three coefficients of the iteration equation, each built from first derivatives
else:
a = ((X[i][j+1]-X[i][j-1]) / (2*dcy))**2 + ((Y[i][j+1]-Y[i][j-1]) / (2*dcy))**2 #alpha
b = ((X[i+1][j]-X[i-1][j]) / (2*dcx)) * ((X[i][j+1] - X[i][j-1]) / (2*dcy)) \
+ ((Y[i+1][j]-Y[i-1][j]) / (2*dcx))*((Y[i][j+1] - Y[i][j-1]) / (2*dcy)) #beta
g = ((X[i+1][j]-X[i-1][j]) / (2*dcx))**2 + ((Y[i+1][j]-Y[i-1][j]) / (2*dcx))**2 #gamma
#开始迭代
x2[i][j] = 0.5 * (a * (X[i+1][j] + X[i-1][j]) + g * (X[i][j+1] + X[i][j-1]) \
- 0.5 * b * (X[i+1][j+1]+X[i-1][j-1]-X[i+1][j-1]-X[i-1][j+1])) / (a + g)
y2[i][j] = 0.5 * (a * (Y[i+1][j] + Y[i-1][j]) + g * (Y[i][j+1] + Y[i][j-1]) \
- 0.5 * b * (Y[i+1][j+1]+ Y[i-1][j-1]- Y[i+1][j-1]- Y[i-1][j+1])) / (a + g)
e1 = abs(X[i][j] - x2[i][j])
e2 = abs(Y[i][j] - y2[i][j])
                X[i][j] = x2[i][j] # update the iterated x coordinate
                Y[i][j] = y2[i][j] # update the iterated y coordinate
# Final plot
for i in range(0,n1):
plt.plot(X[i,:],Y[i,:],c='b')
for j in range(0,n2):
plt.plot(X[:,j],Y[:,j],c='b')
plt.axis('square') # equal aspect ratio
plt.show() # placed outside the loop so only one figure is drawn
"[email protected]"
]
| ||
72359d15c10f05c12783657d8a41809cb92f774b | 21c09799d006ed6bede4123d57d6d54d977c0b63 | /PrintOut.py | 31c51a8b387d5270f2d155b82ae290929209aa36 | []
| no_license | corvettettt/DijetRootTreeAnalyzer | 68cb12e6b280957e1eb22c9842b0b9b30ae2c779 | e65624ffc105798209436fc80fb82e2c252c6344 | refs/heads/master | 2021-05-06T09:57:12.816787 | 2019-04-18T15:32:38 | 2019-04-18T15:32:38 | 114,043,763 | 1 | 0 | null | 2017-12-12T22:02:46 | 2017-12-12T22:02:46 | null | UTF-8 | Python | false | false | 595 | py | import os
#for i in [0.1,0.15,0.2,0.3,0.35,0.45,0.5,0.5803,0.65,0.75,0.8,0.8838,0.9,0.9693]:
# cut = str(int(i*1000))
# os.system('python python/Plot1DLimit.py -d cards_PF0bDijet2017Scan'+cut+'_scan_ns/ -m qq -b PF0bDijet2017Scan'+cut+' -l 41.800 --massMin 1000 --massMax 8000 --xsecMin 1e-4 --xsecMax 1e2 -o exp')
for i in ['M','L','T']:
for j in ['1b','le1b']:
tag = 'PFNo1dDijet2017bgDeepJet'+j+i
print '\n\t'+tag
os.system('python python/Plot1DLimit.py -d cards_'+tag+'_scan/ -m qg -b '+tag+' -l 41.800 --massMin 1000 --massMax 8000 --xsecMin 1e-4 --xsecMax 1e2 -o obs')
| [
"[email protected]"
]
| |
d09077a77e985ee9243a979fe56958da14e954bd | 4101f575c913a1ccbfcbe16314fb343f8ddb2ea2 | /27.Linked_List_Cycle_II/27.Linked_List_Cycle_II.py | 346eea4bfbdb108a6398dbfee8fa4ca78f82cf83 | []
| no_license | Gangadharbhuvan/31-Days-October-Leetcode | a5959b25202d847daeb0f8ddc696838b068b01dc | 8515aa8cfd072b61f7b00be267f96c688474021b | refs/heads/master | 2023-01-04T01:28:15.974195 | 2020-10-31T17:06:56 | 2020-10-31T17:06:56 | 300,222,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,683 | py | '''
Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
There is a cycle in a linked list if there is some node in the list that can be reached again by continuously following the next pointer. Internally, pos is used to denote the index of the node that tail's next pointer is connected to. Note that pos is not passed as a parameter.
Notice that you should not modify the linked list.
Follow up:
Can you solve it using O(1) (i.e. constant) memory?
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: tail connects to node index 1
Explanation: There is a cycle in the linked list, where tail connects to the second node.
Example 2:
Input: head = [1,2], pos = 0
Output: tail connects to node index 0
Explanation: There is a cycle in the linked list, where tail connects to the first node.
Example 3:
Input: head = [1], pos = -1
Output: no cycle
Explanation: There is no cycle in the linked list.
Constraints:
The number of the nodes in the list is in the range [0, 10^4].
-10^5 <= Node.val <= 10^5
pos is -1 or a valid index in the linked-list.
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def detectCycle(self, head: ListNode) -> ListNode:
slow = fast = head
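        # Phase 1 (Floyd's cycle detection): fast moves two nodes per one of slow; if a cycle exists they must meet inside it.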
while fast and fast.next:
slow, fast = slow.next, fast.next.next
if slow == fast: break
if not fast or not fast.next: return
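        # Phase 2: restart slow from head; moving both one step at a time, they meet exactly at the
        # cycle's entry, since head-to-entry and meeting-point-to-entry distances are equal modulo the cycle length.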
slow = head
while slow != fast:
slow, fast = slow.next, fast.next
return slow | [
"[email protected]"
]
| |
d820e8e0ac93df5fd014839584386acd0509363e | abbc2d332bdfa036ac12438983e6d74cf4107e64 | /SiamFCpp/SiamFCpp-video_analyst/siamfcpp/evaluation/got_benchmark/experiments/trackingnet.py | 8492e40ea56be71f3e098bb843cac2ed8f449c58 | []
| permissive | HonglinChu/SiamTrackers | c494cff7543a433e8ec7dbf6d9439b1e7395b0c0 | 805208b5348346d35e64abcbe901a3829743e157 | refs/heads/master | 2023-08-29T06:50:59.532271 | 2023-03-06T09:13:53 | 2023-03-06T09:13:53 | 253,718,080 | 1,166 | 243 | Apache-2.0 | 2023-08-03T16:39:53 | 2020-04-07T07:24:00 | Python | UTF-8 | Python | false | false | 3,041 | py | from __future__ import absolute_import
import os
import numpy as np
from loguru import logger
from .otb import ExperimentOTB
from ..datasets import TrackingNet
from ..utils.metrics import rect_iou, center_error
from ..utils.ioutils import compress
class ExperimentTrackingNet(ExperimentOTB):
r"""Experiment pipeline and evaluation toolkit for TrackingNet dataset.
    Only the TEST subset is implemented.
Args:
        root_dir (string): Root directory of TrackingNet dataset.
subset (string, optional): Specify ``train`` or ``test`` or ``train0,1,...``
subset of TrackingNet. Default is ``test``.
return_meta (bool, optional): whether to fetch meta info
(occlusion or out-of-view). Default is ``False``.
result_dir (string, optional): Directory for storing tracking
results. Default is ``./results``.
report_dir (string, optional): Directory for storing performance
evaluation results. Default is ``./reports``.
"""
def __init__(self, root_dir, subset='test', return_meta=False,
result_dir='results', report_dir='reports'):
# assert subset.upper() in ['TRAIN', 'TEST']
assert subset.startswith(('train', 'test')), 'Unknown subset.'
self.subset = subset
self.dataset = TrackingNet(root_dir, subset, return_meta=return_meta)
self.result_dir = os.path.join(result_dir, 'TrackingNet')
self.report_dir = os.path.join(report_dir, 'TrackingNet')
# as nbins_iou increases, the success score
# converges to the average overlap (AO)
self.nbins_iou = 21
self.nbins_ce = 51
def report(self, tracker_names, *args, plot_curves=True, **kwargs):
if self.subset == 'test':
            logger.info("TEST subset's annotations are withheld; generating submission file instead...")
for tracker_name in tracker_names:
# compress all tracking results
result_dir = os.path.join(self.result_dir, tracker_name)
save_file = result_dir
compress(result_dir, save_file)
print('Records saved at', save_file + '.zip')
# print submission guides
print('\033[93mLogin and follow instructions on')
print('http://eval.tracking-net.org/')
print('to upload and evaluate your tracking results\033[0m')
performance = None
else:
performance = super(ExperimentTrackingNet, self).report(tracker_names, *args, plot_curves=plot_curves, **kwargs)
return performance
# def _calc_metrics(self, boxes, anno):
# valid = ~np.any(np.isnan(anno), axis=1)
# if len(valid) == 0:
# print('Warning: no valid annotations')
# return None, None
# else:
# ious = rect_iou(boxes[valid, :], anno[valid, :])
# center_errors = center_error(
# boxes[valid, :], anno[valid, :])
# return ious, center_errors
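    # Hypothetical usage sketch (not part of the original toolkit code; the dataset
    # path and the tracker object below are placeholders, and run()/report() come
    # from the ExperimentOTB base class):
    #
    #   experiment = ExperimentTrackingNet('/path/to/TrackingNet', subset='test')
    #   experiment.run(tracker)            # writes per-sequence results under ./results/TrackingNet
    #   experiment.report([tracker.name])  # for the 'test' subset this zips results for the evaluation server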
| [
"[email protected]"
]
| |
d8b5ea5c66a9b6e027e0b4f8aa070b5bbbeb9024 | 8938c94953e30aefebc28a9321da16976345072f | /bound_detect.py | ffc1a50cbf77fb1c2ccb2a3d6ebc7d75da3a25e6 | []
| no_license | liu1073811240/Opencv-4 | ace1df112bf1ba58620086a32237fe0e08b914bc | afc4a676f1fb0f6c47a9bb1ae039791ef6e77467 | refs/heads/master | 2023-01-12T00:21:35.171391 | 2020-11-15T03:30:04 | 2020-11-15T03:30:04 | 312,950,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | import cv2
import numpy as np
# "Boundary detection: bounding rectangle, minimum-area rectangle, minimum enclosing circle, ellipse fitting, and line fitting"
img = cv2.imread("./images/23.jpg")
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(img_gray, 127, 255, 0)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Bounding rectangle
x, y, w, h = cv2.boundingRect(contours[0])  # get the bounding-box coordinates from the contour points
img_contour = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow("img_contour", img_contour)
# Minimum-area rectangle
rect = cv2.minAreaRect(contours[0])  # returns the minimum-area rectangle as (center (x, y), (width, height), rotation angle)
print(rect)
box = cv2.boxPoints(rect)  # get the 4 corner points of the minimum-area rectangle
print(box)
print(box.dtype, box.shape)
box = np.int32(box)
print(box.dtype, box.shape)
img_contour1 = cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
cv2.imshow("img_contour1", img_contour1)
# Minimum enclosing circle
(x, y), radius = cv2.minEnclosingCircle(contours[0])  # find the center and radius of the minimum enclosing circle from the contour points
center = (int(x), int(y))
radius = int(radius)
img_contour3 = cv2.circle(img, center, radius, (255, 0, 0), 2)
cv2.imshow("img_contour3", img_contour3)
# Ellipse fitting
ellipse = cv2.fitEllipse(contours[0])  # fit an ellipse to the contour points
print(ellipse)
img_contour4 = cv2.ellipse(img, ellipse, (0, 255, 255), 2)
cv2.imshow("img_contour4", img_contour4)
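# Line fitting (listed in the header comment above but not demonstrated): a minimal
# sketch assuming the same contour; if the fit is near-vertical, vx ~ 0 and the
# endpoint arithmetic below would need special handling.
rows, cols = img.shape[:2]
vx, vy, x0, y0 = cv2.fitLine(contours[0], cv2.DIST_L2, 0, 0.01, 0.01).flatten()  # unit direction (vx, vy) and a point (x0, y0) on the line
lefty = int((-x0 * vy / vx) + y0)  # y where the line crosses the left image edge
righty = int(((cols - x0) * vy / vx) + y0)  # y where the line crosses the right image edge
img_contour5 = cv2.line(img, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)
cv2.imshow("img_contour5", img_contour5)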
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"[email protected]"
]
| |
0dd50d721f6ae31735f10048c04f2c5ae82ff43a | 936a8929956eb82b08c8f48c9bf2e7fb621412df | /dlaicourse/course2_part6_lesson3.py | 3192b2c2042036998a334d61bec0d6b33545bcd5 | []
| no_license | chenxu0602/TensorFlow2.0 | 3871b9b0cd391ca65938da7d3409e4667acb929c | c13742750ad09d8ec26fabe32578d27c927701a8 | refs/heads/master | 2021-07-12T11:10:08.665940 | 2021-02-27T09:21:29 | 2021-02-27T09:21:29 | 237,641,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,341 | py |
import os
import tensorflow as tf
local_weights_file = 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
pre_trained_model = tf.keras.applications.inception_v3.InceptionV3(
input_shape=(150, 150, 3),
include_top=False,
weights=None)
pre_trained_model.load_weights(local_weights_file)
for layer in pre_trained_model.layers:
layer.trainable = True
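# Note: all Inception layers are left trainable above, so the whole backbone is fine-tuned;
# the more common transfer-learning setup freezes the base with layer.trainable = False.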
last_layer = pre_trained_model.get_layer("mixed7")
print("last layer output shape: ", last_layer.output_shape)
last_output = last_layer.output
x = tf.keras.layers.Flatten()(last_output)
x = tf.keras.layers.Dense(1024, activation="relu")(x)
x = tf.keras.layers.Dropout(0.2)(x)
x = tf.keras.layers.Dense(1, activation="sigmoid")(x)
model = tf.keras.Model(pre_trained_model.input, x)
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.001),
loss="binary_crossentropy",
metrics=["accuracy"])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import zipfile
# local_zip = 'cats_and_dogs_filtered.zip'
#
# zip_ref = zipfile.ZipFile(local_zip, 'r')
#
# zip_ref.extractall()
# zip_ref.close()
# Define our example directories and files
base_dir = 'cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats') # Directory with our training cat pictures
train_dogs_dir = os.path.join(train_dir, 'dogs') # Directory with our training dog pictures
validation_cats_dir = os.path.join(validation_dir, 'cats') # Directory with our validation cat pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # Directory with our validation dog pictures
train_cat_fnames = os.listdir(train_cats_dir)
train_dog_fnames = os.listdir(train_dogs_dir)
train_datagen = ImageDataGenerator(rescale=1/255.,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1/255.)
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size=20,
class_mode="binary",
target_size=(150, 150))
validation_generator = test_datagen.flow_from_directory(validation_dir,
batch_size=20,
class_mode="binary",
target_size=(150, 150))
history = model.fit(
train_generator,
validation_data=validation_generator,
steps_per_epoch=100,
epochs=20,
validation_steps=50,
verbose=2)
import matplotlib.pyplot as plt
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
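# Loss curves (sketch): mirror the accuracy plot with the loss/val_loss collected above,
# so the extra figure created above does not stay empty.
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend(loc=0)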
plt.show() | [
"[email protected]"
]
|