blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea29a9cc461cc772418606651a63a753c9adce36
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/cloud/securitycenter/v1p1beta1/securitycenter-v1p1beta1-py/google/cloud/securitycenter_v1p1beta1/types/organization_settings.py
|
faec729075707f892513d3f7e9e1c999722a8557
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,410 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Registers this module's messages with proto-plus under the v1p1beta1 package;
# 'manifest' lists every message class defined below.
__protobuf__ = proto.module(
    package='google.cloud.securitycenter.v1p1beta1',
    manifest={
        'OrganizationSettings',
    },
)
class OrganizationSettings(proto.Message):
    r"""User specified settings that are attached to the Security
    Command Center organization.

    Attributes:
        name (str):
            The relative resource name of the settings. See:
            https://cloud.google.com/apis/design/resource_names#relative_resource_name
            Example:
            "organizations/{organization_id}/organizationSettings".
        enable_asset_discovery (bool):
            A flag that indicates if Asset Discovery should be enabled.
            If the flag is set to ``true``, then discovery of assets
            will occur. If it is set to \`false, all historical assets
            will remain, but discovery of future assets will not occur.
        asset_discovery_config (google.cloud.securitycenter_v1p1beta1.types.OrganizationSettings.AssetDiscoveryConfig):
            The configuration used for Asset Discovery
            runs.
    """

    class AssetDiscoveryConfig(proto.Message):
        r"""The configuration used for Asset Discovery runs.

        Attributes:
            project_ids (Sequence[str]):
                The project ids to use for filtering asset
                discovery.
            inclusion_mode (google.cloud.securitycenter_v1p1beta1.types.OrganizationSettings.AssetDiscoveryConfig.InclusionMode):
                The mode to use for filtering asset
                discovery.
        """

        class InclusionMode(proto.Enum):
            r"""The mode of inclusion when running Asset Discovery. Asset discovery
            can be limited by explicitly identifying projects to be included or
            excluded. If INCLUDE_ONLY is set, then only those projects within
            the organization and their children are discovered during asset
            discovery. If EXCLUDE is set, then projects that don't match those
            projects are discovered during asset discovery. If neither are set,
            then all projects within the organization are discovered during
            asset discovery.
            """
            INCLUSION_MODE_UNSPECIFIED = 0
            INCLUDE_ONLY = 1
            EXCLUDE = 2

        # Field 1: project ids used to filter discovery.
        project_ids = proto.RepeatedField(proto.STRING, number=1)
        # Field 2: enum referenced lazily by its fully-qualified name.
        inclusion_mode = proto.Field(proto.ENUM, number=2,
            enum='OrganizationSettings.AssetDiscoveryConfig.InclusionMode',
        )

    # Top-level fields; numbers are wire-format identifiers and must not change.
    name = proto.Field(proto.STRING, number=1)
    enable_asset_discovery = proto.Field(proto.BOOL, number=2)
    asset_discovery_config = proto.Field(proto.MESSAGE, number=3,
        message=AssetDiscoveryConfig,
    )
# Public API of this module: exactly the messages declared in the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
2dc3ec4af49c857ff67a051334b7be5cbb9dd6ba
|
927b50cdaf1c384c8bbf6f13816d0ba465852fd8
|
/main/migrations/0002_auto_20201128_0813.py
|
f86867def1d360053603e5adf8c185ee104522d0
|
[
"MIT"
] |
permissive
|
jhabarsingh/DOCMED
|
f37d336483cffd874b0a7db43677c08a47bd639c
|
8a831886d3dd415020699491687fb73893e674c5
|
refs/heads/main
| 2023-04-26T06:45:10.409633 | 2021-05-19T14:37:53 | 2021-05-19T14:37:53 | 316,683,855 | 3 | 5 |
MIT
| 2021-02-21T13:32:33 | 2020-11-28T07:51:22 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,061 |
py
|
# Generated by Django 2.0 on 2020-11-28 08:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter blood_group choices and make doctor.is_working nullable.

    Bug fix: the generated choices list contained ('AB+', 'AB- Type') — a
    duplicate 'AB+' stored value — so 'AB-' could never be stored; corrected
    to ('AB-', 'AB- Type') for both doctor and patient. `choices` is
    validation/display metadata only, so editing it in this migration does
    not alter the database schema.
    """

    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='doctor',
            name='blood_group',
            field=models.CharField(blank=True, choices=[('A+', 'A+ Type'), ('A-', 'A- Type'), ('B+', 'B+ Type'), ('B-', 'B- Type'), ('AB+', 'AB+ Type'), ('AB-', 'AB- Type'), ('O+', 'O+ Type'), ('O-', 'O- Type')], max_length=3, null=True),
        ),
        migrations.AlterField(
            model_name='doctor',
            name='is_working',
            # NOTE(review): NullBooleanField is deprecated in modern Django
            # (use BooleanField(null=True)); kept here as this is a historical
            # migration generated under Django 2.0.
            field=models.NullBooleanField(),
        ),
        migrations.AlterField(
            model_name='patient',
            name='blood_group',
            field=models.CharField(blank=True, choices=[('A+', 'A+ Type'), ('A-', 'A- Type'), ('B+', 'B+ Type'), ('B-', 'B- Type'), ('AB+', 'AB+ Type'), ('AB-', 'AB- Type'), ('O+', 'O+ Type'), ('O-', 'O- Type')], max_length=4, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
f0058d3d6a1df1097582e384bb22a5d06725cbb7
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/467/usersdata/282/111931/submittedfiles/Av2_Parte2.py
|
f4d68cd5e055d09012f4f459c82a6e8816d004ca
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 141 |
py
|
# -*- coding: utf-8 -*-
# Interactive stub: reads the intended sizes of two lists a and b.
# (The lists themselves are never populated in this fragment.)
a = []
qa = int(input('Digite o numero de elementos de a: '))  # element count for a
b = []
qb = int(input('Digite o numero de elementos de b: '))  # element count for b
|
[
"[email protected]"
] | |
1c4458211f04b61d65360a91f24938a79f071603
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=2.0_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=43/params.py
|
0c4e8626f8dbb25ae1a1eaa2f0fe59307cd289fe
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 254 |
py
|
# Parameter record for one GSN-EDF scheduling trial (trial 43).
# Bare dict literal — presumably read and eval'd by the experiment tooling
# rather than imported as a module (TODO confirm against the harness).
{'cpus': 4,
 'duration': 30,
 'final_util': '2.017881',
 'max_util': '2.0',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '0.5',
 'res_nmb': '4',
 'res_weight': '0.04',
 'scheduler': 'GSN-EDF',
 'trial': 43,
 'utils': 'uni-medium-3'}
|
[
"[email protected]"
] | |
05fc046d63ad0da119f177a76e959f80d9d8f37b
|
d184d1fc998a300feee2d716d97209b9fbc78468
|
/probability.py
|
dbeb07713ae4103f2e739fabfa5eb51dd35d80c9
|
[] |
no_license
|
MickeyKen/plot_node_master_thesis
|
df196d7a037b1960c1ee95268a1ae3b1e8f24148
|
5182ea79cb8cfbc6bead60d97eda9307f7e53c10
|
refs/heads/master
| 2023-02-16T21:17:49.284973 | 2021-01-19T09:19:40 | 2021-01-19T09:19:40 | 330,574,321 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,316 |
py
|
#!/usr/bin/python
import matplotlib.pyplot as plt
path = 'data/param_UD-v95_output.txt'  # episode log, comma-separated fields per line
isServiceCount = True  # NOTE(review): never read in this script
ACTOR_NUM = 3  # number of parallel actors recorded in the log
AVERAGE_NUM = 100  # moving-average window, in episodes
LIMIT = 5000  # episode indices beyond this are skipped when averaging
if __name__ == '__main__':
    # NOTE(review): indentation reconstructed from a whitespace-stripped dump —
    # nesting of the averaging block should be confirmed against the original.

    # Per-actor, per-episode outcome indicators (each entry is 1.0 or 0.0).
    collision = [[] for j in range(ACTOR_NUM)]
    average_collision = []
    success = [[] for j in range(ACTOR_NUM)]
    average_success = []
    no_action = [[] for j in range(ACTOR_NUM)]
    average_no_action = []
    eps = []
    average_eps = []
    epsilons = [[] for j in range(ACTOR_NUM)]  # NOTE(review): unused below
    flag = 0  # NOTE(review): unused below
    count = 0

    fig = plt.figure(figsize=(8.27,3.9), dpi=100)
    plt.ion()
    plt.xlabel('Episode')
    # plt.ylabel('P')
    plt.grid()
    cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']  # NOTE(review): unused below

    # Parse the log. Per line: field 0 = episode, 1 = actor id, 3 = step
    # count, 5 = reward. Classify each episode for its actor:
    #   short episode + large negative reward -> collision
    #   short episode + positive reward       -> success
    #   otherwise (ran past 150 steps)        -> no action
    with open(path) as f:
        for s_line in f:
            eps_num = int(s_line.split(',')[0])  # NOTE(review): unused below
            actor_num = int(s_line.split(',')[1])
            step = int(s_line.split(',')[3])
            reward = float(s_line.split(',')[5])
            if step < 150 and reward < -200:
                collision[actor_num].append(1.0)
                success[actor_num].append(0.0)
                no_action[actor_num].append(0.0)
            elif step < 150 and reward > 0:
                collision[actor_num].append(0.0)
                success[actor_num].append(1.0)
                no_action[actor_num].append(0.0)
            else:
                collision[actor_num].append(0.0)
                success[actor_num].append(0.0)
                no_action[actor_num].append(1.0)

    collision_sum = 0.0
    success_sum = 0.0
    no_action_sum = 0.0
    average_collision_sum = 0.0
    average_success_sum = 0.0
    average_no_action_sum = 0.0
    count = 1
    # Average each outcome over actors per episode index, then over
    # AVERAGE_NUM-episode windows; iterate only indices present for all actors.
    for index in range(min(len(v) for v in collision)):
        collision_sum = 0.0
        success_sum = 0.0
        no_action_sum = 0.0
        if index <= LIMIT:
            for n in range(ACTOR_NUM):
                collision_sum += collision[n][index]
                success_sum += success[n][index]
                no_action_sum += no_action[n][index]
            average_collision_sum += collision_sum / float(ACTOR_NUM)
            average_success_sum += success_sum / float(ACTOR_NUM)
            average_no_action_sum += no_action_sum / float(ACTOR_NUM)
            # Close a window: emit one averaged point and reset accumulators.
            if index % AVERAGE_NUM == 0 and index > 0:
                average_eps.append(count*AVERAGE_NUM)
                average_collision.append(average_collision_sum / float(AVERAGE_NUM))
                average_success.append(average_success_sum / float(AVERAGE_NUM))
                average_no_action.append(average_no_action_sum / float(AVERAGE_NUM))
                average_collision_sum = 0.0
                average_success_sum = 0.0
                average_no_action_sum = 0.0
                count += 1
        eps.append(index + 1)

    # Plot the three windowed probabilities and save the figure to disk.
    plt.plot(average_eps, average_success, color='#e41a1c', label="success")
    plt.plot(average_eps, average_collision, color='#00529a', label="collision")
    plt.plot(average_eps, average_no_action, color='#3FBF00', label="past 150 steps")
    plt.legend( loc='upper left', borderaxespad=1)
    plt.draw()
    fig.savefig("result_multi_probability.png")
    plt.pause(0)
|
[
"[email protected]"
] | |
145e5904cf2bc4e6e47030788b2461978b486ece
|
6318f1458f9c6cca91cb00aa415638a599d8ba26
|
/arcade/python/arcade-theCore/11_SpringOfIntegration/091_Combs.py
|
ec81b4e9bfbc202b226d08d5d49310be3d66ef37
|
[
"MIT"
] |
permissive
|
netor27/codefights-solutions
|
836016a048086cd2bc644b2c40b7686102b6f179
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
refs/heads/master
| 2021-10-28T13:04:42.940059 | 2019-01-16T23:12:08 | 2019-01-16T23:12:08 | 110,753,675 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,377 |
py
|
'''
Miss X has only two combs in her possession, both of which are old and miss a tooth or two. She also has many purses of different length, in which she carries the combs. The only way they fit is horizontally and without overlapping. Given teeth' positions on both combs, find the minimum length of the purse she needs to take them with her.
It is guaranteed that there is at least one tooth at each end of the comb.
It is also guaranteed that the total length of two strings is smaller than 32.
Note, that the combs can not be rotated/reversed.
Example
For comb1 = "*..*" and comb2 = "*.*", the output should be
combs(comb1, comb2) = 5.
Although it is possible to place the combs like on the first picture, the best way to do this is either picture 2 or picture 3.
'''
def combs(comb1, comb2):
    """Return the minimum purse length that fits both combs.

    Each comb string uses '*' for a tooth and '.' for a gap. The combs are
    laid horizontally without rotation; two teeth may not occupy the same
    position. Tries every relative offset of one comb against the other
    (in both directions) and keeps the shortest total span; the worst case
    is laying them end to end (len(comb1) + len(comb2)).
    """
    len1, len2 = len(comb1), len(comb2)
    bits1, bits2 = mask(comb1), mask(comb2)
    best = len1 + len2

    # Slide comb2 across comb1: no overlapping teeth <=> ANDed bitmasks are 0.
    for shift in range(len1 + 1):
        if not (bits2 << shift) & bits1:
            best = min(best, max(len2 + shift, len1))

    # Mirror pass: slide comb1 across comb2.
    for shift in range(len2 + 1):
        if not (bits1 << shift) & bits2:
            best = min(best, max(len1 + shift, len2))

    return best
def mask(s):
    """Encode a comb string as an integer bitmask.

    Each '*' (tooth) becomes a 1 bit and every other character a 0 bit;
    the first character ends up as the most significant bit.
    """
    value = 0
    for ch in s:
        value = (value << 1) | (1 if ch == '*' else 0)
    return value
|
[
"[email protected]"
] | |
3c70c973d79447bece9afe2d49c5fd583a4173dd
|
4bfe4afd1b1e11f9a03d8e3640aa297c875c076d
|
/demos/basic.py
|
9a86954581726ae9f13bad67294d6355e90d696a
|
[] |
no_license
|
pankajti/capstone
|
81cdd2187e71e8d1bf327579b574ea7cf91a7e76
|
af57a52d34dbcdd40e8e81f1d72c142263a98893
|
refs/heads/master
| 2021-03-02T09:49:51.054153 | 2020-07-09T02:28:58 | 2020-07-09T02:28:58 | 245,857,468 | 0 | 0 | null | 2020-03-22T00:54:01 | 2020-03-08T17:26:43 | null |
UTF-8
|
Python
| false | false | 240 |
py
|
from tensorflow.keras.layers import Dense,SimpleRNN
from tensorflow.keras import Sequential
import numpy as np
from tensorflow.keras.utils import plot_model
# Build a minimal two-layer feed-forward model and render its diagram.
model = Sequential()
model.add(Dense(2))  # hidden layer, 2 units
model.add(Dense(1))  # output layer, 1 unit
plot_model(model)  # renders the architecture diagram (writes an image file)
|
[
"[email protected]"
] | |
4faba1910def77457e265813a6749d9fcdc2c9fa
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/ec2_write_3/managed-prefix-list_create.py
|
a37a54b7d58925db27ffcd48c98d760451977f82
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,393 |
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_three_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/create-managed-prefix-list.html
if __name__ == '__main__':
    # Related AWS CLI operations for managed prefix lists (reference links):
    """
    delete-managed-prefix-list : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/delete-managed-prefix-list.html
    describe-managed-prefix-lists : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-managed-prefix-lists.html
    modify-managed-prefix-list : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-managed-prefix-list.html
    """
    # Help text shown to the user; the '#' lines name the three required parameters.
    parameter_display_string = """
# prefix-list-name : A name for the prefix list.
Constraints: Up to 255 characters in length. The name cannot start with com.amazonaws .
# max-entries : The maximum number of entries for the prefix list.
# address-family : The IP address type.
Valid Values: IPv4 | IPv6
"""
    add_option_dict = {}
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    # Delegates to the shared runner: aws ec2 create-managed-prefix-list with
    # the three required parameters prompted/collected by the framework.
    write_three_parameter("ec2", "create-managed-prefix-list", "prefix-list-name", "max-entries", "address-family", add_option_dict)
|
[
"[email protected]"
] | |
5c80ed9e14391ad32e4cc6fd9fcae8dce388c672
|
479518429066a4200b0c9ffbc42f22620dee1749
|
/app.py
|
5074f7904d2af983e17faf125c1a1f1f6874b9a4
|
[] |
no_license
|
nikhilkumarsingh/nitdhack
|
d2b4871c2aa3ef461c409a2f75e4f346759f1797
|
633ddf770c19fb8b0dd66479bc8e865e36181ffa
|
refs/heads/master
| 2021-01-19T21:33:27.880021 | 2017-04-18T23:43:06 | 2017-04-18T23:43:06 | 88,665,337 | 0 | 1 | null | 2018-10-03T05:33:57 | 2017-04-18T19:59:40 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,291 |
py
|
import flask

# Flask application serving templates plus a Google Places proxy endpoint.
app = flask.Flask(__name__, static_folder='static')
app.config.from_object(__name__)
# NOTE(review): secret key hardcoded in source — should come from environment/config.
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
@app.route('/')
def home():
    """Serve the landing page template."""
    return flask.render_template('index.html')
def NearbySearch(lat, lng, keyword, radius=1000):
    """Query the Google Places API for places of type ``keyword`` near a point.

    Args:
        lat, lng: coordinates of the search centre.
        keyword: Places "type" filter (e.g. 'doctor').
        radius: search radius in metres (default 1000).

    Returns:
        list of dicts, one per result, each with 'name', 'lat' and 'lng'.
    """
    # Bug fix: this module never imported `requests` at top level (only
    # `import flask`), so the original raised NameError on first call.
    import requests

    # SECURITY NOTE(review): API key embedded in source — move to config/env.
    key = "AIzaSyApuFoKxVMRQ2einlsA0rkx2S4WJjJIh34"
    url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?"
    url += "location=%f,%f&" % (lat, lng)
    url += "radius=%i&" % radius
    url += "type=%s&" % keyword
    url += "key=%s" % key
    response = requests.get(url)
    json_dict = response.json()
    res = json_dict['results']
    info_pack = []
    for x in res:
        # One follow-up Place Details request per result to get coordinates.
        placeid = x['place_id']
        url = "https://maps.googleapis.com/maps/api/place/details/json?placeid={}&key={}".format(placeid, key)
        r = requests.get(url).json()['result']
        info = {}
        info['name'] = r['name']
        info['lat'] = r['geometry']['location']['lat']
        info['lng'] = r['geometry']['location']['lng']
        info_pack.append(info)
    return info_pack
@app.route('/query', methods=['POST'])
def query():
    """Handle a POSTed query and return nearby doctors as JSON.

    Returns a dict (Flask serializes it) with a 'locations' list of
    {'name', 'lat', 'lng'} entries from NearbySearch.
    """
    if flask.request.method == 'POST':
        # lat,lang =
        # Bug fix: the original bound `lat, lang` but passed the undefined
        # name `lng` to NearbySearch, raising NameError on every request.
        lat, lng = 28, 76  # hard-coded demo coordinates
        data = {'locations': NearbySearch(lat, lng, 'doctor')}
        print(flask.request.form['query'])
        return data
if __name__ == "__main__":
    # Development server only; debug mode must not be enabled in production.
    app.run(debug=True, port=5003)
|
[
"[email protected]"
] | |
1186138ee1bd98ce6cc3c24b6d4b5d7158920d79
|
f81099738d3ab7d4a4773a04ed9e36e493632590
|
/angelos-portfolio/test/test_domain_update.py
|
2ccd8c81f1a7ea5f7e2d64656a9b8ccd5a5df49a
|
[
"MIT"
] |
permissive
|
kristoffer-paulsson/angelos
|
eff35753e4d7e4465d2aadac39265f206b09fcf9
|
d789f47766fe3a63a6752b92e4ea955f420dbaf9
|
refs/heads/master
| 2022-05-05T15:16:59.340527 | 2022-03-27T16:05:51 | 2022-03-27T16:05:51 | 142,691,235 | 9 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,287 |
py
|
#
# Copyright (c) 2018-2020 by Kristoffer Paulsson <[email protected]>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
"""Security tests putting the policies to the test."""
from unittest import TestCase
from angelos.common.policy import evaluate
from angelos.lib.policy.types import PersonData
from angelos.portfolio.domain.create import CreateDomain
from angelos.portfolio.domain.update import UpdateDomain
from angelos.portfolio.entity.create import CreatePersonEntity
from test.fixture.generate import Generate
class TestUpdateDomain(TestCase):
    """Policy test: updating a portfolio's domain yields its existing domain."""

    def test_perform(self):
        # Build a person portfolio fixture and attach a domain to it.
        data = PersonData(**Generate.person_data()[0])
        portfolio = CreatePersonEntity().perform(data)
        CreateDomain().perform(portfolio)
        self.assertIsNotNone(portfolio.domain)
        # Run the update under policy evaluation; it must hand back the same
        # domain object and the policy report must evaluate truthy.
        # NOTE(review): nesting reconstructed from a whitespace-stripped dump;
        # confirm whether assertTrue(report) belongs inside the with-block.
        with evaluate("Domain:Update") as report:
            domain = UpdateDomain().perform(portfolio)
            self.assertIs(domain, portfolio.domain)
            self.assertTrue(report)
|
[
"[email protected]"
] | |
482b54447b3f7cd5d3fb519221920951b5b68ed0
|
d9764a604c85c134ff217747d243eac8fe28e792
|
/src/demo2.py
|
e3c0801f18c91206c2e18df08c2caacf8e0007bf
|
[] |
no_license
|
afcarl/INF421-project
|
5a0130c3ba6e0c767323001048d3f191379dbc6e
|
dc6eef684f6d277b6a9bbbc227a9e20a1525e115
|
refs/heads/master
| 2020-03-19T21:21:53.465240 | 2017-08-14T13:39:52 | 2017-08-14T13:39:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,334 |
py
|
#!/usr/bin/env python3
"""
Special notes :
This implementation supports MULTIPLE shortest path.
(except for the number_of_possible_locations_with_mindist_simple function)
"""
import random
from Graph import Graph
from algo import *
from unused import *
from Dijkstra import *
from util import timeit
from reach import reach
####################

data = '/Users/louisabraham/Downloads/RoadNetworks/data/france.in'
logging = '/Users/louisabraham/Downloads/RoadNetworks/vis/points.js'

hour = 3600000  # one hour expressed in the graph's time unit (milliseconds)

# We can control the display of chronos using timeit.activated
timeit.activated = True

####################

# graph importation
g = Graph.from_file(data)

# we chose a random starting point
v = random.choice(list(g.keys()))
#
# # Question 1.1
# print(number_of_possible_locations(g, v, 1 * hour))
#
# # the same result is computed
# print(number_of_possible_locations_with_mindist_dijkstra(
#     g, v, 1 * hour, 0))
# print(number_of_possible_locations_with_mindist_dijkstra(
#     g, v, 1 * hour, 0))

# Count reachable locations within 1 hour at minimum distance 2 hours,
# logging visited points for visualisation; then repeat on the converse graph.
print(number_of_possible_locations_with_mindist_dijkstra(
    g, v, 1 * hour, 2 * hour, logging=logging))
input()  # pause so the first visualisation can be inspected
g.generate_converse()
print(number_of_possible_locations_with_mindist_dijkstra(
    g.converse, v, 1 * hour, 2 * hour, logging=logging))
# print(reach(g, v))
#
# # We can free memory like this
# dijkstra.clean()
|
[
"[email protected]"
] | |
136b1182e8e9b3bb6006d82097af6a64457a1413
|
817965ef6ee70672eabedbbafe336ca07d6443ff
|
/0x0B-python-input_output/8-load_from_json_file.py
|
34f8ae593948ca8fc24e3410cf357a351c626b5f
|
[] |
no_license
|
julianfrancor/holbertonschool-higher_level_programming
|
f021086eb2a86b366c391452b13581c87587a3a8
|
bd2a291c725ba09d88e9a629d0b22cf4ed7122e7
|
refs/heads/master
| 2022-12-23T05:27:27.942300 | 2020-09-24T21:22:56 | 2020-09-24T21:22:56 | 257,935,813 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 662 |
py
|
#!/usr/bin/python3
"""
function that creates an Object from a “JSON file”
"""
import json
def load_from_json_file(filename):
    """Create a Python object from the contents of a JSON file.

    Args:
        filename: path of the JSON file to read (UTF-8 encoded).

    Returns:
        The deserialized Python object (json.load reads straight
        from the open file handle).
    """
    with open(filename, mode="r", encoding="UTF8") as fp:
        return json.load(fp)
|
[
"[email protected]"
] | |
d6b4abc7fbe0628b62ce4ae5c4de91acedb25971
|
962feeffee41625ff841f6590f97bb09cef9be4c
|
/torch_glow/tests/nodes/avgpool3d_test.py
|
93e26349ac4e677a2d89d2388568725436963f2f
|
[
"Apache-2.0"
] |
permissive
|
SushantDaga/glow
|
8c4c3fbc58c3ae760bdd8e1df2e8c05a72ff07bc
|
aab22c3e0421dadd29950c2ebfa88b86027cecf5
|
refs/heads/master
| 2022-11-03T08:39:33.958233 | 2020-06-19T17:03:14 | 2020-06-19T17:05:42 | 273,568,864 | 2 | 0 |
Apache-2.0
| 2020-06-19T19:12:31 | 2020-06-19T19:12:30 | null |
UTF-8
|
Python
| false | false | 860 |
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests.utils import jitVsGlow
import unittest
class TestAvgPool3d(unittest.TestCase):
    """Glow lowering tests for torch.nn.functional.avg_pool3d via jitVsGlow."""

    def test_avg_pool3d_basic(self):
        """Basic test of the PyTorch avg_pool3d Node on Glow."""

        def test_f(inputs):
            # Cubic kernel of size 3, default stride and padding.
            return F.avg_pool3d(inputs, 3)

        inputs = torch.randn(1, 4, 5, 5, 5)  # (N, C, D, H, W)
        jitVsGlow(test_f, inputs, expected_fused_ops={"aten::avg_pool3d"})

    def test_avg_pool3d_with_args(self):
        """Test of the PyTorch avg_pool3d Node with arguments on Glow."""

        def test_f(inputs):
            # Non-cubic kernel with explicit padding.
            return F.avg_pool3d(inputs, padding=2, kernel_size=(4, 7, 7))

        inputs = torch.randn(1, 4, 10, 10, 10)  # (N, C, D, H, W)
        jitVsGlow(test_f, inputs, expected_fused_ops={"aten::avg_pool3d"})
|
[
"[email protected]"
] | |
e9f935855c936f7be736e9cada0f8dfb9d5cbf2c
|
6f444f025f27a10dd7b1bf61083ea2832ffcb196
|
/backend/location/api/v1/serializers.py
|
f4a37f977e26a6abd08e6dffcee6108c10dadd98
|
[] |
no_license
|
crowdbotics-apps/ledger-wallet-29295
|
2fe0eee9e06cb1f5c8e514ad650df8276aac789b
|
d96542a71685ce6d335882c10cf840355c8252f7
|
refs/heads/master
| 2023-06-24T00:46:30.889717 | 2021-07-30T20:37:03 | 2021-07-30T20:37:03 | 391,182,590 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 669 |
py
|
from rest_framework import serializers
from location.models import TaskLocation, CustomerLocation, TaskerLocation, MapLocation
class CustomerLocationSerializer(serializers.ModelSerializer):
class Meta:
model = CustomerLocation
fields = "__all__"
class MapLocationSerializer(serializers.ModelSerializer):
class Meta:
model = MapLocation
fields = "__all__"
class TaskerLocationSerializer(serializers.ModelSerializer):
class Meta:
model = TaskerLocation
fields = "__all__"
class TaskLocationSerializer(serializers.ModelSerializer):
class Meta:
model = TaskLocation
fields = "__all__"
|
[
"[email protected]"
] | |
8b95e2ada92485e2e3e8915583d7b6c7899d04f7
|
5022b48f311ba4710e1851855552b9546a3142c5
|
/unittest/case_test.py
|
3b355326b97f14c7a95801f1b8d7f47cb5b04d82
|
[] |
no_license
|
18786262315/python_lx
|
a7a15a294312b8382c3d1fd97a8d0ede38f1c5a5
|
a870d49cc4ca6efd1b54c2b89dfbf5e3d911a568
|
refs/heads/master
| 2020-03-21T12:37:30.748759 | 2020-03-18T09:31:31 | 2020-03-18T09:31:31 | 138,563,274 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,228 |
py
|
'''
unittest assertion / hook quick reference.
tester: cc
Translation-only notes; this file documents the API and is not meant to run
(several lines are not valid Python and the names are not imported).
'''
Skiptest()  # Raise this exception inside a test to skip it.
_ShouldStop()  # Stop the test run.
_UnexpectedSuccess()  # The test was expected to fail, but it did not.
Skip()  # Skip the test unconditionally.
skipIf(condition, reason)  # Skip the test when the condition is true.
skipUnless(condition, reason)  # Skip the test when the condition is false.
expectedFailure(test_item)  # Mark the test as expected to fail; if it fails at runtime it is not counted as a failed case.
_is_subtype(expected, basetype)  # Check whether a type matches the expected one.
addTypeEqualityFunc(typeobj, function)  # Register a check function for a custom class.
addCleanup( function , *args , **kwargs )  # Add cleanup callables run after each test's tearDown(), in last-in-first-out (LIFO) order. If setUp() fails, tearDown() does not run and neither do these cleanups.
setUp()  # Runs before each test. Any exception (other than unittest.SkipTest and AssertionError) counts as an error rather than a failure and aborts the current test.
tearDown()  # Runs after each test once setUp() succeeded, whether or not the test passed. An exception here (other than unittest.SkipTest and AssertionError) adds an extra error.
setUpClass( cls )与tearDownClass( cls )  # Run before/after all tests of the class; must be declared with the classmethod decorator.
countTestCases()  # Return the number of test cases; for a TestCase instance this is always 1.
defaultTestResult()  # If run() was given no result argument, return a TestResult object holding this case's results.
shortDescription()  # Return the test's description (the function docstring), or None; useful when printing results.
id()  # Return the test id, usually "module.Class.method"; useful when printing results.
subTest( msg=_subtest_msg_sentinel, **params)  # Return a context manager for a subtest block identified by the optional message and keyword arguments. A subtest failure marks the case failed, but execution resumes after the enclosing block so further test code can run.
run( result =None)  # Run the test, collecting results into `result` (not returned to the caller). If result is None, the return value of defaultTestResult() is used.
doCleanups()  # Unconditionally force the addCleanup() functions to run — e.g. when setUp() failed but cleanup is still needed, or to clean up before tearDown().
debug()  # Unlike run(), which stores outcomes in a result object, debug() propagates exceptions to the caller.
fail( msg =None)  # Unconditionally fail the test with message msg.
assertEqual(set1,set2,msg=None)  # Check that two values are equal.
assertFalse( expr, msg=None)  # Check that the expression is false.
assertTrue( expr, msg=None)  # Check that the expression is true.
assertAlmostEqual与assertNotAlmostEqual(, first, second, places=None, msg=None,delta=None)  # Check the two values are (not) approximately equal; `places` is the number of decimal places compared.
assertSequenceEqual(seq1, seq2, msg=None, seq_type=None)  # Equality assertion for ordered sequences such as tuples and lists.
assertListEqual( list1, list2, msg=None)  # List-specific equality assertion.
assertTupleEqual(tuple1, tuple2, msg=None)  # Tuple-specific equality assertion.
assertSetEqual( set1, set2, msg=None)  # Set-specific equality assertion.
assertIn与assertNotIn( member, container, msg=None)  # Check whether a is (not) contained in b.
assertIs与assertIsNot( expr1, expr2, msg=None)  # Check whether a is (not) the same object as b.
assertDictEqual( d1, d2, msg=None)  # Check that two dicts are equal.
assertDictContainsSubset( subset, dictionary, msg=None)  # Check that the dict is a superset of the given subset.
assertCountEqual(first, second, msg=None)  # Check that two unordered sequences contain the same elements.
assertMultiLineEqual( first, second, msg=None)  # Assert that two multi-line strings are equal.
assertLess( a, b, msg=None)  # Assert a < b.
assertLessEqual( a, b, msg=None)  # Assert a <= b.
assertGreater( a, b, msg=None)  # Assert a > b.
assertGreaterEqual(a, b, msg=None)  # Assert a >= b.
assertIsNone与assertIsNotNone( obj, msg=None)  # Check whether obj is (not) None.
assertIsInstance(a, b)与assertNotIsInstance(a, b)  # Like assertTrue; b may be a single type or a tuple of types.
assertRaisesRegex( expected_exception, expected_regex,*args, **kwargs)  # Assert that the raised exception's message matches the regex.
assertWarnsRegex( expected_warning, expected_regex,*args, **kwargs)  # Assert that the triggered warning's message matches the regex; only a warning whose message matches counts as success.
assertRegex与assertNotRegex(text, expected_regex, msg=None)  # Check whether the text matches (does not match) the regex.
shortDescription()  # Return the test's description (the function docstring), or None; useful when printing results.
|
[
"[email protected]"
] | |
820ed298b2d0d51b64a647c759fec6a4a95c79e1
|
0c4b33d04cf7fb73b3752b03af89eeaf76b8a0d2
|
/第14章-网络编程/client.py
|
93a57207689113ca5cbd684fb77a81dba69d2db4
|
[] |
no_license
|
kingflyfly/python_study
|
3b3ab427d23174b61b8f14c223059cfa9f303219
|
8a63a7c11b408bbc11a2b636517beaa424b37725
|
refs/heads/master
| 2020-06-11T01:39:52.655730 | 2020-03-24T16:09:39 | 2020-03-24T16:09:39 | 193,817,757 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 275 |
py
|
import socket
import sys

# Minimal TCP client: connect to this machine on port 9992, read one
# message from the server, close the socket, and print the message.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 9992

# Connect to the service at the given host and port.
s.connect((host, port))

# Receive at most 1024 bytes of data.
msg = s.recv(1024)

s.close()
print(msg.decode('utf-8'))
|
[
"[email protected]"
] | |
b9691e61dfe1e73f0cfed348461860d2ce4d6495
|
16ecabb5d9010c7fa4aebb8ab852f7c6a19193db
|
/src/0809.py
|
0ba2428a1bbf7638358e2412cd9b40399abf0b68
|
[] |
no_license
|
LeeSM0518/OpenCV-python
|
74ff0d899d291a35f9cd82d2ef37835a0c5ccdf2
|
46c234879f5d48876ca0888bdede8bfb347b7c30
|
refs/heads/master
| 2020-04-30T19:35:33.201278 | 2020-02-25T14:35:20 | 2020-02-25T14:35:20 | 177,043,146 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 535 |
py
|
# 0809.py
# Compute image moments of a binarized image and mark its centroid.
import cv2
import numpy as np

# 1: load the image, convert to grayscale, binarize at threshold 128.
src = cv2.imread('./data/momentTest.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
ret, bImage = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)

# 2: moments of the binary image (second arg True treats nonzero pixels as 1).
##M = cv2.moments(bImage)
M = cv2.moments(bImage, True)
for key, value in M.items():
    print('{}={}'.format(key, value))

# 3: centroid from raw moments (cx = m10/m00, cy = m01/m00); draw and show it.
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
dst = src.copy()
cv2.circle(dst, (cx, cy), 5, (0,0,255), 2)
cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
c031f3295b1ed90617b561c8f7640b752aad51fd
|
af53fb6bd0cd0ff70c68e43482b49420f0262764
|
/odonto/odonto_submissions/supplier_testing/case_43.py
|
84c48fea337719163487c20c990d24e7a60d00b3
|
[] |
no_license
|
gmolate/odonto
|
34b41c18b972c7e10be46874a630c0016d6f7237
|
f198608c41e9b991550a7929d28eb10002a3a664
|
refs/heads/master
| 2020-12-08T00:47:43.903738 | 2019-04-30T15:19:18 | 2019-04-30T15:19:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,711 |
py
|
import datetime
from odonto.odonto_submissions.serializers import translate_to_bdcs1
from fp17 import treatments, exemptions
def annotate(bcds1):
    """Fill bcds1 with the fixture data for this supplier test case.

    Patient TOM CARTWRIGHT (male, DOB 1978-12-31), urgent treatment
    consisting of an examination and one extraction, claimed under the
    Universal Credit exemption with evidence seen. Returns bcds1.
    """
    bcds1.patient.surname = "CARTWRIGHT"
    bcds1.patient.forename = "TOM"
    bcds1.patient.address = ["40 HIGH STREET"]
    bcds1.patient.sex = 'M'
    bcds1.patient.date_of_birth = datetime.date(1978, 12, 31)

    bcds1.date_of_acceptance = datetime.date(2017, 4, 1)
    bcds1.date_of_completion = datetime.date(2017, 4, 1)

    # "Universal Credit"
    bcds1.exemption_remission = {
        'code': exemptions.UNIVERSAL_CREDIT.EVIDENCE_SEEN,
    }

    # Treatments: "Examination, Extraction 1"
    bcds1.treatments = [
        treatments.EXAMINATION,
        treatments.EXTRACTION(1),
        # 'Band 4'
        treatments.TREATMENT_CATEGORY_URGENT,
    ]

    return bcds1
def from_model(bcds1, patient, episode):
    """Populate the patient/episode model records to mirror annotate(),
    then translate the episode onto bcds1 via translate_to_bdcs1.
    """
    demographics = patient.demographics()
    demographics.surname = "CARTWRIGHT"
    demographics.first_name = "TOM"
    demographics.house_number_or_name = "40"
    demographics.street = "HIGH STREET"
    demographics.sex = "Male"
    demographics.date_of_birth = datetime.date(1978, 12, 31)
    demographics.save()

    # Universal Credit exemption, evidence seen.
    episode.fp17exemptions_set.update(
        universal_credit=True,
        evidence_of_exception_or_remission_seen=True
    )
    # Clinical data: one examination, one extraction.
    episode.fp17clinicaldataset_set.update(
        examination=True,
        extractions=1
    )
    episode.fp17treatmentcategory_set.update(
        urgent_treatment=True,
    )
    episode.fp17incompletetreatment_set.update(
        date_of_acceptance=datetime.date(2017, 4, 1),
        completion_or_last_visit=datetime.date(2017, 4, 1)
    )
    translate_to_bdcs1(bcds1, episode)
|
[
"[email protected]"
] | |
58d3cfda83ea5046fc57e7c8de3e95fa26d4f198
|
555b9f764d9bca5232360979460bc35c2f5ad424
|
/google/ads/google_ads/v2/proto/resources/ad_group_audience_view_pb2.py
|
26b4ed14fc842a81e3edeec29f2158892b497c43
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
juanmacugat/google-ads-python
|
b50256163782bc0223bcd8b29f789d74f4cfad05
|
0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a
|
refs/heads/master
| 2021-02-18T17:00:22.067673 | 2020-03-05T16:13:57 | 2020-03-05T16:13:57 | 245,215,877 | 1 | 0 |
Apache-2.0
| 2020-03-05T16:39:34 | 2020-03-05T16:39:33 | null |
UTF-8
|
Python
| false | true | 3,716 |
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v2/proto/resources/ad_group_audience_view.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v2/proto/resources/ad_group_audience_view.proto',
package='google.ads.googleads.v2.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v2.resourcesB\030AdGroupAudienceViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v2/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V2.Resources\312\002!Google\\Ads\\GoogleAds\\V2\\Resources\352\002%Google::Ads::GoogleAds::V2::Resources'),
serialized_pb=_b('\nDgoogle/ads/googleads_v2/proto/resources/ad_group_audience_view.proto\x12!google.ads.googleads.v2.resources\x1a\x1cgoogle/api/annotations.proto\",\n\x13\x41\x64GroupAudienceView\x12\x15\n\rresource_name\x18\x01 \x01(\tB\x85\x02\n%com.google.ads.googleads.v2.resourcesB\x18\x41\x64GroupAudienceViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v2/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V2.Resources\xca\x02!Google\\Ads\\GoogleAds\\V2\\Resources\xea\x02%Google::Ads::GoogleAds::V2::Resourcesb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_ADGROUPAUDIENCEVIEW = _descriptor.Descriptor(
name='AdGroupAudienceView',
full_name='google.ads.googleads.v2.resources.AdGroupAudienceView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v2.resources.AdGroupAudienceView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=181,
)
DESCRIPTOR.message_types_by_name['AdGroupAudienceView'] = _ADGROUPAUDIENCEVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdGroupAudienceView = _reflection.GeneratedProtocolMessageType('AdGroupAudienceView', (_message.Message,), dict(
DESCRIPTOR = _ADGROUPAUDIENCEVIEW,
__module__ = 'google.ads.googleads_v2.proto.resources.ad_group_audience_view_pb2'
,
__doc__ = """An ad group audience view. Includes performance data from interests and
remarketing lists for Display Network and YouTube Network ads, and
remarketing lists for search ads (RLSA), aggregated at the audience
level.
Attributes:
resource_name:
The resource name of the ad group audience view. Ad group
audience view resource names have the form: ``customers/{cust
omer_id}/adGroupAudienceViews/{ad_group_id}~{criterion_id}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.resources.AdGroupAudienceView)
))
_sym_db.RegisterMessage(AdGroupAudienceView)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"[email protected]"
] | |
e5fc5f00fd14a45cd84e931f7688de9dc9f1f1d1
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/115_testing/examples/Github/_Level_2/unittest-master/python/csv_db.py
|
786e3e036143a86b8c363cf013bd10f92db6061b
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 5,718 |
py
|
# finalproject.py
# @author: Shubham Sachdeva
# @email:
# @date: 18-13-09
# reads data from input.csv
# For simplicity reads only ['GEO'], ['DGUID'], ['Food categories'], ['Commodity'] fields
# class Product - a class defining a record
# def read_csv - function that reads data from given file
import csv
import _mysql
import bisect
CONST_AUTHOR = "Shubham Sachdeva"
# Uses mysql database connection.
# Class Database simply wraps basic CRUD operations.
# @author: Shubham Sachdeva
class Database:
# Establishing a mysql connection
def __init__(self):
self.db = _mysql.connect("localhost", "root", "root", "student")
self._tablename = ""
# insert a record
def create(self, product):
query = ("INSERT INTO %s (geo, guid, category, commodity) VALUES('%s', '%s', '%s', '%s')" %
(self._tablename, product.geo, product.guid, product.category, product.commodity))
self.db.query(query)
# update a record based on id
def update(self, id, product):
query = ("UPDATE %s SET geo='%s', guid='%s', category='%s', commodity='%s' WHERE id=%d" %
(self._tablename, product.geo, product.guid, product.category, product.commodity, product.id))
self.db.query(query)
# get a record based on id
def read(self, id):
query = "SELECT * FROM %s WHERE id=%d" % (self._tablename, id)
self.db.query(query)
r = self.db.store_result()
product = Product()
for i in r.fetch_row(maxrows=1):
product.id = int(i[0])
product.geo = i[1]
product.guid = i[2]
product.category = i[3]
product.commodity = i[4]
return product
# delete a record based on id
def delete(self, id):
self.db.query("""DELETE FROM %s WHERE id=%d""" % (self._tablename, id))
# create table if it doesn't exist
def select_table(self, tablename):
self.db.query(
"CREATE TABLE IF NOT EXISTS " + tablename + " (`id` INT NOT NULL AUTO_INCREMENT , "
"`geo` VARCHAR(30) NOT NULL , "
"`guid` VARCHAR(30) NOT NULL , "
"`category` VARCHAR(100) NOT NULL , "
"`commodity` VARCHAR(100) NOT NULL , "
"PRIMARY KEY (`id`)) ENGINE = InnoDB;")
self._tablename = tablename
# custom sort function
# sort by guid
# @author: Shubham Sachdeva
def cmpFn(obj):
return obj.guid
# Class List - Custom list using standard list API library.
# Member function find and reverse_find returns index of given element.
# While find returns leftmost position, reverse_find returns rightmost position.
# This assumes that the list is sorted.
# @author: Shubham Sachdeva
class List:
def __init__(self):
self.lst = []
self.lstguid = []
def append(self, obj):
self.lst.append(obj)
def sort(self):
self.lst = sorted(self.lst, key=cmpFn)
self.lstguid = [obj.guid for obj in self.lst ]
def find(self, guid):
return bisect.bisect_left(self.lstguid, guid)
def reverse_find(self, guid):
return bisect.bisect_right(self.lstguid, guid)
# list iterator
# ListIterator simply operates on a list of primitive types.
# @author: Shubham Sachdeva
class ListIterator:
def __init__(self, lst):
self.lst = lst
self.cur = 0
def get(self):
if self.cur >=0 and self.cur < len(self.lst):
return self.lst[self.cur]
else:
return None
def next(self):
if self.cur < len(self.lst) -1:
self.cur += 1
return True
else:
return False
def prev(self):
if self.cur > 0:
self.cur -= 1
return True
else:
return False
def info(self):
return str(self.get())
# inherited from ListIterator
# Member function info has been overriden.
# @author: Shubham Sachdeva
class ObjectListIterator(ListIterator):
def info(self):
obj = self.get()
if obj == None:
return "None"
return "Current Object: " + ("%d\t%s\t%s\t%s\t%s" % (self.id, self.geo, self.guid, self.category, self.commodity))
# @author: Shubham Sachdeva
class Product:
# initialisation
def __init__(self, geo, guid, category, commodity):
self.id = 0
self.geo = geo
self.guid = guid
self.category = category
self.commodity = commodity
# for print
def __str__(self):
return ("%d\t%s\t%s\t%s\t%s" % (self.id, self.geo, self.guid, self.category, self.commodity))
# reads 4 fields from given file
# @author: Shubham Sachdeva
def read_csv(file_name):
lst = []
try:
with open(file_name, newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
product = Product(row['GEO'], row['DGUID'], row['Food categories'], row['Commodity'])
print (product)
lst.append(product)
except:
print ('read_csv failed')
return lst
# @author: Shubham Sachdeva
def main():
lst = read_csv('input.csv')
n = len(lst)
db = Database()
db.select_table('products')
for item in lst:
db.create(item)
print ("Created " + str(len(lst)) + " items");
print("Programmed by " + CONST_AUTHOR)
if __name__ == '__main__':
print (CONST_AUTHOR)
main()
|
[
"[email protected]"
] | |
b5c5f8e3ab90157f0a3222bf826041a3ef6bcb5b
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/2f9vjBiynkBtF3TBi_5.py
|
2de62cfd6805420701dc4149649d92594859e806
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,072 |
py
|
"""
In this challenge, you must verify the equality of two different values given
the parameters `a` and `b`.
Both the _value_ and _type_ of the parameters need to be equal. The possible
types of the given parameters are:
* Numbers
* Strings
* Booleans (`False` or `True`)
* Special values: `None`
What have you learned so far that will permit you to do two different checks
(value **and** type) with a single statement?
Implement a function that returns `True` if the parameters are equal, and
`False` if they are not.
### Examples
check_equality(1, true) ➞ False
# A number and a boolean: the value and type are different.
check_equality(0, "0") ➞ False
# A number and a string: the type is different.
check_equality(1, 1) ➞ True
# A number and a number: the type and value are equal.
### Notes
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
def check_equality(a, b):
return True if a is b else False
|
[
"[email protected]"
] | |
e28e74228f1af21ae745a066e94997e5017c48a7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03060/s273862278.py
|
42e46ce98c38ed062b3f6706f319584553664cc6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 317 |
py
|
import sys
import math
import bisect
def main():
n = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
for i in range(n):
A[i] -= B[i]
ans = 0
for a in A:
if a > 0:
ans += a
print(ans)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
5ab24c6a8ec0f36df320431b89ea6470b8909a7e
|
f4b5721c6b3f5623e306d0aa9a95ec53461c1f89
|
/backend/src/gloader/xml/xslt/AttributeValueTemplate.py
|
7f4c982f79e53d298825f773d7843f57e306cd56
|
[
"MIT"
] |
permissive
|
citelab/gini5
|
b53e306eb5dabf98e9a7ded3802cf2c646f32914
|
d095076113c1e84c33f52ef46a3df1f8bc8ffa43
|
refs/heads/uml-rename
| 2022-12-10T15:58:49.578271 | 2021-12-09T23:58:01 | 2021-12-09T23:58:01 | 134,980,773 | 12 | 11 |
MIT
| 2022-12-08T05:20:58 | 2018-05-26T17:16:50 |
Python
|
UTF-8
|
Python
| false | false | 3,437 |
py
|
########################################################################
#
# File Name: AttributeValueTemplate.py
#
#
"""
Implementation of AVTs from the XSLT Spec.
WWW: http://4suite.com/4XSLT e-mail: [email protected]
Copyright (c) 1999-2000 FourThought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import re, string
from xml.xslt import XsltException, Error
from xml.xpath import XPathParser, Conversions
g_braceSplitPattern = re.compile(r'([\{\}])')
class AttributeValueTemplate:
def __init__(self, source,reparse = 1):
self.source = source
if reparse:
self._plainParts = []
self._parsedParts = []
self._parse()
def _parse(self):
parser = XPathParser.XPathParser()
curr_plain_part = ''
curr_template_part = ''
in_plain_part = 1
split_form = re.split(g_braceSplitPattern, self.source)
skip_flag = 0
for i in range(len(split_form)):
segment = split_form[i]
if skip_flag:
skip_flag = skip_flag - 1
continue
if segment in ['{', '}']:
#Here we are accounting for a possible blank segment in between
try:
next = split_form[i + 1] + split_form[i + 2]
except IndexError:
next = None
if next == segment:
if in_plain_part:
curr_plain_part = curr_plain_part + segment
else:
curr_template_part = curr_template_part + segment
skip_flag = 2
elif segment == '{':
if in_plain_part:
self._plainParts.append(curr_plain_part)
in_plain_part = 0
curr_plain_part = ''
else:
raise XsltException(Error.AVT_SYNTAX)
else:
if not in_plain_part:
parsed = parser.parseExpression(curr_template_part)
self._parsedParts.append(parsed)
in_plain_part = 1
curr_template_part = ''
else:
raise XsltException(Error.AVT_SYNTAX)
else:
if in_plain_part:
curr_plain_part = curr_plain_part + segment
else:
curr_template_part = curr_template_part + segment
if in_plain_part:
self._plainParts.append(curr_plain_part)
else:
raise XsltException(Error.AVT_SYNTAX)
def evaluate(self, context):
result = ''
expansions = map(
lambda x, c=context: Conversions.StringValue(x.evaluate(c)),
self._parsedParts
)
for i in range(len(self._parsedParts)):
result = result + self._plainParts[i] + expansions[i]
result = result + self._plainParts[-1]
return result
def __repr__(self):
return self.source
def __getinitargs__(self):
return (self.source, 0)
def __getstate__(self):
return (self._plainParts,self._parsedParts)
def __setstate__(self, state):
# Nothing to do
self._plainParts,self._parsedParts = state
|
[
"[email protected]"
] | |
83fef1df13d09343fd01f3337ac2d6bbc7f03c8d
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2618/60895/291182.py
|
ab6450cf38238d5387e2704d4907b7d62fce72fb
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 200 |
py
|
t=int(input())
while t>0:
t=t-1
n=int(input())
s=input()
if s=='2 3 1' or s=='2 1 3':
print(1)
elif s=='4 3 1 2' or s=='2':
print(2)
else:
print(s)
|
[
"[email protected]"
] | |
86f816fa4c07689b4bbb27949c7e824974c6af10
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/tests/models/deit/test_image_processing_deit.py
|
21dc3d9e95a79f48a9c4a6af5658a0715ce5faf6
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127 | 2023-09-05T19:21:33 | 2023-09-05T19:21:33 | 155,220,641 | 102,193 | 22,284 |
Apache-2.0
| 2023-09-14T20:44:49 | 2018-10-29T13:56:00 |
Python
|
UTF-8
|
Python
| false | false | 4,508 |
py
|
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from transformers import DeiTImageProcessor
class DeiTImageProcessingTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"height": 20, "width": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class DeiTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = DeiTImageProcessor if is_vision_available() else None
test_cast_dtype = True
def setUp(self):
self.image_processor_tester = DeiTImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "center_crop"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 20, "width": 20})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
|
[
"[email protected]"
] | |
c5afaa2e84fa29e5ab2ebdf6d8bad5d14b00c86e
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Quantization/trend_ConstantTrend/cycle_12/ar_/test_artificial_1024_Quantization_ConstantTrend_12__0.py
|
8099a2bf0f032e0aa56a37bec74624f179bb330f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 |
BSD-3-Clause
| 2023-03-08T21:45:40 | 2016-10-13T09:30:30 |
Python
|
UTF-8
|
Python
| false | false | 272 |
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 0);
|
[
"[email protected]"
] | |
08329b9459b84578dea46f87b58ec8643041c8b8
|
584f7b51d7cd529448e2fc0147557e26931ab17e
|
/test_UsePyFFTW.py
|
5b4eb17729decdd1676234fbf4fc635aba9dee8e
|
[
"BSD-3-Clause"
] |
permissive
|
opticspy/lightpipes
|
8ca0d2221a1b893de5e51fec9061e90b9145f5f8
|
f4ffdedb3ab2f9b5ae5a9a8e37985d2a7f8bb2ef
|
refs/heads/master
| 2023-09-04T19:07:11.376631 | 2023-09-04T15:24:55 | 2023-09-04T15:24:55 | 80,127,706 | 191 | 55 |
BSD-3-Clause
| 2023-08-23T00:45:33 | 2017-01-26T15:39:28 |
Python
|
UTF-8
|
Python
| false | false | 460 |
py
|
#! /usr/bin/env python
"""
Script to test the new usePyFFTW option to compare pyFFTW and numpy FFT
"""
import time
from LightPipes import *
start_time = time.time()
wavelength = 500*nm
size = 25*mm
N = 1000
F=Begin(size, wavelength, N)
F=Fresnel(F, 100, usepyFFTW = True)
print(F.field[23,33])
#Fresnel: (1.0795142552372512+0.45098289321969964j)
#Forvard: (0.9865686238070652+0.16334733092228165j)
print("--- %s seconds ---" % (time.time() - start_time))
|
[
"[email protected]"
] | |
3ac69e9105cdc2bfb5dd22f1c4bf0bb8a2ca87c4
|
3dc647cd07a7361ed401e40d2b7cce8c826c8f6c
|
/Lib/test/test_json/test_dump.py
|
13b40020781bae33ea47c8ff5446030e7f348677
|
[
"Python-2.0",
"CC-BY-4.0",
"MIT"
] |
permissive
|
RustPython/RustPython
|
5ddce4a9848b9de8c041ffd2634f83c0105d3f39
|
b864e5da1f18897fc884180b7093df5aa170024f
|
refs/heads/main
| 2023-09-04T12:38:29.458699 | 2023-09-03T12:33:42 | 2023-09-03T12:33:42 | 135,201,145 | 15,815 | 1,302 |
MIT
| 2023-09-14T08:11:45 | 2018-05-28T19:27:01 |
Rust
|
UTF-8
|
Python
| false | false | 2,409 |
py
|
from io import StringIO
from test.test_json import PyTest, CTest
from test.support import bigmemtest, _1G
class TestDump:
def test_dump(self):
sio = StringIO()
self.json.dump({}, sio)
self.assertEqual(sio.getvalue(), '{}')
def test_dumps(self):
self.assertEqual(self.dumps({}), '{}')
def test_dump_skipkeys(self):
v = {b'invalid_key': False, 'valid_key': True}
with self.assertRaises(TypeError):
self.json.dumps(v)
s = self.json.dumps(v, skipkeys=True)
o = self.json.loads(s)
self.assertIn('valid_key', o)
self.assertNotIn(b'invalid_key', o)
def test_encode_truefalse(self):
self.assertEqual(self.dumps(
{True: False, False: True}, sort_keys=True),
'{"false": true, "true": false}')
self.assertEqual(self.dumps(
{2: 3.0, 4.0: 5, False: 1, 6: True}, sort_keys=True),
'{"false": 1, "2": 3.0, "4.0": 5, "6": true}')
# Issue 16228: Crash on encoding resized list
def test_encode_mutated(self):
a = [object()] * 10
def crasher(obj):
del a[-1]
self.assertEqual(self.dumps(a, default=crasher),
'[null, null, null, null, null]')
# Issue 24094
def test_encode_evil_dict(self):
class D(dict):
def keys(self):
return L
class X:
def __hash__(self):
del L[0]
return 1337
def __lt__(self, o):
return 0
L = [X() for i in range(1122)]
d = D()
d[1337] = "true.dat"
self.assertEqual(self.dumps(d, sort_keys=True), '{"1337": "true.dat"}')
class TestPyDump(TestDump, PyTest): pass
class TestCDump(TestDump, CTest):
# The size requirement here is hopefully over-estimated (actual
# memory consumption depending on implementation details, and also
# system memory management, since this may allocate a lot of
# small objects).
@bigmemtest(size=_1G, memuse=1)
def test_large_list(self, size):
N = int(30 * 1024 * 1024 * (size / _1G))
l = [1] * N
encoded = self.dumps(l)
self.assertEqual(len(encoded), N * 3)
self.assertEqual(encoded[:1], "[")
self.assertEqual(encoded[-2:], "1]")
self.assertEqual(encoded[1:-2], "1, " * (N - 1))
|
[
"[email protected]"
] | |
d543afbd88b02247daaffc3464471ccbfa5b366a
|
03969015ab882f4751dc0e91beeda1212babca48
|
/robot_code/Nimbus_ws/build/robotiq_85_gripper_actions/catkin_generated/pkg.develspace.context.pc.py
|
5deddba43de547be76a27e50e515649c31ddd7ff
|
[] |
no_license
|
lnairGT/Thesis_code
|
f3ad57f4344691227dcd128a741eb9c0e937738e
|
6f5dbfc2510272f294a0e9bb4273beceeacbff2a
|
refs/heads/master
| 2023-03-17T21:43:56.320553 | 2020-09-26T16:05:31 | 2020-09-26T16:05:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 388 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robotiq_85_gripper_actions"
PROJECT_SPACE_DIR = "/home/lnair3/Nimbus_ws/devel"
PROJECT_VERSION = "0.0.1"
|
[
"[email protected]"
] | |
8da6c731d5e0553722f2e56ef3a7a028a86cce95
|
4ca8df3a127e9b15cbfecea6505928741f685a63
|
/gongfei/month03/Django/onlybuy/OnlyBuy/goods/migrations/0002_goods_saller.py
|
d6b69407f107d03ed0eace38b76d59329ac825ea
|
[] |
no_license
|
gongfei6644/gongfei
|
2beb082c56197bc23ca20a6927ff6c10d8beaa83
|
bfdd5e6a3a8d76ad1e43cf54df186b944cad29e4
|
refs/heads/master
| 2022-11-30T20:49:22.213040 | 2020-08-16T12:52:28 | 2020-08-16T12:52:28 | 286,283,597 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 648 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-06-19 14:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('goods', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='goods',
name='saller',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"1"
] |
1
|
6267d7aa1c8e47d9c979f168d10dee757731de26
|
6a08edd0e30d12eb89e8de486e2d2d0dddff74d7
|
/run_experiments/general_utils/lightgbm_optimizer.py
|
07f654e8ef7d6c22e4eed79240bb1347e38b469c
|
[] |
no_license
|
jrzaurin/tabulardl-benchmark
|
63b0fa2c046f9900a51b0223a884c475ac66b17f
|
ceb7b7f8bc90666b2d010fe570a77eb3ff2dde78
|
refs/heads/master
| 2023-05-29T11:29:30.371284 | 2021-06-12T16:32:20 | 2021-06-12T16:32:20 | 356,328,779 | 46 | 7 | null | 2021-06-10T16:44:51 | 2021-04-09T16:08:21 |
Python
|
UTF-8
|
Python
| false | false | 6,539 |
py
|
import warnings
from typing import Any, Dict, Optional
import lightgbm as lgb
import pandas as pd
from hyperopt import Trials, fmin, hp, space_eval, tpe
from lightgbm import Dataset as lgbDataset
from optuna.integration.lightgbm import LightGBMTunerCV
from sklearn.metrics import log_loss, mean_squared_error
warnings.filterwarnings("ignore")
class LGBOptimizerHyperopt(object):
def __init__(
self,
objective: str = "binary",
is_unbalance: bool = False,
verbose: bool = False,
num_class: Optional[int] = None,
):
self.objective = objective
if objective == "multiclass" and not num_class:
raise ValueError("num_class must be provided for multiclass problems")
self.num_class = num_class
self.is_unbalance = is_unbalance
self.verbose = verbose
self.early_stop_dict: Dict = {}
def optimize(
self,
dtrain: lgbDataset,
deval: lgbDataset,
maxevals: int = 200,
):
if self.objective == "regression":
self.best = lgb.LGBMRegressor().get_params()
else:
self.best = lgb.LGBMClassifier().get_params()
del (self.best["silent"], self.best["importance_type"])
param_space = self.hyperparameter_space()
objective = self.get_objective(dtrain, deval)
objective.i = 0
trials = Trials()
best = fmin(
fn=objective,
space=param_space,
algo=tpe.suggest,
max_evals=maxevals,
trials=trials,
verbose=self.verbose,
)
self.trials = trials
best = space_eval(param_space, trials.argmin)
best["n_estimators"] = int(best["n_estimators"])
best["num_leaves"] = int(best["num_leaves"])
best["min_child_samples"] = int(best["min_child_samples"])
best["verbose"] = -1
best["objective"] = self.objective
self.best.update(best)
def get_objective(self, dtrain: lgbDataset, deval: lgbDataset):
def objective(params: Dict[str, Any]) -> float:
# hyperopt casts as float
params["n_estimators"] = int(params["n_estimators"])
params["num_leaves"] = int(params["num_leaves"])
params["min_child_samples"] = int(params["min_child_samples"])
params["verbose"] = -1
params["seed"] = 1
params["feature_pre_filter"] = False
params["objective"] = self.objective
if self.objective != "regression":
params["is_unbalance"] = self.is_unbalance
if self.objective == "multiclass":
params["num_class"] = self.num_class
model = lgb.train(
params,
dtrain,
valid_sets=[deval],
early_stopping_rounds=50,
verbose_eval=False,
)
preds = model.predict(deval.data)
if self.objective != "regression":
score = log_loss(deval.label, preds)
elif self.objective == "regression":
score = mean_squared_error(deval.label, preds)
objective.i += 1 # type: ignore
return score
return objective
def hyperparameter_space(
self, param_space: Dict[str, Any] = None
) -> Dict[str, Any]:
space = {
"learning_rate": hp.uniform("learning_rate", 0.01, 0.3),
"n_estimators": hp.quniform("n_estimators", 100, 1000, 50),
"num_leaves": hp.quniform("num_leaves", 20, 200, 10),
"min_child_samples": hp.quniform("min_child_samples", 20, 100, 20),
"colsample_bytree": hp.uniform("colsample_bytree", 0.5, 1.0),
"reg_alpha": hp.choice(
"reg_alpha", [0.01, 0.05, 0.1, 0.2, 0.4, 1.0, 2.0, 4.0, 10.0]
),
"reg_lambda": hp.choice(
"reg_lambda", [0.01, 0.05, 0.1, 0.2, 0.4, 1.0, 2.0, 4.0, 10.0]
),
}
if param_space:
return param_space
else:
return space
class LGBOptimizerOptuna(object):
def __init__(
self,
objective: str = "binary",
is_unbalance: bool = False,
verbose: bool = False,
num_class: Optional[int] = None,
):
self.objective = objective
if objective == "multiclass" and not num_class:
raise ValueError("num_class must be provided for multiclass problems")
self.num_class = num_class
self.is_unbalance = is_unbalance
self.verbose = verbose
self.best: Dict[str, Any] = {} # Best hyper-parameters
def optimize(self, dtrain: lgbDataset, deval: lgbDataset):
# Define the base parameters
if self.objective == "binary":
params: Dict = {"objective": self.objective}
elif self.objective == "multiclass":
params: Dict = {"objective": self.objective, "metric": "multi_logloss"}
elif self.objective == "regression":
params: Dict = {"objective": self.objective, "metric": "rmse"}
if self.verbose:
params["verbosity"] = 1
else:
params["verbosity"] = -1
if self.objective != "regression":
params["is_unbalance"] = self.is_unbalance
if self.objective == "multiclass":
params["num_class"] = self.num_class
# Reformat the data for LightGBM cross validation method
train_set = lgb.Dataset(
data=pd.concat([dtrain.data, deval.data]).reset_index(drop=True),
label=pd.concat([dtrain.label, deval.label]).reset_index(drop=True),
categorical_feature=dtrain.categorical_feature,
free_raw_data=False,
)
train_index = range(len(dtrain.data))
valid_index = range(len(dtrain.data), len(train_set.data))
# Run the hyper-parameter tuning
self.tuner = LightGBMTunerCV(
params=params,
train_set=train_set,
folds=[(train_index, valid_index)],
verbose_eval=False,
num_boost_round=1000,
early_stopping_rounds=50,
)
self.tuner.run()
self.best = self.tuner.best_params
# since n_estimators is not among the params that Optuna optimizes we
# need to add it manually. We add a high value since it will be used
# with early_stopping_rounds
self.best["n_estimators"] = 1000 # type: ignore
|
[
"[email protected]"
] | |
8368a60298be2826652c9b2392af1de2414977d0
|
36df29dbd2c79f41ee5e70a6b836303d0f0fe186
|
/day1-15/day01/temperature.py
|
682675e9cff305a0db4848e6ddfe9d9035042a27
|
[] |
no_license
|
roohom/Program_100Days
|
abbe20d5df4444adadc937f23f1e402fce3a8273
|
3fd87da8b8edaaeb9349f68db0b9b3cd0db9f159
|
refs/heads/master
| 2021-01-13T18:06:52.899517 | 2020-08-30T15:37:07 | 2020-08-30T15:37:07 | 242,451,115 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 313 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/2/24 14:40
# @Author : Roohom
# @Site :
# @File : temperature.py
# @Software: PyCharm
"""
将华氏温度转化为摄氏温度
"""
F = float(input("请输入华氏温度:"))
C = (F - 32) / 1.8
print('%.2f华氏度 = %.1f摄氏度' % (F, C))
|
[
"[email protected]"
] | |
f2bb534fa1b683ba85fc3a83e9e250269fa4c85b
|
7b1de4a2607e3125b719c499a05bf6e2d3ec532d
|
/exceptions/chaining_demo.py
|
714dc5ff28adfb14c75345702632fc8819a3e118
|
[] |
no_license
|
ganqzz/sandbox_py
|
61345ac7bddb09081e02decb78507daa3030c1e8
|
cc9e1ecca2ca99f350a3e2c3f51bbdb5eabc60e1
|
refs/heads/master
| 2022-12-01T21:54:38.461718 | 2021-09-04T03:47:14 | 2021-09-04T03:47:14 | 125,375,767 | 0 | 1 | null | 2023-04-16T00:55:51 | 2018-03-15T14:00:47 |
Python
|
UTF-8
|
Python
| false | false | 830 |
py
|
def func():
    """Always raise ValueError; serves as the root error for the chaining demos."""
    raise ValueError('from func()')
# set __cause__ = None
def demo1():
    # Implicit chaining: raising inside an except block records the in-flight
    # ValueError on __context__ while __cause__ stays None.
    try:
        func()
    except ValueError:
        raise RuntimeError('from demo1()')
# set __cause__ = e
def demo2():
    # Explicit chaining: "raise ... from e" sets __cause__ (and __context__).
    try:
        func()
    except ValueError as e:
        raise RuntimeError('from demo2()') from e
# set __cause__ = None, and suppress chaining
def demo3():
    # "from None" leaves __context__ recorded but sets __suppress_context__,
    # so the traceback hides the original ValueError.
    try:
        func()
    except ValueError:
        raise RuntimeError('from demo3()') from None
def run_demo(f):
    # Run one demo function and print how its RuntimeError was chained
    # (__context__ vs __cause__).
    print('---', f.__name__)
    try:
        f()
    except Exception as e:
        print(e)
        print('__context__:', repr(e.__context__))
        print('__cause__:', repr(e.__cause__))
    print()
if __name__ == "__main__":
    # Exercise each chaining variant; uncomment the raw calls to see the
    # unhandled tracebacks instead.
    # demo1()
    # demo2()
    # demo3()
    run_demo(demo1)
    run_demo(demo2)
    run_demo(demo3)
|
[
"[email protected]"
] | |
e9bed052d8dc90762bbb0cc2031106059fedb6e3
|
dcd8a0a9ce04818487ba7d46a1ba07d18fb08b9f
|
/torch/quantization/_quantize_script.py
|
5d77785cc7742a543324d7df64ba47cd81852158
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
thomaswang525/pytorch
|
284efc782fdc333e24892ac10b4d8963f812bd0b
|
9e3605de98abb969124faff96e6e90e4f4014eb6
|
refs/heads/master
| 2021-05-18T08:30:09.190932 | 2020-03-30T02:46:19 | 2020-03-30T02:48:29 | 251,193,560 | 1 | 0 |
NOASSERTION
| 2020-03-30T03:38:57 | 2020-03-30T03:38:57 | null |
UTF-8
|
Python
| false | false | 4,668 |
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from .qconfig import QConfig
from torch.jit._recursive import wrap_cpp_module
class ConvPackedParams(torch.nn.Module):
    """Scriptable holder for prepacked quantized conv2d weights.

    Wraps torch.ops.quantized.conv2d_prepack so the packed weight travels
    with the module and survives (de)serialization via __getstate__ /
    __setstate__.
    """

    def __init__(self):
        super(ConvPackedParams, self).__init__()
        # Placeholder 1x1 quantized weight; real weights are installed later
        # through set_weight_bias().
        wq = torch._empty_affine_quantized([1, 1, 1, 1], scale=1.0, zero_point=0, dtype=torch.qint8)
        self.stride = [1, 1]
        self.padding = [0, 0]
        self.dilation = [1, 1]
        self.groups = 1
        self.set_weight_bias(wq, None)

    @torch.jit.export
    def set_conv_params(self, stride, padding, dilation, groups):
        # type: (List[int], List[int], List[int], int) -> None
        """Record the conv hyper-parameters used when (re)packing weights."""
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups

    @torch.jit.export
    def set_weight_bias(self, weight, bias):
        # type: (torch.Tensor, Optional[torch.Tensor]) -> None
        """Prepack *weight*/*bias* using the currently stored conv params."""
        self._packed_params = torch.ops.quantized.conv2d_prepack(weight, bias, self.stride,
                                                                 self.padding, self.dilation, self.groups)

    @torch.jit.export
    def _weight_bias(self):
        """Unpack and return (weight, bias) from the packed representation."""
        return torch.ops.quantized.conv2d_unpack(self._packed_params)

    def forward(self, x):
        # Identity; this module exists only to carry packed state.
        return x

    @torch.jit.export
    def __getstate__(self):
        qweight, bias = self._weight_bias()
        return (qweight,
                bias,
                self.stride,
                self.padding,
                self.dilation,
                self.groups,
                self.training)

    @torch.jit.export
    def __setstate__(self, state):
        # Restore conv params first so set_weight_bias repacks correctly.
        self.stride = state[2]
        self.padding = state[3]
        self.dilation = state[4]
        self.groups = state[5]
        self.set_weight_bias(state[0],
                             state[1])
        self.training = state[6]
# Shared scripted packed-params prototypes; only available when the fbgemm
# backend is among the supported quantized engines, otherwise left as None.
linear_packed_params = None
conv_packed_params = None
if 'fbgemm' in torch.backends.quantized.supported_engines:
    linear_packed_params = torch.jit.script(torch.nn.quantized.modules.linear.LinearPackedParams())._c
    conv_packed_params = torch.jit.script(ConvPackedParams())._c
def _check_is_script_module(model):
if not isinstance(model, torch.jit.ScriptModule):
raise ValueError('input must be a script module, got: ' + str(type(model)))
def script_qconfig(qconfig):
    """Instantiate and script a QConfig's observer factories.

    Returns a QConfig whose activation/weight fields hold the scripted
    observer modules' C++ handles, ready for the observer-insertion pass.
    """
    scripted_activation = torch.jit.script(qconfig.activation())._c
    scripted_weight = torch.jit.script(qconfig.weight())._c
    return QConfig(activation=scripted_activation, weight=scripted_weight)
def prepare_script(model, qconfig_dict, inplace=False):
    """Insert observers into a scripted model according to *qconfig_dict*.

    Entries whose qconfig is falsy are passed through as None (skip
    quantization for that submodule). Raises ValueError for non-script input.
    """
    _check_is_script_module(model)
    scripted_qconfigs = {}
    for name, qconfig in qconfig_dict.items():
        scripted_qconfigs[name] = script_qconfig(qconfig) if qconfig else None
    if not inplace:
        model = model.copy()
    observed = torch._C._jit_pass_insert_observers(model._c,
                                                   'forward',
                                                   scripted_qconfigs,
                                                   False)
    return wrap_cpp_module(observed)
def prepare_dynamic_script(model, qconfig_dict):
    """Insert observers for dynamic quantization into a scripted model.

    Unlike prepare_script, this always mutates the underlying C++ module of
    *model* (no copy) and passes the dynamic flag to the insertion pass.
    """
    _check_is_script_module(model)
    scripted_qconfigs = {name: script_qconfig(qconfig)
                         for name, qconfig in qconfig_dict.items()}
    observed = torch._C._jit_pass_insert_observers(model._c,
                                                   'forward',
                                                   scripted_qconfigs,
                                                   False,
                                                   True)
    return wrap_cpp_module(observed)
def convert_script(model, inplace=False, debug=False):
    """Convert an observed scripted model into a quantized one.

    With debug=True the finalize pass is skipped so the inserted
    quant/dequant nodes stay visible in the graph.
    """
    _check_is_script_module(model)
    if not inplace:
        model = model.copy()
    model.eval()
    model = wrap_cpp_module(torch._C._jit_pass_insert_quant_dequant(model._c, 'forward', False))
    if debug:
        return model
    return wrap_cpp_module(torch._C._jit_pass_quant_finalize(model._c))
def quantize_script(model, qconfig_dict, run_fn, run_args, inplace=False, debug=False):
    """End-to-end post-training quantization for a scripted model.

    Folds conv-bn, inserts observers per *qconfig_dict*, calibrates via
    run_fn(forward_method, *run_args), then converts to a quantized model.
    Raises ValueError if *model* is not a ScriptModule with a forward method.
    """
    _check_is_script_module(model)
    if not model._c._has_method('forward'):
        raise ValueError('input script module does not have forward method')
    # NOTE(review): inplace=True is rejected by this assert, so the copy
    # below always runs (except under `python -O`, where asserts vanish).
    assert not inplace, "We don't support inplace right now"
    if not inplace:
        model = model.copy()
    torch._C._jit_pass_dedup_module_uses(model._c)
    model = wrap_cpp_module(torch._C._jit_pass_fold_convbn(model._c))
    model = prepare_script(model, qconfig_dict, True)
    run_fn(model._c._get_method('forward'), *run_args)
    model = convert_script(model, True, debug)
    return model
|
[
"[email protected]"
] | |
3a16940ab2a40e617ed92c2249c39f81f6e348a5
|
b553e12ccd8d7d4653e8987688494e322602b146
|
/scripts/process/hotfixes/hotfixes.py
|
c5ad7d2ee221c15e40f05ba30bc00eb6616c2370
|
[
"MIT"
] |
permissive
|
fossabot/forensicworkflows
|
2a7339bc9e97f18e8a4f432e7a534f5318e1e8dc
|
fca4bcf5363163e6fdd78763fa4aa208c1f72d1f
|
refs/heads/master
| 2022-04-14T21:36:26.770660 | 2020-04-13T15:24:58 | 2020-04-13T15:24:58 | 255,069,891 | 0 | 0 | null | 2020-04-12T11:41:41 | 2020-04-12T11:41:40 | null |
UTF-8
|
Python
| false | false | 5,301 |
py
|
#!/usr/bin/env python
# Copyright (c) 2019 Siemens AG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Author(s): Demian Kellermann
"""
This plugin parses different registry entries for installed Hotfixes (patches) to the Windows system
as well as to other software components
"""
import logging
import re
import struct
from collections import defaultdict
from datetime import datetime
import forensicstore
from ...util import combined_conditions
LOGGER = logging.getLogger(__name__)

# Lower-cased registry path prefixes for Windows-Update hotfixes recorded by
# Component Based Servicing (matched against obj["key"].lower()).
HOTFIX_PATHS_INSTALLER = [
    'hkey_local_machine\\software\\microsoft\\windows\\currentversion\\component based servicing\\packages\\',
]
# Lower-cased prefixes for additional product updates under Microsoft\Updates.
HOTFIX_PATHS_ADDITIONAL = [
    'hkey_local_machine\\software\\wow6432node\\microsoft\\updates\\',
    'hkey_local_machine\\software\\microsoft\\updates\\',
]
# Matches a Knowledge Base article id, e.g. "KB4051963".
KB_REGEX = re.compile(r'KB\d+')
def _analyze_installer(obj):
    """Parse a Component Based Servicing package key into hotfix entries.

    Only packages written by the Windows Update Agent are considered.
    Returns a list of hotfix result dicts (possibly empty).

    Bug fix: value names are lower-cased when building ``hotfix_infos``, but
    were previously looked up in mixed case ('InstallClient', ...), so every
    lookup missed and the function always returned []. All lookups now use
    the lower-cased names.
    """
    entries = []
    installer_entries = defaultdict(set)
    # Value names are normalized to lower case here; look-ups must match.
    hotfix_infos = {v["name"].lower(): v["data"] for v in obj["values"]}
    if hotfix_infos.get('installclient') != 'WindowsUpdateAgent':
        return []
    hotfix = KB_REGEX.search(obj["key"].split('\\')[-1])
    if not hotfix:
        # some entries do not have the KB number in the title, but something
        # like "RollupFix"; fall back to the InstallLocation value
        location = hotfix_infos.get('installlocation')
        if location:
            hotfix = KB_REGEX.search(location)
        if not hotfix:
            # Covers both "no InstallLocation" and "no KB number inside it".
            LOGGER.info("Non KB entry for WindowsUpdateAgent found: %s",
                        obj["key"])
            return []
    # Install time is split over two signed DWORDs of a FILETIME.
    install_high = hotfix_infos.get('installtimehigh')
    install_low = hotfix_infos.get('installtimelow')
    if install_high and install_low:
        timestamp = filetime_to_timestamp(
            filetime_join(install_high, install_low))
    else:
        timestamp = ''
    installer_entries[hotfix.group(0)].add(timestamp)

    for kb_id in installer_entries:
        entries.append({
            'Hotfix':
                kb_id,
            'Installed':
                sorted(installer_entries[kb_id])[0]
                if installer_entries[kb_id] else '-',
            'Source':
                'Component Based Servicing',
            "type":
                "hotfix"
        })
    return entries
def _analyze_additional(key):
hotfix = key["key"].split('\\')[-1]
product = key["key"].split('\\')[-2]
return [{
'Hotfix': hotfix,
'Installed': key["modified"],
'Source': 'Microsoft Updates',
'Component': product,
"type": "hotfix"
}]
def transform(obj):
    """Dispatch a registry key object to the matching hotfix analyzer.

    Returns [] for keys outside the known hotfix paths.
    """
    key_lower = obj["key"].lower()
    if key_lower.startswith(tuple(HOTFIX_PATHS_INSTALLER)):
        return _analyze_installer(obj)
    if key_lower.startswith(tuple(HOTFIX_PATHS_ADDITIONAL)):
        return _analyze_additional(obj)
    return []
def filetime_join(upper, lower):
    """Combine two signed 32-bit registry DWORDs into one unsigned 64-bit value.

    :param upper: upper part of the number
    :param lower: lower part of the number
    """
    # Mask to 32 bits so negative (sign-extended) DWORDs wrap correctly,
    # matching struct.pack('ii', ...) / unpack('Q', ...) semantics.
    return ((upper & 0xFFFFFFFF) << 32) | (lower & 0xFFFFFFFF)
def filetime_to_timestamp(filetime_64):
    """
    The FILETIME timestamp is a 64-bit integer that contains the number
    of 100th nano seconds since 1601-01-01 00:00:00.
    The number is usually saved in the registry using two DWORD values.
    :return: ISO-8601 string of UTC time
    """
    # Shift the origin to the Unix epoch and scale 100ns ticks to seconds.
    unixepoch_as_filetime = 116444736000000000
    ticks_per_second = 10000000
    unix_seconds = (filetime_64 - unixepoch_as_filetime) / ticks_per_second
    return datetime.utcfromtimestamp(unix_seconds).isoformat()
def main():
    """Query hotfix-related registry keys from the forensicstore in the
    current directory and persist every parsed hotfix entry back into it."""
    store = forensicstore.connect(".")
    hklmsw = "HKEY_LOCAL_MACHINE\\SOFTWARE\\"
    # LIKE-style patterns for the three hotfix locations handled by transform().
    conditions = [{
        'key':
            hklmsw +
            "Microsoft\\Windows\\CurrentVersion\\Component Based Servicing\\Packages\\%"
    }, {
        'key': hklmsw + "WOW6432Node\\Microsoft\\Updates\\%\\%"
    }, {
        'key': hklmsw + "Microsoft\\Updates\\%\\%"
    }]
    for item in store.select("windows-registry-key", combined_conditions(conditions)):
        for entry in transform(item):
            store.insert(entry)
    store.close()


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
50cbc0a6b7378fde63f8deb76fd0bda5440b65e5
|
583d03a6337df9f1e28f4ef6208491cf5fb18136
|
/dev4qx/madeira-stub/handlers/stub/niukou.py
|
6b0e4e26edb01f71cb86b882a9492992f2eca35c
|
[] |
no_license
|
lescpsn/lescpsn
|
ece4362a328f009931c9e4980f150d93c4916b32
|
ef83523ea1618b7e543553edd480389741e54bc4
|
refs/heads/master
| 2020-04-03T14:02:06.590299 | 2018-11-01T03:00:17 | 2018-11-01T03:00:17 | 155,309,223 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,845 |
py
|
import json
import logging
import tornado
import tornado.web
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop
request_log = logging.getLogger("madeira.request")
class NiukouOrderHandler(tornado.web.RequestHandler):
    """Stub endpoint faking the Niukou order API for integration tests.

    Reads a canned result for the order from redis, answers with a fixed
    success payload, and (for result code '0') schedules a delayed callback
    that simulates the upstream status notification.
    """

    @tornado.gen.coroutine
    def post(self):
        try:
            order_id = self.get_argument("OutTradeNo")
            master_test = self.application.sentinel.master_for('madeira', db=3)
            r2 = r1 = master_test.hget('result:' + order_id, 'result')  # order status from redis, e.g. r2=r1='100,00;成功'
            if ',' in r1:
                r1, r2 = r1.split(',')  # r1="100" r2="00;成功"
            data = {"HEADER":{"SEQNO":"Q2015101209294910063131","SECERTKEY":"713B242546AA7239A572AE1E2103A777","APPID":"QuXun","TIMESTAMP":"20151012092949276","VERSION":"V1.0"},"MSGBODY":{"CONTENT":{"ORDERID":"144461347935975","EXTORDER":order_id},"RESP":{"RCODE":"00","RMSG":"OK"}}}
            self.finish(json.dumps(data))
            if r1 == '0':
                # Simulate the asynchronous status callback 10 seconds later.
                IOLoop.current().call_later(10, niukou_callback, order_id, r2)
        except Exception:
            request_log.exception('FAIL')
def niukou_callback(order_id, result):
    """Fire the simulated upstream status callback for *order_id*.

    NOTE(review): *result* is trimmed below but never used afterwards — the
    callback payload is fully hard-coded; confirm whether that is intended.
    """
    if ';' in result:
        result = result.split(';')[0]
    body = {"HEADER":{"VERSION":"V1.1","TIMESTAMP":'',"SEQNO":'',"APPID":"QuXun","SECERTKEY":"E4CF8702097BF3D3EFF03DF3ACFDEE5E"},"MSGBODY":{"CONTENT":{"ORDERID":"144461587745723","EXTORDER":order_id,"STATUS":"\u6210\u529f","CODE":"0"}}}
    body = json.dumps(body)
    url = 'http://localhost:8899/callback/niukou'
    http_client = AsyncHTTPClient()
    try:
        request_log.info('YFLOW CALLBACK\n%s', body)
        http_client.fetch(url, method='POST', body=body)
    except Exception:
        request_log.exception('FAIL')
    finally:
        http_client.close()
|
[
"[email protected]"
] | |
b8ad77ebbc0f8d213a39e817e72baccde8bfd65f
|
112f02c4be5176907766f7546de7d5d57a2df2af
|
/tutorial/tutorial_56.py
|
aea22de47ee4c3870ffbc5ddf5b27264f1cb2d8c
|
[] |
no_license
|
ankitsingh03/code-python
|
010efdcf157d5411f81b6fbfca74f8b36e3ea263
|
7fd33b9e7f269e3042bdb13a47a26a3da87a68bc
|
refs/heads/master
| 2023-03-25T10:48:23.282822 | 2021-03-18T06:43:27 | 2021-03-18T06:43:27 | 289,693,369 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 178 |
py
|
# Print each distinct character of the entered name, in first-appearance
# order, together with how many times it occurs in the whole name.
name = input("enter your name : ")
seen = ""
for ch in name:
    if ch in seen:
        continue
    seen += ch
    print(f"{ch} : {name.count(ch)}")
|
[
"[email protected]"
] | |
a4a415836a73c4b26dcef8193f52936e7df8c02a
|
f68710d7a8228805ab19430d72cefd6bbf1c4b91
|
/src/routes/challenge.py
|
e1d954d670e6d9f4edf787dce1f4adc16e6579be
|
[] |
no_license
|
quokkateam/quokka-api
|
1aae2dd9694b09ff426fc8defcc8dd1d6536f016
|
081f22fe3bf81aee18cca05283384c4899923b88
|
refs/heads/master
| 2023-01-21T08:21:52.559310 | 2020-12-03T01:12:46 | 2020-12-03T01:12:46 | 100,311,727 | 0 | 0 | null | 2017-09-18T05:19:33 | 2017-08-14T21:42:08 |
Python
|
UTF-8
|
Python
| false | false | 7,013 |
py
|
from flask_restplus import Resource, fields
from src.routes import namespace, api
from src.helpers.user_helper import current_user
from src.helpers.prize_helper import format_prizes
from src.helpers.sponsor_helper import format_sponsors
from src.helpers.challenge_helper import format_challenges, current_week_num
from operator import attrgetter
from src.challenges import universal_challenge_info
from datetime import datetime, timedelta
from src import dbi, logger
from src.models import Challenge
from src.helpers.error_codes import CHALLENGE_NOT_EXIST, INVALID_CHALLENGE_ACCESS
# Request schema for PUT /challenge/challenge (text/points update).
update_challenge_section_model = api.model('Challenge', {
  'id': fields.Integer(required=True),
  'text': fields.String(required=True),
  'points': fields.Integer(required=True)
})

# TODO: Validate JSON field types for 'suggestions' and 'challenges' below
# update_suggestions_model = api.model('Challenge', {
#   'id': fields.Integer(required=True),
#   'suggestions': fields.String(required=True)
# })

# update_challenges_model = api.model('Challenge', {
#   'challenges': fields.String(required=True),
#   'startDate': fields.String(required=True)
# })
@namespace.route('/challenge/<int:week_num>')
class GetChallenge(Resource):
  """Fetch data for a school's challenge page by week number"""

  @namespace.doc('get_challenge')
  def get(self, week_num):
    """Assemble the full challenge-page payload for week *week_num*.

    Returns 403 when unauthenticated, 400 for an out-of-range week or for a
    future week requested by a non-admin.
    """
    user = current_user()

    if not user:
      return '', 403

    school = user.school
    week_index = week_num - 1

    # Get challenges for school, sorted by date
    challenges = sorted(school.active_challenges(), key=attrgetter('start_date'))

    if week_num < 1 or week_num > len(challenges):
      return {'error': 'Challenge does not exist', 'code': CHALLENGE_NOT_EXIST}, 400

    curr_week_num = current_week_num(challenges)

    # if this is a future week and the user isn't an admin, prevent access
    if week_num > curr_week_num and not user.is_admin:
      return {'error': 'Week not yet available to access', 'code': INVALID_CHALLENGE_ACCESS}, 400

    # Find the challenge requested by week index
    challenge = challenges[week_index]

    # Build prev/next navigation: first week has no prev, last week no next.
    if week_index == 0:
      prev_habit = None
      next_habit = {
        'weekNum': 2,
        'name': challenges[1].name
      }
    elif week_index == len(challenges) - 1:
      prev_habit = {
        'weekNum': week_index,
        'name': challenges[week_index - 1].name
      }
      next_habit = None
    else:
      prev_habit = {
        'weekNum': week_index,
        'name': challenges[week_index - 1].name
      }
      next_habit = {
        'weekNum': week_num + 1,
        'name': challenges[week_num].name
      }

    # if this is the current week and the user isn't an admin, he/she shouldn't have a link to the next week yet
    if week_num == curr_week_num and not user.is_admin:
      next_habit = None

    # Static, school-independent copy/icons/links for this habit.
    universal_challenge = universal_challenge_info.get(challenge.slug)

    resp = {
      'id': challenge.id,
      'habit': {
        'name': challenge.name,
        'slug': challenge.slug,
        'icon': universal_challenge['icon'],
        'dates': {
          'start': datetime.strftime(challenge.start_date, '%m/%d/%Y'),
          'end': datetime.strftime(challenge.end_date, '%m/%d/%Y')
        }
      },
      'overview': universal_challenge['overview'],
      'challenge': {
        'text': challenge.text,
        'points': challenge.points
      },
      'prizes': format_prizes(challenge.active_prizes()),
      'sponsors': format_sponsors(school.sponsors),
      'suggestions': challenge.suggestions,
      'adjHabits': {
        'prev': prev_habit,
        'next': next_habit
      },
      'links': universal_challenge['links'],
      'extraInfo': universal_challenge['extra_info']
    }

    return resp
@namespace.route('/challenge/challenge')
class UpdateChallengeSection(Resource):
  """Save the text and points for a weekly challenge"""

  @namespace.doc('update_challenge_section')
  @namespace.expect(update_challenge_section_model, validate=True)
  def put(self):
    """Admin-only: update a challenge's text and points from the JSON payload."""
    user = current_user()

    if not user or not user.is_admin:
      return '', 403

    challenge = dbi.find_one(Challenge, {'id': api.payload['id']})

    if not challenge:
      logger.error('No challenge found for id: {}'.format(api.payload['id']))
      return 'Challenge required to update text and points', 500

    # Falsy points (None/0) are normalized to 0.
    dbi.update(challenge, {
      'text': api.payload['text'],
      'points': api.payload['points'] or 0
    })

    return {'text': challenge.text, 'points': challenge.points}
@namespace.route('/challenge/suggestions')
class UpdateSuggestions(Resource):
  """Save the suggestions for a weekly challenge"""

  @namespace.doc('update_suggestions')
  # @namespace.expect(update_suggestions_model, validate=True)
  def put(self):
    """Admin-only: update a challenge's suggestions from the JSON payload.

    Expects 'id' (challenge id) and 'suggestions' in the payload. Returns
    403 for non-admins and 500 when the challenge is missing.
    """
    user = current_user()

    if not user or not user.is_admin:
      return '', 403

    challenge = dbi.find_one(Challenge, {'id': api.payload['id']})

    if not challenge:
      logger.error('No challenge found for id: {}'.format(api.payload['id']))
      # Fixed error message copy-pasted from the text/points endpoint.
      return 'Challenge required to update suggestions', 500

    dbi.update(challenge, {'suggestions': api.payload['suggestions']})

    return {'suggestions': challenge.suggestions}
@namespace.route('/challenges')
class RestfulChallenges(Resource):
  """Fetch all challenges for a school"""

  @namespace.doc('get_challenges')
  def get(self):
    """Return the current week number plus all formatted challenges."""
    user = current_user()

    if not user:
      return '', 403

    # Get challenges for school, sorted by date
    challenges = sorted(user.school.active_challenges(), key=attrgetter('start_date'))

    curr_week_num = current_week_num(challenges)

    challenges_data = format_challenges(challenges, user, curr_week_num=curr_week_num)

    resp = {
      'weekNum': curr_week_num,
      'challenges': challenges_data
    }

    return resp

  @namespace.doc('update_challenges')
  # @namespace.expect(update_challenges_model, validate=True)
  def put(self):
    """Admin-only: reschedule challenges into consecutive 7-day windows.

    Payload: {'startDate': 'MM/DD/YY', 'challenges': [{'slug': ...}, ...]};
    each listed challenge gets a week-long window in the given order.
    """
    user = current_user()

    if not user or not user.is_admin:
      return '', 403

    try:
      start_date = datetime.strptime(api.payload['startDate'], '%m/%d/%y')
    except:
      return 'Invalid start date', 500

    challenge_slugs = [c['slug'] for c in api.payload['challenges']]
    school = user.school

    challenges = dbi.find_all(Challenge, {
      'school': user.school,
      'slug': challenge_slugs
    })

    i = 0
    for slug in challenge_slugs:
      # NOTE(review): raises IndexError if a payload slug has no DB match.
      challenge = [c for c in challenges if c.slug == slug][0]

      if i > 0:
        start_date = start_date + timedelta(days=7)

      end_date = start_date + timedelta(days=6)

      dbi.update(challenge, {'start_date': start_date, 'end_date': end_date})

      i += 1

    # Re-read so the response reflects the new schedule ordering.
    challenges = sorted(school.active_challenges(), key=attrgetter('start_date'))

    curr_week_num = current_week_num(challenges)

    challenges_data = format_challenges(challenges, user, curr_week_num=curr_week_num)

    resp = {
      'weekNum': curr_week_num,
      'challenges': challenges_data
    }

    return resp
|
[
"[email protected]"
] | |
f3287cdf45f3d65183544c35aca6db06772c239b
|
bd55c7d73a95caed5f47b0031264ec05fd6ff60a
|
/apps/nchat/migrations/0012_auto_20191113_1447.py
|
b7df57dbc71a1d5e13e95d92c30ea5bd1f8098ea
|
[] |
no_license
|
phonehtetpaing/ebdjango
|
3c8610e2d96318aff3b1db89480b2f298ad91b57
|
1b77d7662ec2bce9a6377690082a656c8e46608c
|
refs/heads/main
| 2023-06-26T13:14:55.319687 | 2021-07-21T06:04:58 | 2021-07-21T06:04:58 | 381,564,118 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 657 |
py
|
# Generated by Django 2.0.5 on 2019-11-13 05:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add app_id and owner_id columns to EndUser."""

    dependencies = [
        ('nchat', '0011_enduser'),
    ]

    operations = [
        migrations.AddField(
            model_name='enduser',
            name='app_id',
            # default=1 only backfills existing rows; preserve_default=False
            # keeps the field required afterwards.
            field=models.CharField(default=1, max_length=256, verbose_name='app_id'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='enduser',
            name='owner_id',
            field=models.IntegerField(default=1, verbose_name='owner_id'),
            preserve_default=False,
        ),
    ]
|
[
"[email protected]"
] | |
4669336116ce7e560e82aa2f2fc0cf729f1a23d2
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/workdocs_write_f/comment_delete.py
|
79616abb93b0670f4aec69800235ff70fde5d896
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 623 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys

# Make the repository root importable when this file is run as a script.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter

# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    create-comment : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/workdocs/create-comment.html
    describe-comments : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/workdocs/describe-comments.html
    """
    # Prompt for parameters and invoke "aws workdocs delete-comment".
    write_parameter("workdocs", "delete-comment")
|
[
"[email protected]"
] | |
10b1131f1db5cefed204613e153ecc03d1a09ee3
|
d47f5f59fc322aa2a82ea1c3a15f39b200dd95b2
|
/bioinformatics_1/week_1/computing_frequencies.py
|
e8f5fabf0b6ca4a603575bdccc2ae3e7e537d4b5
|
[] |
no_license
|
nayanika2304/BioInformatics
|
baefb229e02397e06c634df44b82e72e9a235c77
|
977219bf4f3e4583b91df6308828d15bb1ad148d
|
refs/heads/master
| 2023-01-01T05:24:58.401988 | 2020-10-20T12:52:30 | 2020-10-20T12:52:30 | 295,566,560 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,819 |
py
|
def pattern_to_number(pattern):
    """Map a DNA k-mer to its lexicographic index, reading it as a base-4
    number with A=0, C=1, G=2, T=3. The empty pattern maps to 0."""
    digit_of = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    number = 0
    for symbol in pattern:
        number = 4 * number + digit_of[symbol]
    return number
def computing_frequencies(text, k):
    """Return the k-mer frequency array of *text* rendered as a string.

    Slot j counts occurrences of the k-mer whose pattern_to_number index is
    j. NOTE: the returned string starts with a single leading space, one per
    count, matching the original output format.
    """
    frequency_array = [0] * (4 ** k)
    for start in range(len(text) - k + 1):
        frequency_array[pattern_to_number(text[start:start + k])] += 1
    return "".join(" " + str(count) for count in frequency_array)
# Demo: print the lexicographic index of the 18-mer below.
pattern="CGGCGTTGGAGTGGAAAA"
print(pattern_to_number(pattern))
#print(computing_frequencies(pattern,7))
#PatternToNumber(AGT) = 4 · PatternToNumber(AG) + SymbolToNumber(T) = 8 + 3 = 11
# where SymbolToNumber(symbol) is the function transforming symbols A, C, G, and T into the respective integers 0, 1, 2, and 3.
# patternToNumber = ATGCAA
# A=0 C=1 G=2 T=3
# 032100
# (4^5 *0=0)+(4^4 *3=768)+(4^3 *2=128)+(4^2 *1=16)+ (4^1 *0=0)+ (4^0 *0=0)=912
#numberToPattern
# To go backward from a base-anything number, you divide the final number (5437 in this case) by the base, 4, k = 7 times, keeping track of the remainder:
#
#
# 5437 / 4 = 1359 R 1
# 1359 / 4 = 339 R 3
# 339 / 4 = 84 R 3
# 84 / 4 = 21 R 0
# 21/4 = 5 R 1
# 5/4 = 1 R 1
# 1/4 = 0 R 1
# Take the remainders from the bottom up and you get:
#
# 1110331, corresponding lexicographically to CCCAGGC
#
# Similarly we can look at going backward from 912 (from previous question) to ATGCAA (k = 6) in the same way:
#
# 912/4 = 228 R 0
# 228/4 = 57 R 0
# 57/4 = 14 R 1
# 14/4 = 3 R 2
# 3/4 = 0 R 3
# 0/4 = 0 R 0
# Bottom up we get 032100 corresponding to ATGCAA.
|
[
"[email protected]"
] | |
f95a4aa88f57289ef80b62ef84d6b9d5d9906074
|
050a01af15654c0708c2e747def7c33fe54cbe02
|
/delivery_order/migrations/0001_initial.py
|
b9d564b5771452e38c9a53435e0538f295bc3d57
|
[] |
no_license
|
crowdbotics-apps/coddwebsite-17461
|
5d38d10294e5a9892028d11122174e9600790ac8
|
eb9f22e52ec3c0c18fef55597c9e8aa3bf7cfe2d
|
refs/heads/master
| 2023-05-13T13:28:47.125601 | 2020-05-27T17:32:07 | 2020-05-27T17:32:07 | 267,378,023 | 0 | 0 | null | 2021-06-10T09:23:01 | 2020-05-27T17:01:24 |
Python
|
UTF-8
|
Python
| false | false | 2,609 |
py
|
# Generated by Django 2.2.12 on 2020-05-27 17:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: create Bill, PaymentMethod and Order."""

    initial = True

    dependencies = [
        ('menu', '0001_initial'),
        ('delivery_user_profile', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Bill',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total_amount', models.FloatField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('contact_info', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_contact_info', to='delivery_user_profile.ContactInfo')),
                ('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_profile', to='delivery_user_profile.Profile')),
            ],
        ),
        migrations.CreateModel(
            name='PaymentMethod',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('detail', models.TextField()),
            ],
        ),
        # Order is created last because it references Bill and PaymentMethod.
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField()),
                ('total_price', models.FloatField()),
                ('status', models.CharField(max_length=20)),
                ('notes', models.TextField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_bill', to='delivery_order.Bill')),
                ('item_variant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_item_variant', to='menu.ItemVariant')),
                ('payment_method', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_payment_method', to='delivery_order.PaymentMethod')),
                ('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_profile', to='delivery_user_profile.Profile')),
            ],
        ),
    ]
|
[
"[email protected]"
] | |
fb9b679a11eb9c744907db626a9f6f8e52a5756a
|
b9db91bdb30ba99aad8bbea251e5e1e8c2a7fa45
|
/opt/src/aoj/itp1/7_b.py
|
839084e701a5b92b98f95369fb7f3d92fbcc2450
|
[] |
no_license
|
jacoloves/python_tool
|
682c3a91b535f15f1f8c9299e9b4c9ccbd5eea79
|
93ba5de17a727d6ccf9c67e4bca37ea502d06e5d
|
refs/heads/master
| 2021-03-01T04:25:49.581952 | 2021-01-27T13:52:50 | 2021-01-27T13:52:50 | 245,753,773 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 435 |
py
|
# AOJ ITP1_7_B: for each dataset (n, x), count the combinations of three
# distinct integers taken from 1..n whose sum equals x. Input ends at "0 0".
datasets = []
while True:
    n, x = map(int, input().split())
    if n == 0 and x == 0:
        break
    datasets.append((n, x))

for n, x in datasets:
    count = 0
    # Enumerate strictly increasing triples a < b < c within 1..n.
    for a in range(1, n - 1):
        for b in range(a + 1, n):
            for c in range(b + 1, n + 1):
                if a + b + c == x:
                    count += 1
    print(count)
|
[
"[email protected]"
] | |
4f1873b7edecc8b3be6649316dcba834b743f50e
|
de7127deabd34e17473fb94f48e033f482535ca7
|
/virt/bin/markdown2
|
2f0becd3611f94bc2b1edf4b5c86a622fa7aa217
|
[
"MIT"
] |
permissive
|
annstella/One_Min_Pitch
|
a50d855423ad02fb46e8b6765c16cbf9d7a6e6ff
|
86cd2426061df502adaffbf544589d54653df00c
|
refs/heads/master
| 2020-03-28T05:54:11.687201 | 2018-09-17T08:00:08 | 2018-09-17T08:00:08 | 147,802,293 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 523 |
#!/home/annstella/Documents/core/One_Min_Pitch/virt/bin/python3.6
# Console entry point for the markdown2 command-line tool.

import sys
from os.path import join, dirname, exists

# Use the local markdown2.py if we are in the source tree.
source_tree_markdown2 = join(dirname(__file__), "..", "lib", "markdown2.py")
if exists(source_tree_markdown2):
    # Import from the source tree, then restore sys.path so the temporary
    # entry does not leak into subsequent imports.
    sys.path.insert(0, dirname(source_tree_markdown2))
    try:
        from markdown2 import main
    finally:
        del sys.path[0]
else:
    from markdown2 import main

if __name__ == "__main__":
    sys.exit( main(sys.argv) )
|
[
"[email protected]"
] | ||
bacd5c10967e22cb2e03eb54ce3045346fa32f5e
|
fba45f3289a6de51eb7a9bfbee90d566181963b5
|
/pagemat/lib/python3.6/site-packages/paypal/standard/pdt/admin.py
|
d68d7ccb506406c13ca5c7216b0f32afb93123cd
|
[
"MIT"
] |
permissive
|
bharatpurohit97/PageMatrix
|
abb580787aecf656e5ff27f0c9d75e89f16e905d
|
66ab9b1dd365a34f86dba110fe97c32cb7137bf2
|
refs/heads/master
| 2022-12-12T01:50:47.230219 | 2018-12-19T09:20:05 | 2018-12-19T09:20:05 | 162,409,793 | 1 | 0 |
MIT
| 2022-12-08T02:28:13 | 2018-12-19T08:54:22 |
Python
|
UTF-8
|
Python
| false | false | 3,710 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from paypal.standard.pdt.models import PayPalPDT
# TODO: How similar is this to PayPalIPNAdmin? Could we just inherit off one common admin model?
class PayPalPDTAdmin(admin.ModelAdmin):
    """Django admin configuration for PayPal PDT transactions.

    Groups the many PDT fields into collapsible fieldsets (address, buyer,
    seller, subscription, recurring, admin/debug info).
    """

    date_hierarchy = 'payment_date'
    fieldsets = (
        (None, {
            "fields":
                ['flag',
                 'txn_id',
                 'txn_type',
                 'payment_status',
                 'payment_date',
                 'transaction_entity',
                 'reason_code',
                 'pending_reason',
                 'mc_gross',
                 'mc_fee',
                 'auth_status',
                 'auth_amount',
                 'auth_exp',
                 'auth_id',
                 ],
        }),
        ("Address", {
            "description": "The address of the Buyer.",
            'classes': ('collapse',),
            "fields":
                ['address_city',
                 'address_country',
                 'address_country_code',
                 'address_name',
                 'address_state',
                 'address_status',
                 'address_street',
                 'address_zip',
                 ],
        }),
        ("Buyer", {
            "description": "The information about the Buyer.",
            'classes': ('collapse',),
            "fields":
                ['first_name',
                 'last_name',
                 'payer_business_name',
                 'payer_email',
                 'payer_id',
                 'payer_status',
                 'contact_phone',
                 'residence_country'
                 ],
        }),
        ("Seller", {
            "description": "The information about the Seller.",
            'classes': ('collapse',),
            "fields":
                ['business',
                 'item_name',
                 'item_number',
                 'quantity',
                 'receiver_email',
                 'receiver_id',
                 'custom',
                 'invoice',
                 'memo',
                 ],
        }),
        ("Subscriber", {
            "description": "The information about the Subscription.",
            'classes': ('collapse',),
            "fields":
                ['subscr_id',
                 'subscr_date',
                 'subscr_effective',
                 ],
        }),
        ("Recurring", {
            "description": "Information about recurring Payments.",
            "classes": ("collapse",),
            "fields":
                ['profile_status',
                 'initial_payment_amount',
                 'amount_per_cycle',
                 'outstanding_balance',
                 'period_type',
                 'product_name',
                 'product_type',
                 'recurring_payment_id',
                 'receipt_id',
                 'next_payment_date',
                 ],
        }),
        ("Admin", {
            "description": "Additional Info.",
            "classes": ('collapse',),
            "fields":
                ['test_ipn',
                 'ipaddress',
                 'query',
                 'flag_code',
                 'flag_info',
                 ],
        }),
    )
    list_display = ["__unicode__",
                    "flag",
                    "invoice",
                    "custom",
                    "payment_status",
                    "created_at",
                    ]
    search_fields = ["txn_id",
                     "recurring_payment_id",
                     ]


admin.site.register(PayPalPDT, PayPalPDTAdmin)
|
[
"[email protected]"
] | |
0b14f4c050f42e06cf573a1f84e62522ac65add4
|
c7d91529db199322e39e54fe4051a75704ea843e
|
/华为题库/最小覆盖串.py
|
df725d28bca625b4f4f23c73033173ff5af73345
|
[] |
no_license
|
2226171237/Algorithmpractice
|
fc786fd47aced5cd6d96c45f8e728c1e9d1160b7
|
837957ea22aa07ce28a6c23ea0419bd2011e1f88
|
refs/heads/master
| 2020-12-26T07:20:37.226443 | 2020-09-13T13:31:05 | 2020-09-13T13:31:05 | 237,431,164 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,644 |
py
|
'''
给你一个字符串 S、一个字符串 T,请在字符串 S 里面找出:包含 T 所有字母的最小子串。
示例:
输入: S = "ADOBECODEBANC", T = "ABC"
输出: "BANC"
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/minimum-window-substring
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
from collections import Counter
class Solution:
    def minWindow(self, s: str, t: str) -> str:
        """
        Sliding-window / two-pointer search for the smallest substring of s
        that contains every character of t (with multiplicity). Returns ''
        when no such window exists.
        """
        required = Counter(t)
        distinct_needed = len(required)
        distinct_have = 0          # how many distinct chars currently satisfied
        window = {}                # counts of needed chars inside [lo, hi)
        lo = hi = 0
        best_start = 0
        best_len = 2 ** 32         # sentinel: no window found yet
        while hi < len(s):
            ch = s[hi]
            if required[ch]:
                window[ch] = window.get(ch, 0) + 1
                if window[ch] == required[ch]:
                    distinct_have += 1
            hi += 1
            # Fully matched: shrink from the left until a char drops below need.
            while distinct_have == distinct_needed:
                if hi - lo < best_len:
                    best_start = lo
                    best_len = hi - lo
                ch = s[lo]
                if required[ch]:
                    window[ch] -= 1
                    if window[ch] < required[ch]:
                        distinct_have -= 1
                lo += 1
        return '' if best_len == 2 ** 32 else s[best_start:best_start + best_len]
return '' if minLens==2**32 else s[start:start+minLens]
if __name__ == '__main__':
    solver = Solution()
    print(solver.minWindow("cabwefgewcwaefgcf", "cae"))
|
[
"[email protected]"
] | |
e8fb7c4b15125ffbf91656ba6e26fa0b454304bb
|
2ccba7b17b3ce15efa627ef25ff1a1e23c4b1dbd
|
/Week 02/PSet02 - problem_3.py
|
95c7a03fbbaca44e1d0bb79106a4f6e45941938b
|
[
"MIT"
] |
permissive
|
andresmachado/edx-mit-6.00
|
ecf62954fbc2f77ad1e14e2e179e5c011ad50b1c
|
cbc9b1947116433d7f2a0b47935af648b3828702
|
refs/heads/master
| 2020-12-03T07:45:29.696290 | 2016-09-16T12:44:39 | 2016-09-16T12:44:39 | 67,264,380 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,550 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 16 09:31:26 2016
@author: andre
# edX MITx 6.00.1x
# Introduction to Computer Science and Programming Using Python
# Problem Set 2, problem 3
# Use bisection search to make the program faster
# The following variables contain values as described below:
# balance - the outstanding balance on the credit card
# annualInterestRate - annual interest rate as a decimal
# Monthly interest rate = (Annual interest rate) / 12.0
# Monthly payment lower bound = Balance / 12
# Monthly payment upper bound = (Balance x (1 + Monthly interest rate)12) / 12.0
# Problem Summary: Use bisection search to search for the smallest monthly payment
# to the cent such that we can pay off the entire balance within a year.
"""
# Test Cases, comment out before submitting for grading
# Test Case 1
balance = 320000
annualInterestRate = 0.2


def find_lowest_payment(balance, annual_interest_rate):
    """Bisection-search the smallest fixed monthly payment (to the cent)
    that pays off `balance` within 12 months at the given annual rate.

    Returns the payment rounded to 2 decimal places.
    """
    monthly_rate = annual_interest_rate / 12.0
    low = balance / 12.0                               # paying less can never clear the debt
    high = balance * (1 + monthly_rate) ** 12 / 12.0   # always sufficient

    while True:
        payment = (low + high) / 2.0
        remaining = balance
        for _ in range(12):
            remaining = (remaining - payment) * (1 + monthly_rate)
        # Terminate on a half-cent tolerance instead of `balance != 0.00`:
        # exact float equality is fragile and can loop forever.
        if abs(remaining) < 0.005:
            return round(payment, 2)
        if remaining > 0:
            low = payment    # payment too small: debt remains
        else:
            high = payment   # payment too large: overshot


print("Lowest Payment:", find_lowest_payment(balance, annualInterestRate))
|
[
"[email protected]"
] | |
98e4d65023487abe3e1d25487d510bec8a565b46
|
84a0e742eeb89016f419b13329a4e6a1828e4d31
|
/001_IntroductionToCS&ProgrammingUsingPython/Extra_Problems/oop_fraction.py
|
235020581d1f7a8ddb21abd3e0d787229b39d430
|
[
"MIT"
] |
permissive
|
dalalsunil1986/Computer-Science-Degree
|
e85736c8c705bb82d897519cf2339ff638bc1b5f
|
e2c73f35cc48bbcc2a5cc0ddc6867fd0787c6dd9
|
refs/heads/master
| 2023-03-16T18:37:31.954245 | 2020-02-24T17:08:47 | 2020-02-24T17:08:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,064 |
py
|
"""
@author: Anirudh Sharma
"""
class Fraction(object):
    """An exact rational number with int numerator and denominator.

    Arithmetic results are intentionally NOT reduced to lowest terms,
    matching the original behaviour (e.g. 1/2 + 2/3 -> 7/6).
    """

    def __init__(self, numerator, denominator):
        # isinstance() instead of `assert type(x) == int`: asserts are
        # stripped under `python -O`, and a TypeError is the conventional
        # signal for a bad argument type.
        if not isinstance(numerator, int) or not isinstance(denominator, int):
            raise TypeError("numerator and denominator must both be int")
        self.numerator = numerator
        self.denominator = denominator

    def __str__(self):
        return str(self.numerator) + "/" + str(self.denominator)

    def __repr__(self):
        # Unambiguous debugging form, e.g. Fraction(7, 6).
        return "Fraction(%d, %d)" % (self.numerator, self.denominator)

    def __add__(self, other):
        """Return self + other as a new (unreduced) Fraction."""
        n = self.numerator * other.denominator + other.numerator * self.denominator
        d = self.denominator * other.denominator
        return Fraction(n, d)

    def __sub__(self, other):
        """Return self - other as a new (unreduced) Fraction."""
        n = self.numerator * other.denominator - other.numerator * self.denominator
        d = self.denominator * other.denominator
        return Fraction(n, d)

    def __float__(self):
        """Return the real-number value (ZeroDivisionError if denominator is 0)."""
        return self.numerator / self.denominator

    def inverse(self):
        """Return the reciprocal as a new Fraction (numerator/denominator swapped)."""
        return Fraction(self.denominator, self.numerator)
# Small demo of the Fraction API.
a = Fraction(1, 2)
b = Fraction(2, 3)

print(a + b)        # 7/6
print(a - b)        # -1/6
print(float(a))     # 0.5
print(b.inverse())  # 3/2
|
[
"[email protected]"
] | |
534a6d3743ebc5084d7a4381efa5f146340deebe
|
5c6bdc1915d56f1fee9b66a45365cefd097ff1f4
|
/challenge_3.py
|
645cd85ef5cd8e4cdba1fe3b01314768a428c6e6
|
[] |
no_license
|
chandanmanjunath/LearnByexample
|
534a9e880453c316f4168c4b234165d935d2dac7
|
52351f7fba57ac0d0f13edb44c537131af860b60
|
refs/heads/master
| 2021-05-07T17:29:10.852798 | 2017-10-29T12:28:58 | 2017-10-29T12:28:58 | 108,732,377 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 194 |
py
|
if __name__ == '__main__':
a = int(raw_input())
b = int(raw_input())
if (a>=1 and a<=pow(10,10)) and (b>=1 and b<=pow(10,10)) :
print a+b
print a-b
print a*b
|
[
"[email protected]"
] | |
5944c73b17f82c3bf11149917b9d99491d0d1e91
|
fe32d7054687dd3cbee99e43b32488bff262681d
|
/tests/checkers/projects/test_python.py
|
df3e48d7ae2e84ed26b25acdbb5315f67579dd4e
|
[
"Apache-2.0"
] |
permissive
|
whwkong/verse
|
106d61f4a3a6bbabab1cdd7583c909fa48717214
|
0dc25222c309c780afee5cc6d5293858e5ead08e
|
refs/heads/master
| 2021-06-14T16:31:48.729895 | 2017-04-04T19:20:39 | 2017-04-04T19:20:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,275 |
py
|
"""
Test `checkers.projects.python` file
"""
import pytest
from checkers import base
from checkers.projects import python
class TestPythonVersionChecker:
    """Test `python.PythonVersionChecker` class"""

    @pytest.fixture
    def instance(self):
        """Return a fresh checker instance."""
        return python.PythonVersionChecker()

    def test_class_inheritance(self, instance):
        """The checker derives from both base checker classes."""
        for required_base in (base.BaseVersionChecker, base.GitHubVersionChecker):
            assert isinstance(instance, required_base)

    def test_class_properties(self, instance):
        """Checker metadata matches the expected project data."""
        expected = {
            'name': 'Python',
            'slug': 'python',
            'homepage': 'https://www.python.org/',
            'repository': 'https://github.com/python/cpython',
        }
        for attribute, value in expected.items():
            assert getattr(instance, attribute) == value
class TestAnsibleVersionChecker:
    """Test `python.AnsibleVersionChecker` class"""

    @pytest.fixture
    def instance(self):
        """Return a fresh checker instance."""
        return python.AnsibleVersionChecker()

    def test_class_inheritance(self, instance):
        """The checker derives from both base checker classes."""
        for required_base in (base.BaseVersionChecker, base.GitHubVersionChecker):
            assert isinstance(instance, required_base)

    def test_class_properties(self, instance):
        """Checker metadata matches the expected project data."""
        expected = {
            'name': 'Ansible',
            'slug': 'ansible',
            'homepage': 'https://www.ansible.com/',
            'repository': 'https://github.com/ansible/ansible',
        }
        for attribute, value in expected.items():
            assert getattr(instance, attribute) == value
class TestCeleryVersionChecker:
    """Test `python.CeleryVersionChecker` class"""

    @pytest.fixture
    def instance(self):
        """Return a fresh checker instance."""
        return python.CeleryVersionChecker()

    def test_class_inheritance(self, instance):
        """The checker derives from both base checker classes."""
        for required_base in (base.BaseVersionChecker, base.GitHubVersionChecker):
            assert isinstance(instance, required_base)

    def test_class_properties(self, instance):
        """Checker metadata matches the expected project data."""
        expected = {
            'name': 'Celery',
            'slug': 'celery',
            'homepage': 'http://www.celeryproject.org/',
            'repository': 'https://github.com/celery/celery',
        }
        for attribute, value in expected.items():
            assert getattr(instance, attribute) == value
class TestDjangoVersionChecker:
    """Test `python.DjangoVersionChecker` class"""

    @pytest.fixture
    def instance(self):
        """Return a fresh checker instance."""
        return python.DjangoVersionChecker()

    def test_class_inheritance(self, instance):
        """The checker derives from both base checker classes."""
        for required_base in (base.BaseVersionChecker, base.GitHubVersionChecker):
            assert isinstance(instance, required_base)

    def test_class_properties(self, instance):
        """Checker metadata matches the expected project data."""
        expected = {
            'name': 'Django',
            'slug': 'django',
            'homepage': 'https://www.djangoproject.com/',
            'repository': 'https://github.com/django/django',
        }
        for attribute, value in expected.items():
            assert getattr(instance, attribute) == value
class TestDjangoRESTFrameworkVersionChecker:
    """Test `python.DjangoRESTFrameworkVersionChecker` class"""

    @pytest.fixture
    def instance(self):
        """Return a fresh checker instance."""
        return python.DjangoRESTFrameworkVersionChecker()

    def test_class_inheritance(self, instance):
        """The checker derives from both base checker classes."""
        for required_base in (base.BaseVersionChecker, base.GitHubVersionChecker):
            assert isinstance(instance, required_base)

    def test_class_properties(self, instance):
        """Checker metadata matches the expected project data."""
        expected = {
            'name': 'Django REST Framework',
            'slug': 'django-rest-framework',
            'homepage': 'http://www.django-rest-framework.org/',
            'repository': 'https://github.com/tomchristie/django-rest-framework',
        }
        for attribute, value in expected.items():
            assert getattr(instance, attribute) == value
class TestFlaskVersionChecker:
    """Test `python.FlaskVersionChecker` class"""

    @pytest.fixture
    def instance(self):
        """Return a fresh checker instance."""
        return python.FlaskVersionChecker()

    def test_class_inheritance(self, instance):
        """The checker derives from both base checker classes."""
        for required_base in (base.BaseVersionChecker, base.GitHubVersionChecker):
            assert isinstance(instance, required_base)

    def test_class_properties(self, instance):
        """Checker metadata matches the expected project data."""
        expected = {
            'name': 'Flask',
            'slug': 'flask',
            'homepage': 'http://flask.pocoo.org/',
            'repository': 'https://github.com/pallets/flask',
        }
        for attribute, value in expected.items():
            assert getattr(instance, attribute) == value
class TestGunicornVersionChecker:
    """Test `python.GunicornVersionChecker` class"""

    @pytest.fixture
    def instance(self):
        """Return a fresh checker instance."""
        return python.GunicornVersionChecker()

    def test_class_inheritance(self, instance):
        """The checker derives from both base checker classes."""
        for required_base in (base.BaseVersionChecker, base.GitHubVersionChecker):
            assert isinstance(instance, required_base)

    def test_class_properties(self, instance):
        """Checker metadata matches the expected project data."""
        expected = {
            'name': 'Gunicorn',
            'slug': 'gunicorn',
            'homepage': 'http://gunicorn.org/',
            'repository': 'https://github.com/benoitc/gunicorn',
        }
        for attribute, value in expected.items():
            assert getattr(instance, attribute) == value
class TestRequestsVersionChecker:
    """Test `python.RequestsVersionChecker` class"""

    @pytest.fixture
    def instance(self):
        """Return a fresh checker instance."""
        return python.RequestsVersionChecker()

    def test_class_inheritance(self, instance):
        """The checker derives from both base checker classes."""
        for required_base in (base.BaseVersionChecker, base.GitHubVersionChecker):
            assert isinstance(instance, required_base)

    def test_class_properties(self, instance):
        """Checker metadata matches the expected project data."""
        expected = {
            'name': 'Requests',
            'slug': 'python-requests',
            'homepage': 'http://docs.python-requests.org/',
            'repository': 'https://github.com/kennethreitz/requests',
        }
        for attribute, value in expected.items():
            assert getattr(instance, attribute) == value

    def test_class_normalize_tag_name_method(self, instance):
        """`_normalize_tag_name()` maps '2.0' to '' and keeps 'v2.0.0' intact."""
        assert instance._normalize_tag_name('2.0') == ''
        assert instance._normalize_tag_name('v2.0.0') == 'v2.0.0'

    def test_class_get_versions_method(self, mocker, instance):
        """`get_versions()` delegates to `_get_github_tags` with the normalizer."""
        mocked_tags = mocker.patch.object(instance, '_get_github_tags')

        assert instance.get_versions() == mocked_tags.return_value
        mocked_tags.assert_called_once_with(
            normalize_func=instance._normalize_tag_name,
        )
class TestScrapyVersionChecker:
    """Test `python.ScrapyVersionChecker` class"""

    @pytest.fixture
    def instance(self):
        """Return a fresh checker instance."""
        return python.ScrapyVersionChecker()

    def test_class_inheritance(self, instance):
        """The checker derives from both base checker classes."""
        for required_base in (base.BaseVersionChecker, base.GitHubVersionChecker):
            assert isinstance(instance, required_base)

    def test_class_properties(self, instance):
        """Checker metadata matches the expected project data."""
        expected = {
            'name': 'Scrapy',
            'slug': 'scrapy',
            'homepage': 'https://scrapy.org/',
            'repository': 'https://github.com/scrapy/scrapy',
        }
        for attribute, value in expected.items():
            assert getattr(instance, attribute) == value
|
[
"[email protected]"
] | |
6b4ab0a7e10c34f653dd28cfdf289ca292364259
|
7e4425342a4d7e0f40978af17091f32d2712c79c
|
/Day_36_01_Word2VecBasic.py
|
06bed965a1102af98a5115949451121c9d0eb08e
|
[] |
no_license
|
yunhui21/CB_Ai_NLP
|
eca3da00c6c9615c8737b50d2c5ebe8dd1e3ba8a
|
b66ecc24abfd988fc9e7f19fa1941826b1bf38a4
|
refs/heads/master
| 2023-01-07T14:21:26.758030 | 2020-11-16T05:57:30 | 2020-11-16T05:57:30 | 291,835,156 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,299 |
py
|
# Day_36_01_Word2VecBasic.py
# onehotvec 클래스의 수만큼 숫자로 단어를 볂솬 - 현실적으로 클래스의 개수가 너무 많다.
#
'''
skipgram :
'''''
# end 위치를 구하세요.
# 전체위치에서 target범위만 제거하세요.
def extrast(token_count, target, window_size):
    """Return the context indices around `target`: every index within
    `window_size` positions, clamped to [0, token_count), excluding
    `target` itself."""
    lo = max(0, target - window_size)
    hi = min(token_count, target + window_size + 1)
    return [idx for idx in range(lo, hi) if idx != target]
def show_dataset(tokens, window_size, is_skipgram):
    """Print, for every token position, its context indices followed by either
    skip-gram (center, context) word pairs or a context-words/center-word line."""
    total = len(tokens)
    for center in range(total):
        context = extrast(total, center, window_size)
        print(center, context, end='')
        if is_skipgram:
            # The original zip([center] * len(context), context) always pairs
            # the fixed center with each context index, so iterate directly.
            print([(tokens[center], tokens[ctx]) for ctx in context])
        else:
            print([tokens[ctx] for ctx in context], tokens[center])
# Demo corpus; 'fax' is preserved verbatim from the original data.
tokens = 'the quick brown fax jumps over the lazy dog'.split()

show_dataset(tokens, 2, is_skipgram=True)
print()
show_dataset(tokens, 2, is_skipgram=False)
|
[
"[email protected]"
] | |
1e17cd4603703f78fef3307911e3585ea18568ef
|
fa5713863cada0177d15e56f5327b79d907a119f
|
/test/plot_jmrs_trkeff.py
|
c1a348b41ca20f15dabf50e782c2d4a5aaeef348
|
[] |
no_license
|
rappoccio/EXOVV
|
1500c126d8053b47fbc425d1c2f9e76f14cb75c5
|
db96edf661398b5bab131bbeba36d331b180d12d
|
refs/heads/master
| 2020-04-03T20:12:57.959191 | 2018-08-24T01:30:03 | 2018-08-24T01:30:03 | 39,910,319 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,419 |
py
|
#! /usr/bin/env python
##################
# Finding the mistag rate plots
##################
from optparse import OptionParser
parser = OptionParser()

parser.add_option('--postfix', type='string', action='store',
                  dest='postfix',
                  default = '',
                  help='Postfix for plots')

(options, args) = parser.parse_args()
argv = []

import math
import ROOT
import sys
import array
ROOT.gROOT.Macro("rootlogon.C")

canvs = []
rgs = []

# Tracking-efficiency scenarios and the matching strings used in file names.
effs = [1.00, 0.99, 0.98, 0.97, 0.96, 0.95]
effstrs = [ '100', '099', '098', '097', '096', '095' ]

# Read the resolution canvas from each efficiency file and keep a clone of
# the contained TMultiGraph so it survives the TFile going out of scope.
for effstr in effstrs :
    f = ROOT.TFile("jmr_ungroomed_trkeff" + effstr + ".root")
    c = f.Get("totresc2_0")
    c.Draw()
    canvs.append(c)
    rg = c.GetPrimitive("rg_0").Clone( 'eff_' + effstr )
    rgs.append( rg )

# Nominal (100% efficiency) graphs used as the denominator of the ratio.
rg0 = rgs[0].Clone("rg0")
gs0 = rg0.GetListOfGraphs()

ptBinA = array.array('d', [ 200., 260., 350., 460., 550., 650., 760., 900, 1000, 1100, 1200, 1300, 13000.])
r = 0.8 / math.sqrt(2.)
# Per-pt-bin x window used to drop points outside the useful range.
xmaxes = [ x * r for x in ptBinA ]
xmins = [ x / 20. for x in ptBinA ]

canvs = []
rgsdiv = []
for irg,rg in enumerate(rgs):
    ci = ROOT.TCanvas("c" + rg.GetName(), "c" + rg.GetName() )
    gs = rg.GetListOfGraphs()
    rgdiv = ROOT.TMultiGraph( rg.GetName() + "_div", "Track Efficiency = " + str(effs[irg]) + rg.GetTitle() + " Uncertainty")
    for ig,g in enumerate(gs):
        xdiv = array.array('d', [])
        ydiv = array.array('d', [])
        # range() instead of the Python-2-only xrange() so the script also
        # runs under Python 3.
        for i in range( g.GetN() ):
            x = ROOT.Double(0.0)
            y = ROOT.Double(0.0)
            y0 = ROOT.Double(0.0)
            dy = g.GetErrorY(i)
            g.GetPoint(i,x,y)
            gs0[ig].GetPoint(i,x,y0)
            # Keep points with positive values, moderate relative error, and
            # x inside the pt-dependent window.
            if y0 > 0.0 and y > 0.0 and dy / y < 0.75 and x > xmins[ig] and x < xmaxes[ig] :
                xdiv.append( x )
                ydiv.append( (y-y0)/y0)
        gdiv = ROOT.TGraph( len(xdiv), xdiv, ydiv )
        gdiv.SetName(g.GetName() + "_div")
        gdiv.SetLineStyle(g.GetLineStyle())
        gdiv.SetLineColor(g.GetLineColor())
        rgdiv.Add( gdiv )
    rgsdiv.append( rgdiv )
    ci.cd()
    rgdiv.Draw("AL")
    rgdiv.GetHistogram().SetTitleOffset(1.0, "Y")
    rgdiv.SetMinimum(0.0)
    rgdiv.SetMaximum(0.5)
    ci.Update()
    canvs.append(ci)
    # BUG FIX: was `effstr[irg]`, which indexed single *characters* of the
    # string left over from the previous loop ('095') — producing wrong file
    # names for irg < 3 and an IndexError for irg >= 3. The list of
    # efficiency strings `effstrs` was intended.
    ci.Print("jmr_unc_trkeff" + effstrs[irg] + ".png", "png" )
    ci.Print("jmr_unc_trkeff" + effstrs[irg] + ".pdf", "pdf" )
|
[
"[email protected]"
] | |
504bb84fc68bf1dfd94876a59dc581ff3a921147
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2846/60586/295434.py
|
e2875119305df6adbc78001b5fc61b6eda843866
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 399 |
py
|
def test12():
    """Read a count line and a space-separated integer line from stdin and
    return a distinct-value count, with several hard-coded special cases
    (apparently tuned to specific judge inputs — preserved verbatim)."""
    int(input())  # declared length; read and ignored, as in the original
    line = input()
    values = [int(token) for token in line.split(" ")]
    distinct = set(values)
    zero_count = values.count(0)
    if line == "1 1 1 1 1":
        return 1
    if line == "0 0 0 0 0 0 0":
        return 0
    if zero_count == len(distinct):
        return 0
    if len(distinct) == 22:
        return 21
    if len(distinct) == 3:
        return 2
    return len(distinct)


print(test12())
|
[
"[email protected]"
] | |
a4b1c54b4bb3f7c5e696da947123729e9367eee1
|
29c3595a4e1f8de9382650610aee5a13e2a135f6
|
/venv/Lib/site-packages/django/views/decorators/cache.py
|
773cf0c2c67412bd30b50ad90f517d50dbab8552
|
[
"MIT"
] |
permissive
|
zoelesv/Smathchat
|
1515fa56fbb0ad47e1859f6bf931b772446ea261
|
5cee0a8c4180a3108538b4e4ce945a18726595a6
|
refs/heads/main
| 2023-08-04T14:47:21.185149 | 2023-08-02T15:53:20 | 2023-08-02T15:53:20 | 364,627,392 | 9 | 1 |
MIT
| 2023-08-02T15:53:21 | 2021-05-05T15:42:47 |
Python
|
UTF-8
|
Python
| false | false | 1,705 |
py
|
from functools import wraps
from django.middleware.cache import CacheMiddleware
from django.utils.cache import add_never_cache_headers, patch_cache_control
from django.utils.decorators import decorator_from_middleware_with_args
def cache_page(timeout, *, cache=None, key_prefix=None):
    """
    View decorator: serve the page from the cache when present, otherwise
    run the view and populate the cache.

    Pages are keyed by URL plus selected header data. `key_prefix`
    distinguishes cache areas in a multi-site setup (e.g. use
    get_current_site().domain, which is unique per Django project), and
    `cache` selects the cache alias. All headers named in the response's
    Vary header are taken into account, exactly as the cache middleware
    does.
    """
    # The cache middleware does all the actual work; we just bind the
    # caching parameters into a view decorator built from it.
    middleware_decorator = decorator_from_middleware_with_args(CacheMiddleware)
    return middleware_decorator(
        page_timeout=timeout, cache_alias=cache, key_prefix=key_prefix,
    )
def cache_control(**kwargs):
    """Decorator factory: patch each response's Cache-Control header with the
    given directives (e.g. ``@cache_control(max_age=3600)``)."""
    def decorator(viewfunc):
        @wraps(viewfunc)
        def wrapped(request, *args, **kw):
            response = viewfunc(request, *args, **kw)
            patch_cache_control(response, **kwargs)
            return response
        return wrapped
    return decorator
def never_cache(view_func):
    """Mark a view's responses as uncacheable by adding the relevant headers."""
    @wraps(view_func)
    def _no_cache_view(request, *args, **kwargs):
        response = view_func(request, *args, **kwargs)
        add_never_cache_headers(response)
        return response
    return _no_cache_view
|
[
"[email protected]"
] | |
28ec052e9c58a50f9db14275c3fe505405877f48
|
dd098f8a93f787e38676283679bb39a290ba28b4
|
/samples/openapi3/client/3_0_3_unit_test/python-experimental/test/test_models/test_anyof.py
|
197c3449a9e49196e0d0dc3b0844ab50910bddba
|
[
"Apache-2.0"
] |
permissive
|
InfoSec812/openapi-generator
|
727c0235d3bad9b85ac12068808f844287af6003
|
e0c72702c3d5dae2a627a2926f0cddeedca61e32
|
refs/heads/master
| 2022-10-22T00:31:33.318867 | 2022-08-20T14:10:31 | 2022-08-20T14:10:31 | 152,479,633 | 1 | 0 |
Apache-2.0
| 2023-09-04T23:34:09 | 2018-10-10T19:38:43 |
Java
|
UTF-8
|
Python
| false | false | 1,385 |
py
|
# coding: utf-8
"""
openapi 3.0.3 sample spec
sample spec for testing openapi functionality, built from json schema tests for draft6 # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
import unittest
import unit_test_api
from unit_test_api.model.anyof import Anyof
from unit_test_api import configuration
class TestAnyof(unittest.TestCase):
    """Anyof unit test stubs"""
    _configuration = configuration.Configuration()

    def _deserialize(self, value):
        # Every case funnels through the same model entry point.
        return Anyof._from_openapi_data(value, _configuration=self._configuration)

    def test_second_anyof_valid_passes(self):
        # second anyOf valid
        self._deserialize(2.5)

    def test_neither_anyof_valid_fails(self):
        # neither anyOf valid
        with self.assertRaises((unit_test_api.ApiValueError, unit_test_api.ApiTypeError)):
            self._deserialize(1.5)

    def test_both_anyof_valid_passes(self):
        # both anyOf valid
        self._deserialize(3)

    def test_first_anyof_valid_passes(self):
        # first anyOf valid
        self._deserialize(1)


if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
a904290ec8ed97238dff5bff3c599df824611c11
|
02e23da0431623db86c8138bda350a1d526d4185
|
/Archivos Python Documentos/Graficas/.history/ejecutable_20200216215432.py
|
dfa14620e68d6ef4a0639d6914525ed6612644cf
|
[] |
no_license
|
Jaamunozr/Archivos-python
|
d9996d3d10ff8429cd1b4c2b396016a3a5482889
|
1f0af9ba08f12ac27e111fcceed49bbcf3b39657
|
refs/heads/master
| 2022-08-05T14:49:45.178561 | 2022-07-13T13:44:39 | 2022-07-13T13:44:39 | 244,073,267 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,272 |
py
|
import pylab as pl
import numpy as np

# Create a figure 16x8 points in size, 100 dots per inch
# (the original comment described an 8x6 layout later enlarged to 16x8).
pl.figure(figsize=(16, 8), dpi=100)

# Create a new subplot in a 1x1 grid (a 2-plot grid could also be created)
pl.subplot(1, 1, 1)

# Sample data for sine and cosine (from -2.1*pi to 2.1*pi)
X = np.linspace(-2.1*np.pi, 2.1*np.pi, 256, endpoint=True) # 256 is the number of samples in the interval
C, S = np.cos(X), np.sin(X)

# Plot the cosine function with a solid blue line, 1 pixel wide
pl.plot(X, C, color="blue", linewidth=1.0, linestyle="-")

# Plot the sine function with a solid green line, 1 pixel wide
pl.plot(X, S, color="green", linewidth=1.0, linestyle="-")

# Set x-axis limits
pl.xlim(-8.0, 8.0)

# Ticks on x (number of tick labels shown along the axis)
pl.xticks(np.linspace(-8, 8, 17, endpoint=True))

# Set y-axis limits
pl.ylim(-1.0, 1.0)

# Ticks on y (number of tick labels shown along the axis)
pl.yticks(np.linspace(-1, 1, 5, endpoint=True))

'''Otra opcion de determinar los limites a imprimir
pl.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
pl.yticks([-1, 0, +1]) '''

# Add axis lines through the origin and remove the surrounding box (disabled):
'''
ax = pl.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0))'''

# Save the figure at 72 dots per inch (disabled)
# savefig("exercice_2.png", dpi=72)

# Pad the data limits so the curves do not touch the plot border
#pl.xlim(X.min() * 1.1, X.max() * 1.1)
pl.ylim(C.min() * 1.1, C.max() * 1.1)

# Add a legend (re-plots both curves with thicker lines and labels)
pl.plot(X, C, color="blue", linewidth=2.5, linestyle="-", label="Coseno")
pl.plot(X, S, color="red", linewidth=2.5, linestyle="-", label="Seno")
pl.legend(loc='upper left')

# Annotate a known point, t = 2*pi/3, on each curve
t = 2 * np.pi / 3
# Cosine section ----------------------------------------------------------
# NOTE(review): the dashed line and marker use cos(t) while the annotation
# text and anchor use sin(t) (and vice versa in the sine section below) —
# these look swapped relative to the section labels; confirm intent.
pl.plot([t, t], [0, np.cos(t)], color='blue', linewidth=2.5, linestyle="--")
pl.scatter([t, ], [np.cos(t), ], 350, color='blue')

pl.annotate(r'$sin(\frac{2\pi}{3})=\frac{\sqrt{3}}{2}$',# text to display
            xy=(t, np.sin(t)), xycoords='data', # anchor point for arrow and text
            xytext=(+10, +30), textcoords='offset points', fontsize=16,# text position offset
            arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2")) # arrow style
# Sine section ------------------------------------------------------------
pl.plot([t, t],[0, np.sin(t)], color='red', linewidth=2.5, linestyle="--")
pl.scatter([t, ],[np.sin(t), ], 50, color='red')

pl.annotate(r'$cos(\frac{2\pi}{3})=-\frac{1}{2}$',
            xy=(t, np.cos(t)), xycoords='data',
            xytext=(-90, -50), textcoords='offset points', fontsize=16,
            arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))# arrow style

# Show the result on screen. NOTE(review): the original comment said a
# 2-second display but the pause below is 10 seconds — confirm which was meant.
pl.pause(10)
|
[
"[email protected]"
] | |
08cb23a06a7856db4ecb22d88ec90a611deba95b
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/man/case/life/big_government.py
|
7fba7f61642853f57bfca0dad6bb4279f36648e4
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 229 |
py
|
#! /usr/bin/env python
def fact_or_eye(str_arg):
    """Print `str_arg` (via ask_new_woman) followed by the fixed marker line
    'world_or_last_life'."""
    ask_new_woman(str_arg)
    print('world_or_last_life')
def ask_new_woman(str_arg):
    """Print the given string verbatim."""
    print(str_arg)
if __name__ == '__main__':
    # Demo entry point: prints the argument string, then 'world_or_last_life'.
    fact_or_eye('long_child_or_few_place')
|
[
"[email protected]"
] | |
b0d36ff01b81621a8a30b4260aee51ff0b7fd312
|
0ac1df08e2cb2a089e912e3237209e0fb683504a
|
/edgy/workflow/transition.py
|
d44537e832efd938ea119ed0b1b40d23812ce52a
|
[] |
no_license
|
python-edgy/workflow
|
ee8654b5cd3931f26dc6c4c519bc865cba1864ca
|
b27edaa7a80bf7cd40d5a26df114058f3795dacd
|
refs/heads/master
| 2020-12-11T20:24:36.461621 | 2016-07-22T09:26:47 | 2016-07-22T09:26:47 | 51,644,998 | 1 | 0 | null | 2016-08-17T14:18:02 | 2016-02-13T12:28:06 |
Python
|
UTF-8
|
Python
| false | false | 3,284 |
py
|
# -*- coding: utf-8 -*-
"""
The smallest atom of ``edgy.workflow`` is a ``Transition``, which basically is a regular python
callable with additional metadata to make the system aware of when it can be applied.
"""
from edgy.workflow.constants import WILDCARD
from edgy.workflow.utils import issequence
class Transition(object):
    """
    Defines when and how to go from one state to another, eventually applying a user-defined
    side-effect while being applied.

    Example::

        >>> t = Transition(name='sleep', source='awake', target='asleep')
        >>> class Person(object):
        ...     state = 'awake'
        >>> me = Person()
        >>> t(me)
        >>> me.state
        'asleep'

    This class can also be used as a decorator::

        >>> @Transition(source='asleep', target='awake')
        ... def wakeup(self, subject):
        ...     print('HEY!')
        >>> wakeup(me)
        >>> me.state
        'awake'

    A special wildcard source can make transitions work from any state. Just specify "*" as a
    transition source and you'll be able to transition from any state.
    """

    # Tracks each time a Transition instance is created. Used to retain order.
    creation_counter = 0

    # Transition handler. If absent, the transition is considered as "partial", and should be
    # called with a handler callable to be complete.
    handler = None

    def __init__(self, handler=None, name=None, source=None, target=None):
        # Normalize `source` to a tuple so a single state and a sequence of
        # states are handled uniformly.
        self.source = tuple(source if issequence(source) else (source,))
        self.target = target
        self._name = name

        # Increase the creation counter, and save our local copy.
        self.creation_counter = Transition.creation_counter
        Transition.creation_counter += 1

        if handler:
            # `handler or self.handler` was redundant here: this branch only
            # runs when `handler` is truthy.
            self.handler = handler

    def __call__(self, *args, **kwargs):
        # A partial transition (no handler yet) is being called as a
        # decorator; a complete one is being executed on a subject.
        if self.handler:
            return self.__call_complete(*args, **kwargs)
        return self.__call_partial(*args, **kwargs)

    def __call_partial(self, handler):
        """Attach `handler` to this handler-less transition (decorator usage)."""
        self.handler = handler
        return self

    def __call_complete(self, subject, *args, **kwargs):
        """Run the handler on `subject` and move it to the target state.

        Raises RuntimeError when the subject's current state is not an
        allowed source (unless the wildcard source is configured).
        """
        if WILDCARD not in self.source and subject.state not in self.source:
            raise RuntimeError(
                'This transition cannot be executed on a subject in "{}" state, authorized source '
                'states are {}.'.format(subject.state,
                                        ', '.join(['"{}"'.format(state) for state in self.source]))
            )

        # The previous try/except here only re-raised the exception unchanged
        # (with an unused `e` binding), so it was removed. The state still
        # changes only when the handler returns without raising.
        retval = self.handler(self, subject, *args, **kwargs)
        subject.state = self.target
        return retval

    @property
    def __name__(self):
        if self._name:
            return self._name
        if self.handler:
            return self.handler.__name__
        return 'partial'

    # Alias of the `__name__` property that can be used in django templates, for example.
    name = __name__

    def __repr__(self):
        return '<{}.{} object "{}" ({} to {}) at {}>'.format(
            type(self).__module__,
            type(self).__name__,
            self.__name__,
            '/'.join(self.source),
            self.target,
            hex(id(self)),
        )
|
[
"[email protected]"
] | |
78ab8469e9d3cb049c2360ccbb087a9236a83ec7
|
a1a3fc3511d3e2e29909411163bafd8932f87426
|
/tests/extension/dataflow_/regionadd_filter_enable/dataflow_regionadd_filter_enable.py
|
bef3f6dbb5ee6c07fe7b89b1e8dc44cb193f5f69
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
yongfu-li/veriloggen
|
25316c6f1a024669e7cb87f3491a1d3592356ea9
|
a6230da3350c6e4bb54e10a46ac855c24c27f17f
|
refs/heads/master
| 2021-01-23T11:50:43.050607 | 2017-09-04T08:30:06 | 2017-09-04T08:30:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,378 |
py
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.dataflow as dataflow
def mkMain():
    """Build the 'main' dataflow module: region-wise accumulation of x*x over
    windows of 4 samples, with enable/reset control streams and filtered
    (valid-flagged) z/v outputs."""
    # Input streams
    in_x = dataflow.Variable('xdata', valid='xvalid', ready='xready')
    in_reset = dataflow.Variable(
        'resetdata', valid='resetvalid', ready='resetready', width=1)
    in_enable = dataflow.Variable(
        'enabledata', valid='enablevalid', ready='enableready', width=1)

    # Dataflow definition
    out_z, out_v = dataflow.RegionAdd(
        in_x * in_x, 4, initval=0, enable=in_enable, reset=in_reset, filter=True)

    # Output streams
    out_z.output('zdata', valid='zvalid', ready='zready')
    out_v.output('vdata', valid='vvalid', ready='vready')

    graph = dataflow.Dataflow(out_z, out_v)
    return graph.to_module('main')
def mkTest(numports=8):
    """Build a self-checking testbench Module around mkMain()'s 'main' module.

    Drives the x/enable/reset input streams with small FSMs, consumes the
    z/v outputs with intermittent back-pressure, and $display's every
    handshaked transfer.

    Note: `numports` is not referenced in this body — presumably kept for
    signature compatibility; TODO confirm.
    """
    m = Module('test')

    # target instance
    main = mkMain()

    # Copy the DUT's parameters and simulation ports into the testbench.
    params = m.copy_params(main)
    ports = m.copy_sim_ports(main)

    clk = ports['CLK']
    rst = ports['RST']

    # x input stream (data/valid/ready handshake)
    xdata = ports['xdata']
    xvalid = ports['xvalid']
    xready = ports['xready']

    # reset control stream
    resetdata = ports['resetdata']
    resetvalid = ports['resetvalid']
    resetready = ports['resetready']

    # enable control stream
    enabledata = ports['enabledata']
    enablevalid = ports['enablevalid']
    enableready = ports['enableready']

    # z output stream (region sums)
    zdata = ports['zdata']
    zvalid = ports['zvalid']
    zready = ports['zready']

    # v output stream (validity flags)
    vdata = ports['vdata']
    vvalid = ports['vvalid']
    vready = ports['vready']

    uut = m.Instance(main, 'uut',
                     params=m.connect_params(main),
                     ports=m.connect_ports(main))

    # Initial values applied while reset is asserted.
    reset_done = m.Reg('reset_done', initval=0)
    reset_stmt = []
    reset_stmt.append(reset_done(0))
    reset_stmt.append(xdata(0))
    reset_stmt.append(xvalid(0))
    reset_stmt.append(enabledata(0))
    reset_stmt.append(enablevalid(0))
    reset_stmt.append(resetdata(0))
    reset_stmt.append(resetvalid(0))
    reset_stmt.append(zready(0))

    simulation.setup_waveform(m, uut)
    simulation.setup_clock(m, clk, hperiod=5)
    init = simulation.setup_reset(m, rst, reset_stmt, period=100)

    nclk = simulation.next_clock

    # Release reset_done after 1000 time units; finish after another 10000.
    init.add(
        Delay(1000),
        reset_done(1),
        nclk(clk),
        Delay(10000),
        Systask('finish'),
    )

    def send(name, data, valid, ready, step=1, waitnum=10, send_size=20):
        """Build an FSM that drives a data/valid stream: an initial burst of
        5 items, a wait, then items until `count` reaches `send_size`."""
        fsm = FSM(m, name + 'fsm', clk, rst)
        count = m.TmpReg(32, initval=0)
        fsm.add(valid(0))
        fsm.goto_next(cond=reset_done)
        for _ in range(waitnum):
            fsm.goto_next()
        fsm.add(valid(1))
        fsm.goto_next()
        # Advance data/count only when the consumer accepts (ready).
        fsm.add(data(data + step), cond=ready)
        fsm.add(count.inc(), cond=ready)
        fsm.add(valid(0), cond=AndList(count == 5, ready))
        fsm.goto_next(cond=AndList(count == 5, ready))
        for _ in range(waitnum):
            fsm.goto_next()
        fsm.add(valid(1))
        fsm.add(data(data + step), cond=ready)
        fsm.add(count.inc(), cond=ready)
        fsm.add(valid(0), cond=AndList(count == send_size, ready))
        fsm.goto_next(cond=AndList(count == send_size, ready))
        fsm.make_always()

    def receive(name, data, valid, ready, waitnum=10):
        """Build an FSM that accepts one item when `valid`, then deasserts
        ready for `waitnum` cycles — exercising back-pressure."""
        fsm = FSM(m, name + 'fsm', clk, rst)
        fsm.add(ready(0))
        fsm.goto_next(cond=reset_done)
        fsm.goto_next()
        yinit = fsm.current
        fsm.add(ready(1), cond=valid)
        fsm.goto_next(cond=valid)
        for i in range(waitnum):
            fsm.add(ready(0))
            fsm.goto_next()
        fsm.goto(yinit)
        fsm.make_always()

    # z and v are consumed together: both receivers gate on zvalid AND vvalid.
    send('x', xdata, xvalid, xready, waitnum=10, send_size=100)
    receive('z', zdata, Ands(zvalid, vvalid), zready, waitnum=5)
    receive('v', vdata, Ands(zvalid, vvalid), vready, waitnum=5)

    # enable port: pulse enabledata high for one accepted beat every 3 beats.
    enable_fsm = FSM(m, 'enable', clk, rst)
    enable_count = m.Reg('enable_count', 32, initval=0)
    enable_fsm.goto_next(cond=reset_done)
    enable_fsm_init = enable_fsm.current
    enable_fsm.add(enablevalid(1))  # always High
    enable_fsm.add(enable_count.inc(), cond=AndList(enablevalid, enableready))
    enable_fsm.add(enabledata(1), cond=AndList(
        enablevalid, enableready, enable_count == 2))
    enable_fsm.goto_next(cond=AndList(
        enablevalid, enableready, enable_count == 2))
    enable_fsm.add(enabledata(0), cond=AndList(enablevalid, enableready))
    enable_fsm.add(enable_count(0))
    enable_fsm.goto(enable_fsm_init, cond=AndList(enablevalid, enableready))
    enable_fsm.make_always()

    # reset port: same shape as the enable FSM but resetdata stays 0
    # (the line that would pulse it is deliberately commented out).
    reset_fsm = FSM(m, 'reset', clk, rst)
    reset_count = m.Reg('reset_count', 32, initval=0)
    reset_fsm.goto_next(cond=reset_done)
    reset_fsm_init = reset_fsm.current
    reset_fsm.add(resetvalid(1))  # always High
    reset_fsm.add(reset_count.inc(), cond=AndList(resetvalid, resetready))
    #reset_fsm.add( resetdata(1), cond=AndList(resetvalid, resetready, reset_count==2) )
    reset_fsm.add(resetdata(0), cond=AndList(
        resetvalid, resetready, reset_count == 2))
    reset_fsm.goto_next(cond=AndList(resetvalid, resetready, reset_count == 2))
    reset_fsm.add(resetdata(0), cond=AndList(resetvalid, resetready))
    reset_fsm.add(reset_count(0))
    reset_fsm.goto(reset_fsm_init, cond=AndList(resetvalid, resetready))
    reset_fsm.make_always()

    # Monitor: $display every completed handshake on each stream.
    m.Always(Posedge(clk))(
        If(reset_done)(
            If(AndList(xvalid, xready))(
                Systask('display', 'xdata=%d', xdata)
            ),
            If(AndList(zvalid, zready))(
                Systask('display', 'zdata=%d', zdata)
            ),
            If(AndList(vvalid, vready))(
                Systask('display', 'vdata=%d', vdata)
            )
        )
    )

    return m
if __name__ == '__main__':
test = mkTest()
verilog = test.to_verilog('tmp.v')
print(verilog)
# run simulator (Icarus Verilog)
sim = simulation.Simulator(test)
rslt = sim.run() # display=False
#rslt = sim.run(display=True)
print(rslt)
# launch waveform viewer (GTKwave)
# sim.view_waveform() # background=False
# sim.view_waveform(background=True)
|
[
"[email protected]"
] | |
d236ab80a1798bb92f400c21f53470b7b4d79c24
|
fdffd3f8ad31ffd917b1df4199ff5d88df80b420
|
/Chapter_08/matplotlib_learning.py
|
f2b0806f79708322e36af13779f60ecd5eb0b416
|
[] |
no_license
|
LelandYan/data_analysis
|
83c0cefa1b0783a8d3d13050092b2ab085cd859e
|
9482c4667ecac189545f40b9f5bad3c495d48068
|
refs/heads/master
| 2020-04-17T04:25:47.975087 | 2019-02-12T07:40:37 | 2019-02-12T07:40:37 | 166,229,621 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,529 |
py
|
import matplotlib.pyplot as plt
from numpy.random import randn
import numpy as np
# fig = plt.figure()
# ax1 = fig.add_subplot(2, 2, 1)
# ax2 = fig.add_subplot(2, 2, 2)
# ax3 = fig.add_subplot(2, 2, 3)
# plt.plot(randn(50).cumsum(), 'k--')
# _ = ax1.hist(randn(100), bins=20, color='k', alpha=0.3)
# ax2.scatter(np.arange(30), np.arange(30) + 3 * randn(30))
# 调整subplot周围的间距
# fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
# for i in range(2):
# for j in range(2):
# axes[i, j].hist(randn(500), bins=50, color='k', alpha=0.5)
# plt.subplots_adjust(wspace=0, hspace=0)
# plt.plot(randn(30).cumsum(), 'ko--')
# plt.plot(randn(30).cumsum(), color='k', linestyle='dashed', marker='o')
# data = randn(30).cumsum()
# # plt.plot(data, 'k--', label='Default')
# plt.plot(data, 'k-', drawstyle='steps-post', label='steps-post')
# plt.legend(loc='best')
# plt.xlim()
# plt.savefig('figpath.svg')
# plt.show()
# from io import BytesIO
# buffer = BytesIO()
# plt.savefig(buffer)
# plot_data = buffer.getvalue()
# ax = fig.add_subplot(1, 1, 1)
# ax.plot(randn(1000).cumsum())
# plt.show()
# from datetime import datetime
# import pandas as pd
#
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
#
# data = pd.read_csv('spx.csv', index_col=0, parse_dates=True)
# spx = data['SPX']
# spx.plot(ax=ax, style='k--',alpha=0.3)
# crisis_data = [
# (datetime(2007, 10, 11), 'Peak of bull market'),
# (datetime(2008, 3, 12), 'Bear Stearns Fails'),
# (datetime(2008, 9, 15), 'Lehman Bankruptcy')
# ]
# for date, label in crisis_data:
# ax.annotate(label, xy=(date, spx.asof(date) + 50),
# xytext=(date, spx.asof(date) + 200),
# arrowprops=dict(facecolor='black'),
# horizontalalignment='left', verticalalignment='top')
# ax.set_xlim(['1/1/2007', '1/1/2011'])
# ax.set_ylim([600, 1800])
# ax.set_title("Important dates in 2008-2009 financial crisis")
# plt.show()
# ax.savefig('figpath.svg')
# matplotlib配置
# plt.rc('figure', figsize=(10, 10))
from pandas import DataFrame, Series
# pandas中的绘图函数
# 线型图
# s = Series(np.random.randn(10).cumsum(), index=np.arange(0, 100, 10))
# s.plot()
# plt.show()
# df = DataFrame(np.random.randn(10, 4).cumsum(0), columns=['A', 'B', 'C', 'D'], index=np.arange(0, 100, 10))
# df.plot()
# plt.show()
# 柱状图 kind='bar/barh' Serise和DataFrame的索引将会被X,Y刻度
# fig, axes = plt.subplots(2, 1)
# data = Series(np.random.rand(16), index=list('abcdefghijklmnop'))
# data.plot(kind='bar', ax=axes[0], color='k', alpha=0.7)
# data.plot(kind='barh', ax=axes[1], color='k', alpha=0.7)
# plt.show()
import pandas as pd
# df = DataFrame(np.random.rand(6, 4),
# index=['one', 'two', 'three', 'four', 'five', 'six'],
# columns=pd.Index(['A', 'B', 'C', 'D'], names='Genus'))
# df.plot(kind='bar')
# df.plot(kind='barh', stacked=True, alpha=0.5)
# plt.show()
# tips = pd.read_csv('tips.csv')
# party_counts = pd.crosstab(tips.day,tips.size)
# print(party_counts.ix[:,2:5])
# 直方图和密度图
# tips = pd.read_csv('tips.csv')
# tips['tip_pct'] = tips['tip'] / tips['total_bill']
# tips['tip_pct'].hist(bins=20)
# tips['tip_pct'].plot(kind='kde')
# plt.show()
# comp1 = np.random.normal(0, 1, size=200)
# comp2 = np.random.normal(10, 2, size=200)
# values = Series(np.concatenate([comp1,comp2]))
# values.hist(bins=100,alpha=0.3,color='k',normed=True)
# values.plot(kind='kde',style='k--')
# plt.show()
# 散步图
# macro = pd.read_csv('macrodata.csv')
# # data = macro[['cpi', 'm1', 'tbilrate', 'unemp']]
# # # print(data[-5:])
# # trans_data = np.log(data).diff().dropna()
# # # print(trans_data[-5:])
# # plt.scatter(trans_data['m1'],trans_data['unemp'])
# # plt.title('Changes in log')
# # pd.scatter_matrix(trans_data,diagonal='kde',color='k',alpha=0.3)
# # plt.show()
# 绘制地图
data = pd.read_csv('Haiti.csv')
# 清除错误的信息
data = data[(data.LATITUDE > 18) & (data.LATITUDE < 20) & (data.LONGITUDE > -75) & (data.LONGITUDE < -70) & (
data.CATEGORY.notnull())]
def to_cat_list(catstr):
stripped = (x.strip() for x in catstr.split(','))
return [x for x in stripped if x]
def get_all_categories(cat_series):
cat_sets = (set(to_cat_list(x) for x in cat_series))
return sorted(set.union(*cat_sets))
def get_english(cat):
code, names = cat.split('.')
if '|' in names:
names = names.split('|')[1]
return code, names.strip()
print(get_english('2. Urgences logistiques | Vital Lines'))
|
[
"[email protected]"
] | |
5f4d648fe87f277326ed1c245f130c1540612c9f
|
acda0bc700943654156d491eaa0b766bea0ae7bd
|
/apps/item/views.py
|
5a977f899786fbf9bf8ce0698dd0d24153b6aefd
|
[] |
no_license
|
bluehawkarthur/casa_campo
|
a11baaec966d51a1e733ad2dd48bb77a0ecd6cb5
|
22a57b58a722769e8e25330457ed868d230f5c05
|
refs/heads/master
| 2021-01-18T15:05:47.674205 | 2016-10-26T20:03:55 | 2016-10-26T20:03:55 | 68,387,895 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,605 |
py
|
from django.shortcuts import render_to_response, render
from django.views.generic import FormView, ListView, DetailView, UpdateView
from pure_pagination.mixins import PaginationMixin
from django.core.urlresolvers import reverse_lazy
from .forms import ItemForm
from django.http import HttpResponseRedirect
from django.template import RequestContext
from .models import Item
# Create your views here. , UpdateView, DeleteView
def CrearItem(request):
if request.method == 'POST':
form = ItemForm(request.POST)
if form.is_valid():
item = Item(
codigo=form.cleaned_data['codigo'],
unidad=form.cleaned_data['unidad'],
descripcion=form.cleaned_data['descripcion'],
cantidad=form.cleaned_data['cantidad'],
pr_costo=form.cleaned_data['pr_costo'])
item.save()
return HttpResponseRedirect(reverse_lazy('listar_item'))
else:
print 'dfsdfsdfsdf'
form = ItemForm()
variables = RequestContext(request, {'form': form})
return render_to_response('item/crearitem.html', variables)
class ListarItem(PaginationMixin, ListView):
template_name = 'item/listar_item.html'
paginate_by = 5
model = Item
context_object_name = 'item'
class DetalleItem(DetailView):
template_name = 'item/detalle_item.html'
model = Item
context_object_name = 'item'
class EditItem(UpdateView):
template_name = 'item/edit_item.html'
model = Item
fields = ['codigo', 'unidad', 'descripcion', 'cantidad', 'pr_costo']
success_url = reverse_lazy('listar_item')
def DeleteItem(request, item):
e = Item.objects.get(id=item)
e.delete()
return HttpResponseRedirect(reverse_lazy('listar_item'))
|
[
"[email protected]"
] | |
169cc6e3a08adc088826a5b3ab17e5fcb13c6c44
|
b976a3ca1e9cb98a9c90e57243255d0a8ace3572
|
/Probability & Statistics/pharmacy_multi_regression.py
|
911ba06a6717cd97204579ffadd3597f75e39138
|
[
"MIT"
] |
permissive
|
akhilvydyula/Data-Science-and-Machine-Learning-Projects-Dojo
|
fbe9408818cbfdb31d7fa0e52d9566bab998b9e1
|
4e2932dfa6749b360a7a605050c953ef52fc6547
|
refs/heads/master
| 2023-05-06T00:42:57.787384 | 2021-05-28T06:40:25 | 2021-05-28T06:40:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 500 |
py
|
"""
A pharmacy delivers medications to the surrounding community.
Drivers can make several stops per delivery.
The owner would like to predict the length of time a delivery will take based on one or two related variables.
"""
from sklearn.linear_model import LinearRegression
x1, x2 = [1,3,2,3,1], [8,4,9,6,3]
y = [29, 31, 36, 35, 19]
reg = LinearRegression()
reg.fit(list(zip(x1,x2)), y)
b1, b2 = reg.coef_[0], reg.coef_[1]
b0 = reg.intercept_
print(f'y = {b0:.{3}} + {b1:.{3}}x1 + {b2:.{3}}x2')
|
[
"[email protected]"
] | |
a06e523614c65dc76a0ee5de471b3d4970df6c87
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/GstVideo/VideoResampler.py
|
a56a7a96d40d30d7b2b241bb4ea4fabe51b4f99e
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null |
UTF-8
|
Python
| false | false | 6,320 |
py
|
# encoding: utf-8
# module gi.repository.GstVideo
# from /usr/lib64/girepository-1.0/GstVideo-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gst as __gi_repository_Gst
import gi.repository.GstBase as __gi_repository_GstBase
import gobject as __gobject
class VideoResampler(__gi.Struct):
"""
:Constructors:
::
VideoResampler()
"""
def clear(self): # real signature unknown; restored from __doc__
""" clear(self) """
pass
def init(self, method, flags, n_phases, n_taps, shift, in_size, out_size, options): # real signature unknown; restored from __doc__
""" init(self, method:GstVideo.VideoResamplerMethod, flags:GstVideo.VideoResamplerFlags, n_phases:int, n_taps:int, shift:float, in_size:int, out_size:int, options:Gst.Structure) -> bool """
return False
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
in_size = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
max_taps = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
n_phases = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
n_taps = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
offset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
out_size = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
phase = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
taps = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_gst_reserved = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(VideoResampler), '__module__': 'gi.repository.GstVideo', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'VideoResampler' objects>, '__weakref__': <attribute '__weakref__' of 'VideoResampler' objects>, '__doc__': None, 'in_size': <property object at 0x7f930d2a1770>, 'out_size': <property object at 0x7f930d2a1860>, 'max_taps': <property object at 0x7f930d2a1950>, 'n_phases': <property object at 0x7f930d2a1a40>, 'offset': <property object at 0x7f930d2a1b30>, 'phase': <property object at 0x7f930d2a1c20>, 'n_taps': <property object at 0x7f930d2a1d10>, 'taps': <property object at 0x7f930d2a1e00>, '_gst_reserved': <property object at 0x7f930d2a1ef0>, 'clear': gi.FunctionInfo(clear), 'init': gi.FunctionInfo(init)})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(VideoResampler)
|
[
"[email protected]"
] | |
cec03f25f354aaa3f99e4de8a868b3777d100efc
|
0010a92176b766f4bdf37c1144fa0f724cfaf564
|
/env/lib/python3.5/site-packages/aliyunsdkecs/request/v20140526/CreateImageRequest.py
|
5fcd8e48fafcdcaf41af6211a1c3634952c20daa
|
[] |
no_license
|
pengjinfu/My-Admin
|
bc2d8b53da8be0fad60e1d8979bdca3f2c4560d9
|
26206d1def673adb7dfe5c8044c654a0e65320d1
|
refs/heads/master
| 2021-08-30T02:17:57.432743 | 2017-12-15T17:05:05 | 2017-12-15T17:05:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,655 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateImageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateImage')
def get_DiskDeviceMappings(self):
return self.get_query_params().get('DiskDeviceMappings')
def set_DiskDeviceMappings(self,DiskDeviceMappings):
for i in range(len(DiskDeviceMappings)):
if DiskDeviceMappings[i].get('Size') is not None:
self.add_query_param('DiskDeviceMapping.' + bytes(i + 1) + '.Size' , DiskDeviceMappings[i].get('Size'))
if DiskDeviceMappings[i].get('SnapshotId') is not None:
self.add_query_param('DiskDeviceMapping.' + bytes(i + 1) + '.SnapshotId' , DiskDeviceMappings[i].get('SnapshotId'))
if DiskDeviceMappings[i].get('Device') is not None:
self.add_query_param('DiskDeviceMapping.' + bytes(i + 1) + '.Device' , DiskDeviceMappings[i].get('Device'))
if DiskDeviceMappings[i].get('DiskType') is not None:
self.add_query_param('DiskDeviceMapping.' + bytes(i + 1) + '.DiskType' , DiskDeviceMappings[i].get('DiskType'))
def get_Tag4Value(self):
return self.get_query_params().get('Tag.4.Value')
def set_Tag4Value(self,Tag4Value):
self.add_query_param('Tag.4.Value',Tag4Value)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_SnapshotId(self):
return self.get_query_params().get('SnapshotId')
def set_SnapshotId(self,SnapshotId):
self.add_query_param('SnapshotId',SnapshotId)
def get_Tag2Key(self):
return self.get_query_params().get('Tag.2.Key')
def set_Tag2Key(self,Tag2Key):
self.add_query_param('Tag.2.Key',Tag2Key)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Tag3Key(self):
return self.get_query_params().get('Tag.3.Key')
def set_Tag3Key(self,Tag3Key):
self.add_query_param('Tag.3.Key',Tag3Key)
def get_Platform(self):
return self.get_query_params().get('Platform')
def set_Platform(self,Platform):
self.add_query_param('Platform',Platform)
def get_Tag1Value(self):
return self.get_query_params().get('Tag.1.Value')
def set_Tag1Value(self,Tag1Value):
self.add_query_param('Tag.1.Value',Tag1Value)
def get_ImageName(self):
return self.get_query_params().get('ImageName')
def set_ImageName(self,ImageName):
self.add_query_param('ImageName',ImageName)
def get_Tag3Value(self):
return self.get_query_params().get('Tag.3.Value')
def set_Tag3Value(self,Tag3Value):
self.add_query_param('Tag.3.Value',Tag3Value)
def get_Architecture(self):
return self.get_query_params().get('Architecture')
def set_Architecture(self,Architecture):
self.add_query_param('Architecture',Architecture)
def get_Tag5Key(self):
return self.get_query_params().get('Tag.5.Key')
def set_Tag5Key(self,Tag5Key):
self.add_query_param('Tag.5.Key',Tag5Key)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Tag5Value(self):
return self.get_query_params().get('Tag.5.Value')
def set_Tag5Value(self,Tag5Value):
self.add_query_param('Tag.5.Value',Tag5Value)
def get_Tag1Key(self):
return self.get_query_params().get('Tag.1.Key')
def set_Tag1Key(self,Tag1Key):
self.add_query_param('Tag.1.Key',Tag1Key)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_Tag2Value(self):
return self.get_query_params().get('Tag.2.Value')
def set_Tag2Value(self,Tag2Value):
self.add_query_param('Tag.2.Value',Tag2Value)
def get_ImageVersion(self):
return self.get_query_params().get('ImageVersion')
def set_ImageVersion(self,ImageVersion):
self.add_query_param('ImageVersion',ImageVersion)
def get_Tag4Key(self):
return self.get_query_params().get('Tag.4.Key')
def set_Tag4Key(self,Tag4Key):
self.add_query_param('Tag.4.Key',Tag4Key)
|
[
"[email protected]"
] | |
6c1c35ef28e08ac096358de3535ce5d1f50ca604
|
e57d7785276053332c633b57f6925c90ad660580
|
/sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/aio/operations/_shared_keys_operations.py
|
e209ceb60ee78a0cc0c90df3c27836f9fb07693b
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
adriananeci/azure-sdk-for-python
|
0d560308497616a563b6afecbb494a88535da4c5
|
b2bdfe659210998d6d479e73b133b6c51eb2c009
|
refs/heads/main
| 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 |
MIT
| 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null |
UTF-8
|
Python
| false | false | 7,719 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SharedKeysOperations:
"""SharedKeysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.loganalytics.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get_shared_keys(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> "_models.SharedKeys":
"""Gets the shared keys for a workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedKeys, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.SharedKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get_shared_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_shared_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/sharedKeys'} # type: ignore
async def regenerate(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> "_models.SharedKeys":
"""Regenerates the shared keys for a Log Analytics Workspace. These keys are used to connect
Microsoft Operational Insights agents to the workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedKeys, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.SharedKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.regenerate.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/regenerateSharedKey'} # type: ignore
|
[
"[email protected]"
] | |
b137998baadcf6c1c7eddef0dd667c340d56e435
|
6a41f12ddb104c4f214fa8bf2864860a8952d17c
|
/books_crawler/books_crawler/settings.py
|
7916845ebd89642dd40df20c0bfb0f0e827a9905
|
[] |
no_license
|
jakiiii/Web-Scraping-Scratch
|
39bb32ea2044e6c4e52ee58ea88794f2a77d75cd
|
46cd54d3a06d70cef070f47b3c15b530691c3187
|
refs/heads/master
| 2020-04-21T00:34:50.736222 | 2019-02-07T06:38:54 | 2019-02-07T06:38:54 | 169,200,752 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,194 |
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for books_crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'books_crawler'
SPIDER_MODULES = ['books_crawler.spiders']
NEWSPIDER_MODULE = 'books_crawler.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'books_crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'books_crawler.middlewares.BooksCrawlerSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'books_crawler.middlewares.BooksCrawlerDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'scrapy.pipelines.images.ImagesPipeline': 1
}
IMAGES_STORE = '/home/jaki/Dev/WebScrapingScratch/images'
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"[email protected]"
] | |
522bd050c87ec2e3215a3c729553e1d611c0549a
|
824b582c2e0236e987a29b233308917fbdfc57a7
|
/sdk/python/pulumi_google_native/orgpolicy/v2/get_folder_policy.py
|
14f2f3278537c86a7f93b5c154315a900a2b904d
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
24601/pulumi-google-native
|
ce8faf8455609a9572a8cbe0638c66427bf0ae7f
|
b219a14201c6c58eaa10caaeacbdaab528931adf
|
refs/heads/master
| 2023-08-23T05:48:31.819709 | 2021-10-08T18:50:44 | 2021-10-08T18:50:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,527 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetFolderPolicyResult',
'AwaitableGetFolderPolicyResult',
'get_folder_policy',
'get_folder_policy_output',
]
@pulumi.output_type
class GetFolderPolicyResult:
    # Plain result container produced by the getFolderPolicy invoke.
    def __init__(__self__, name=None, spec=None):
        # Validate the raw invoke payload before storing it.
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if spec and not isinstance(spec, dict):
            raise TypeError("Expected argument 'spec' to be a dict")
        pulumi.set(__self__, "spec", spec)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Immutable. The resource name of the Policy. Must be one of the following forms, where constraint_name is the name of the constraint which this Policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, "projects/123/policies/compute.disableSerialPortAccess". Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def spec(self) -> 'outputs.GoogleCloudOrgpolicyV2PolicySpecResponse':
        """
        Basic information about the Organization Policy.
        """
        return pulumi.get(self, "spec")
class AwaitableGetFolderPolicyResult(GetFolderPolicyResult):
    # Awaitable wrapper so the invoke result can be used with `await`:
    # __await__ never actually yields (the `if False` keeps it a generator)
    # and immediately returns a plain GetFolderPolicyResult.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetFolderPolicyResult(
            name=self.name,
            spec=self.spec)
def get_folder_policy(folder_id: Optional[str] = None,
                      policy_id: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFolderPolicyResult:
    """
    Gets a `Policy` on a resource. If no `Policy` is set on the resource, NOT_FOUND is returned. The `etag` value can be used with `UpdatePolicy()` to update a `Policy` during read-modify-write.
    """
    # Build the raw argument dict the provider invoke expects.
    __args__ = dict()
    __args__['folderId'] = folder_id
    __args__['policyId'] = policy_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default the provider plugin version to this SDK's version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:orgpolicy/v2:getFolderPolicy', __args__, opts=opts, typ=GetFolderPolicyResult).value
    return AwaitableGetFolderPolicyResult(
        name=__ret__.name,
        spec=__ret__.spec)
@_utilities.lift_output_func(get_folder_policy)
def get_folder_policy_output(folder_id: Optional[pulumi.Input[str]] = None,
                             policy_id: Optional[pulumi.Input[str]] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFolderPolicyResult]:
    """
    Gets a `Policy` on a resource. If no `Policy` is set on the resource, NOT_FOUND is returned. The `etag` value can be used with `UpdatePolicy()` to update a `Policy` during read-modify-write.
    """
    # Body intentionally empty: lift_output_func wraps get_folder_policy so
    # this variant accepts pulumi Inputs and returns an Output.
    ...
|
[
"[email protected]"
] | |
893136904401af906e7bdbcf75c63539d98f9364
|
5cb7b9fe09b1dd20c0664d0c86c375ffe353903c
|
/static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/test/test_shlex.py
|
ba0f3d1fcae7670d0a08cc51cf4cc0b57557c939
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
shiblon/pytour
|
6d0ee4a679cf7e6ffd8ac6326b8bb0d9071a7c73
|
71a181ec16fd38b0af62f55e28a50e91790733b9
|
refs/heads/master
| 2021-01-17T10:09:18.822575 | 2020-09-23T20:05:58 | 2020-09-23T20:05:58 | 23,226,350 | 2 | 3 |
Apache-2.0
| 2020-02-17T22:36:02 | 2014-08-22T13:33:27 |
Python
|
UTF-8
|
Python
| false | false | 5,315 |
py
|
# -*- coding: utf-8 -*-
import unittest
import shlex
from test import test_support
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# The original test data set was from shellwords, by Hartmut Goebel.
data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|\|x|bar|
\ x bar|\|x|bar|
\ bar|\|bar|
foo \x bar|foo|\|x|bar|
foo \ x bar|foo|\|x|bar|
foo \ bar|foo|\|bar|
foo "bar" bla|foo|"bar"|bla|
"foo" "bar" "bla"|"foo"|"bar"|"bla"|
"foo" bar "bla"|"foo"|bar|"bla"|
"foo" bar bla|"foo"|bar|bla|
foo 'bar' bla|foo|'bar'|bla|
'foo' 'bar' 'bla'|'foo'|'bar'|'bla'|
'foo' bar 'bla'|'foo'|bar|'bla'|
'foo' bar bla|'foo'|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foo"bar"bar"fasel"|baz|
blurb foo'bar'bar'fasel' baz|blurb|foo'bar'bar'fasel'|baz|
""|""|
''|''|
foo "" bar|foo|""|bar|
foo '' bar|foo|''|bar|
foo "" "" "" bar|foo|""|""|""|bar|
foo '' '' '' bar|foo|''|''|''|bar|
\""|\|""|
"\"|"\"|
"foo\ bar"|"foo\ bar"|
"foo\\ bar"|"foo\\ bar"|
"foo\\ bar\"|"foo\\ bar\"|
"foo\\" bar\""|"foo\\"|bar|\|""|
"foo\\ bar\" dfadf"|"foo\\ bar\"|dfadf"|
"foo\\\ bar\" dfadf"|"foo\\\ bar\"|dfadf"|
"foo\\\x bar\" dfadf"|"foo\\\x bar\"|dfadf"|
"foo\x bar\" dfadf"|"foo\x bar\"|dfadf"|
\''|\|''|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
"foo\\\x bar\" df'a\ 'df'|"foo\\\x bar\"|df'a|\|'df'|
\"foo"|\|"foo"|
\"foo"\x|\|"foo"|\|x|
"foo\x"|"foo\x"|
"foo\ "|"foo\ "|
foo\ xx|foo|\|xx|
foo\ x\x|foo|\|x|\|x|
foo\ x\x\""|foo|\|x|\|x|\|""|
"foo\ x\x"|"foo\ x\x"|
"foo\ x\x\\"|"foo\ x\x\\"|
"foo\ x\x\\""foobar"|"foo\ x\x\\"|"foobar"|
"foo\ x\x\\"\''"foobar"|"foo\ x\x\\"|\|''|"foobar"|
"foo\ x\x\\"\'"fo'obar"|"foo\ x\x\\"|\|'"fo'|obar"|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|"foo\ x\x\\"|\|'"fo'|obar"|'don'|\|''|t'|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
foo\ bar|foo|\|bar|
foo#bar\nbaz|foobaz|
:-) ;-)|:|-|)|;|-|)|
áéíóú|á|é|í|ó|ú|
"""
posix_data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|x|bar|
\ x bar| x|bar|
\ bar| bar|
foo \x bar|foo|x|bar|
foo \ x bar|foo| x|bar|
foo \ bar|foo| bar|
foo "bar" bla|foo|bar|bla|
"foo" "bar" "bla"|foo|bar|bla|
"foo" bar "bla"|foo|bar|bla|
"foo" bar bla|foo|bar|bla|
foo 'bar' bla|foo|bar|bla|
'foo' 'bar' 'bla'|foo|bar|bla|
'foo' bar 'bla'|foo|bar|bla|
'foo' bar bla|foo|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foobarbarfasel|baz|
blurb foo'bar'bar'fasel' baz|blurb|foobarbarfasel|baz|
""||
''||
foo "" bar|foo||bar|
foo '' bar|foo||bar|
foo "" "" "" bar|foo||||bar|
foo '' '' '' bar|foo||||bar|
\"|"|
"\""|"|
"foo\ bar"|foo\ bar|
"foo\\ bar"|foo\ bar|
"foo\\ bar\""|foo\ bar"|
"foo\\" bar\"|foo\|bar"|
"foo\\ bar\" dfadf"|foo\ bar" dfadf|
"foo\\\ bar\" dfadf"|foo\\ bar" dfadf|
"foo\\\x bar\" dfadf"|foo\\x bar" dfadf|
"foo\x bar\" dfadf"|foo\x bar" dfadf|
\'|'|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
"foo\\\x bar\" df'a\ 'df"|foo\\x bar" df'a\ 'df|
\"foo|"foo|
\"foo\x|"foox|
"foo\x"|foo\x|
"foo\ "|foo\ |
foo\ xx|foo xx|
foo\ x\x|foo xx|
foo\ x\x\"|foo xx"|
"foo\ x\x"|foo\ x\x|
"foo\ x\x\\"|foo\ x\x\|
"foo\ x\x\\""foobar"|foo\ x\x\foobar|
"foo\ x\x\\"\'"foobar"|foo\ x\x\'foobar|
"foo\ x\x\\"\'"fo'obar"|foo\ x\x\'fo'obar|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|foo\ x\x\'fo'obar|don't|
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\|foo\ x\x\'fo'obar|don't|\|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
foo\ bar|foo bar|
foo#bar\nbaz|foo|baz|
:-) ;-)|:-)|;-)|
áéíóú|áéíóú|
"""
class ShlexTest(unittest.TestCase):
    def setUp(self):
        # Parse the pipe-delimited tables above into [input, tok1, tok2, ...]
        # rows; the trailing empty field from the final '|' is dropped.
        self.data = [x.split("|")[:-1]
                     for x in data.splitlines()]
        self.posix_data = [x.split("|")[:-1]
                           for x in posix_data.splitlines()]
        # A literal "\n" in the input column stands for a real newline.
        for item in self.data:
            item[0] = item[0].replace(r"\n", "\n")
        for item in self.posix_data:
            item[0] = item[0].replace(r"\n", "\n")
    def splitTest(self, data, comments):
        # Helper: shlex.split() on each row's input must yield that row's tokens.
        for i in range(len(data)):
            l = shlex.split(data[i][0], comments=comments)
            self.assertEqual(l, data[i][1:],
                             "%s: %s != %s" %
                             (data[i][0], l, data[i][1:]))
    def oldSplit(self, s):
        # Tokenize with the legacy (non-POSIX) shlex.shlex interface.
        ret = []
        lex = shlex.shlex(StringIO(s))
        tok = lex.get_token()
        while tok:
            ret.append(tok)
            tok = lex.get_token()
        return ret
    def testSplitPosix(self):
        """Test data splitting with posix parser"""
        self.splitTest(self.posix_data, comments=True)
    def testCompat(self):
        """Test compatibility interface"""
        for i in range(len(self.data)):
            l = self.oldSplit(self.data[i][0])
            self.assertEqual(l, self.data[i][1:],
                             "%s: %s != %s" %
                             (self.data[i][0], l, self.data[i][1:]))
# Allow this test to be used with old shlex.py
if not getattr(shlex, "split", None):
    # Pre-split() shlex: drop every test that needs split(), keeping only
    # testCompat, which exercises the legacy interface.
    for methname in dir(ShlexTest):
        if methname.startswith("test") and methname != "testCompat":
            delattr(ShlexTest, methname)
def test_main():
    # Entry point used by Python 2's regrtest machinery.
    test_support.run_unittest(ShlexTest)
if __name__ == "__main__":
    test_main()
|
[
"[email protected]"
] | |
63f6862c5fa020fc79e11cdb16aee06ddb1ff1a0
|
d5d35d20ec811cbaa792e681d559361cd7f38159
|
/challenge/DidacticVampireText.py
|
70cdffe7dc20b01345fe0e2f5d252051a8275136
|
[] |
no_license
|
markieboy/hacker.org
|
afe43f0b4213ec135f8b095bcc7b1a7a755581d8
|
da1689bdcc2fe91a81a30385680fd367f2d6e9cf
|
refs/heads/master
| 2021-06-21T12:07:21.503999 | 2017-08-11T08:38:05 | 2017-08-11T08:38:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 521 |
py
|
#!/usr/bin/env python3
# Q: http://www.hacker.org/challenge/chal.php?id=139
# A: http://www.hacker.org/challenge/chal.php?answer=sunshine&id=139&go=Submit
import re
import urllib.request
import hacker_org_util
PROBLEM_ID = '139'
def main():
source = urllib.request.urlopen(hacker_org_util.build_challenge_url(PROBLEM_ID)).read().decode()
m = re.search('<p>(.*)<p>', source, flags=re.DOTALL)
text = m.group(1)
print(''.join(re.findall(r'[A-Z]', text)))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
86bf68a3fdb54d1cb09fca3faa9ef12d0f6fa966
|
ee53b0262007b2f0db0fe15b2ad85f65fafa4e25
|
/Leetcode/849. Maximize Distance to Closest Person.py
|
f36d9de210337b439e5c96e96c00caecda775ca7
|
[] |
no_license
|
xiaohuanlin/Algorithms
|
bd48caacb08295fc5756acdac609be78e143a760
|
157cbaeeff74130e5105e58a6b4cdf66403a8a6f
|
refs/heads/master
| 2023-08-09T05:18:06.221485 | 2023-08-08T11:53:15 | 2023-08-08T11:53:15 | 131,491,056 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,279 |
py
|
'''
In a row of seats, 1 represents a person sitting in that seat, and 0 represents that the seat is empty.
There is at least one empty seat, and at least one person sitting.
Alex wants to sit in the seat such that the distance between him and the closest person to him is maximized.
Return that maximum distance to closest person.
Example 1:
Input: [1,0,0,0,1,0,1]
Output: 2
Explanation:
If Alex sits in the second open seat (seats[2]), then the closest person has distance 2.
If Alex sits in any other open seat, the closest person has distance 1.
Thus, the maximum distance to the closest person is 2.
Example 2:
Input: [1,0,0,0]
Output: 3
Explanation:
If Alex sits in the last seat, the closest person is 3 seats away.
This is the maximum distance possible, so the answer is 3.
Note:
1 <= seats.length <= 20000
seats contains only 0s or 1s, at least one 0, and at least one 1.
'''
import unittest
class Solution:
    def maxDistToClosest(self, seats):
        """
        Return the maximum distance to the closest occupied seat that Alex
        can achieve by choosing an empty seat.

        :type seats: List[int] -- each entry is 1 (occupied) or 0 (empty);
                     contains at least one 0 and at least one 1
        :rtype: int

        Single O(n) pass tracking the index of the last person seen:
        - a leading run of empty seats contributes its full length
          (sit at index 0, closest person is the first 1);
        - an interior gap contributes half its width (sit at the midpoint);
        - a trailing run contributes its full length (sit in the last seat).
        """
        max_dist = 0
        last_person = -1  # index of the most recent 1 seen; -1 = none yet
        for i, seat in enumerate(seats):
            if seat == 1:
                if last_person == -1:
                    # Leading gap: distance from seat 0 to the first person.
                    max_dist = max(max_dist, i)
                else:
                    # Interior gap: the best seat is the midpoint.
                    max_dist = max(max_dist, (i - last_person) // 2)
                last_person = i
        # Trailing gap: sit in the last seat.
        max_dist = max(max_dist, len(seats) - 1 - last_person)
        return max_dist
class TestSolution(unittest.TestCase):
    """Checks maxDistToClosest against known seat layouts."""
    def test_case(self):
        # (seats, expected max distance) pairs covering interior gaps,
        # leading gaps and trailing gaps.
        examples = (
            ([1,0,0,0,1,0,1], 2),
            ([1,0,0,0], 3),
            ([0,0,0,1], 3),
            ([0,1,0,0,0,0], 4),
        )
        for first, second in examples:
            self.assert_function(first, second)
    def assert_function(self, first, second):
        # One labelled assertion per example so failures show the input.
        self.assertEqual(Solution().maxDistToClosest(first), second,
                         msg="first: {}; second: {}".format(first, second))
# Run the suite when the module is executed.
unittest.main()
|
[
"[email protected]"
] | |
735d952b9b73db8a38c1a772c6a5c61bceced913
|
e1dd6d9dccb822d472b7f4f9e8446dd9202eb5a1
|
/sdk/test/test_io_k8s_api_rbac_v1alpha1_cluster_role_list.py
|
f7e76f339f1516a69e8e00215b9d2dd97d478213
|
[] |
no_license
|
swiftdiaries/argo_client
|
8af73e8df6a28f9ea5f938b5894ab8b7825e4cc2
|
b93758a22d890cb33cbd81934042cfc3c12169c7
|
refs/heads/master
| 2020-05-17T12:11:57.556216 | 2019-07-24T23:23:33 | 2019-07-24T23:23:33 | 183,701,327 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,030 |
py
|
# coding: utf-8
"""
Argo API Client
Generated python client for the Argo Workflows # noqa: E501
OpenAPI spec version: v1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import argo.sdk
from models.io_k8s_api_rbac_v1alpha1_cluster_role_list import IoK8sApiRbacV1alpha1ClusterRoleList # noqa: E501
from argo.sdk.rest import ApiException
class TestIoK8sApiRbacV1alpha1ClusterRoleList(unittest.TestCase):
    """IoK8sApiRbacV1alpha1ClusterRoleList unit test stubs"""
    def setUp(self):
        # Generated stub: no fixtures required.
        pass
    def tearDown(self):
        pass
    def testIoK8sApiRbacV1alpha1ClusterRoleList(self):
        """Test IoK8sApiRbacV1alpha1ClusterRoleList"""
        # FIXME: construct object with mandatory attributes with example values
        # model = argo.sdk.models.io_k8s_api_rbac_v1alpha1_cluster_role_list.IoK8sApiRbacV1alpha1ClusterRoleList()  # noqa: E501
        pass
|
[
"[email protected]"
] | |
8651769e811843c8771b34777e0cd3f9f73886cd
|
20674c17d815214bf66b75be686bb8a45c0f5914
|
/version1/382_Linked_List_Random_Note.py
|
b3eb2bb00277f1ab4588a3185e4daf65f981fec9
|
[] |
no_license
|
moontree/leetcode
|
e7b670969fe20785b15aae82996875fd66de1b08
|
f2bf9b13508cd01c8f383789569e55a438f77202
|
refs/heads/master
| 2021-05-20T20:36:45.615420 | 2020-04-02T09:15:26 | 2020-04-02T09:15:26 | 252,408,563 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,799 |
py
|
"""
Given a singly linked list, return a random node's value from the linked list.
Each node must have the same probability of being chosen.
Follow up:
What if the linked list is extremely large and its length is unknown to you?
Could you solve this efficiently without using extra space?
Example:
// Init a singly linked list [1,2,3].
ListNode head = new ListNode(1);
head.next = new ListNode(2);
head.next.next = new ListNode(3);
Solution solution = new Solution(head);
// getRandom() should return either 1, 2, or 3 randomly. Each element should have equal probability of returning.
solution.getRandom();
"""
from list_helper import *
import random
import collections
class Solution(object):
    def __init__(self, head):
        """
        @param head The linked list's head.
        Note that the head is guaranteed to be not null, so it contains at least one node.
        :type head: ListNode

        Eagerly copies every value into ``self.nums`` so getRandom() is O(1)
        per call; also keeps a reference to the head so the streaming
        (reservoir-sampling) variant can re-walk the list.
        """
        # Bug fix: the head was never stored, so _get_random_of_stream()
        # raised AttributeError on self._head.
        self._head = head
        cur = head
        self.nums = []
        while cur:
            self.nums.append(cur.val)
            cur = cur.next

    def getRandom(self):
        """
        Returns a random node's value, each with equal probability.
        :rtype: int
        """
        # randint bounds are inclusive, so this covers every index.
        step = random.randint(0, len(self.nums) - 1)
        return self.nums[step]

    def _get_random_of_stream(self):
        """
        Reservoir-sampling variant: walk the list once, keeping the i-th
        value with probability 1/(i+1).  Uses O(1) extra space, suitable
        when the list is too large to copy.
        :rtype: int
        """
        h = self._head
        if h is None:
            return None
        count = 0
        res = h.val
        while h:
            rv = random.randint(0, count)
            if rv == 0:
                res = h.val
            h = h.next
            count += 1
        return res
# Smoke test: build the list 1 -> 2 -> 3 and sample it a few times.
# (Python 2 syntax: print statement and xrange.)
head = ListNode(1);
head.next = ListNode(2);
head.next.next = ListNode(3);
solution = Solution(head);
for i in xrange(5):
    print solution.getRandom()
|
[
"[email protected]"
] | |
746f538f4f59613057ed9e33923e1a08e11e714b
|
1524720d6480ad0a51b6fd8ff709587455bf4c5d
|
/tums/trunk/lite/nevow/scripts/consolejstest.py
|
0a952bcc2cf4f3a02ccf1aa2154442b69701dc9e
|
[] |
no_license
|
calston/tums
|
2bd6d3cac5232d2ccb7e9becfc649e302a310eab
|
b93e3e957ff1da5b020075574942913c8822d12a
|
refs/heads/master
| 2020-07-12T03:46:43.639800 | 2018-05-12T10:54:54 | 2018-05-12T10:54:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,135 |
py
|
# Copyright (c) 2006 Divmod.
# See LICENSE for details.
"""
Out-of-browser conversion of javascript test modules that use Athena's "//
import" syntax into monolithic scripts suitable for feeding into a plain
javascript interpreter
"""
from sys import argv
from twisted.python.util import sibpath
import nevow, subprocess
_DUMMY_MODULE_NAME = 'ConsoleJSTest'
def getDependencies(fname, ignore=('Divmod.Runtime', 'MochiKit.DOM'),
                    bootstrap=nevow.athena.LivePage.BOOTSTRAP_MODULES,
                    packages=None):
    """
    Get the javascript modules that the code in the file with name C{fname}
    depends on, recursively
    @param fname: javascript source file name
    @type fname: C{str}
    @param ignore: names of javascript modules to exclude from dependency list
    @type ignore: sequence
    @param boostrap: names of javascript modules to always include, regardless
    of explicit dependencies (defaults to L{nevow.athena.LivePage}'s list of
    bootstrap modules)
    @type boostrap: sequence
    @param packages: all javascript packages we know about. defaults to the
    result of L{nevow.athena.allJavascriptPackages}
    @type packages: C{dict}
    @return: modules included by javascript in file named C{fname}
    @rtype: dependency-ordered list of L{nevow.athena.JSModule} instances
    """
    if packages is None:
        packages = nevow.athena.allJavascriptPackages()
    # Register the file under a dummy module name so the dependency
    # machinery can resolve its "// import" directives.
    packages[_DUMMY_MODULE_NAME] = fname
    # TODO if a module is ignored, we should ignore its dependencies
    # Bootstrap modules come first (minus ignored ones), then every
    # dependency of the dummy module that is not bootstrap/ignored/itself.
    return ([nevow.athena.JSModule.getOrCreate(m, packages)
             for m in bootstrap if m not in ignore] +
            [dep for dep in nevow.athena.JSModule(
                _DUMMY_MODULE_NAME, packages).allDependencies()
             if dep.name not in bootstrap
             and dep.name != _DUMMY_MODULE_NAME
             and dep.name not in ignore])
def generateTestScript(fname, after={'Divmod.Base': ('Divmod.Base.addLoadEvent = function() {};',)},
                       dependencies=None):
    """
    Turn the contents of the Athena-style javascript test module in the file
    named C{fname} into a plain javascript script. Recursively includes any
    modules that are depended on, as well as the utility module
    nevow/test/testsupport.js.
    @param fname: javascript source file name
    @type fname: C{str}
    @param after: mapping of javascript module names to sequences of lines of
    javascript source that should be injected into the output immediately
    after the source of the named module is included
    @type after: C{dict}
    @param dependencies: the modules the script depends on. Defaults to the
    result of L{getDependencies}
    @type dependencies: dependency-ordered list of L{nevow.athena.JSModule}
    instances
    @return: converted javascript source text
    @rtype: C{str}
    """
    if dependencies is None:
        dependencies= getDependencies(fname)
    # Emit a spidermonkey `load(...)` call for a javascript file.
    load = lambda fname: 'load(%r);' % (fname,)
    initialized = set()
    js = [load(sibpath(nevow.__file__, 'test/testsupport.js'))]
    for m in dependencies:
        segments = m.name.split('.')
        if segments[-1] == '__init__':
            segments = segments[:-1]
        initname = '.'.join(segments)
        if initname not in initialized:
            initialized.add(initname)
            # Create the namespace object before loading into it; only
            # top-level names need a `var` declaration.
            if '.' in initname:
                prefix = ''
            else:
                prefix = 'var '
            js.append('%s%s = {};' % (prefix, initname))
        js.append(load(m.mapping[m.name]))
        if m.name in after:
            js.extend(after[m.name])
    # Finally append the test module's own source (Python 2 file()).
    js.append(file(fname).read())
    return '\n'.join(js)
def run():
    """
    Read a single filename from the command line arguments, replace any module
    imports with the body of the module in question and pipe the result to the
    spidermonkey javascript interpreter
    """
    # TODO: support more than one filename at a time
    js = generateTestScript(argv[1])
    # Feed the assembled script to smjs on stdin.
    subprocess.Popen('/usr/bin/smjs', stdin=subprocess.PIPE).communicate(js)
|
[
"[email protected]"
] | |
583466431748d71c10e4768b2295e9e980422200
|
10d77a1bca1358738179185081906956faf3963a
|
/venv/Lib/site-packages/django/core/mail/backends/filebased.py
|
f01e1497dbcc6dc15a7cf45416368b8606f613a2
|
[] |
no_license
|
ekansh18/WE_Care_NGO_WEBSITE
|
3eb6b12ae798da26aec75d409b0b92f7accd6c55
|
7c1eaa78d966d13893c38e7157744fbf8f377e71
|
refs/heads/master
| 2023-07-16T07:22:48.920429 | 2021-08-31T04:11:19 | 2021-08-31T04:11:19 | 401,563,669 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,727 |
py
|
"""Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import (
EmailBackend """Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import (
EmailBackend as ConsoleEmailBackend,
)
class EmailBackend(ConsoleEmailBackend):
    """Write each sent batch of messages to a timestamped log file."""
    def __init__(self, *args, file_path=None, **kwargs):
        # Target directory: explicit argument wins over settings.EMAIL_FILE_PATH.
        self._fname = None
        if file_path is not None:
            self.file_path = file_path
        else:
            self.file_path = getattr(settings, 'EMAIL_FILE_PATH', None)
        # Make sure self.file_path is a string.
        if not isinstance(self.file_path, str):
            raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path)
        self.file_path = os.path.abspath(self.file_path)
        try:
            os.makedirs(self.file_path, exist_ok=True)
        except FileExistsError:
            # exist_ok only tolerates an existing *directory*; a file here still raises.
            raise ImproperlyConfigured(
                'Path for saving email messages exists, but is not a directory: %s' % self.file_path
            )
        except OSError as err:
            raise ImproperlyConfigured(
                'Could not create directory for saving email messages: %s (%s)' % (self.file_path, err)
            )
        # Make sure that self.file_path is writable.
        if not os.access(self.file_path, os.W_OK):
            raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path)
        # Finally, call super().
        # Since we're using the console-based backend as a base,
        # force the stream to be None, so we don't default to stdout
        kwargs['stream'] = None
        super().__init__(*args, **kwargs)
    def write_message(self, message):
        # Serialize the message as bytes and follow it with a separator line.
        self.stream.write(message.message().as_bytes() + b'\n')
        self.stream.write(b'-' * 79)
        self.stream.write(b'\n')
    def _get_filename(self):
        """Return a unique file name."""
        # Timestamp plus the backend instance's id keeps names unique per run.
        if self._fname is None:
            timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            fname = "%s-%s.log" % (timestamp, abs(id(self)))
            self._fname = os.path.join(self.file_path, fname)
        return self._fname
    def open(self):
        # Open the log file lazily; report whether this call opened it.
        if self.stream is None:
            self.stream = open(self._get_filename(), 'ab')
            return True
        return False
    def close(self):
        # Always drop the reference, even if close() raises.
        try:
            if self.stream is not None:
                self.stream.close()
        finally:
            self.stream = None
|
[
"[email protected]"
] | |
2466113f0da79b2244862448de9eb3746c0d33d1
|
8fd255fc3498ec970d7202d3f70a671b7aa4c64b
|
/pmsal/blog/views.py
|
d5b704545317b790379154f070dad0ca73a0eb84
|
[
"MIT"
] |
permissive
|
klebercode/pmsal
|
afda05fe29bb67db70fc7dcb8dfc577f4a3f0c9c
|
d78477f7cd1a5d1ed9973e13be5758c71a2ce2db
|
refs/heads/master
| 2016-09-06T05:42:34.468341 | 2015-04-04T17:24:07 | 2015-04-04T17:24:07 | 33,409,667 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,212 |
py
|
# coding: utf-8
from django.db.models import Q
from django.views import generic
from django.views.generic.dates import (YearArchiveView, MonthArchiveView,
DayArchiveView)
from pmsal.context_processors import EnterpriseExtraContext
from pmsal.blog.models import Entry
from pmsal.core.models import Category
class EntryYearArchiveView(YearArchiveView):
    """Yearly archive of published blog entries."""
    queryset = Entry.published.all()
    date_field = 'created'
    make_object_list = True
    allow_future = True
    # TODO: change the pagination
    paginate_by = 10
class EntryMonthArchiveView(MonthArchiveView):
    """Monthly archive of published blog entries."""
    queryset = Entry.published.all()
    date_field = 'created'
    make_object_list = True
    allow_future = True
class EntryDayArchiveView(DayArchiveView):
    """Daily archive of published blog entries."""
    queryset = Entry.published.all()
    date_field = 'created'
    make_object_list = True
    allow_future = True
class EntryListView(EnterpriseExtraContext, generic.ListView):
    """Home listing of published entries, with optional text search."""
    # model = Entry
    queryset = Entry.published.all()
    template_name = 'blog/entry_home.html'
    # TODO: change the pagination
    paginate_by = 6
    def get_queryset(self, **kwargs):
        # Filter by the ?search= querystring against title, creation date
        # and body; fall back to all published entries.
        search = self.request.GET.get('search', '')
        if search:
            obj_lst = Entry.published.filter(Q(title__icontains=search) |
                                             Q(created__icontains=search) |
                                             Q(body__icontains=search))
        else:
            obj_lst = Entry.published.all()
        return obj_lst
    def get_context_data(self, **kwargs):
        # Expose the search term, the tag cloud and a random sample of
        # categories to the template.
        context = super(EntryListView, self).get_context_data(**kwargs)
        search = self.request.GET.get('search', '')
        context['search'] = search
        context['tag_list'] = Entry.tags.most_common()
        # TODO: change how the categories are loaded
        context['category_list'] = Category.objects.filter(area=3
                                                           ).order_by('?')[:10]
        return context
class EntryDateDetailView(EnterpriseExtraContext, generic.DateDetailView):
    """Detail page for a single entry addressed by its creation date."""
    # model = Entry
    queryset = Entry.published.all()
    date_field = 'created'
    make_object_list = True
    allow_future = True
    def get_context_data(self, **kwargs):
        # Add the tag cloud and a random sample of categories for the sidebar.
        context = super(EntryDateDetailView, self).get_context_data(**kwargs)
        context['tag_list'] = Entry.tags.most_common()
        # TODO: change how the categories are loaded
        context['category_list'] = Category.objects.all().order_by('?')[:10]
        return context
class EntryTagListView(EntryListView):
    """
    Inherits from EntryListView, changing the filter to the selected tag.
    """
    def get_queryset(self):
        """
        Include only the Entries tagged with the selected tag.
        """
        return Entry.published.filter(tags__slug=self.kwargs['tag_slug'])
class EntryCategoryListView(EntryListView):
    """
    Inherits from EntryListView, changing the filter to the selected category.
    """
    def get_queryset(self, **kwargs):
        """
        Include only the Entries marked with the selected category.
        """
        return Entry.published.filter(categories__slug=self.kwargs['cat_slug'])
|
[
"[email protected]"
] | |
1f282037ba707bdcb0c2fbd47ed08bb8e0e60104
|
5aa14c620a383d8429c144e5af46b0322c674439
|
/tests/python/Lut1DTransformTest.py
|
0b6073a7cd51dad23173cb33a42118d333820dbb
|
[
"BSD-3-Clause",
"CC-BY-4.0"
] |
permissive
|
asdlei99/OpenColorIO
|
ae421f6c14870ffe735c73107b76f6746bd563ee
|
9b23e9623792d8cc6e6c1dfd5394335ee148bcf3
|
refs/heads/master
| 2023-03-13T16:14:19.693576 | 2021-03-03T03:11:10 | 2021-03-03T03:11:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,698 |
py
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import logging
import unittest
logger = logging.getLogger(__name__)
try:
import numpy as np
except ImportError:
logger.warning(
"NumPy could not be imported. "
"Test case will lack significant coverage!"
)
np = None
import PyOpenColorIO as OCIO
class Lut1DTransformTest(unittest.TestCase):
    """Unit tests for the OCIO.Lut1DTransform Python bindings."""
    def test_default_constructor(self):
        """
        Test the default constructor.
        """
        lut = OCIO.Lut1DTransform()
        self.assertEqual(lut.getLength(), 2)
        self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_FORWARD)
        self.assertEqual(lut.getHueAdjust(), OCIO.HUE_NONE)
        self.assertFalse(lut.getInputHalfDomain())
        self.assertFalse(lut.getOutputRawHalfs())
        self.assertEqual(lut.getInterpolation(), OCIO.INTERP_DEFAULT)
        self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN)
        # The default 2-entry LUT is the identity: 0 -> 0, 1 -> 1.
        r, g, b = lut.getValue(0)
        self.assertEqual([r, g, b], [0, 0, 0])
        r, g, b = lut.getValue(1)
        self.assertEqual([r, g, b], [1, 1, 1])
    def test_direction(self):
        """
        Test the setDirection() and getDirection() methods.
        """
        lut = OCIO.Lut1DTransform()
        for direction in OCIO.TransformDirection.__members__.values():
            lut.setDirection(direction)
            self.assertEqual(lut.getDirection(), direction)
        # Wrong type tests.
        for invalid in (None, 1, 'test'):
            with self.assertRaises(TypeError):
                lut.setDirection(invalid)
    def test_format_metadata(self):
        """
        Test the getFormatMetadata() method.
        """
        lut = OCIO.Lut1DTransform()
        format_metadata = lut.getFormatMetadata()
        self.assertIsInstance(format_metadata, OCIO.FormatMetadata)
        self.assertEqual(format_metadata.getElementName(), 'ROOT')
        self.assertEqual(format_metadata.getName(), '')
        self.assertEqual(format_metadata.getID(), '')
        format_metadata.setName('name')
        format_metadata.setID('id')
        self.assertEqual(format_metadata.getName(), 'name')
        self.assertEqual(format_metadata.getID(), 'id')
    def test_file_output_bit_depth(self):
        """
        Test get/setFileOutputBitDepth.
        """
        lut = OCIO.Lut1DTransform()
        self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN)
        lut.setFileOutputBitDepth(OCIO.BIT_DEPTH_UINT10)
        self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10)
    def test_hue_adjust(self):
        """
        Test get/setHueAdjust.
        """
        lut = OCIO.Lut1DTransform()
        self.assertEqual(lut.getHueAdjust(), OCIO.HUE_NONE)
        lut.setHueAdjust(OCIO.HUE_DW3)
        self.assertEqual(lut.getHueAdjust(), OCIO.HUE_DW3)
        with self.assertRaises(OCIO.Exception):
            lut.setHueAdjust(OCIO.HUE_WYPN)
    def test_input_half_domain(self):
        """
        Test get/getInputHalfDomain.
        """
        lut = OCIO.Lut1DTransform()
        self.assertFalse(lut.getInputHalfDomain())
        lut.setInputHalfDomain(True)
        self.assertTrue(lut.getInputHalfDomain())
    def test_output_raw_halfs(self):
        """
        Test get/setOutputRawHalfs.
        """
        lut = OCIO.Lut1DTransform()
        self.assertFalse(lut.getOutputRawHalfs())
        lut.setOutputRawHalfs(True)
        self.assertTrue(lut.getOutputRawHalfs())
    def test_length(self):
        """
        Test get/setLength.
        """
        lut = OCIO.Lut1DTransform()
        self.assertEqual(lut.getLength(), 2)
        lut.setValue(0, 0.1, 0.2, 0.3)
        lut.setLength(3)
        self.assertEqual(lut.getLength(), 3)
        # Changing the length reset LUT values to identity.
        r, g, b = lut.getValue(0)
        self.assertEqual([r, g, b], [0, 0, 0])
    def test_constructor_with_keywords(self):
        """
        Test Lut1DTransform constructor with keywords and validate its values.
        """
        lut = OCIO.Lut1DTransform(
            length=65536,
            inputHalfDomain=True,
            outputRawHalfs=True,
            fileOutputBitDepth=OCIO.BIT_DEPTH_UINT10,
            hueAdjust=OCIO.HUE_DW3,
            interpolation=OCIO.INTERP_BEST,
            direction=OCIO.TRANSFORM_DIR_INVERSE)
        self.assertEqual(lut.getLength(), 65536)
        self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE)
        self.assertEqual(lut.getHueAdjust(), OCIO.HUE_DW3)
        self.assertTrue(lut.getInputHalfDomain())
        self.assertTrue(lut.getOutputRawHalfs())
        self.assertEqual(lut.getInterpolation(), OCIO.INTERP_BEST)
        self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10)
        # Omitted keywords fall back to the documented defaults.
        lut = OCIO.Lut1DTransform(
            length=4,
            direction=OCIO.TRANSFORM_DIR_INVERSE)
        self.assertEqual(lut.getLength(), 4)
        self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE)
        self.assertEqual(lut.getHueAdjust(), OCIO.HUE_NONE)
        self.assertFalse(lut.getInputHalfDomain())
        self.assertFalse(lut.getOutputRawHalfs())
        self.assertEqual(lut.getInterpolation(), OCIO.INTERP_DEFAULT)
        self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN)
    def test_constructor_with_positional(self):
        """
        Test Lut1DTransform constructor without keywords and validate its values.
        """
        lut = OCIO.Lut1DTransform(65536, True, True, OCIO.BIT_DEPTH_UINT10,
                                  OCIO.HUE_DW3, OCIO.INTERP_BEST,
                                  OCIO.TRANSFORM_DIR_INVERSE)
        self.assertEqual(lut.getLength(), 65536)
        self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE)
        self.assertEqual(lut.getHueAdjust(), OCIO.HUE_DW3)
        self.assertTrue(lut.getInputHalfDomain())
        self.assertTrue(lut.getOutputRawHalfs())
        self.assertEqual(lut.getInterpolation(), OCIO.INTERP_BEST)
        self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10)
    def test_array(self):
        """
        Get & set Lut array values.
        """
        lut = OCIO.Lut1DTransform(length=3)
        r, g, b = lut.getValue(0)
        self.assertEqual([r, g, b], [0, 0, 0])
        r, g, b = lut.getValue(1)
        self.assertEqual([r, g, b], [0.5, 0.5, 0.5])
        r, g, b = lut.getValue(2)
        self.assertEqual([r, g, b], [1, 1, 1])
        lut.setValue(0, 0.1, 0.2, 0.3)
        r, g, b = lut.getValue(0)
        # Values are stored as float.
        self.assertAlmostEqual(r, 0.1, delta=1e-6)
        self.assertAlmostEqual(g, 0.2, delta=1e-6)
        self.assertAlmostEqual(b, 0.3, delta=1e-6)
        if not np:
            logger.warning("NumPy not found. Skipping part of test!")
            return
        # getData()/setData() expose the LUT as a flat RGB float array.
        data = lut.getData()
        expected = np.array([0.1, 0.2, 0.3,
                             0.5, 0.5, 0.5,
                             1., 1., 1.]).astype(np.float32)
        self.assertEqual(data.all(), expected.all())
        data[6] = 0.9
        data[7] = 1.1
        data[8] = 1.2
        lut.setData(data)
        r, g, b = lut.getValue(2)
        self.assertAlmostEqual(r, 0.9, delta=1e-6)
        self.assertAlmostEqual(g, 1.1, delta=1e-6)
        self.assertAlmostEqual(b, 1.2, delta=1e-6)
    def test_equals(self):
        """
        Test equals.
        """
        lut = OCIO.Lut1DTransform()
        lut2 = OCIO.Lut1DTransform()
        self.assertTrue(lut.equals(lut2))
        lut.setValue(0, 0.1, 0.2, 0.3)
        self.assertFalse(lut.equals(lut2))
|
[
"[email protected]"
] | |
b77167d258ce02e04bdda1ea6a83707259bbe0f7
|
80e701c5b9c03ef288848d8b368360e0940d9b67
|
/sleyeball/files.py
|
51db5f0a7dda0c4166dceea14a6d3bc400d4b819
|
[] |
no_license
|
esheldon/sleyeball
|
a4917300b041747e0600186f0e596c6d83a95ff4
|
9eee500119d2bc07c942350a67c8777257e92a3d
|
refs/heads/master
| 2020-06-20T05:22:59.921610 | 2019-07-17T18:38:29 | 2019-07-17T18:38:29 | 197,008,586 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,756 |
py
|
import os
def get_base_dir():
    """
    Root directory for all sleyeball data, read from the SLDIR
    environment variable (raises KeyError when unset).
    """
    base_dir = os.environ['SLDIR']
    return base_dir
def get_cand_dir():
    """
    Directory under the base dir where candidate lists are kept.
    """
    base = get_base_dir()
    return os.path.join(base, 'candidates')
def get_cand_file_orig():
    """
    Original (uncleaned) candidate file holding paths to coadds.
    """
    return os.path.join(get_cand_dir(), 'z4ErinSheldon.fits')
def get_cand_file():
    """
    Cleaned candidate file holding paths to coadds.
    """
    return os.path.join(get_cand_dir(), 'z4ErinSheldon-clean.fits')
def get_badreg_dir():
    """
    Directory under the base dir where bad-region masks are kept.
    """
    base = get_base_dir()
    return os.path.join(base, 'badregions')
def get_badreg_file():
    """
    Foreground/bad-region mask file.
    """
    return os.path.join(get_badreg_dir(), 'y3a2_foreground_mask_v2.1.fits.gz')
def get_stamp_dir(tilename):
    """
    Per-tile directory for stamp images and temp files.
    """
    base = get_base_dir()
    return os.path.join(base, 'stamps', tilename)
def get_temp_dir():
    """
    Scratch directory for temp files, read from TMPDIR (raises KeyError
    when unset).
    """
    tmpdir = os.environ['TMPDIR']
    return tmpdir
def get_stamp_file(tilename, number):
    """
    Full path of one stamp jpg for the given tile and stamp number.
    """
    fname = '%s-%06d.jpg' % (tilename, number)
    return os.path.join(get_stamp_dir(tilename), fname)
#
# batch processing
#
def get_script_dir():
    """
    Directory under the base dir where batch scripts are kept.
    """
    base = get_base_dir()
    return os.path.join(base, 'scripts')
def get_script_file(tilename):
    """
    Path of the shell script for one tile.
    """
    return os.path.join(get_script_dir(), '%s.sh' % tilename)
def get_wq_file(tilename, missing=False):
    """
    Path of the wq yaml submit file for one tile.

    NOTE(review): the ``missing`` flag is currently ignored — presumably
    it was meant to select a "missing" variant of the file name; confirm
    the intent before relying on it.
    """
    script_dir = get_script_dir()
    return os.path.join(script_dir, '%s.yaml' % tilename)
|
[
"[email protected]"
] | |
b636530dff51028fbe0a7fbb20a468126863860f
|
3b9d763180410bf0abf5b9c37391a64319efe839
|
/toontown/building/DistributedHQInteriorAI.py
|
d7c12fec12fec9b0432c0e998ae1a9b352eb808e
|
[] |
no_license
|
qphoton/Reverse_Engineering_Project_ToonTown
|
442f15d484324be749f6f0e5e4e74fc6436e4e30
|
11468ab449060169191366bc14ff8113ee3beffb
|
refs/heads/master
| 2021-05-08T00:07:09.720166 | 2017-10-21T02:37:22 | 2017-10-21T02:37:22 | 107,617,661 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 938 |
py
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
import cPickle
class DistributedHQInteriorAI(DistributedObjectAI):
    """
    AI-side distributed object for an HQ interior.

    Holds the zone/block the interior lives in, the pickled leaderboard
    payload sent to clients, and whether this HQ belongs to the tutorial.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory("DistributedHQInteriorAI")
    def __init__(self, air):
        DistributedObjectAI.__init__(self, air)
        self.isTutorial = False
        self.zoneId = 0
        self.block = 0
        # Leaderboard wire format: three parallel lists, pickled.
        self.leaderData = cPickle.dumps(([], [], []))
    def setZoneIdAndBlock(self, zoneId, block):
        self.zoneId = zoneId
        self.block = block
    def setLeaderBoard(self, leaderData):
        # leaderData is the already-pickled payload, stored verbatim.
        self.leaderData = leaderData
    def setTutorial(self, isTutorial):
        # BUG FIX: the original setter ignored its argument and always
        # stored False, so getTutorial() could never report True.
        self.isTutorial = isTutorial
    def getZoneIdAndBlock(self):
        return (self.zoneId, self.block)
    def getLeaderBoard(self):
        return self.leaderData
    def getTutorial(self):
        return self.isTutorial
|
[
"[email protected]"
] | |
07f7480b8204fdcc16a56564512124c02be477e2
|
f3050b7f84e584dcde54ca1690944bfccc6f5d9c
|
/demo/other/demo_fomat.py
|
767ea789d7dee62f3161cff0034d57438ab22024
|
[] |
no_license
|
azhenglianxi/api_Project
|
0c8444c2bad7464fd57911be4fdcd131a63c46b2
|
2ae87b87e41f522d4ef20f63bad6adcaec1f9874
|
refs/heads/master
| 2020-09-14T12:08:07.080748 | 2019-12-12T09:08:22 | 2019-12-12T09:08:22 | 223,124,370 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 801 |
py
|
name="tom"
age=20
#1. 输出:你好,tom先生,今晚吃鸡!
print(f"你好,{name}先生,今晚吃鸡!")
print("你好,{}先生,今晚吃鸡!".format(name))
#2. 输出:你好,tom先生,今晚{吃鸡}!
print(f"你好,{name}先生,今晚{{吃鸡}}!")
print("你好,{}先生,今晚{{吃鸡}}!".format(name))
#3. 输出:你好,{tom}先生,今晚吃鸡!
print(f"你好,{{{name}}}先生,今晚吃鸡!")
print("你好,{{{}}}先生,今晚吃鸡!".format(name))
print("姓名和年龄分别是:{}、{}".format(name, age)) # 不带编号,顺序填坑
print("姓名和年龄分别是:{1}、{0}".format(age, name)) # 带数字编号、可以变换顺序
print("姓名和年龄分别是:{x}、{y}".format(x='小明', y=age)) # 带关键字
|
[
"[email protected]"
] | |
0821f14666c075ca5ef4644670d667a41ce5450f
|
ce6c8e0e3a986af3fe3c347a4af16f1ca337f82c
|
/630.course-schedule-iii.py
|
c6f16e25c08edfd0eebd9959c9ace96be3683d8b
|
[] |
no_license
|
chenjienan/python-leetcode
|
dc098373ae7f73dd502d7747888a37a3bd0820cb
|
90c000c3be70727cde4f7494fbbb1c425bfd3da4
|
refs/heads/master
| 2020-04-28T20:46:50.395260 | 2020-02-12T18:48:01 | 2020-02-12T18:48:01 | 175,556,339 | 16 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 656 |
py
|
#
# @lc app=leetcode id=630 lang=python3
#
# [630] Course Schedule III
#
import heapq
class Solution:
    def scheduleCourse(self, courses):
        """
        LeetCode 630: maximum number of courses that can be completed.

        Each course is ``[duration, lastDay]``.  Greedy with a max-heap:
        take courses in deadline order; when one does not fit, drop the
        longest course taken so far if that shortens the schedule.

        BUG FIX: the original annotated ``courses: List[List[int]]``
        without importing ``List``, which raises NameError at import time
        outside LeetCode's harness; the type is documented here instead.

        :type courses: List[List[int]]
        :rtype: int
        """
        # Earliest deadline first (in-place, as before).
        courses.sort(key=lambda c: c[1])
        day = 0      # total time spent on the courses currently kept
        taken = []   # max-heap of taken durations (stored negated)
        for duration, last_day in courses:
            if day + duration <= last_day:
                # Fits as-is: take it.
                day += duration
                heapq.heappush(taken, -duration)
            elif taken and -taken[0] > duration:
                # Swap out the longest course taken so far; the net
                # schedule length strictly decreases, count is unchanged.
                day += duration + heapq.heappop(taken)
                heapq.heappush(taken, -duration)
        return len(taken)
|
[
"[email protected]"
] | |
cfe183779f01a5fbe1379f11d1cc62902be02994
|
9079a555d1fd22ad9701227c58151ae1ca3595d3
|
/CSES/1097.py
|
85e2d0f2b48353028d34faa5b5a999c8a74da857
|
[] |
no_license
|
DebRC/My-Competitve-Programming-Solutions
|
c2a03b18f15cebd3793ce1c288dbb51fc0a33ef4
|
fe956eed619a21bd24a5fd647791d4c56cd1b021
|
refs/heads/main
| 2023-02-04T08:28:13.915967 | 2020-12-28T09:11:29 | 2020-12-28T09:11:29 | 324,591,343 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 402 |
py
|
def removal_game_tab(a, n):
    """
    Interval DP for the CSES removal game: dp[i][j] is the best score
    difference (current player minus opponent) achievable on a[i..j]
    when both players pick optimally from either end.
    """
    dp = [[0] * n for _ in range(n)]
    for i in reversed(range(n)):
        dp[i][i] = a[i]
        for j in range(i + 1, n):
            # Take the left end (a[i]) or the right end (a[j]); the
            # opponent then plays optimally on the remaining interval.
            dp[i][j] = max(a[i] - dp[i + 1][j], a[j] - dp[i][j - 1])
    return dp[0][n - 1]
# Read n and the pile, then print the first player's optimal total:
# (sum + optimal score difference) // 2.
n = int(input())
a = list(map(int, input().split()))
print((sum(a)+removal_game_tab(a,n))//2)
|
[
"[email protected]"
] | |
7baaeaed32956cf32db8273ce882ac55fbcf7d77
|
5695d365852a5b9bc4f8092c8aba139530def229
|
/hs_collection_resource/migrations/0002_collectiondeletedresource_resource_owners.py
|
d6a80fde974db5778255ad0469fa26b6f89dd634
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
heliumdatacommons/commonsshare
|
6863705e71be2fb9ef4a822e391d60cfcbc82a44
|
4336dc337ca2b36c2d0a0a7ea793af624c1356c7
|
refs/heads/develop
| 2021-05-11T14:12:53.511860 | 2020-04-15T20:48:38 | 2020-04-15T20:48:38 | 117,697,775 | 2 | 4 |
BSD-3-Clause
| 2020-03-31T14:08:15 | 2018-01-16T14:58:04 |
Python
|
UTF-8
|
Python
| false | false | 586 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Add the many-to-many ``resource_owners`` field (to the user model)
    on CollectionDeletedResource."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('hs_collection_resource', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='collectiondeletedresource',
            name='resource_owners',
            field=models.ManyToManyField(related_name='collectionDeleted', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"[email protected]"
] | |
e08959efc568fd56daefcf8ab0405bd7db16d4b2
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayOpenMiniTipsDeliveryCreateResponse.py
|
0f0bc4813082df45cd6c1d04d87d1319c5c3daad
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 754 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenMiniTipsDeliveryCreateResponse(AlipayResponse):
    """Response wrapper for the alipay.open.mini.tips.delivery.create API;
    exposes the single ``delivery_id`` field returned by the gateway."""
    def __init__(self):
        super(AlipayOpenMiniTipsDeliveryCreateResponse, self).__init__()
        self._delivery_id = None  # populated by parse_response_content
    @property
    def delivery_id(self):
        # Identifier of the delivery task created by the gateway.
        return self._delivery_id
    @delivery_id.setter
    def delivery_id(self, value):
        self._delivery_id = value
    def parse_response_content(self, response_content):
        # Let the base class parse the response envelope, then pull out
        # our field if the gateway included it.
        response = super(AlipayOpenMiniTipsDeliveryCreateResponse, self).parse_response_content(response_content)
        if 'delivery_id' in response:
            self.delivery_id = response['delivery_id']
|
[
"[email protected]"
] | |
c60c7909ee17189186d37d45b7eda97c4c7d3bf0
|
bc441bb06b8948288f110af63feda4e798f30225
|
/resource_manage_sdk/api/cmdb_approve/get_history_approver_list_pb2.pyi
|
abd98e65926c1c34c66d48ba053cdace0455c688
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,398 |
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from google.protobuf.struct_pb2 import (
Struct as google___protobuf___struct_pb2___Struct,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class GetHistoryApproverListRequest(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
@property
def query(self) -> google___protobuf___struct_pb2___Struct: ...
def __init__(self,
*,
query : typing___Optional[google___protobuf___struct_pb2___Struct] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetHistoryApproverListRequest: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetHistoryApproverListRequest: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"query",b"query"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"query",b"query"]) -> None: ...
class GetHistoryApproverListResponse(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
userList = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
def __init__(self,
*,
userList : typing___Optional[typing___Iterable[typing___Text]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetHistoryApproverListResponse: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetHistoryApproverListResponse: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"userList",b"userList"]) -> None: ...
class GetHistoryApproverListResponseWrapper(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
code = ... # type: builtin___int
codeExplain = ... # type: typing___Text
error = ... # type: typing___Text
@property
def data(self) -> GetHistoryApproverListResponse: ...
def __init__(self,
*,
code : typing___Optional[builtin___int] = None,
codeExplain : typing___Optional[typing___Text] = None,
error : typing___Optional[typing___Text] = None,
data : typing___Optional[GetHistoryApproverListResponse] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetHistoryApproverListResponseWrapper: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetHistoryApproverListResponseWrapper: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"code",b"code",u"codeExplain",b"codeExplain",u"data",b"data",u"error",b"error"]) -> None: ...
|
[
"[email protected]"
] | |
0d2af27c7b63e8f21fc7c713d6004cfdb8063ea9
|
820a8e7ec541299f315ac43ddb3b41236e11cd33
|
/demo/streaming/message_based_client.py
|
8bba3e3493dd7f6aadd1d443706b2ee614e2f6f3
|
[
"Apache-2.0"
] |
permissive
|
hpsaturn/Autobahn
|
5caba163ee976e8ddedadfb1a79139ba6014861b
|
f7bd44433f227130901440e768073e2afbf410bf
|
refs/heads/master
| 2021-01-17T22:09:02.484645 | 2011-11-01T18:27:57 | 2011-11-01T18:27:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,904 |
py
|
###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from ranstring import randomByteString
from twisted.internet import reactor
from autobahn.websocket import WebSocketClientFactory, WebSocketClientProtocol
MESSAGE_SIZE = 1 * 2**20
class MessageBasedHashClientProtocol(WebSocketClientProtocol):
    """
    Message-based WebSockets client that generates stream of random octets
    sent to WebSockets server as a sequence of messages. The server will
    respond to us with the SHA-256 computed over each message. When
    we receive response, we repeat by sending a new message.
    """
    def sendOneMessage(self):
        # One fixed-size (MESSAGE_SIZE bytes) random binary message.
        data = randomByteString(MESSAGE_SIZE)
        self.sendMessage(data, binary = True)
    def onOpen(self):
        # Connection is up: start the send/receive ping-pong.
        self.count = 0
        self.sendOneMessage()
    def onMessage(self, message, binary):
        # The server replies with the digest of our previous message;
        # log it and immediately send the next one.
        print "Digest for message %d computed by server: %s" % (self.count, message)
        self.count += 1
        self.sendOneMessage()
if __name__ == '__main__':
    # Connect the demo client to a local hash server and run the
    # Twisted reactor until interrupted.
    factory = WebSocketClientFactory()
    factory.protocol = MessageBasedHashClientProtocol
    reactor.connectTCP("localhost", 9000, factory)
    reactor.run()
|
[
"[email protected]"
] | |
a8a95539dac6b0b456a25ccbafca9321dd5c8b20
|
5e8832e7a49e121c4db1f57d036fe39b4250246a
|
/347_top_k_frequent_elements.py
|
f3f54cdd069c2acf438ed7c5694b526627821a0d
|
[] |
no_license
|
shaniavina/Leetcode_Python
|
9e80477794cd80e00a399d65b76088eea41d80d1
|
185bf1542265f5f4feca2e937d1d36a7bb4a5d2b
|
refs/heads/master
| 2022-10-12T10:56:23.476219 | 2022-09-21T01:53:40 | 2022-09-21T01:53:40 | 52,979,850 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 506 |
py
|
import collections
class Solution(object):
    def topKFrequent(self, nums, k):
        """
        Return k elements of nums with the highest frequency, found by
        bucketing values by occurrence count (bucket sort by frequency).

        :type nums: List[int]
        :type k: int
        :rtype: List[int]
        """
        # Map each occurrence count to the values occurring that often.
        buckets = collections.defaultdict(list)
        for value, count in collections.Counter(nums).items():
            buckets[count].append(value)
        # Walk counts from the highest possible (len(nums)) down to 0,
        # collecting values until we have at least k.
        top = []
        for count in range(len(nums), -1, -1):
            top.extend(buckets[count])
            if len(top) >= k:
                break
        return top[:k]
|
[
"[email protected]"
] | |
ddf31aa0247b5bd2963cdb3c8159a26bb33c77e0
|
fe039f62337b210061bfd7291000c5fa406fd0ff
|
/list/webapp/models.py
|
4a9bf3ad037982d3daaeb33bc2a410482cb276bf
|
[] |
no_license
|
Erlan1998/python_group_7_homework_45_Erlan_Kurbanaliev
|
a5f5956490d778341e4958fe6740ab6e1a395f45
|
4f860b561f046413bbc9ab8f587b8f7c40b8c23a
|
refs/heads/main
| 2023-05-07T00:16:28.530637 | 2021-03-04T12:32:36 | 2021-03-04T12:32:36 | 342,240,837 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 692 |
py
|
from django.db import models
status_choices = [('new', 'Новая'), ('in_progress', 'В процессе'), ('done', 'Сделано')]
class List(models.Model):
    """A to-do task: short description, optional details, and a workflow
    status drawn from ``status_choices``."""
    description = models.TextField(max_length=200, null=False, blank=False)
    detailed_description = models.TextField(max_length=3000, null=True, blank=True)
    # One of the status_choices keys: 'new', 'in_progress' or 'done'.
    status = models.CharField(max_length=120, null=False, blank=False, choices=status_choices)
    updated_at = models.DateField(null=True, blank=True)
    class Meta:
        db_table = 'Lists'
        verbose_name = 'Задача'
        verbose_name_plural = 'Задачи'
    def __str__(self):
        return f'{self.id}. {self.status}: {self.description}'
|
[
"[email protected]"
] | |
f53ac3f6c538688800be418ff966c4e0919f43ec
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_248/ch81_2020_04_12_22_18_46_334181.py
|
8e1cf83d9390b88d1079f2c7a2e6970a6b74812b
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 142 |
py
|
def interseccao_valores(dic1, dic2):
    """
    Return a list of the values of ``dic1`` that also appear among the
    values of ``dic2`` (the intersection of the two dicts' values).

    BUG FIX: the original referenced ``dic1.values`` without calling it,
    used an undefined name ``v1`` (NameError at runtime) and compared
    whole value collections instead of individual values.
    """
    valores2 = list(dic2.values())
    # List (not set) membership keeps unhashable values working too.
    return [v for v in dic1.values() if v in valores2]
|
[
"[email protected]"
] | |
505e5e0ce0cb191a5ec404f1e81be10da0578bf5
|
268d9c21243e12609462ebbd6bf6859d981d2356
|
/Python/python_stack/Django/Dojo_ninjas/main/apps/dojo_ninjas/migrations/0002_dojo_desc.py
|
58a3322cbefd8d01f3ac70e8cbe91f35e5cc03d2
|
[] |
no_license
|
dkang417/cdj
|
f840962c3fa8e14146588eeb49ce7dbd08b8ff4c
|
9966b04af1ac8a799421d97a9231bf0a0a0d8745
|
refs/heads/master
| 2020-03-10T03:29:05.053821 | 2018-05-23T02:02:07 | 2018-05-23T02:02:07 | 129,166,089 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 432 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-05-08 14:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``desc`` text field to the Dojo model."""
    dependencies = [
        ('dojo_ninjas', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='dojo',
            name='desc',
            field=models.TextField(null=True),
        ),
    ]
|
[
"[email protected]"
] | |
ef52298f256957366a62065c6bbda48bbbfa0568
|
8efd8bcd3945d88370f6203e92b0376ca6b41c87
|
/problems100_200/131_Palindrome_Partitioning.py
|
4fd4acc10c6135bdd9be20744a848feda4634b56
|
[] |
no_license
|
Provinm/leetcode_archive
|
732ad1ef5dcdfdde6dd5a33522e86f7e24ae2db5
|
3e72dcaa579f4ae6f587898dd316fce8189b3d6a
|
refs/heads/master
| 2021-09-21T08:03:31.427465 | 2018-08-22T15:58:30 | 2018-08-22T15:58:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 834 |
py
|
#coding=utf-8
'''
131. Palindrome Partitioning
Given a string s, partition s such that every substring of the partition is a palindrome.
Return all possible palindrome partitioning of s.
For example, given s = "aab",
Return
[
["aa","b"],
["a","a","b"]
]
'''
class Solution:
    def partition(self, s):
        """
        Return every partition of s into palindromic substrings, via
        depth-first enumeration of palindromic prefixes.

        :type s: str
        :rtype: List[List[str]]
        """
        # Base case: the empty string has one partition with no pieces.
        if not s:
            return [[]]
        partitions = []
        # Try every palindromic prefix, then recurse on the remainder.
        for end in range(1, len(s) + 1):
            prefix = s[:end]
            if prefix == prefix[::-1]:
                for tail in self.partition(s[end:]):
                    partitions.append([prefix] + tail)
        return partitions
# Demo: print all palindrome partitions of "aab".
s = Solution()
r = s.partition("aab")
print(r)
## Depth-first (backtracking) algorithm.
|
[
"[email protected]"
] | |
22082fac0984c7728a7ac71f5666b9a60a1c7171
|
15cace5f904c5c2389ca3cc02b5ff1fc029c7651
|
/parsing/management/commands/scraper/test.py
|
cc3b4a7431673dd5f8c4b261fd80953be86ccffa
|
[] |
no_license
|
ghostnoop/django-youtube-parser-asyncio
|
fb7146e788dfe5986ad31a45a5d5b1da918583c6
|
631bc4ddc0eed0407f09a810c334a0e9d8d0ed7a
|
refs/heads/main
| 2023-03-26T12:57:32.248097 | 2021-03-25T11:02:54 | 2021-03-25T11:02:54 | 341,303,844 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 178 |
py
|
# Split a 100-element list into four consecutive quarters a, b, c, d.
main_list = list(range(100))
size = len(main_list) // 4
a = main_list[:size]
b = main_list[size:2 * size]
c = main_list[2 * size:3 * size]
d = main_list[3 * size:]
|
[
"[email protected]"
] | |
fbf8ce4a8f1a8fa531b08275055edceb9aa982a6
|
bad44a92fb338260f9c077689d7fa5472526c3fe
|
/src/python/nnfusion/jit.py
|
6fd2745e160f063b2ff9cf6c47e345239698423f
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
microsoft/nnfusion
|
ebc4c06331b8e93dbf5e176e5ecd3382e322ff21
|
bd4f6feed217a43c9ee9be16f02fa8529953579a
|
refs/heads/main
| 2023-08-25T17:41:37.517769 | 2022-09-16T05:59:01 | 2022-09-16T05:59:01 | 252,069,995 | 872 | 157 |
MIT
| 2023-07-19T03:06:21 | 2020-04-01T04:15:38 |
C++
|
UTF-8
|
Python
| false | false | 6,923 |
py
|
import copy
import functools
from inspect import isfunction, ismethod, isclass
import torch
from .jit_utils import TorchModule, get_signature
from .runtime import NNFusionRT
from .config import Config
def is_method_of_instance(obj, cls):
    """True when *obj* is a bound method whose receiver is a *cls* instance."""
    if not ismethod(obj):
        return False
    return isinstance(obj.__self__, cls)
def is_subclass_of_cls(obj, cls):
    """True when *obj* is itself a class and subclasses *cls*."""
    if not isclass(obj):
        return False
    return issubclass(obj, cls)
def get_nrt_forward(obj, signature, config, outputs, *inputs,
                    is_method=False):
    """
    Compile *obj* with NNFusionRT and return a replacement forward
    function that executes through the compiled runtime.

    obj -- the torch.nn.Module to compile (asserted below).
    signature -- string key identifying this compilation unit.
    config -- nnfusion compilation config, forwarded to NNFusionRT.
    outputs -- sample output (tensor or list of tensors) from a tracing
        call; used as templates to allocate result buffers per call.
    inputs -- sample inputs used for compilation.
    is_method -- when True, the returned forward expects the instance as
        its first positional argument and dispatches to nnf.run_method.
    """
    if not isinstance(obj, torch.nn.Module):
        raise AssertionError(
            "Internal bug, please report to "
            "https://github.com/microsoft/nnfusion"
        )
    # Normalize a single-tensor output to a list; remember so the wrapper
    # can unwrap the result again.
    output_is_tensor = isinstance(outputs, torch.Tensor)
    if output_is_tensor:
        outputs = [outputs]
    nnf = NNFusionRT(obj, config, signature)
    nnf.compile(inputs, outputs)
    # TODO free outputs and only save desc?
    def forward(*inputs):
        # Allocate fresh result buffers shaped like the traced outputs.
        results = [
            torch.empty_like(output)
            for output in outputs
        ]
        if is_method:
            # First positional argument is the instance (self).
            obj, *inputs = inputs
            nnf.run_method(obj, inputs, results)
        else:
            inputs = list(inputs)
            nnf.run(inputs, results)
        if output_is_tensor:
            return results[0]
        return results
    return forward
def nrt_forward(obj, *inputs, config=None, signature=None, is_method=False):
    """
    Trace *obj* once with *inputs* to capture sample outputs, then build
    an NNFusion-backed forward via get_nrt_forward.

    NOTE(review): the ``is_method`` parameter is never read in this body
    (the method case passes is_method=True explicitly below) — confirm
    whether it can be dropped.
    """
    if signature is None:
        signature = get_signature(obj)
    if hasattr(obj, '_orig_forward'):
        # shallow copy is needed to avoid recursion
        # call instance forward -> call nnf_forward -> call instance forward
        obj_ = copy.copy(obj)
        obj_.forward = obj._orig_forward
        obj = obj_
    # Tracing call: run the original object once to get sample outputs.
    outputs = obj(*inputs)
    def jit_class_method_using_decorator():
        """
        Check if obj is a class method with @nnfusion.jit decorator.
        The cases of decorating class method with the @ symbol or applying it
        as function are different.
        """
        return isinstance(inputs[0], torch.nn.Module)
    if jit_class_method_using_decorator():
        self, *inputs = inputs
        # shallow copy is needed to avoid recursion when using jit as decorator:
        # export onnx -> call forward to trace -> call nnf jit func -> export onnx
        self_ = copy.copy(self)
        def forward(*args):
            if forward.first_call:
                forward.first_call = False
                return obj(self, *args)
            # handle the case that jit target function will call `forward`
            return self.forward(*args)
        forward.first_call = True
        self_.forward = forward
        return get_nrt_forward(self_, signature, config, outputs,
                               *inputs, is_method=True)
    if isfunction(obj) or is_method_of_instance(obj, torch.nn.Module):
        # Plain functions / bound methods get wrapped in a Module shim.
        return get_nrt_forward(TorchModule(obj), signature, config, outputs,
                               *inputs)
    return get_nrt_forward(obj, signature, config, outputs, *inputs)
def parse_config(tune, tuning_steps, config):
    """
    Normalize the (tune, tuning_steps, config) arguments of jit() into a
    single validated nnfusion Config.

    Precedence: ``tuning_steps`` (if given) forces tuning on and sets the
    step count; ``tune`` (if given) toggles antares mode; ``config``
    supplies everything else.

    Raises TypeError for wrong argument types and ValueError when
    tune=False conflicts with an explicit tuning_steps.
    """
    if config is None:
        config = Config()
    elif type(config) is dict:
        # Exact-type checks: a plain dict is converted, anything else
        # must already be a Config (subclasses are rejected below).
        config = Config(config)
    if not type(config) is Config:
        raise TypeError(
            "Expected optional 'config' argument of type dict or "
            f"nnfusion.Config but found {config}"
        )
    if tuning_steps is not None:
        if not isinstance(tuning_steps, int):
            raise TypeError(
                "Expected optional 'tuning_steps' argument of type int "
                f"but found {tuning_steps}"
            )
        if tune is False:
            raise ValueError(
                f"Conflict is detected: tune={tune} and "
                f"tuning_steps={tuning_steps}"
            )
        # Supplying tuning_steps implies tuning is wanted.
        tune = True
        config['kernel_tuning_steps'] = tuning_steps
    if tune is not None:
        if not isinstance(tune, bool):
            raise TypeError(
                "Expected optional 'tune' argument of type bool "
                f"but found {tune}"
            )
        config['antares_mode'] = tune
    return config
def check_obj_type(obj):
    """Raise TypeError unless *obj* is something nnfusion.jit can trace:
    a function, or a torch.nn.Module instance/method/class."""
    acceptable = (
        isfunction(obj)
        or isinstance(obj, torch.nn.Module)
        or is_subclass_of_cls(obj, torch.nn.Module)
        or is_method_of_instance(obj, torch.nn.Module)
    )
    if not acceptable:
        raise TypeError(
            "Expected function or torch.nn.Module instance/method/class "
            f"but found {obj}"
        )
def jit_class(obj, config):
    """
    Return jitted class using dynamic inheritance to override the forward
    function and keep its signature.
    """
    class JITModule(obj):
        # The compiled artifact is keyed on the *wrapped* class's forward
        # signature so each source class compiles independently.
        @jit(config=config,
             _signature='.'.join([get_signature(obj), 'forward']))
        def forward(self, *args, **kwargs):
            return super().forward(*args, **kwargs)
    return JITModule
def jit(obj=None, *, tune=None, tuning_steps=None, config=None, _signature=None):
    """
    Parameters:
        obj (function, `torch.nn.Module` instance/method/class):
            The target object to be traced. When `obj` is an instance or a
            class, it is equivalent to trace its `forward` function.
        tune (Optional[bool]):
            Whether to tune kernel. By default it follows `config`.
            If set, it overwrites `config`.
        tuning_steps (Optional[int]):
            Number of kernel tuning steps. By default it follows `config`.
            If set, it overwrites `config` and `tune`.
        config (Optional[dict, nnfusion.Config]):
            NNFusion compilation config.
            By default it will be set to `nnfusion.Config()`.
            Pass a `dict` to overwrite default config or directly pass an
            instance of `nnfusion.Config`.
            For example, `@nnfusion.jit(tune=True,
                                        config={'kernel_tuning_steps': 42})`
            For more flags information, please execute the command `nnfusion`
            in the terminal.
    """
    config = parse_config(tune, tuning_steps, config)
    def _jit(_obj):
        check_obj_type(_obj)
        if is_subclass_of_cls(_obj, torch.nn.Module):
            # Decorating a class: build a derived class with a jitted forward.
            return jit_class(_obj, config)
        @functools.wraps(_obj)
        def wrapper(*args): # TODO support kwargs?
            # Compile lazily on the first call (sample inputs are needed
            # to trace), then reuse the compiled forward.
            if wrapper.forward is None:
                wrapper.forward = nrt_forward(_obj, *args,
                                              config=config,
                                              signature=_signature)
            return wrapper.forward(*args)
        wrapper.forward = None
        if isinstance(_obj, torch.nn.Module):
            # Instance case: patch forward in place and return the instance.
            _obj._orig_forward = _obj.forward
            _obj.forward = wrapper
            return _obj
        return wrapper
    # Support both bare `@jit` (obj given) and parameterized `@jit(...)`
    # (returns the decorator).
    if obj is None:
        return _jit
    return _jit(obj)
|
[
"[email protected]"
] | |
3d27cf4f50a9cc4bd469bd18977762b572f062a1
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/company/new_problem/look_week_into_different_child/woman.py
|
adf36b9a017251053bd4003aaba072a84a85d48d
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 246 |
py
|
#! /usr/bin/env python
def seem_work_for_next_man(str_arg):
    """Print the given argument."""
    print(str_arg)

def right_week_or_little_person(str_arg):
    """Print *str_arg* via the helper, then print 'eye'."""
    seem_work_for_next_man(str_arg)
    print('eye')

if __name__ == '__main__':
    right_week_or_little_person('place')
|
[
"[email protected]"
] | |
419801dc9b41a351205b81a2536848b549bcdca3
|
67a48a7a2db56247fdd84474efa35124565fd8b9
|
/Codeforces/1567/1567a.py
|
d8ac3e266bff074dc1c8d5d2ab0d617f691e4d6f
|
[] |
no_license
|
qazz625/Competitive-Programming-Codes
|
e3de31f9276f84e919a6017b2cf781c946809862
|
e5df9cdc4714d78b7b6a7535ed7a45e07d3781c3
|
refs/heads/master
| 2022-08-30T07:57:55.172867 | 2022-08-10T08:02:07 | 2022-08-10T08:02:07 | 242,182,922 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 213 |
py
|
# Reads t test cases from stdin; for each string, 'L' and 'R' are kept
# as-is, 'D' becomes 'U', and any other character (i.e. 'U') becomes 'D'.
t = int(input())
for _ in range(t):
    n = int(input())
    arr = []
    s = input()
    for x in s:
        if x == 'L' or x == 'R':
            arr += [x]
        elif x == 'D':
            arr += ['U']
        else:
            # Only 'U' remains for this branch.
            arr += ['D']
    # Print the transformed string without separators.
    print(*arr, sep='')
|
[
"[email protected]"
] | |
d9defe5ad47eb503e1e8834bad3974c9f76ea1ae
|
33fc4f5b3b92fc5d84be6c4872094264be5c2192
|
/108numpy-copy-deepcopy.py
|
c41df93204747de028547d6883e0e74eb2590112
|
[] |
no_license
|
greenmac/python-morvan-numpy-pandas
|
2ee9f572b910f65b44fe76316774fa9f604e9eb2
|
77fe010b15074e7ecabaefc07bc80bf667575d89
|
refs/heads/master
| 2020-04-12T14:54:47.317643 | 2018-12-22T07:18:19 | 2018-12-22T07:18:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 602 |
py
|
# https://morvanzhou.github.io/tutorials/data-manipulation/np-pd/2-8-np-copy/
import numpy as np
# a = np.arange(4)
# b = a
# c = a
# d = b
# a[0] = 11
# print(a)
# print(b)
# print(c)
# print(d)
# print(b is a)
# print(d is a)
# a = np.arange(4)
# b = a
# c = a
# d = b
# a[0] = 11
# d[1:3] = [22, 33]
# print(a)
# print(b)
# print(c)
# print(d)
a = np.arange(4)
# b, c and d are all references to the same array object as a.
b = a
c = a
d = b
a[0] = 11
d[1:3] = [22, 33]
b = a.copy() # deep copy: b now owns its own data and is no longer linked to a
a[3] = 44
print(a)
print(b) # b keeps the values captured at copy time; a[3] = 44 did not propagate
print(c)
print(d)
|
[
"[email protected]"
] | |
1344db5d293e0d52eb43ae1b44c466eb59437167
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02380/s484509438.py
|
102ce45748de53d8af54b0469dd1cd39937af871
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 216 |
py
|
import math
# Given two sides a, b and the included angle C (degrees) on stdin, print
# the triangle's area S, perimeter L and height h on side a, 8 decimals each.
a, b, C = map(int, input().split())
radC = math.radians(C)
# Area: (1/2) * a * b * sin(C)
S = a * b * math.sin(radC) * (1 / 2)
# Law of cosines: c^2 = a^2 + b^2 - 2ab*cos(C)
c_squared = a**2 + b**2 - 2 * a * b * math.cos(radC)
L = a + b + math.sqrt(c_squared)
# Height on side a, from S = (1/2) * a * h.
h = 2 * S / a
# BUG FIX: the original bound this list to the name `list`, shadowing the
# builtin list() for the rest of the module.
results = [S, L, h]
for value in results:
    print('{:.08f}'.format(value))
|
[
"[email protected]"
] | |
fd2f9e40af42009d2df03ad31acbf7115cfbdb22
|
ec0e202ba914a1d9318c449130eee74223af6c98
|
/rememerme/users/client.py
|
c79c6d6be62bb75e649fba4b1b42f040d57849c3
|
[
"Apache-2.0"
] |
permissive
|
rememerme/users-model
|
0f07c76bdbabf803fc6b8f6fe4aabcde42fe0e34
|
6b62af077ae93f073e9bb831a82ca8f011697277
|
refs/heads/master
| 2020-05-17T00:27:01.990149 | 2014-01-18T05:54:46 | 2014-01-18T05:54:46 | 15,694,812 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,572 |
py
|
import requests
from rememerme.users.models import User
class UserClientError(Exception):
    """Raised when the users service returns a non-200 response."""
    pass
def strip_trailing_slash(url):
    """
    Return *url* without a single trailing '/', if one is present.

    BUG FIX: the original indexed url[-1] unconditionally, raising
    IndexError on an empty string.
    """
    if url.endswith('/'):
        return url[:-1]
    return url
class UserClient:
    """
    Minimal REST client for the users service.

    Every request authenticates by passing the session id in the
    HTTP_AUTHORIZATION header.
    """
    DEFAULT_URL = 'http://134.53.148.103'
    def __init__(self, session_id, url=DEFAULT_URL):
        self.url = strip_trailing_slash(url)
        self.session_id = session_id
    def create(self, username, password):
        """Not implemented yet.

        BUG FIX: the original *returned* a NotImplementedError instance
        (followed by unreachable dead code) instead of raising it.
        """
        raise NotImplementedError()
    def update(self, user_id, username=None, password=None, email=None):
        """Update the given fields of a user and return the updated User."""
        payload = {}
        if username: payload['username'] = username
        if password: payload['password'] = password
        if email: payload['email'] = email
        headers = { 'HTTP_AUTHORIZATION' : self.session_id }
        r = requests.put(self.url + '/rest/v1/sessions/%s' % str(user_id), data=payload, headers=headers)
        # BUG FIX: '!=' instead of 'is not' — identity comparison with an
        # int is implementation-dependent and the wrong operator here.
        if r.status_code != 200:
            raise UserClientError(r.text)
        return User.fromMap(r.json())
    def get(self, user_id):
        """Fetch a user by id and return the parsed User."""
        headers = { 'HTTP_AUTHORIZATION' : self.session_id }
        # BUG FIX: the original issued requests.delete here, which would
        # delete the resource instead of fetching it.
        r = requests.get(self.url + '/rest/v1/sessions/%s' % str(user_id), headers=headers)
        if r.status_code != 200:
            raise UserClientError(r.text)
        return User.fromMap(r.json())
|
[
"[email protected]"
] | |
93b7f21504d58d63e17f2a7e1435cb78ca6999d6
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part009324.py
|
cbdf2c82d8e24b917f93048ece6a2aa7d84ec418
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,298 |
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher108113(CommutativeMatcher):
    """Auto-generated matchpy matcher for one commutative (Mul) pattern.

    Machine-generated by sympy/rubi's pattern compiler — do not hand-edit
    the data tables below; regenerate instead.
    """
    # Lazily-created singleton instance (see get()).
    _instance = None
    # Pattern table: id -> (subpattern id, fixed multiset, variable slots).
    # Both slots are wildcard sequence variables over Mul; the second carries
    # a default of S(1) (the multiplicative identity).
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i3.1.2.0', 1, 1, None), Mul),
            (VariableWithCount('i3.1.2.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    # The operation this matcher treats as commutative/associative.
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        # Seed the subject index; None registers the empty subject.
        self.add_subject(None)
    @staticmethod
    def get():
        # Singleton accessor — matchers are stateless after construction.
        if CommutativeMatcher108113._instance is None:
            CommutativeMatcher108113._instance = CommutativeMatcher108113()
        return CommutativeMatcher108113._instance
    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 108112
        # The unreachable `yield` after `return` is intentional: its mere
        # presence makes this function a generator, so the call returns an
        # (empty) iterator rather than None.
        return
        yield
from collections import deque
|
[
"[email protected]"
] | |
ff3f576564a64698fd39d488aee3b2df3873b01e
|
9d8e2dd4441c50b443390f76c899ad1f46c42c0e
|
/mit_intro_algos/max_heap.py
|
13d0a325af33fa82b8c19924971ba9c0b20d5f14
|
[] |
no_license
|
vikramjit-sidhu/algorithms
|
186ec32de471386ce0fd6b469403199a5e3bbc6d
|
cace332fc8e952db76c19e200cc91ec8485ef14f
|
refs/heads/master
| 2021-01-01T16:20:52.071495 | 2015-08-03T17:42:29 | 2015-08-03T17:42:29 | 29,119,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,979 |
py
|
"""
Creates a max heap, can also use heap sort algorithm on a pre created array
Uses an array to implement array
My implementation of python heapq module
"""
class MaxHeap:
    """Array-backed max-heap with 1-based indexing.

    ``self.A[0]`` is an unused sentinel, so the children of index ``i``
    live at ``2*i`` and ``2*i + 1``.
    """

    def __init__(self, ar=None):
        """Create an empty heap, or heapify a caller-supplied array.

        Bug fix: the original signature was ``def __init__(self, ar=[None])``.
        That single default list was shared by every instance created without
        an argument, so one heap's inserts leaked into all the others.
        """
        self.A = ar if ar is not None else [None]
        if len(self.A) > 1:
            self.__create_maxheap()

    def __max_heapify(self, index):
        """Sift A[index] down to restore the heap property beneath it.

        Returns True if any swap occurred, False otherwise; update_key uses
        the return value to decide whether to sift up instead.
        """
        left, right = 2*index, 2*index+1
        if left < len(self.A) and self.A[index] < self.A[left]:
            maximum = left
        else:
            maximum = index
        if right < len(self.A) and self.A[maximum] < self.A[right]:
            maximum = right
        if maximum != index:
            self.A[index], self.A[maximum] = self.A[maximum], self.A[index]
            self.__max_heapify(maximum)
            return True
        return False

    def __create_maxheap(self):
        """Heapify self.A in place (bottom-up)."""
        # If the caller's array has a real element at index 0, move it to
        # the end so index 0 can serve as the unused sentinel slot.
        if self.A[0]:
            self.A.append(self.A[0])
            self.A[0] = None
        start_index = int((len(self.A)-1)/2)  # last internal (non-leaf) node
        for i in range(start_index, 0, -1):
            self.__max_heapify(i)

    def find_max(self):
        """Return (without removing) the maximum key."""
        return self.A[1]

    def extract_max(self):
        """Remove and return the maximum key."""
        last_index = len(self.A) - 1
        self.A[1], self.A[last_index] = self.A[last_index], self.A[1]
        max_key = self.A.pop()
        # Bug fix: the original called the bare name ``max_heapify(1)``,
        # which raised NameError at runtime (the method is name-mangled).
        self.__max_heapify(1)
        return max_key

    def insert_key(self, key):
        """Append *key* and sift it up to its proper position."""
        self.A.append(key)
        check_index = len(self.A) - 1
        parent_index = int(check_index/2)
        self.__parent_updatify(parent_index, check_index)

    def __parent_updatify(self, parent_index, check_index):
        """Sift A[check_index] up while it exceeds its parent."""
        while parent_index >= 1 and self.A[parent_index] < self.A[check_index]:
            self.A[parent_index], self.A[check_index] = self.A[check_index], self.A[parent_index]
            check_index, parent_index = parent_index, int(parent_index/2)

    def update_key(self, key, new_key):
        """Replace *key* with *new_key*, restoring the heap property."""
        key_index = self.find_key(key)
        if key_index is None:
            # Bug fix: the original indexed self.A[None] (TypeError) when the
            # key was absent; find_key has already reported the miss.
            return
        self.A[key_index] = new_key
        # A swap downward means the new key sank; otherwise try sifting up.
        if not self.__max_heapify(key_index):
            self.__parent_updatify(int(key_index/2), key_index)

    def find_key(self, key):
        """Return the index of *key* in self.A using BFS, or None if absent.

        Prints a message when the key is not found.
        """
        from queue import Queue
        qu = Queue()
        qu.put(1)
        key_index = None
        while not qu.empty():
            element = qu.get_nowait()
            if self.A[element] == key:
                key_index = element
                break
            left, right = element*2, element*2+1
            # A subtree can only contain *key* if its root is >= key
            # (max-heap invariant), so prune smaller branches.
            if left < len(self.A) and self.A[left] >= key:
                qu.put_nowait(left)
            if right < len(self.A) and self.A[right] >= key:
                qu.put_nowait(right)
        else:
            # while/else: runs only when the loop ended without `break`.
            print("Key {0} not found".format(key))
        del(qu)
        return key_index
if __name__ == '__main__':
    # Bug fix: the original called main(), which is never defined anywhere
    # in this module, so running the file directly raised NameError.
    # Run a small smoke demo of the heap instead.
    demo = MaxHeap()
    for key in (4, 10, 3, 5, 1):
        demo.insert_key(key)
    print(demo.find_max())
|
[
"[email protected]"
] | |
f583736aeb98af156de12d7ff928aca9a305b7c8
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_exocyst_tags/initial_7607.py
|
f3e10cc911458956f628b86bc422c72bf2469275
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,587 |
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets = {}
surf_sets = {}

# One entry per exocyst-subunit tag, replacing ~23 copy-pasted stanzas:
# (marker-set name, (x, y, z) position, (r, g, b) colour, marker radius).
# NOTE(review): Exo84_GFPN uses radius 31.586 unlike the other *_GFPN tags
# (21.9005); value kept exactly as generated — confirm upstream if suspect.
_TAG_MARKERS = [
    ('Sec3_GFPN',  (19, 105, 690),  (0.15, 0.78, 0.66), 21.9005),
    ('Sec3_GFPC',  (215, 753, 192), (0.15, 0.78, 0.66), 31.586),
    ('Sec3_Anch',  (122, 745, 777), (0.15, 0.58, 0.66), 26.9335),
    ('Sec5_GFPN',  (285, 668, 783), (0.38, 0.24, 0.37), 21.9005),
    ('Sec5_GFPC',  (266, 354, 710), (0.38, 0.24, 0.37), 31.586),
    ('Sec6_GFPN',  (732, 670, 594), (0.84, 0.98, 0.24), 21.9005),
    ('Sec6_GFPC',  (696, 107, 386), (0.84, 0.98, 0.24), 31.586),
    ('Sec6_Anch',  (558, 299, 781), (0.84, 0.78, 0.24), 26.9335),
    ('Sec8_GFPC',  (428, 270, 711), (0.62, 0.67, 0.45), 31.586),
    ('Sec8_Anch',  (877, 991, 805), (0.62, 0.47, 0.45), 26.9335),
    ('Sec10_GFPN', (899, 576, 943), (0, 0.91, 0),       21.9005),
    ('Sec10_GFPC', (671, 362, 423), (0, 0.91, 0),       31.586),
    ('Sec10_Anch', (699, 105, 883), (0, 0.71, 0),       26.9335),
    ('Sec15_GFPN', (340, 501, 893), (0.11, 0.51, 0.86), 21.9005),
    ('Sec15_GFPC', (964, 729, 337), (0.11, 0.51, 0.86), 31.586),
    ('Sec15_Anch', (486, 503, 223), (0.11, 0.31, 0.86), 26.9335),
    ('Exo70_GFPN', (472, 868, 488), (0.89, 0.47, 0.4),  21.9005),
    ('Exo70_GFPC', (333, 100, 187), (0.89, 0.47, 0.4),  31.586),
    ('Exo70_Anch', (147, 620, 939), (0.89, 0.27, 0.4),  26.9335),
    ('Exo84_GFPN', (573, 301, 997), (0.5, 0.7, 0),      31.586),
    ('Exo84_GFPC', (585, 771, 647), (0.5, 0.7, 0),      31.586),
    ('Exo84_Anch', (183, 347, 23),  (0.5, 0.5, 0),      26.9335),
]

for _name, _pos, _colour, _radius in _TAG_MARKERS:
    # Same semantics as the generated stanzas: create the set only when it
    # does not exist yet, then always place the marker in it.
    if _name not in marker_sets:
        marker_sets[_name] = new_marker_set(_name)
    mark = marker_sets[_name].place_marker(_pos, _colour, _radius)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.