blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fbe5bbf72cfc77e0e0a289bbf4f3e02ff45f6c7d | c421330a5e03df01aa4ec9dc1c60dd2b9c514423 | /movieproject/movieapp/urls.py | 29e716810f30b03d4a9e060a55a905cdf4dcd5f1 | [] | no_license | sayanth123/movieapp | 16051774cbb1766c513a3e2b28c45b905c45c4d0 | f4e50a7f1b7441390ab234c11a13e1d989ec3118 | refs/heads/master | 2023-05-06T05:41:09.735871 | 2021-05-26T12:46:47 | 2021-05-26T12:46:47 | 371,027,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | from . import views
from django.urls import path
app_name='movieapp'
urlpatterns = [
path('', views.index,name='index'),
path('movie/<int:movie_id>/', views.detail,name='detail'),
path('add/', views.add,name='add'),
path('update/<int:id>/', views.update,name='update'),
path('delete/<int:id>/', views.delete,name='delete'),
]
| [
"[email protected]"
] | |
63b6f5beff30f469db12c028c0a1fefdad4c79f5 | d507d0846902e0012a4b2a0aaaea1cbbdb21db46 | /supervisely_lib/annotation/json_geometries_map.py | 394b5674ece0eb53c38ebf1dfc6160b66988b185 | [] | no_license | wpilibsuite/supervisely | a569fdc0d5e5f2fb912f32beab8f3fedb277504e | 19805ca9b2bd20e31d6d41a99dc37dc439bc257a | refs/heads/master | 2022-09-09T02:32:54.883109 | 2020-06-01T20:55:49 | 2020-06-01T20:55:49 | 267,916,361 | 2 | 3 | null | 2020-06-03T13:59:56 | 2020-05-29T17:27:30 | Python | UTF-8 | Python | false | false | 843 | py | # coding: utf-8
from supervisely_lib.geometry.bitmap import Bitmap
from supervisely_lib.geometry.cuboid import Cuboid
from supervisely_lib.geometry.point import Point
from supervisely_lib.geometry.polygon import Polygon
from supervisely_lib.geometry.polyline import Polyline
from supervisely_lib.geometry.rectangle import Rectangle
from supervisely_lib.geometry.graph import GraphNodes
from supervisely_lib.geometry.any_geometry import AnyGeometry
from supervisely_lib.geometry.cuboid_3d import Cuboid3d
_INPUT_GEOMETRIES = [Bitmap, Cuboid, Point, Polygon, Polyline, Rectangle, GraphNodes, AnyGeometry, Cuboid3d]
_JSON_SHAPE_TO_GEOMETRY_TYPE = {geometry.geometry_name(): geometry for geometry in _INPUT_GEOMETRIES}
def GET_GEOMETRY_FROM_STR(figure_shape: str):
geometry = _JSON_SHAPE_TO_GEOMETRY_TYPE[figure_shape]
return geometry
| [
"[email protected]"
] | |
996df35200d2adc6b93a637fd11c0fe8b8974d26 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/confidentialledger/azure-confidentialledger/azure/confidentialledger/receipt/_claims_models.py | 9859688f266bb0aff4d28d6e620d07a0fd31064e | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 4,138 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Models for application claims."""
from typing import Any, Dict, Optional, Union
from dataclasses import dataclass
@dataclass
class LedgerEntryClaim:
"""
LedgerEntryClaim represents an Application Claim derived from ledger entry data.
:keyword protocol: The protocol used to compute the claim.
:paramtype protocol: str
:keyword collectionId: The collection ID of the ledger entry.
:paramtype collectionId: str
:keyword contents: The contents of the ledger entry.
:paramtype contents: str
:keyword secretKey: The secret key used to compute the claim digest.
:paramtype secretKey: str
"""
protocol: str
collectionId: str
contents: str
secretKey: str
@classmethod
def from_dict(cls, ledger_entry_claim_dict: Dict[str, Any]):
"""Create a new instance of this class from a dictionary.
:param dict[str, any] ledger_entry_claim_dict: The dictionary representation of the ledger entry claim.
:return: A new instance of this class corresponding to the provided dictionary.
:rtype: LedgerEntryClaim
"""
return cls(**ledger_entry_claim_dict)
@dataclass
class ClaimDigest:
"""
ClaimDigest represents an Application Claim in digested form.
:keyword protocol: The protocol used to compute the claim.
:paramtype protocol: str
:keyword value: The digest of the claim.
:paramtype value: str
"""
protocol: str
value: str
@classmethod
def from_dict(cls, ledger_entry_claim_dict: Dict[str, Any]):
"""Create a new instance of this class from a dictionary.
:param dict[str, any] ledger_entry_claim_dict: The dictionary representation of the claim digest.
:return: A new instance of this class corresponding to the provided dictionary.
:rtype: ClaimDigest
"""
return cls(**ledger_entry_claim_dict)
@dataclass
class ApplicationClaim:
"""
ApplicationClaim represents a claim of a ledger application.
:keyword kind: The kind of the claim.
:paramtype kind: str
:keyword ledgerEntry: The ledger entry claim.
:paramtype ledgerEntry: Optional[Union[Dict[str, Any], LedgerEntryClaim]]
:keyword digest: The claim digest object.
:paramtype digest: Optional[Union[Dict[str, Any], ClaimDigest]]
"""
kind: str
ledgerEntry: Optional[LedgerEntryClaim] = None
digest: Optional[ClaimDigest] = None
def __init__(
self,
kind: str,
ledgerEntry: Optional[Union[Dict[str, Any], LedgerEntryClaim]] = None,
digest: Optional[Union[Dict[str, Any], ClaimDigest]] = None,
**kwargs: Any
):
"""
:keyword kind: The kind of the claim.
:paramtype kind: str
:keyword ledgerEntry: The ledger entry claim.
:paramtype ledgerEntry: Optional[Union[Dict[str, Any], LedgerEntryClaim]]
:keyword digest: The claim digest object.
:paramtype digest: Optional[Union[Dict[str, Any], ClaimDigest]]
"""
self.kind = kind
if ledgerEntry:
if isinstance(ledgerEntry, LedgerEntryClaim):
self.ledgerEntry = ledgerEntry
else:
self.ledgerEntry = LedgerEntryClaim.from_dict(ledgerEntry)
else:
self.ledgerEntry = None
if digest:
if isinstance(digest, ClaimDigest):
self.digest = digest
else:
self.digest = ClaimDigest.from_dict(digest)
else:
self.digest = None
self.kwargs = kwargs
@classmethod
def from_dict(cls, claim_dict: Dict[str, Any]):
"""Create a new instance of this class from a dictionary.
:param dict[str, any] claim_dict: The dictionary representation of the application claim.
:return: A new instance of this class corresponding to the provided dictionary.
:rtype: ApplicationClaim
"""
return cls(**claim_dict)
| [
"[email protected]"
] | |
0185c4f4c626389ea2464ebda9f072d8a3b86e50 | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/tensorflow/python/keras/api/_v2/keras/applications/xception/__init__ 2.py | bf93ae01110de2a54ca5eaeaa25020b85ad82eab | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:bdac5cda377d4d39bccb19a4cdeafc9b8f2a51c4983a1c81f5f33a22b6729864
size 731
| [
"[email protected]"
] | |
5528da26ff17297745c4e882767344421f6747fc | 5da373c7f45b65894804002ef33fd53264d976f9 | /ppim/models/__init__.py | 375413f9f747aca74a305719606c6d34f8708fba | [
"Apache-2.0"
] | permissive | chenhaohan88/Paddle-Image-Models | 55bfafdbb43ef001faa4ea2e53570ab3248e4786 | c80e3423ce57779b3426c3c024f3fc51cdb9d1b7 | refs/heads/main | 2023-04-10T22:52:45.251251 | 2021-04-04T02:20:15 | 2021-04-04T02:20:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | # Transformer
# from .tnt import tnt_s, TNT
from .vit import VisionTransformer
from .pit import pit_ti, pit_s, pit_xs, pit_b, pit_ti_distilled, pit_s_distilled, pit_xs_distilled, pit_b_distilled, PoolingTransformer, DistilledPoolingTransformer
from .deit import deit_ti, deit_s, deit_b, deit_b_384, deit_ti_distilled, deit_s_distilled, deit_b_distilled, deit_b_distilled_384, DistilledVisionTransformer
# CNN
# from .dla import dla_34, dla_46_c, dla_46x_c, dla_60, dla_60x, dla_60x_c, dla_102, dla_102x, dla_102x2, dla_169, DLA
from .rexnet import rexnet_1_0, rexnet_1_3, rexnet_1_5, rexnet_2_0, rexnet_3_0, ReXNet
from .repvgg import repvgg_a0, repvgg_a1, repvgg_a2, repvgg_b0, repvgg_b1, repvgg_b2, repvgg_b3, repvgg_b1g2, repvgg_b1g4, repvgg_b2g4, repvgg_b3g4, RepVGG
# from .hardnet import hardnet_68, hardnet_85, hardnet_39_ds, hardnet_68_ds, HarDNet
# Involution
from .rednet import rednet_26, rednet_38, rednet_50, rednet_101, rednet_152, RedNet
| [
"[email protected]"
] | |
de9883ebf4e9b195992a3a40d7ed18ada729acc7 | ab1c920583995f372748ff69d38a823edd9a06af | /hw/day9/day9_hw3.py | 16f8486856923f4b36925b819f6988b3d58adbad | [] | no_license | adyadyat/pyprojects | 5e15f4e33892f9581b8ebe518b82806f0cd019dc | c8f79c4249c22eb9e3e19998d5b504153faae31f | refs/heads/master | 2022-11-12T16:59:17.482303 | 2020-07-04T09:08:18 | 2020-07-04T09:08:18 | 265,461,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | for i in range(1,9):
for j in range(8,i,-1):
print(' ',end='')
for k in range(1,i+1):
print(i,end='')
for x in range(2,i+1):
print(i,end='')
print()
for i in range(7,0,-1):
for j in range(i,8):
print(' ',end='')
for k in range(i,0,-1):
print(i,end='')
for x in range(i,1,-1):
print(i,end='')
print()
'''
1
222
33333
4444444
555555555
66666666666
7777777777777
888888888888888
7777777777777
66666666666
555555555
4444444
33333
222
1
'''
| [
"[email protected]"
] | |
45ee33f2b96eb380d8ed54ad93aab76653c54a7a | c3396691665741fe3c680e7d44ee127b05b54a0d | /tensorflow/python/training/sync_replicas_optimizer_test.py | e340a22374f7487b4ab4d96cd1720203e8322f36 | [
"Apache-2.0"
] | permissive | ravi-teja-mullapudi/tensorflow | 72506388755fae70e27bb6b907f3f7e6c3208c3d | e8d0ce80414e2402f648a86b1d3bf3ad435467a9 | refs/heads/master | 2021-05-04T08:35:00.029305 | 2016-11-08T22:49:42 | 2016-11-08T22:49:42 | 70,420,798 | 0 | 0 | null | 2016-10-09T17:57:48 | 2016-10-09T17:57:47 | null | UTF-8 | Python | false | false | 11,997 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sync_replicas_optimizer.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.util import net_lib
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return them."""
worker_ports = [net_lib.pick_unused_port_or_die() for _ in range(num_workers)]
ps_ports = [net_lib.pick_unused_port_or_die() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]}
cs = tf.train.ClusterSpec(cluster_dict)
workers = [
tf.train.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)]
ps_servers = [
tf.train.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)]
return workers, ps_servers
# Creates the workers and return their sessions, graphs, train_ops.
def get_workers(num_workers, replicas_to_aggregate, workers):
sessions = []
graphs = []
train_ops = []
for worker_id in range(num_workers):
graph = tf.Graph()
is_chief = (worker_id == 0)
with graph.as_default():
with tf.device("/job:ps/task:0"):
global_step = tf.Variable(0, name="global_step", trainable=False)
var_0 = tf.Variable(0.0, name="v0")
with tf.device("/job:ps/task:1"):
var_1 = tf.Variable(1.0, name="v1")
var_sparse = tf.Variable([[3.0], [4.0]], name="v_sparse")
with tf.device("/job:worker/task:"+str(worker_id)):
grads_0 = tf.constant(0.1+worker_id*0.2)
grads_1 = tf.constant(0.9+worker_id*0.2)
# This is to test against sparse gradients.
grads_sparse = tf.IndexedSlices(
tf.constant([0.1+worker_id*0.2], shape=[1, 1]),
tf.constant([1], dtype=tf.int64),
tf.constant([2, 1], dtype=tf.int64))
sgd_opt = tf.train.GradientDescentOptimizer(2.0)
sync_rep_opt = tf.train.SyncReplicasOptimizerV2(
sgd_opt, replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers)
train_op = [sync_rep_opt.apply_gradients(
zip([grads_0, grads_1, grads_sparse], [var_0, var_1, var_sparse]),
global_step=global_step)]
init_op = tf.initialize_all_variables()
# Needed ops from the sync_rep optimizer. This is mainly for the
# local_step initialization.
local_init_op = sync_rep_opt.local_step_init_op
if is_chief:
local_init_op = sync_rep_opt.chief_init_op
ready_for_local_init_op = sync_rep_opt.ready_for_local_init_op
# Chief_queue_runner
chief_queue_runner = sync_rep_opt.get_chief_queue_runner()
sync_init_op = sync_rep_opt.get_init_tokens_op(num_workers)
# Creates session for chief.
supervisor = tf.train.Supervisor(
graph=graph,
is_chief=is_chief,
recovery_wait_secs=1,
init_op=init_op,
local_init_op=local_init_op,
ready_for_local_init_op=ready_for_local_init_op)
session = supervisor.prepare_or_wait_for_session(workers[worker_id].target)
# Chief should execute the sync_init_op and start the chief queue runner.
if is_chief:
session.run(sync_init_op)
supervisor.StartQueueRunners(session, [chief_queue_runner])
sessions.append(session)
graphs.append(graph)
train_ops.append(train_op)
return sessions, graphs, train_ops
class SyncReplicasOptimizerV2Test(tf.test.TestCase):
def _run(self, train_op, sess):
sess.run(train_op)
def test2Workers(self):
num_workers = 2
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate,
workers)
# Chief should have already initialized all the variables.
var_0_g_0 = graphs[0].get_tensor_by_name("v0:0")
var_1_g_0 = graphs[0].get_tensor_by_name("v1:0")
local_step_0 = graphs[0].get_tensor_by_name("sync_rep_local_step:0")
self.assertAllEqual(0.0, var_0_g_0.eval(session=sessions[0]))
self.assertAllEqual(1.0, var_1_g_0.eval(session=sessions[0]))
self.assertAllEqual(0, local_step_0.eval(session=sessions[0]))
# Will just use session 1 to verify all the variables later.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
var_sparse_g_1 = graphs[1].get_tensor_by_name("v_sparse:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
# The steps should also be initialized.
self.assertAllEqual(0, global_step.eval(session=sessions[1]))
self.assertAllEqual(0, local_step_1.eval(session=sessions[1]))
self.assertAllClose([[3.0], [4.0]],
var_sparse_g_1.eval(session=sessions[1]))
# We have initial tokens in the queue so we can call this one by one. After
# the first step, this will no longer work as there will be no more extra
# tokens in the queue.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
# The global step should have been updated and the variables should now have
# the new values after the average of the gradients are applied.
self.assertAllEqual(1, global_step.eval(session=sessions[1]))
self.assertAllClose(0-(0.1+0.3)/2*2.0, var_0_g_1.eval(session=sessions[1]))
self.assertAllClose(1-(0.9+1.1)/2*2.0, var_1_g_1.eval(session=sessions[1]))
self.assertAllClose([[3.0], [4.0-(0.1+0.3)/2*2.0]],
var_sparse_g_1.eval(session=sessions[1]))
# The local step for both workers should still be 0 because the initial
# tokens in the token queue are 0s. This means that the following
# computation of the gradients will be wasted as local_step is smaller than
# the current global step. However, this only happens once when the system
# just starts and this is necessary to make the system robust for the case
# when chief gets restarted by errors/preemption/...
self.assertAllEqual(0, local_step_0.eval(session=sessions[0]))
self.assertAllEqual(0, local_step_1.eval(session=sessions[1]))
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. The variables are still the same.
self.assertAllEqual(1, global_step.eval(session=sessions[1]))
self.assertAllEqual(1, local_step_0.eval(session=sessions[0]))
self.assertAllEqual(1, local_step_1.eval(session=sessions[1]))
self.assertAllClose(0-(0.1+0.3)/2*2.0, var_0_g_1.eval(session=sessions[1]))
self.assertAllClose(1-(0.9+1.1)/2*2.0, var_1_g_1.eval(session=sessions[1]))
# At this step, the token queue is empty. So the 2 workers need to work
# together to proceed.
threads = []
threads.append(self.checkedThread(target=self._run,
args=(train_ops[0], sessions[0])))
threads.append(self.checkedThread(target=self._run,
args=(train_ops[1], sessions[1])))
# The two workers starts to execute the train op.
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# The global step should now be 2 and the gradients should have been
# applied twice.
self.assertAllEqual(2, global_step.eval(session=sessions[1]))
self.assertAllClose(0 - 2 * (0.1 + 0.3) / 2 * 2.0,
var_0_g_1.eval(session=sessions[1]))
self.assertAllClose(1 - 2 * (0.9 + 1.1) / 2 * 2.0,
var_1_g_1.eval(session=sessions[1]))
# 3 workers and one of them is backup.
def test3Workers1Backup(self):
num_workers = 3
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate,
workers)
# Chief should have already initialized all the variables.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
# The steps should also be initialized.
self.assertAllEqual(0, global_step.eval(session=sessions[1]))
self.assertAllEqual(0, local_step_1.eval(session=sessions[1]))
# We have initial tokens in the queue so we can call this one by one. After
# the token queue becomes empty, they should be called concurrently.
# Here worker 0 and worker 2 finished first.
sessions[0].run(train_ops[0])
sessions[2].run(train_ops[2])
# The global step should have been updated since we only need to collect 2
# gradients. The variables should now have the new values after the average
# of the gradients from worker 0/2 are applied.
self.assertAllEqual(1, global_step.eval(session=sessions[1]))
self.assertAllClose(0-(0.1+0.5)/2*2.0, var_0_g_1.eval(session=sessions[1]))
self.assertAllClose(1-(0.9+1.3)/2*2.0, var_1_g_1.eval(session=sessions[1]))
# Worker 1 finished later and its gradients will now be dropped as it is
# stale.
sessions[1].run(train_ops[1])
# As shown in the previous test, the local_step for all workers should be
# still 0 so their next computation will also be dropped.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
sessions[2].run(train_ops[2])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. Just check worker 1 as an example.
self.assertAllEqual(1, global_step.eval(session=sessions[1]))
self.assertAllEqual(1, local_step_1.eval(session=sessions[1]))
thread_0 = self.checkedThread(target=self._run,
args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(target=self._run,
args=(train_ops[1], sessions[1]))
# Lets worker 0 execute first.
# It will wait as we need 2 workers to finish this step and the global step
# should be still 1.
thread_0.start()
self.assertAllEqual(1, global_step.eval(session=sessions[1]))
# Starts worker 1.
thread_1.start()
thread_1.join()
# The global step should now be 2 and the gradients should have been
# applied again.
self.assertAllEqual(2, global_step.eval(session=sessions[1]))
self.assertAllClose(-0.6 -(0.1 + 0.3) / 2 * 2.0,
var_0_g_1.eval(session=sessions[1]))
self.assertAllClose(-1.2 - (0.9 + 1.1) / 2 * 2.0,
var_1_g_1.eval(session=sessions[1]))
if __name__ == "__main__":
tf.test.main()
| [
"[email protected]"
] | |
5b809ff208831e26008b58b30ecc4453fe7f150d | fcc665fc2792820e438d32339cc12ae796c1835c | /opps/core/models/profile.py | d012047a545730de5be9f658dfa00941a86911e5 | [
"MIT"
] | permissive | marcelomilo/opps | e3614e644d97ebc6b62e0083aee9a42c242f567c | bf92a003b6ad1f521d662d767a29f58a6033cb3d | refs/heads/master | 2021-01-16T18:50:12.146646 | 2013-03-02T05:15:51 | 2013-03-02T05:15:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class Profile(models.Model):
user = models.ForeignKey(User, related_name='user')
twitter = models.CharField(_(u"Twitter"), max_length=75, blank=True,
null=True)
class Meta:
app_label = 'core'
def __unicode__(self):
return self.user
| [
"[email protected]"
] | |
ef2d18211c323bd7603ec0938ce87dce09755d62 | b4c2bbf32748f381f8918c2c20d2a86b5453dc87 | /plugins/convert/mask/box_blend.py | f42177463e6d8e748353a1bd9354d1eaf432d0ff | [
"MIT"
] | permissive | oveis/DeepVideoFaceSwap | d45c7a18204f851a5c8b9cb6c9618284d4314b59 | e507f94d4f5d74c36e41c386c6fb14bb745a4885 | refs/heads/dev-gan-model | 2022-07-14T10:06:08.131201 | 2019-07-09T00:48:16 | 2019-07-09T00:48:16 | 184,978,011 | 6 | 5 | MIT | 2022-06-21T22:00:38 | 2019-05-05T04:09:53 | Python | UTF-8 | Python | false | false | 1,990 | py | #!/usr/bin/env python3
""" Adjustments for the swap box for faceswap.py converter """
import numpy as np
from ._base import Adjustment, BlurMask, logger
class Mask(Adjustment):
""" Manipulations that occur on the swap box
Actions performed here occur prior to warping the face back to the background frame
For actions that occur identically for each frame (e.g. blend_box), constants can
be placed into self.func_constants to be compiled at launch, then referenced for
each face. """
def __init__(self, mask_type, output_size, predicted_available=False, config=None):
super().__init__(mask_type, output_size, predicted_available, config)
self.mask = self.get_mask() if not self.skip else None
def get_mask(self):
""" The box for every face will be identical, so set the mask just once
As gaussian blur technically blurs both sides of the mask, reduce the mask ratio by
half to give a more expected box """
logger.debug("Building box mask")
mask_ratio = self.config["distance"] / 200
facesize = self.dummy.shape[0]
erode = slice(round(facesize * mask_ratio), -round(facesize * mask_ratio))
mask = self.dummy[:, :, -1]
mask[erode, erode] = 1.0
mask = BlurMask(self.config["type"],
mask,
self.config["radius"],
self.config["passes"]).blurred
logger.debug("Built box mask. Shape: %s", mask.shape)
return mask
def process(self, new_face):
""" The blend box function. Adds the created mask to the alpha channel """
if self.skip:
logger.trace("Skipping blend box")
return new_face
logger.trace("Blending box")
mask = np.expand_dims(self.mask, axis=-1)
new_face = np.clip(np.concatenate((new_face, mask), axis=-1), 0.0, 1.0)
logger.trace("Blended box")
return new_face
| [
"[email protected]"
] | |
1a6bfbbed305ea623e2da442fa25a000b9f34077 | 53568d7c9ca6d53f3f90fe45d33cf6357a732a88 | /170521-lambda-expresions,list-comprehension,classes/ulamek.py | 4b3d6cf7509a99713ff711da7a639b031f54f698 | [] | no_license | majsylw/Python-3.x-examples | eb7ce7df9c582f7b56fa6d40db5f96479858f867 | 59b56ca98a0ea27ce48fb47a173333bf0a9d1349 | refs/heads/main | 2023-06-08T07:24:53.052672 | 2021-06-29T12:46:15 | 2021-06-29T12:46:15 | 348,288,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | """
Example definition of a fraction class (Ulamek) -- using special methods
"""
import math
class Ulamek:
def __init__(self, licznik, mianownik):
assert(mianownik > 0)
self.licznik, self.mianownik = licznik, mianownik
self.skracanie()
# funkcja print
def __str__(self):
return f'{self.licznik}/{self.mianownik}'
def skracanie(self):
temp = math.gcd(self.licznik, self.mianownik)
self.licznik //= temp
self.mianownik //= temp
# przeciążamy operator ==
def __eq__(self, u2):
return self.licznik == u2.licznik and self.mianownik == u2.mianownik
# przeciążamy operator + uzywając napisanej wcześniej metody statycznej
def __add__(self, inny_ulamek):
return Ulamek.dodawanie(self, inny_ulamek)
# przeciążamy operator *
def __mul__(self, u2):
wynik = Ulamek(self.licznik*u2.licznik,
self.mianownik*u2.mianownik)
return wynik
# metoda statyczna
@staticmethod
def dodawanie(ulamek1, ulamek2):
wynik = Ulamek(ulamek1.licznik*ulamek2.mianownik + ulamek2.licznik*ulamek1.mianownik,
ulamek1.mianownik*ulamek2.mianownik)
wynik.skracanie()
return wynik
if __name__ == '__main__':
u1 = Ulamek(3, 4)
u2 = Ulamek(2, 6)
print(u1)
print(u1, '+', u2, '=', Ulamek.dodawanie(u1, u2)) # wykorzystanie metody statycznej
print(u1, '+', u2, '=', u1 + u2) # przeciażenie +
print(u1, '*', u2, '=', u1 * u2) # przeciażenie *
print(u1, '==', u2, '->', u1 == u2)
| [
"[email protected]"
] | |
1edbe9210cdaf8b6747c0577918cd4156ca3452d | 57ddfddd1e11db649536a8ed6e19bf5312d82d71 | /AtCoder/ABC1/ABC184/D.py | b4c97e61d1c7838d65bbca5688a51931bd044ccf | [] | no_license | pgDora56/ProgrammingContest | f9e7f4bb77714dc5088c2287e641c0aa760d0f04 | fdf1ac5d1ad655c73208d98712110a3896b1683d | refs/heads/master | 2023-08-11T12:10:40.750151 | 2021-09-23T11:13:27 | 2021-09-23T11:13:27 | 139,927,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | import sys
sys.setrecursionlimit(10**9)
memo = {}
def search(a,b,c,cnt):
tot = a+b+c
if a > b:
a, b = b, a
if b > c:
b, c = c, b
if a > b:
a, b = b, a
if a in memo:
if b in memo[a]:
if c in memo[a][b]:
return memo[a][b][c]
else:
memo[a][b] = {}
else:
memo[a] = {}
memo[a][b] = {}
chil = 0
if a==99:
chil += (cnt+1) * 99
elif a!=0:
chil += search(a+1,b,c,cnt+1) * a
if b==99:
chil += (cnt+1) * 99
elif b!=0:
chil += search(a,b+1,c,cnt+1) * b
if c==99:
chil += (cnt+1) * 99
elif c!=0:
chil += search(a,b,c+1,cnt+1) * c
res = chil / tot
memo[a][b][c] = res
return chil / tot
a, b, c = map(int, input().split())
print(search(a,b,c,0))
| [
"[email protected]"
] | |
f64139a35c4373ac2f6b69e9c1b8e0b8a2ff93ff | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/480/usersdata/321/110867/submittedfiles/Av2_Parte2.py | 130a05edcdad51dd3406a9fd3116a763a0ab7756 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | # -*- coding: utf-8 -*-valor
numero= int(input('Insira um número: '))
| [
"[email protected]"
] | |
6cbdb1487c6d3378423262ea3ae076dec93232d6 | 7c6b801ff36aa0a82ceb30c98e90091209320c7c | /cloudant121234.py | 36222d26b5123a8e34eafb378d33919373468894 | [] | no_license | SmartPracticeschool/llSPS-INT-2442-Smart-Waste-Management-System-For-Metropolitan-Cities | 5872fc64c1290991bb36b8f7fdc03eceb0025a8f | c6673bf9171b66b08a0c5a5f6643799b0d7fc3e6 | refs/heads/master | 2022-10-20T07:07:52.180598 | 2020-06-09T14:23:00 | 2020-06-09T14:23:00 | 267,571,204 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,459 | py | import time
import sys
import random
import ibmiotf.application
import ibmiotf.device
#Provide your IBM Watson Device Credentials
organization = "q2va6d" # repalce it with organization ID
deviceType = "rsip" #replace it with device type
deviceId = "108" #repalce with device id
authMethod = "token"
authToken = "9110705023"#repalce with token
def myCommandCallback(cmd):
print("Command received: %s" % cmd.data)
if cmd.data['command']=='cover':
print("the bin lid is closed")
elif cmd.data['command'] == 'uncover':
print("the bin lid is open")
try:
deviceOptions = {"org": organization, "type": deviceType, "id": deviceId, "auth-method": authMethod, "auth-token": authToken}
deviceCli = ibmiotf.device.Client(deviceOptions)
#..............................................
except Exception as e:
print("Caught exception connecting device: %s" % str(e))
sys.exit()
deviceCli.connect()
while True:
L = random.randint(0, 100);
F = random.randint(0, 100);
Q = random.randint(0, 100);
W = random.randint(0, 100);
E = random.randint(0, 100);
R = random.randint(0, 100);
T = random.randint(0, 100);
Y = random.randint(0, 100);
lat=17.3984
lon=78.5583
data = {'d':{ 'garbagelevel' : L, 'garbageweight': F,'lat': lat,'lon': lon,'a' : Q, 'b' : W, 'c' : E, 'd' : R,'e' : T, 'f' : Y, 'g' : Y}}
u=time.asctime(time.localtime(time.time()))
print(u)
#print data
def myOnPublishCallback():
print ("Published Your Garbage Level = %s %%" % L, "Garbage Weight = %s %%" % F, "to IBM Watson")
print ("Published Your Garbage Level of bin2 = %s %%" % Q, "Garbage Weight of bin2 = %s %%" % W, "to IBM Watson")
print ("Published Your Garbage Level of bin3 = %s %%" % E, "Garbage Weight of bin3 = %s %%" % R, "to IBM Watson")
print ("Published Your Garbage Level of bin4 = %s %%" % T, "Garbage Weight of bin4 = %s %%" % Y, "to IBM Watson")
success = deviceCli.publishEvent("event", "json", data, qos=0, on_publish=myOnPublishCallback)
if not success:
print("Not connected to IoTF")
time.sleep(5)
deviceCli.commandCallback = myCommandCallback
from cloudant.client import Cloudant
from cloudant.error import CloudantException
from cloudant.result import Result, ResultByKey
client = Cloudant("fa3c80de-84b9-4280-be10-e9ee55d6726b-bluemix", "cd3fd31f55919b590bdd100e21c3278805fab74817ca0ca86c68309a46585792",
url="https://fa3c80de-84b9-4280-be10-e9ee55d6726b-bluemix:cd3fd31f55919b590bdd100e21c3278805fab74817ca0ca86c68309a46585792@fa3c80de-84b9-4280-be10-e9ee55d6726b-bluemix.cloudantnosqldb.appdomain.cloud")
client.connect()
database_name = "dustmanagement"
my_database = client.create_database(database_name)
if my_database.exists():
print(f"'{database_name}' successfully created.")
json_document = {'d':{ 'Garbage Level' : L, 'Garbage Weight': F }}
json_document = {'d':{ 'Garbage Level' : Q, 'Garbage Weight': W }}
json_document = {'d':{ 'Garbage Level' : E, 'Garbage Weight': R }}
json_document = {'d':{ 'Garbage Level' : T, 'Garbage Weight': Y }}
new_document = my_database.create_document(json_document)
if new_document.exists():
print(f"Document '{new_document}' successfully created.")
''' if L>=100:
print("your garbage is full")
import requests
url = "https://www.fast2sms.com/dev/bulk"
querystring = {"authorization":"G3k8jc6SOWqei20PQZJV4otdarXImlCYAygM9RuUxKnb1BvDhEWbJPYeFM1tLASXNKQzj5xp0Gm3Uw6B","sender_id":"FSTSMS","message":"This is test message","language":"english","route":"p","numbers":"9999999999,8919275560,7777777777"}
headers = {
'cache-control': "no-cache"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text)'''
# Disconnect the device and application from the cloud
deviceCli.disconnect()
| [
"[email protected]"
] | |
9a5b1d2e7d6dea3e986d99e0bb25fe5acc6bb443 | 63b0f544dc8ad899dd605d36e6048077c7a9ed6e | /tests/test_shrinking.py | 1c5b0a732701a01bc5dd6b9c42af810e40883b84 | [] | no_license | DRMacIver/structureshrink | c2372d7e4686879cb035292573d32a60459f1024 | 625e01236d6a7d72295782277737595f81d77d2a | refs/heads/master | 2020-05-22T02:47:24.446684 | 2016-06-16T12:16:39 | 2016-06-16T12:16:39 | 55,408,891 | 101 | 6 | null | 2016-04-18T20:24:31 | 2016-04-04T12:20:29 | Python | UTF-8 | Python | false | false | 734 | py | from structureshrink import shrink
from hypothesis import given, strategies as st
import hashlib
@given(st.binary(), st.random_module())
def test_partition_by_length(b, _):
shrunk = shrink(b, len)
assert len(shrunk) == len(b) + 1
@given(
st.lists(st.binary(min_size=1, max_size=4), min_size=1, max_size=5),
st.random_module()
)
def test_shrink_to_any_substring(ls, _):
shrunk = shrink(
b''.join(ls), lambda x: sum(l in x for l in ls)
)
assert len(shrunk) >= len(ls)
def test_partition_by_last_byte():
seed = b''.join(bytes([i, j]) for i in range(256) for j in range(256))
shrunk = shrink(
seed, lambda s: hashlib.sha1(s).digest()[-1] & 127
)
assert len(shrunk) == 128
| [
"[email protected]"
] | |
b22e2138f9c4c2578dd2761ab351bdc609613b66 | 381b75fe68a4da258e2e60a97105b66ac47214e4 | /qa/rpc-tests/getblocktemplate_proposals.py | bd844d49dd91db1fa1eb0f16535ccea2625de16b | [
"MIT"
] | permissive | lipcoin/lipcoin | 3a5997dfc9193ee7dee6f9fa0adc1cb5fb8c92a3 | 7afc0a02d63620e5a5601474cca131cb0cf3bbe4 | refs/heads/master | 2021-01-24T07:57:56.248620 | 2018-03-17T19:04:38 | 2018-03-17T19:04:38 | 112,155,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,726 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The LipCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import LipCoinTestFramework
from test_framework.util import *
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def b2x(b):
return b2a_hex(b).decode('ascii')
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
s = bytearray(b'\1')
while n > 127:
s[0] += 1
s.append(n % 256)
n //= 256
s.append(n)
return bytes(s)
def varlenEncode(n):
if n < 0xfd:
return pack('<B', n)
if n <= 0xffff:
return b'\xfd' + pack('<H', n)
if n <= 0xffffffff:
return b'\xfe' + pack('<L', n)
return b'\xff' + pack('<Q', n)
def dblsha(b):
return sha256(sha256(b).digest()).digest()
def genmrklroot(leaflist):
cur = leaflist
while len(cur) > 1:
n = []
if len(cur) & 1:
cur.append(cur[-1])
for i in range(0, len(cur), 2):
n.append(dblsha(cur[i] + cur[i+1]))
cur = n
return cur[0]
def template_to_bytearray(tmpl, txlist):
blkver = pack('<L', tmpl['version'])
mrklroot = genmrklroot(list(dblsha(a) for a in txlist))
timestamp = pack('<L', tmpl['curtime'])
nonce = b'\0\0\0\0'
blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
blk += varlenEncode(len(txlist))
for tx in txlist:
blk += tx
return bytearray(blk)
def template_to_hex(tmpl, txlist):
return b2x(template_to_bytearray(tmpl, txlist))
def assert_template(node, tmpl, txlist, expect):
rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'})
if rsp != expect:
raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(LipCoinTestFramework):
'''
Test block proposals with getblocktemplate.
'''
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
self.nodes = self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
def run_test(self):
node = self.nodes[0]
node.generate(1) # Mine a block to leave initial block download
tmpl = node.getblocktemplate()
if 'coinbasetxn' not in tmpl:
rawcoinbase = encodeUNum(tmpl['height'])
rawcoinbase += b'\x01-'
hexcoinbase = b2x(rawcoinbase)
hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))
# Test 0: Capability advertised
assert('proposal' in tmpl['capabilities'])
# NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
## Test 1: Bad height in coinbase
#txlist[0][4+1+36+1+1] += 1
#assert_template(node, tmpl, txlist, 'FIXME')
#txlist[0][4+1+36+1+1] -= 1
# Test 2: Bad input hash for gen tx
txlist[0][4+1] += 1
assert_template(node, tmpl, txlist, 'bad-cb-missing')
txlist[0][4+1] -= 1
# Test 3: Truncated final tx
lastbyte = txlist[-1].pop()
assert_raises(JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
txlist[-1].append(lastbyte)
# Test 4: Add an invalid tx to the end (duplicate of gen tx)
txlist.append(txlist[0])
assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
txlist.pop()
# Test 5: Add an invalid tx to the end (non-duplicate)
txlist.append(bytearray(txlist[0]))
txlist[-1][4+1] = 0xff
assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
txlist.pop()
# Test 6: Future tx lock time
txlist[0][-4:] = b'\xff\xff\xff\xff'
assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
txlist[0][-4:] = b'\0\0\0\0'
# Test 7: Bad tx count
txlist.append(b'')
assert_raises(JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
txlist.pop()
# Test 8: Bad bits
realbits = tmpl['bits']
tmpl['bits'] = '1c0000ff' # impossible in the real world
assert_template(node, tmpl, txlist, 'bad-diffbits')
tmpl['bits'] = realbits
# Test 9: Bad merkle root
rawtmpl = template_to_bytearray(tmpl, txlist)
rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
if rsp != 'bad-txnmrklroot':
raise AssertionError('unexpected: %s' % (rsp,))
# Test 10: Bad timestamps
realtime = tmpl['curtime']
tmpl['curtime'] = 0x7fffffff
assert_template(node, tmpl, txlist, 'time-too-new')
tmpl['curtime'] = 0
assert_template(node, tmpl, txlist, 'time-too-old')
tmpl['curtime'] = realtime
# Test 11: Valid block
assert_template(node, tmpl, txlist, None)
# Test 12: Orphan block
tmpl['previousblockhash'] = 'ff00' * 16
assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
GetBlockTemplateProposalTest().main()
| [
"[email protected]"
] | |
3d3ed85bb76718a4e5973252aefc6b9b998ef6c6 | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/OTLModel/Datatypes/KlOmegaElementMateriaal.py | 0e4f888f71af1341513eee503beab2556145d36f | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 2,285 | py | # coding=utf-8
import random
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlOmegaElementMateriaal(KeuzelijstField):
"""De gebruikte materialen van het omega-element."""
naam = 'KlOmegaElementMateriaal'
label = 'Omega element materiaal'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlOmegaElementMateriaal'
definition = 'De gebruikte materialen van het omega-element.'
status = 'ingebruik'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlOmegaElementMateriaal'
options = {
'aluminium': KeuzelijstWaarde(invulwaarde='aluminium',
label='aluminium',
status='ingebruik',
definitie='Omega-element vervaarigd uit aluminium.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOmegaElementMateriaal/aluminium'),
'roestvrij-staal': KeuzelijstWaarde(invulwaarde='roestvrij-staal',
label='roestvrij staal',
status='ingebruik',
definitie='Omega-element vervaarigd uit roestvrij staal.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOmegaElementMateriaal/roestvrij-staal'),
'verzinkt-staal': KeuzelijstWaarde(invulwaarde='verzinkt-staal',
label='verzinkt staal',
status='ingebruik',
definitie='Omega-element vervaarigd uit verzinkt staal.',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOmegaElementMateriaal/verzinkt-staal')
}
@classmethod
def create_dummy_data(cls):
return random.choice(list(map(lambda x: x.invulwaarde,
filter(lambda option: option.status == 'ingebruik', cls.options.values()))))
| [
"[email protected]"
] | |
bf7636f3f80aa31b41bfea8c5de09a9c2c78081e | be5e5aebd753ed1f376dc18ce411f0fac6d2f762 | /natuurpunt_purchase/__openerp__.py | 2c5775f49e8c2d135a4f1389ae4e637f0ac437cf | [] | no_license | smart-solution/natuurpunt-purchase | 7d9fcfdde769b6294d8dc705cecc99a177b4573c | 0ac94cb68cee4ef464158720e04007ee12036179 | refs/heads/master | 2021-05-22T04:43:21.594422 | 2020-11-02T13:32:27 | 2020-11-02T13:32:27 | 39,186,322 | 0 | 2 | null | 2020-11-02T13:32:28 | 2015-07-16T08:42:31 | Python | UTF-8 | Python | false | false | 1,548 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Smart Solution bvba
# Copyright (C) 2010-Today Smart Solution BVBA (<http://www.smartsolution.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "natuurpunt_purchase",
"version" : "1.0",
"author" : "Smart Solution ([email protected])",
"website" : "www.smartsolution.be",
"category" : "Generic Modules/Base",
"description": """
""",
"depends" : ["purchase_requisition"],
"data" : [
'natuurpunt_purchase_view.xml',
'natuurpunt_purchase_data.xml',
'natuurpunt_purchase_report.xml',
'security/natuurpunt_purchase_security.xml',
# 'security/ir.model.access.csv'
],
"active": False,
"installable": True
}
| [
"[email protected]"
] | |
981bbfed69a5508f0cfab20fc831cfd657c03bfd | 690c4fd238926624c1d3fa594aeb9d7140618b5b | /day04/mysite4/mysite4/settings.py | b6283d1c8dc99f4cc72597551584c5d90b1ccbf3 | [] | no_license | dalaAM/month_04 | 66c4630a169294f4e4dca26c77989ad5879da2ca | 322532fedd095cd9307ee4f2633026debe56f551 | refs/heads/master | 2022-12-04T06:02:12.995054 | 2020-08-23T04:06:19 | 2020-08-23T04:06:19 | 286,018,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,203 | py | """
Django settings for mysite4 project.
Generated by 'django-admin startproject' using Django 2.2.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lc!7ik)7n=drgz5wna+v5$_oejjd&c9hr$i2y8ag#rz4!fj4co'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bookstore',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite4.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite4.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mysite4',
'USER': 'root',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
2a7b308b9a147c9384f1af15affa987a9c80bc18 | 78144baee82268a550400bbdb8c68de524adc68f | /Production/python/Autumn18/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8_ext1_cff.py | 0404dac24ac1af443c07c6d7567e3d26aecf82b0 | [] | no_license | tklijnsma/TreeMaker | e6989c03189b849aff2007bad22e2bfc6922a244 | 248f2c04cc690ef2e2202b452d6f52837c4c08e5 | refs/heads/Run2_2017 | 2023-05-26T23:03:42.512963 | 2020-05-12T18:44:15 | 2020-05-12T18:44:15 | 263,960,056 | 1 | 2 | null | 2020-09-25T00:27:35 | 2020-05-14T15:57:20 | null | UTF-8 | Python | false | false | 2,416 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/280000/97CF62B7-13A7-1144-9021-CDF16708F4B0.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/0DC1264B-98DD-054D-934F-B46D16AEA2DA.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/2C2BC671-C18E-FF47-947E-B293CD33BEE2.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/2CB0A228-DDDD-0946-A030-6B0ED1F50B8A.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/626CC6DD-7373-0A44-99B0-933D20F1088D.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/6F7C5F93-53F0-AE45-BA6B-A95CCDCBD59A.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/7E24A0DA-B32D-5D44-BAF0-7AE8C465D170.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/8FD11F87-C024-1042-A459-FCFDC8445277.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/92CF84BB-1255-3243-9E69-C4C05B8922D1.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/9CBFC750-9804-CF47-8FB7-9C862D1137F2.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/E0CDD379-4CDE-2E4C-8014-F9573A6E9943.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/ED4F9CB6-82B7-054D-A20A-254A0AF0FED3.root',
] )
| [
"[email protected]"
] | |
387622b9565cfcaa2fe10c694aeb971fe457181e | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/MuonSpectrometer/MuonCnv/MuonByteStream/share/WriteMuonByteStream_jobOptions.py | a8c537456ede0b7ccc707e97e9cfe4a5455e6a66 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | theApp.Dlls += [ "MuonByteStream" ]
StreamBS = Algorithm( "StreamBS" )
StreamBS.ItemList +=["4187#*"]
StreamBS.ItemList +=["4190#*"]
StreamBS.ItemList +=["4186#*"]
StreamBS.ItemList +=["4183#*"]
StreamBS.ForceRead=True
| [
"[email protected]"
] | |
235b0d7e97c24574ab59397ad07507f0a41dccd3 | 45d515a0e33794e7c46a3ad7e1cfdf3ac6c2ee83 | /collector.py | 75168f49016e4b9e35ec36b52b159adbb814a41a | [
"MIT"
] | permissive | djcarter85/Fantasy-Premier-League | 12b2aaef62c5bc4e0656b83572c2ff9087aa4238 | 46a8e72b80b34a1afe3d7a9c9b4f8ad0cba48b7e | refs/heads/master | 2021-07-03T13:04:05.621833 | 2020-12-21T17:16:41 | 2020-12-21T17:16:41 | 201,034,915 | 1 | 0 | NOASSERTION | 2019-08-07T11:16:27 | 2019-08-07T11:16:26 | null | UTF-8 | Python | false | false | 4,066 | py | import os
import sys
import csv
def get_teams(directory):
teams = {}
fin = open(directory + "/teams.csv", 'rU')
reader = csv.DictReader(fin)
for row in reader:
teams[int(row['id'])] = row['name']
return teams
def get_fixtures(directory):
fixtures_home = {}
fixtures_away = {}
fin = open(directory + "/fixtures.csv", 'rU')
reader = csv.DictReader(fin)
for row in reader:
fixtures_home[int(row['id'])] = int(row['team_h'])
fixtures_away[int(row['id'])] = int(row['team_a'])
return fixtures_home, fixtures_away
def get_positions(directory):
positions = {}
names = {}
pos_dict = {'1': "GK", '2': "DEF", '3': "MID", '4': "FWD"}
fin = open(directory + "/players_raw.csv", 'rU',encoding="utf-8")
reader = csv.DictReader(fin)
for row in reader:
positions[int(row['id'])] = pos_dict[row['element_type']]
names[int(row['id'])] = row['first_name'] + ' ' + row['second_name']
return names, positions
def get_expected_points(gw, directory):
xPoints = {}
fin = open(os.path.join(directory, 'xP' + str(gw) + '.csv'), 'rU')
reader = csv.DictReader(fin)
for row in reader:
xPoints[int(row['id'])] = row['xP']
return xPoints
def merge_gw(gw, gw_directory):
merged_gw_filename = "merged_gw.csv"
gw_filename = "gw" + str(gw) + ".csv"
gw_path = os.path.join(gw_directory, gw_filename)
fin = open(gw_path, 'rU', encoding="utf-8")
reader = csv.DictReader(fin)
fieldnames = reader.fieldnames
fieldnames += ["GW"]
rows = []
for row in reader:
row["GW"] = gw
rows += [row]
out_path = os.path.join(gw_directory, merged_gw_filename)
fout = open(out_path,'a', encoding="utf-8")
writer = csv.DictWriter(fout, fieldnames=fieldnames, lineterminator='\n')
print(gw)
if gw == 1:
writer.writeheader()
for row in rows:
writer.writerow(row)
def collect_gw(gw, directory_name, output_dir):
rows = []
fieldnames = []
root_directory_name = "data/2020-21/"
fixtures_home, fixtures_away = get_fixtures(root_directory_name)
teams = get_teams(root_directory_name)
names, positions = get_positions(root_directory_name)
xPoints = get_expected_points(gw, output_dir)
for root, dirs, files in os.walk(u"./" + directory_name):
for fname in files:
if fname == 'gw.csv':
fpath = os.path.join(root, fname)
fin = open(fpath, 'rU')
reader = csv.DictReader(fin)
fieldnames = reader.fieldnames
for row in reader:
if int(row['round']) == gw:
id = int(os.path.basename(root).split('_')[-1])
name = names[id]
position = positions[id]
fixture = int(row['fixture'])
if row['was_home'] == True or row['was_home'] == "True":
row['team'] = teams[fixtures_home[fixture]]
else:
row['team'] = teams[fixtures_away[fixture]]
row['name'] = name
row['position'] = position
row['xP'] = xPoints[id]
rows += [row]
fieldnames = ['name', 'position', 'team', 'xP'] + fieldnames
outf = open(os.path.join(output_dir, "gw" + str(gw) + ".csv"), 'w', encoding="utf-8")
writer = csv.DictWriter(outf, fieldnames=fieldnames, lineterminator='\n')
writer.writeheader()
for row in rows:
writer.writerow(row)
def collect_all_gws(directory_name, output_dir):
for i in range(1,5):
collect_gw(i, directory_name, output_dir)
def merge_all_gws(num_gws, gw_directory):
for i in range(1, num_gws):
merge_gw(i, gw_directory)
def main():
#collect_all_gws(sys.argv[1], sys.argv[2])
merge_all_gws(int(sys.argv[1]), sys.argv[2])
#collect_gw(39, sys.argv[1], sys.argv[2])
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
5d1f082d6d49219104ea342bd2d205351bf8267c | a4a01e251b194f6d3c6654a2947a33fec2c03e80 | /PythonWeb/Django/1809/Day02/DjangoDemo01/sport/apps.py | 8dc5fb5c302ddd038a2fa5369b1ba78cfd405151 | [] | no_license | demo112/1809 | 033019043e2e95ebc637b40eaf11c76bfd089626 | e22972229e5e7831dce2aae0b53ce19a6e3bb106 | refs/heads/master | 2020-04-09T07:10:49.906231 | 2019-02-27T13:08:45 | 2019-02-27T13:08:45 | 160,143,869 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class SprotConfig(AppConfig):
name = 'sport'
| [
"[email protected]"
] | |
bb010b096427cce84eb368767cc9d17ddb8f16db | a9fc496e0724866093dbb9cba70a8fdce12b67a9 | /scripts/field/eunwol_house.py | 131e7ecadea4a9957479632d96bd39eede25e3ea | [
"MIT"
] | permissive | ryantpayton/Swordie | b2cd6b605f7f08f725f5e35d23ba3c22ef2ae7c0 | ca6f42dd43f63b1d2e6bb5cdc8fc051c277f326e | refs/heads/master | 2022-12-01T09:46:47.138072 | 2020-03-24T10:32:20 | 2020-03-24T10:32:20 | 253,997,319 | 2 | 0 | MIT | 2022-11-24T08:17:54 | 2020-04-08T05:50:22 | Java | UTF-8 | Python | false | false | 878 | py | # 410000001
if sm.hasQuest(38002):
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.sendNext("What happened? A house and a new name... But what happened to my friends? Are they alive? If I am, then maybe we failed to seal the Black Mage...")
sm.sendSay("No. They wouldn't give up that easily. They're probably hiding out somewhere, waiting to get back together. I need to look after myself for now, and get my strength back.")
sm.sendSay("Level 10... It's better than nothing, but it's not the best feeling. I'll hang around and get stronger. That's the only thing I can do now.")
sm.setQRValue(38002, "clear", False)
elif sm.hasQuest(38018):
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.sendNext("W-what is that thing? It looks so fuzzy. I don't think I should touch it...")
sm.setQRValue(38018, "clear", False) | [
"[email protected]"
] | |
cc95e675ce9006d3e9f7d28cffe4c7ef20978ece | e024cc2f51d2c9104a514f3f1a77c5cabbe7691a | /examplePandas.py | c717fed2795c5e5ce4b716fd2779e9a249e1c041 | [] | no_license | wilsonsk/Machine-Learning-for-Stock-Trading | 1818f144df02e69ce3e29fe1eb576675d512324a | bf5a36942e0f39e6c6d1c521bb3532e7eb82b669 | refs/heads/master | 2021-06-08T18:47:35.787532 | 2016-09-23T05:13:22 | 2016-09-23T05:13:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | import pandas as pd
def test_run():
    df = pd.read_csv("data/AAPL.csv")
    print(df)          # print the entire dataframe
    # print(df.head())   # first 5 rows
    # print(df.tail())   # last 5 rows
    # print(df.tail(n))  # last n rows
if __name__ == "__main__":
test_run()
| [
"[email protected]"
] | |
294428420539f48b42712835aa446ba29b706061 | 60096eba428275a28ab53d364aef0b9bc29e71c8 | /hris/models.py | 9a2b067dfbdab5351c3fedc2181e89d2624e2c8f | [] | no_license | RobusGauli/hris_new | 30ef8d17aceceb5f6c8f69f65df508228cb31f33 | 634f18d162310df9331543f7a877cac619ee1622 | refs/heads/master | 2021-01-19T21:55:39.279378 | 2017-04-29T04:32:38 | 2017-04-29T04:32:38 | 88,724,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,787 | py | from sqlalchemy import (
Column,
String,
Integer,
ForeignKey,
Text,
Enum,
CheckConstraint,
DateTime,
func,
Date,
Float,
Boolean
)
# (the DateTime columns below rely on `default`/`onupdate` callables)
from psycopg2 import IntegrityError
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Sequence
from hris import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, autoincrement=True)
user_name = Column(String(20), nullable=False, unique=True)
password = Column(String, nullable=False)
access_token = Column(String)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
created_by = Column(String(20))
updated_by = Column(String(20))
role_id = Column(Integer, ForeignKey('roles.id'))
activate = Column(Boolean, default=True)
del_flag = Column(Boolean, default=False)
#employee_id
password_changed = Column(Boolean, default=False)
#relationship
role = relationship('Role', back_populates='users')
#one to one with employees
employee = relationship('Employee', uselist=False, back_populates='user')
def to_dict(self):
data = {
'user_name' : self.user_name if self.user_name else '',
'role_id' : self.role_id if self.role_id else '',
'employee_data' : self.employee.to_dict() if self.employee else {},
'id' : self.id if self.id else '',
'role_name' : self.role.role_type
}
return data
class Role(Base):
__tablename__ = 'roles'
id = Column(Integer, primary_key=True, autoincrement=True)
role_type = Column(String, unique=True, nullable=False)
role_code = Column(String(20), unique=True, nullable=False)
role_type_display_name = Column(String(200), nullable=False)
activate = Column(Boolean, default=True)
del_flag = Column(Boolean, default=False)
agency_management_perm = Column(Enum('N', 'R', 'W', 'E', name='amp'), default='N')
division_management_perm = Column(Enum('N', 'R', 'W', 'E', name='dmp'), default='N')
agency_emp_perm = Column(Enum('N', 'R', 'W', 'E', name='aep'), default='N')
division_emp_perm = Column(Enum('N', 'R', 'W', 'E', name='dep'), default='N')
company_management_perm = Column(Enum('N', 'R', 'W', 'E', name='cmp'), default='N')
config_management_perm = Column(Enum('N', 'R', 'W', 'E', name='comp'), default='N')
read_management_perm = Column(Enum('N', 'A', 'B', 'D', 'O', name='rmp'), default='N')
user_management_perm = Column(Enum('N', 'R', 'W', 'E', name='ump'), default='N')
permission_eight = Column(Boolean, default=False)
permission_nine = Column(Boolean, default=False)
permission_ten = Column(Boolean, default=False)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
created_by = Column(String(20))
updated_by = Column(String(20))
#relationship
users = relationship('User', back_populates='role', cascade = 'all, delete, delete-orphan')
def to_dict(self):
role = {
'role_type' : self.role_type,
'id' : self.id,
'agency_management_perm' : self.agency_management_perm if self.agency_management_perm else 'N',
'activate' : self.activate if self.activate else True,
'division_management_perm' : self.division_management_perm if self.division_management_perm else 'N',
'agency_emp_perm' : self.agency_emp_perm if self.agency_emp_perm else 'N',
'division_emp_perm' : self.division_emp_perm if self.division_emp_perm else 'N',
'company_management_perm': self.company_management_perm if self.company_management_perm else 'N',
'config_management_perm': self.config_management_perm if self.config_management_perm else 'N',
'read_management_perm' : self.read_management_perm if self.read_management_perm else 'N',
'user_management_perm' : self.user_management_perm if self.user_management_perm else 'O',
}
return role
class CompanyDetail(Base):
__tablename__ = 'companydetail'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(30), unique=True)
description = Column(String(300))
currency_symbol = Column(String(2), unique=True)
is_prefix = Column(Boolean, default=False)
country = Column(String(30), nullable=False)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
class Branch(Base):
__tablename__ = 'branches'
id = Column(Integer, primary_key=True, autoincrement=True)
is_branch = Column(Boolean, default=False)
facility_name = Column(String(40), nullable=False, unique=True)
facility_display_name = Column(String(40))
    activate = Column(Boolean, default=True)
del_flag = Column(Boolean, default=False)
    # foreign keys
facility_type_id = Column(Integer, ForeignKey('facilitytypes.id'))
llg_id = Column(Integer, ForeignKey('llg.id'))
district_id = Column(Integer, ForeignKey('districts.id'))
province_id = Column(Integer, ForeignKey('provinces.id'))
region_id = Column(Integer, ForeignKey('regions.id'))
#relationship
facility_type = relationship('FacilityType', back_populates='branches')
llg = relationship('LLG', back_populates='branches')
district = relationship('District', back_populates='branches')
province = relationship('Province', back_populates='branches')
region = relationship('Region', back_populates='branches')
    # relationship
employees = relationship('Employee', back_populates='employee_branch', cascade='all, delete, delete-orphan')
class FacilityType(Base):
__tablename__ = 'facilitytypes'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(200), unique=True, nullable=False)
display_name = Column(String(200), nullable=False, unique=True)
del_flag = Column(Boolean, default=False)
branches = relationship('Branch', back_populates='facility_type', cascade='all, delete, delete-orphan')
class LLG(Base):
__tablename__ = 'llg'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique=True, nullable=False)
display_name = Column(String(200), unique=True, nullable=False)
del_flag = Column(Boolean, default=False)
branches = relationship('Branch', back_populates='llg', cascade='all, delete, delete-orphan')
class District(Base):
__tablename__ = 'districts'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(100), unique=True, nullable=False)
display_name = Column(String(200), unique=True, nullable=False)
del_flag = Column(Boolean, default=False)
branches = relationship('Branch', back_populates='district', cascade='all, delete, delete-orphan')
class Province(Base):
__tablename__ = 'provinces'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(100), unique=True, nullable=False)
display_name = Column(String(200), unique=True, nullable=False)
del_flag = Column(Boolean, default=False)
branches = relationship('Branch', back_populates='province', cascade='all, delete, delete-orphan')
class Region(Base):
__tablename__ = 'regions'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(100), unique=True, nullable=False)
display_name = Column(String(200), unique=True, nullable=False)
del_flag = Column(Boolean, default=False)
branches = relationship('Branch', back_populates='region', cascade='all, delete, delete-orphan')
# employee-related reference models
class EmployeeCategoryRank(Base):
__tablename__ = 'emp_cat_ranks'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(100), nullable=False, unique=True)
display_name = Column(String(100), nullable=False, unique=True)
activate = Column(Boolean, default=True)
del_flag = Column(Boolean, default=False)
    # relationship
emp_categories = relationship('EmployeeCategory', back_populates='emp_cat_rank', cascade='all, delete, delete-orphan')
class EmployeeCategory(Base):
__tablename__ = 'emp_categories'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(50), nullable=False, unique=True)
display_name = Column(String(50), nullable=False, unique=True)
activate = Column(Boolean, default=True)
emp_cat_rank_id = Column(Integer, ForeignKey('emp_cat_ranks.id'))
    # relationship
emp_cat_rank = relationship('EmployeeCategoryRank', back_populates='emp_categories')
#relationship
employees = relationship('Employee', back_populates='employee_category', cascade='all, delete, delete-orphan')
# let's hardcode the grade of the employee
class EmployeeType(Base):
__tablename__ = 'emp_types'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(100), nullable=False, unique=True)
display_name = Column(String(100), nullable=False, unique=True)
activate = Column(Boolean, default=True)
#relationship
employees = relationship('Employee', back_populates='employee_type', cascade='all, delete, delete-orphan')
class SalaryStep(Base):
__tablename__ = 'salarysteps'
id = Column(Integer, primary_key=True, autoincrement=True)
val = Column(String(4), nullable=False, unique=True)
activate = Column(Boolean, default=True)
class Employee(Base):
__tablename__ = 'employees'
id = Column(Integer, primary_key=True, autoincrement=True)
first_name = Column(String(40), nullable=False)
middle_name = Column(String(40))
last_name = Column(String(40), nullable=False)
sex = Column(Enum('M', 'F', 'O', name='sex'), nullable=False)
date_of_birth = Column(Date, nullable=False)
address_one = Column(String(50), nullable=False)
address_two = Column(String(50))
village = Column(String(100))
llg = Column(String(100))
district = Column(String(100))
province = Column(String(100))
region = Column(String(100))
country = Column(String(40))
email_address = Column(String(100), unique=True)
contact_number = Column(String(30), unique=True)
alt_contact_number = Column(String(30), unique=True)
age = Column(Integer, nullable=False)
retirement_age = Column(Integer, nullable=False, default=50)
employement_number = Column(String(20), unique=True)
salary_step = Column(String(6))
date_of_commencement = Column(Date)
contract_end_date = Column(Date)
activate = Column(Boolean, default=True)
#about del flag
del_flag = Column(Boolean, default=False)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
created_by = Column(String(50))
updated_by = Column(String(50))
photo = Column(String(500), unique=True)
document = Column(String(500), unique=True)
is_branch = Column(Boolean, nullable=False, default=True)
#branch_id_of_employee
employee_branch_id = Column(Integer, ForeignKey('branches.id'), nullable=False)
#relationship
employee_branch = relationship('Branch', back_populates='employees')
employee_type_id = Column(Integer, ForeignKey('emp_types.id'), nullable=False)
employee_category_id = Column(Integer, ForeignKey('emp_categories.id'), nullable=False)
#one to one with users table
user_id = Column(Integer, ForeignKey('users.id'), unique=True)
user = relationship('User', back_populates='employee')
#one to one with employeeextra table
employee_extra = relationship('EmployeeExtra', uselist=False, back_populates='employee')
#relationship
employee_type = relationship('EmployeeType', back_populates='employees')
employee_category = relationship('EmployeeCategory', back_populates='employees')
#other relationship
qualifications = relationship('Qualification', back_populates='employee', cascade='all, delete, delete-orphan')
certifications = relationship('Certification', back_populates='employee', cascade='all, delete, delete-orphan')
trainings = relationship('Training', back_populates='employee', cascade='all, delete, delete-orphan')
def to_dict(self):
data = {
'employement_number' : self.employement_number if self.employement_number else '',
'first_name' : self.first_name if self.first_name else '',
'middle_name' : self.middle_name if self.middle_name else '',
'last_name' : self.last_name if self.last_name else '',
'address_one' : self.address_one if self.address_one else '',
'contact_number' : self.contact_number if self.contact_number else '',
'country' : self.country if self.country else '',
'id' : self.id if self.id else ''
}
return data
class EmployeeExtra(Base):
__tablename__ = 'employee_extra'
id = Column(Integer, primary_key=True, autoincrement=True)
employee_id = Column(Integer, ForeignKey('employees.id'), unique=True)
ref_name = Column(String(40))
ref_address = Column(String(40))
ref_contact_number = Column(String(20))
emp_father_name = Column(String(40))
emp_mother_name = Column(String(40))
emp_single = Column(Boolean, default=True)
emp_wife_name = Column(String(40))
emp_num_of_children = Column(Integer)
del_flag = Column(Boolean, default=False)
#relationship
employee = relationship('Employee', back_populates='employee_extra')
class Qualification(Base):
__tablename__ = 'qualifications'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(60))
institute_name = Column(String(100))
city = Column(String(30))
state = Column(String(30))
province = Column(String(30))
country = Column(String(40))
start_date = Column(Date)
end_date = Column(Date)
del_flag = Column(Boolean, default=False)
employee_id = Column(Integer, ForeignKey('employees.id'))
#relationship
employee = relationship('Employee', back_populates='qualifications')
class Certification(Base):
__tablename__ = 'certifications'
id = Column(Integer, primary_key=True, autoincrement=True)
registration_number = Column(String(40), nullable=False, unique=True)
regulatory_body = Column(String(40), nullable=False)
registration_type = Column(String(40))
last_renewal_date = Column(Date)
expiry_date = Column(Date)
del_flag = Column(Boolean, default=False)
employee_id = Column(Integer, ForeignKey('employees.id'))
#relationship
employee = relationship('Employee', back_populates='certifications')
class Training(Base):
__tablename__ = 'trainings'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(200), nullable=False)
organiser_name = Column(String(200))
funding_source = Column(String(200))
duration = Column(String(30))
    institute = Column(String(50))
city = Column(String(50))
state = Column(String(50))
province = Column(String(50))
country = Column(String(50))
start_date = Column(Date)
end_date = Column(Date)
del_flag = Column(Boolean, default=False)
employee_id = Column(Integer, ForeignKey('employees.id'))
employee = relationship('Employee', back_populates='trainings')
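# Minimal wiring sketch (assumed DSN; the application configures its real
# engine and session elsewhere — `create_engine`/`sessionmaker` are imported above):
#
#     engine = create_engine('postgresql://user:password@localhost/hris')
#     Base.metadata.create_all(engine)
#     Session = sessionmaker(bind=engine)
#     session = Session()
#     session.add(Role(role_type='admin', role_code='ADM',
#                      role_type_display_name='Administrator'))
#     session.commit()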
| [
"[email protected]"
] | |
6b2dc4c4ace54c42df53fad4d1201457c5f52c49 | 881041fab1b4d05f1c5371efed2f9276037eb609 | /tasks/where-civilian-complaints-were-reported-2005-2009/depositor.py | cfc1f38a64c3ca6b8dd165f0179f14f18bf8bf97 | [] | no_license | ResidentMario/urban-physiology-nyc-catalog | b568f3b6ee1a887a50c4df23c488f50c92e30625 | cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c | refs/heads/master | 2021-01-02T22:43:09.073952 | 2017-08-06T18:27:22 | 2017-08-06T18:27:22 | 99,377,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | import requests
r = requests.get("https://data.cityofnewyork.us/api/views/wqr5-zmgj/rows.csv?accessType=DOWNLOAD")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/where-civilian-complaints-were-reported-2005-2009/data.csv", "wb") as f:
f.write(r.content)
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/where-civilian-complaints-were-reported-2005-2009/data.csv"]
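# For large exports, a streamed download avoids holding the whole response in
# memory (a sketch; `url` and `path` stand in for the literals above):
#
#     with requests.get(url, stream=True) as r:
#         with open(path, "wb") as f:
#             for chunk in r.iter_content(chunk_size=8192):
#                 f.write(chunk)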
| [
"[email protected]"
] | |
216af594580d96800f9747a8650c7a4f5c81e89f | 88ba19b3303c112a424720106a7f7fde615757b5 | /03-data_manipulation_with_pandas/01-transforming_data/sorting_rows1.py | 0939c1757697add7f2c7c4dbd665fad67ebd8b1c | [] | no_license | mitchisrael88/Data_Camp | 4100f5904c62055f619281a424a580b5b2b0cbc1 | 14356e221f614424a332bbc46459917bb6f99d8a | refs/heads/master | 2022-10-22T18:35:39.163613 | 2020-06-16T23:37:41 | 2020-06-16T23:37:41 | 263,859,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | Python 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 20:34:20) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> # Sort homelessness by individual
homelessness_ind = homelessness.sort_values("individuals")
# Print the top few rows
print(homelessness_ind.head())
SyntaxError: multiple statements found while compiling a single statement
>>>
>>>
=============================== RESTART: Shell ===============================
>>> # Sort homelessness by descending family members
homelessness_fam = homelessness.sort_values("family_members", ascending=False)
# Print the top few rows
print(homelessness_fam.head())
SyntaxError: multiple statements found while compiling a single statement
>>>
=============================== RESTART: Shell ===============================
>>> # Sort homelessness by descending family members
homelessness_fam = homelessness.sort_values("family_members", ascending=False)
# Print the top few rows
print(homelessness_fam.head())
SyntaxError: multiple statements found while compiling a single statement
>>>
=============================== RESTART: Shell ===============================
>>> # Sort homelessness by individual
homelessness_ind = homelessness.sort_values("individuals")
# Print the top few rows
print(homelessness_ind.head())
| [
"[email protected]"
] | |
170a9f6840626ccbdc39ec724bedd10138df1fc0 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/security/azure-mgmt-security/azure/mgmt/security/_configuration.py | 9aa2b7aa11ce32d405db56ca4db44791e423a5c6 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 2,145 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrestazure import AzureConfiguration
from .version import VERSION
class SecurityCenterConfiguration(AzureConfiguration):
"""Configuration for SecurityCenter
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Azure subscription ID
:type subscription_id: str
:param asc_location: The location where ASC stores the data of the
subscription. can be retrieved from Get locations
:type asc_location: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, asc_location, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if asc_location is None:
raise ValueError("Parameter 'asc_location' must not be None.")
if not base_url:
base_url = 'https://management.azure.com'
super(SecurityCenterConfiguration, self).__init__(base_url)
# Starting Autorest.Python 4.0.64, make connection pool activated by default
self.keep_alive = True
self.add_user_agent('azure-mgmt-security/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
self.asc_location = asc_location
| [
"[email protected]"
] | |
07ccca1ad2d1ac1eabc7ee6a124434a18a9abf44 | 5e5799e0ccce7a72d514fbc76dcb0a2108013f18 | /DAQConst.py | 97bc899ca96bfab6e6bceb5513c84de6b84fe56f | [] | no_license | sourcery-ai-bot/dash | 6d68937d225473d06a18ef64079a4b3717b5c12c | e1d1c3a601cd397d2508bfd4bb12bdb4e878cd9a | refs/heads/master | 2023-03-07T17:15:39.174964 | 2011-03-01T17:11:21 | 2011-03-01T17:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | #!/usr/bin/env python
#
# DAQ Constant values
class DAQPort(object):
"DAQLive port"
DAQLIVE = 6659
"IceCube Live logging/monitoring port"
I3LIVE = 6666
"CnCServer XML-RPC port"
CNCSERVER = 8080
"CnCServer->DAQRun logging port"
CNC2RUNLOG = 8999
"DAQRun XML-RPC port"
DAQRUN = 9000
"DAQRun catchall logging port"
CATCHALL = 9001
"First port used by DAQRun for individual component logging"
RUNCOMP_BASE = 9002
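# Illustrative use (a sketch; the real callers live in DAQRun/CnCServer):
#     rpc = xmlrpclib.ServerProxy('http://localhost:%d' % DAQPort.CNCSERVER)
#     log_port = DAQPort.RUNCOMP_BASE + component_index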
| [
"[email protected]"
] | |
6eb0d84530b500e74e8e9edde1228aadfe50f8ea | 8966d83bf85d4738d644624bd7b7063e8534a515 | /data.d/code/python/example/wxpython/frame_boxsizer_horizontal.py | cb90c4dd5a0b24d8d1c6b59f67b455c564814a00 | [] | no_license | taka16a23/.emacs.d | 84a77c04c4d5e00c089cb01cc42a94b884f729ae | ac5794e2594037e316d5fe9cf6bf1fd20b44a726 | refs/heads/master | 2023-05-29T06:25:38.449977 | 2023-05-16T22:08:04 | 2023-05-16T22:08:04 | 82,106,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import wx
def _main():
app = wx.App()
    frame = wx.Frame(None, wx.ID_ANY, 'Test Frame', size=(400, 200))
panel = wx.Panel(frame, wx.ID_ANY)
panel.SetBackgroundColour('#AFAFAF')
    button_1 = wx.Button(panel, wx.ID_ANY, 'button1')
    button_2 = wx.Button(panel, wx.ID_ANY, 'button2')
    button_3 = wx.Button(panel, wx.ID_ANY, 'button3')
    # A horizontal BoxSizer lays its children out left-to-right in the order
    # they are added; wx.VERTICAL would stack them top-to-bottom instead.
    layout = wx.BoxSizer(wx.HORIZONTAL)
layout.Add(button_1)
layout.Add(button_2)
layout.Add(button_3)
panel.SetSizer(layout)
frame.Show()
app.MainLoop()
if __name__ == '__main__':
_main()
| [
"root@qu"
] | root@qu |
d29954de8f62e3c9ec1497319acc72009ec90777 | 42d8bea28c7a34dde8d47f81e9463c5970af7120 | /app/api/convert.py | 6b5629f6f561c782c33307b24c72610c5501db73 | [
"MIT"
] | permissive | Tharitos/mse_teacher_plan | 1e26818811db4991eadca3157b28b2c9ae691416 | 4c577f810eb040c4a74810c98e2c8c4b514caf5d | refs/heads/master | 2020-04-07T05:57:52.084094 | 2018-11-18T19:04:02 | 2018-11-18T19:04:02 | 158,116,922 | 0 | 0 | NOASSERTION | 2018-11-18T19:01:48 | 2018-11-18T19:01:48 | null | UTF-8 | Python | false | false | 2,032 | py | from typing import Union, List, Type, Dict
import bson
import datetime
import mongoengine
from mongoengine.document import Document
ConvertedField = Dict[str, Union[str, int, List[str]]]
ConvertedDocument = List[ConvertedField]
def f(text: str, name: str, type: str, opts: List[str] = None,
value: str='', fixed: bool =False) -> ConvertedField:
if opts is None:
opts = []
return {
'text': text,
'name': name,
'type': type,
'opts': opts,
'value': value,
'fixed': fixed
}
def convert_mongo_to_HTML_types(obj) -> str:
    """Map a mongoengine field to the matching HTML <input> type."""
    if isinstance(obj, mongoengine.fields.IntField):
        return 'number'
    if isinstance(obj, mongoengine.fields.DateTimeField):
        return 'date'
    # everything else (StringField and friends) falls back to a text input
    return 'text'
def convert_mongo_model(obj: Type[Document]) -> ConvertedDocument:
fields = obj._fields_ordered
res = []
for field in fields:
current_field = obj._fields[field]
try:
text = current_field.verbose_name
except AttributeError:
text = '%NO_VERBOSE_NAME%'
try:
fixed = current_field.changeable_by_admin
except AttributeError:
fixed = False
name = current_field.name
        type = convert_mongo_to_HTML_types(current_field)
opts = None
if current_field.choices:
opts = current_field.choices
value = ''
res.append(f(text, name, type, opts, value, fixed))
return res
def convert_mongo_document(obj: Document) -> ConvertedDocument:
res = convert_mongo_model(obj)
fields = obj._fields_ordered
for i in range(len(fields)):
data = obj[fields[i]]
if isinstance(data, datetime.datetime):
data = data.date().isoformat()
if isinstance(data, bson.objectid.ObjectId):
data = str(data)
if isinstance(data, Document):
data = str(data.id)
res[i]['value'] = data
return res
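# Example usage (hypothetical mongoengine model; names are illustrative only):
#
#     class Person(Document):
#         name = StringField(verbose_name='Full name')
#         age = IntField(verbose_name='Age')
#
#     convert_mongo_model(Person)
#     # -> a ConvertedDocument with entries roughly like
#     #    {'text': 'Full name', 'name': 'name', 'type': 'text', ...}
#     #    (plus the implicit 'id' field mongoengine adds)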
| [
"[email protected]"
] | |
c09c4c872e08f2b035c24a8533dc2d86407835e1 | ee53b0262007b2f0db0fe15b2ad85f65fafa4e25 | /Leetcode/1488. Avoid Flood in The City.py | 8c3fd8f830fe17cfd954caa9f8977d15f440474a | [] | no_license | xiaohuanlin/Algorithms | bd48caacb08295fc5756acdac609be78e143a760 | 157cbaeeff74130e5105e58a6b4cdf66403a8a6f | refs/heads/master | 2023-08-09T05:18:06.221485 | 2023-08-08T11:53:15 | 2023-08-08T11:53:15 | 131,491,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | '''
Your country has an infinite number of lakes. Initially, all the lakes are empty, but when it rains over the nth lake, the nth lake becomes full of water. If it rains over a lake that is full of water, there will be a flood. Your goal is to avoid floods in any lake.
Given an integer array rains where:
rains[i] > 0 means there will be rains over the rains[i] lake.
rains[i] == 0 means there are no rains this day and you can choose one lake this day and dry it.
Return an array ans where:
ans.length == rains.length
ans[i] == -1 if rains[i] > 0.
ans[i] is the lake you choose to dry in the ith day if rains[i] == 0.
If there are multiple valid answers return any of them. If it is impossible to avoid flood return an empty array.
Notice that if you chose to dry a full lake, it becomes empty, but if you chose to dry an empty lake, nothing changes.
Example 1:
Input: rains = [1,2,3,4]
Output: [-1,-1,-1,-1]
Explanation: After the first day full lakes are [1]
After the second day full lakes are [1,2]
After the third day full lakes are [1,2,3]
After the fourth day full lakes are [1,2,3,4]
There's no day to dry any lake and there is no flood in any lake.
Example 2:
Input: rains = [1,2,0,0,2,1]
Output: [-1,-1,2,1,-1,-1]
Explanation: After the first day full lakes are [1]
After the second day full lakes are [1,2]
After the third day, we dry lake 2. Full lakes are [1]
After the fourth day, we dry lake 1. There is no full lakes.
After the fifth day, full lakes are [2].
After the sixth day, full lakes are [1,2].
It is easy that this scenario is flood-free. [-1,-1,1,2,-1,-1] is another acceptable scenario.
Example 3:
Input: rains = [1,2,0,1,2]
Output: []
Explanation: After the second day, full lakes are [1,2]. We have to dry one lake in the third day.
After that, it will rain over lakes [1,2]. It's easy to prove that no matter which lake you choose to dry in the 3rd day, the other one will flood.
Constraints:
1 <= rains.length <= 10^5
0 <= rains[i] <= 10^9
'''
import unittest
from typing import List
from bisect import bisect
class Solution:
def avoidFlood(self, rains: List[int]) -> List[int]:
        maps = {}          # lake -> index of the day it was last filled
        zero_index = []    # dry days (rains[i] == 0) not yet assigned a lake
        res = []
for i, num in enumerate(rains):
if num == 0:
zero_index.append(i)
                res.append(1)  # placeholder; overwritten if a refill needs this day
else:
if num in maps:
if not zero_index:
return []
                    # earliest unassigned dry day strictly after the last fill
                    k = bisect(zero_index, maps[num])
if k == len(zero_index):
return []
res[zero_index[k]] = num
del zero_index[k]
maps[num] = i
res.append(-1)
return res
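# Note: `del zero_index[k]` is O(n), so the worst case is O(n^2) overall; a
# balanced-tree container (e.g. sortedcontainers.SortedList, an external
# dependency) would bring the repeated lookup/removal down to O(n log n).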
class TestSolution(unittest.TestCase):
def test_case(self):
examples = (
(([1,2,0,0,2,1],),[-1,-1,2,1,-1,-1]),
)
for first, second in examples:
self.assert_function(first, second)
def assert_function(self, first, second):
self.assertEqual(Solution().avoidFlood(*first), second,
msg="first: {}; second: {}".format(first, second))
unittest.main()
| [
"[email protected]"
] | |
e3bc9b5713cf79fea2f4287a0cc8aff9065c8319 | fc772efe3eccb65e4e4a8da7f2b2897586b6a0e8 | /Compute/nova/service.py | 581b43fe5ed3c2b69b793788585d38688f6cf69a | [] | no_license | iphonestack/Openstack_Kilo | 9ae12505cf201839631a68c9ab4c041f737c1c19 | b0ac29ddcf24ea258ee893daf22879cff4d03c1f | refs/heads/master | 2021-06-10T23:16:48.372132 | 2016-04-18T07:25:40 | 2016-04-18T07:25:40 | 56,471,076 | 0 | 2 | null | 2020-07-24T02:17:46 | 2016-04-18T02:32:43 | Python | UTF-8 | Python | false | false | 16,592 | py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import os
import random
import sys
from oslo.config import cfg
from oslo import messaging
from oslo.utils import importutils
from oslo_concurrency import processutils
from nova import baserpc
from nova import conductor
from nova import context
from nova import debugger
from nova import exception
from nova.i18n import _, _LE, _LW
from nova.objects import base as objects_base
from nova.openstack.common import log as logging
from nova.openstack.common import service
from nova import rpc
from nova import servicegroup
from nova import utils
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='Seconds between nodes reporting state to datastore'),
cfg.BoolOpt('periodic_enable',
default=True,
help='Enable periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='Range of seconds to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
cfg.ListOpt('enabled_apis',
default=['ec2', 'osapi_compute', 'metadata'],
help='A list of APIs to enable by default'),
cfg.ListOpt('enabled_ssl_apis',
default=[],
help='A list of APIs with enabled SSL'),
cfg.StrOpt('ec2_listen',
default="0.0.0.0",
help='The IP address on which the EC2 API will listen.'),
cfg.IntOpt('ec2_listen_port',
default=8773,
help='The port on which the EC2 API will listen.'),
cfg.IntOpt('ec2_workers',
help='Number of workers for EC2 API service. The default will '
'be equal to the number of CPUs available.'),
cfg.StrOpt('osapi_compute_listen',
default="0.0.0.0",
help='The IP address on which the OpenStack API will listen.'),
cfg.IntOpt('osapi_compute_listen_port',
default=8774,
help='The port on which the OpenStack API will listen.'),
cfg.IntOpt('osapi_compute_workers',
help='Number of workers for OpenStack API service. The default '
'will be the number of CPUs available.'),
cfg.StrOpt('metadata_manager',
default='nova.api.manager.MetadataManager',
help='OpenStack metadata service manager'),
cfg.StrOpt('metadata_listen',
default="0.0.0.0",
help='The IP address on which the metadata API will listen.'),
cfg.IntOpt('metadata_listen_port',
default=8775,
help='The port on which the metadata API will listen.'),
cfg.IntOpt('metadata_workers',
help='Number of workers for metadata service. The default will '
'be the number of CPUs available.'),
cfg.StrOpt('compute_manager',
default='nova.compute.manager.ComputeManager',
help='Full class name for the Manager for compute'),
cfg.StrOpt('console_manager',
default='nova.console.manager.ConsoleProxyManager',
help='Full class name for the Manager for console proxy'),
cfg.StrOpt('consoleauth_manager',
default='nova.consoleauth.manager.ConsoleAuthManager',
help='Manager for console auth'),
cfg.StrOpt('cert_manager',
default='nova.cert.manager.CertManager',
help='Full class name for the Manager for cert'),
cfg.StrOpt('network_manager',
default='nova.network.manager.VlanManager',
help='Full class name for the Manager for network'),
cfg.StrOpt('scheduler_manager',
default='nova.scheduler.manager.SchedulerManager',
help='Full class name for the Manager for scheduler'),
cfg.IntOpt('service_down_time',
default=60,
help='Maximum time since last check-in for up service'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
CONF.import_opt('host', 'nova.netconf')
class Service(service.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
it state to the database services table.
"""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_enable=None, periodic_fuzzy_delay=None,
periodic_interval_max=None, db_allowed=True,
*args, **kwargs):
super(Service, self).__init__()
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
# NOTE(russellb) We want to make sure to create the servicegroup API
# instance early, before creating other things such as the manager,
# that will also create a servicegroup API instance. Internally, the
# servicegroup only allocates a single instance of the driver API and
# we want to make sure that our value of db_allowed is there when it
# gets created. For that to happen, this has to be the first instance
# of the servicegroup API.
self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host, *args, **kwargs)
self.rpcserver = None
self.report_interval = report_interval
self.periodic_enable = periodic_enable
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.periodic_interval_max = periodic_interval_max
self.saved_args, self.saved_kwargs = args, kwargs
self.backdoor_port = None
self.conductor_api = conductor.API(use_local=db_allowed)
self.conductor_api.wait_until_ready(context.get_admin_context())
def start(self):
verstr = version.version_string_with_package()
LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
{'topic': self.topic, 'version': verstr})
self.basic_config_check()
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
try:
self.service_ref = self.conductor_api.service_get_by_args(ctxt,
self.host, self.binary)
self.service_id = self.service_ref['id']
except exception.NotFound:
try:
self.service_ref = self._create_service_ref(ctxt)
except (exception.ServiceTopicExists,
exception.ServiceBinaryExists):
# NOTE(danms): If we race to create a record with a sibling
# worker, don't fail here.
self.service_ref = self.conductor_api.service_get_by_args(ctxt,
self.host, self.binary)
self.manager.pre_start_hook()
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
LOG.debug("Creating RPC server for service %s", self.topic)
target = messaging.Target(topic=self.topic, server=self.host)
endpoints = [
self.manager,
baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)
]
endpoints.extend(self.manager.additional_endpoints)
serializer = objects_base.NovaObjectSerializer()
self.rpcserver = rpc.get_server(target, endpoints, serializer)
self.rpcserver.start()
self.manager.post_start_hook()
LOG.debug("Join ServiceGroup membership for this service %s",
self.topic)
# Add service to the ServiceGroup membership group.
self.servicegroup_api.join(self.host, self.topic, self)
if self.periodic_enable:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
self.tg.add_dynamic_timer(self.periodic_tasks,
initial_delay=initial_delay,
periodic_interval_max=
self.periodic_interval_max)
def _create_service_ref(self, context):
svc_values = {
'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0,
}
service = self.conductor_api.service_create(context, svc_values)
self.service_id = service['id']
return service
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_enable=None,
periodic_fuzzy_delay=None, periodic_interval_max=None,
db_allowed=True):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'nova-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_enable: defaults to CONF.periodic_enable
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
:param periodic_interval_max: if set, the max time to wait between runs
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(sys.argv[0])
if not topic:
topic = binary.rpartition('nova-')[2]
if not manager:
manager_cls = ('%s_manager' %
binary.rpartition('nova-')[2])
manager = CONF.get(manager_cls, None)
if report_interval is None:
report_interval = CONF.report_interval
if periodic_enable is None:
periodic_enable = CONF.periodic_enable
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
debugger.init()
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_enable=periodic_enable,
periodic_fuzzy_delay=periodic_fuzzy_delay,
periodic_interval_max=periodic_interval_max,
db_allowed=db_allowed)
return service_obj
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
try:
self.conductor_api.service_destroy(context.get_admin_context(),
self.service_id)
except exception.NotFound:
LOG.warning(_LW('Service killed that has no database entry'))
def stop(self):
try:
self.rpcserver.stop()
self.rpcserver.wait()
except Exception:
pass
try:
self.manager.cleanup_host()
except Exception:
LOG.exception(_LE('Service error occurred during cleanup_host'))
pass
super(Service, self).stop()
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
def basic_config_check(self):
"""Perform basic config checks before starting processing."""
# Make sure the tempdir exists and is writable
try:
with utils.tempdir():
pass
except Exception as e:
LOG.error(_LE('Temporary directory is invalid: %s'), e)
sys.exit(1)
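# Typical entry-point wiring (a sketch; mirrors the nova/cmd/*.py binaries,
# where the topic and manager are derived from the binary name):
#
#     server = Service.create(binary='nova-compute')
#     serve(server)
#     wait()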
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = (getattr(CONF, '%s_workers' % name, None) or
processutils.get_worker_count())
if self.workers and self.workers < 1:
worker_name = '%s_workers' % name
msg = (_("%(worker_name)s value of %(workers)s is invalid, "
"must be greater than 0") %
{'worker_name': worker_name,
'workers': str(self.workers)})
raise exception.InvalidInput(msg)
self.use_ssl = use_ssl
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl,
max_url_len=max_url_len)
# Pull back actual port used
self.port = self.server.port
self.backdoor_port = None
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self.server.reset()
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.manager.pre_start_hook()
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.server.start()
if self.manager:
self.manager.post_start_hook()
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
def process_launcher():
return service.ProcessLauncher()
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
global _launcher
if _launcher:
raise RuntimeError(_('serve() can only be called once'))
_launcher = service.launch(server, workers=workers)
def wait():
_launcher.wait()
| [
"[email protected]"
] | |
d7c897b1fa38a472e0636bfb49694cb78a9a4151 | 5492859d43da5a8e292777c31eace71e0a57dedf | /chat/migrations/0021_auto_20190711_2100.py | 648c9ff6915b6a1a5b5e87052c58dbab41893255 | [
"MIT"
] | permissive | akindele214/181hub_2 | 93ad21dc6d899b6c56fbe200354b1678bb843705 | 48b8814b5f66ad87f9a54721506076ddf70fe9bc | refs/heads/master | 2022-12-13T01:15:07.925556 | 2020-05-19T09:39:57 | 2020-05-19T09:39:57 | 196,470,605 | 1 | 1 | MIT | 2022-12-08T01:22:55 | 2019-07-11T22:04:42 | Python | UTF-8 | Python | false | false | 762 | py | # Generated by Django 2.1.5 on 2019-07-11 20:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('chat', '0020_reportchat_content'),
]
operations = [
migrations.AlterField(
model_name='reportchat',
name='chat',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report_chat', to='chat.Chat'),
),
migrations.AlterField(
model_name='reportchat',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
72c5c560be0c150db2650cd8ddc1d2d5d0b5b6df | f4d8faeebbf9b7fe43396c637096a56c01a70060 | /blog/migrations/0006_auto_20201105_1114.py | 5f8babf0fc5e5c91344168878c7a39cc28a2de29 | [] | no_license | eloghin/blog | eb44f6d57e88fefacb48111791b9c96fd4883be9 | 3c27a112bb3d51a5a25e901c10a632d4d6251a15 | refs/heads/main | 2023-01-07T05:47:59.124104 | 2020-11-05T13:12:48 | 2020-11-05T13:12:48 | 309,698,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # Generated by Django 3.1.2 on 2020-11-05 11:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20201021_0920'),
]
operations = [
migrations.AlterModelOptions(
name='comment',
options={'ordering': ('date_created',)},
),
migrations.AddField(
model_name='comment',
name='email',
field=models.EmailField(max_length=254, null=True),
),
]
| [
"[email protected]"
] | |
90687734a25d313028207d5b66add9b5d039eb1f | 6ab217b675b0d33dec9d8985efc2de314e3a7a28 | /menus/controllers/restapi/menu_category/urls.py | 05579b2896e01b722317338f1b06535471c80647 | [] | no_license | nujkram/dream_cream_pastries | 3547928af859ebbb93f8d6ff64d02796d8c61a0c | c6a764f4f2c16191661ee6747dc0daa896eae5ec | refs/heads/master | 2023-06-20T20:20:21.001373 | 2021-07-29T00:55:49 | 2021-07-29T00:55:49 | 375,721,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | from django.urls import path
from dream_cream_pastries_project.urls import (
URL_READ_ONLY,
URL_DETAIL,
URL_CREATE,
URL_UPDATE,
URL_DELETE
)
from .api import(
ApiPublicMenuCategoryListDetail,
ApiPrivateMenuCategoryViewSet
)
VERSION = 'v1'
urlpatterns = [
# public
path(
f'{VERSION}/public/list',
ApiPublicMenuCategoryListDetail.as_view(URL_READ_ONLY),
name='api_public_menu_category_list_detail'
),
# private
path(
f'{VERSION}/private/list',
ApiPrivateMenuCategoryViewSet.as_view(URL_READ_ONLY),
name='api_private_menu_category_list_detail'
),
path(
f'{VERSION}/private/create',
ApiPrivateMenuCategoryViewSet.as_view(URL_CREATE),
name='api_private_menu_category_create'
),
path(
f'{VERSION}/private/<pk>/update',
ApiPrivateMenuCategoryViewSet.as_view(URL_UPDATE),
name='api_private_menu_category_update'
),
path(
f'{VERSION}/private/<pk>/delete',
ApiPrivateMenuCategoryViewSet.as_view(URL_DELETE),
name='api_private_menu_category_delete'
),
]
"""
Add to urls.py urlpatterns:
    path('menu_category/api/', include('menus.controllers.restapi.menu_category.urls'))
"""
| [
"[email protected]"
] | |
0646e9fd57a5a8ba9198885afcbdf59f25a09de9 | 027635467005c93a5b5406030b6f8852368e6390 | /question1_highest_average.py | a26682a8d9d01efbdae4eb073f9c60eec3e52feb | [] | no_license | Shadyaobuya/Opibus-Assessment | 0472a05e4c78b28cc5779d1a2a78c29cabb1ba04 | 8675e82a1c64d864eb4f85604d7843670a3f8078 | refs/heads/master | 2023-08-22T16:14:57.912494 | 2021-10-16T11:40:14 | 2021-10-16T11:40:14 | 417,516,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | #This is a program that takes in a list of discharge rates and returns the highest average
def find_highest_average(discharge):
new_list=[] #create an empty list that will hold the sum of two contiguous rates
for rate in range(len(discharge)-1):
high=discharge[rate]+discharge[rate+1]
new_list.append(high) #get the sum of every two contiguous rates and append it to the empty list
highest_sum=new_list[0] #make an assumption that the highest sum is at the first index of the empty list
for i in new_list:
if i >=highest_sum: #loop through the empty list and reasign the value of the highest sum
highest_sum=i
highest_average=highest_sum/2 #get the average of the highest sum
return highest_average
if __name__=='__main__':
print(find_highest_average([2, 3, 4, 1, 5])) #test case 1 output 3.5: [3,4]
print(find_highest_average([2, 3, 4, 8, 1, 5])) #test case 2 output:6.0 [4,8]
print(find_highest_average([6,1,7,3,9,6])) #test case 3 output:7.5: [9,6]
| [
"[email protected]"
] | |
1a54da2add1bd9577ec9109d3620de423fa16e30 | d31d744f62c09cb298022f42bcaf9de03ad9791c | /federated/tensorflow_federated/__init__.py | 7153a5a59599a5d2457dc00818688f32f3380d26 | [] | no_license | yuhuofei/TensorFlow-1 | b2085cb5c061aefe97e2e8f324b01d7d8e3f04a0 | 36eb6994d36674604973a06159e73187087f51c6 | refs/heads/master | 2023-02-22T13:57:28.886086 | 2021-01-26T14:18:18 | 2021-01-26T14:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,090 | py | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The TensorFlow Federated library."""
import sys
from tensorflow_federated.version import __version__ # pylint: disable=g-bad-import-order
from tensorflow_federated.python import aggregators
from tensorflow_federated.python import learning
from tensorflow_federated.python import simulation
from tensorflow_federated.python.core import backends
from tensorflow_federated.python.core import framework
from tensorflow_federated.python.core import templates
from tensorflow_federated.python.core import test
from tensorflow_federated.python.core import utils
from tensorflow_federated.python.core.api.computation_base import Computation
from tensorflow_federated.python.core.api.computation_types import at_clients as type_at_clients
from tensorflow_federated.python.core.api.computation_types import at_server as type_at_server
from tensorflow_federated.python.core.api.computation_types import FederatedType
from tensorflow_federated.python.core.api.computation_types import FunctionType
from tensorflow_federated.python.core.api.computation_types import SequenceType
from tensorflow_federated.python.core.api.computation_types import StructType
from tensorflow_federated.python.core.api.computation_types import StructWithPythonType
from tensorflow_federated.python.core.api.computation_types import TensorType
from tensorflow_federated.python.core.api.computation_types import to_type
from tensorflow_federated.python.core.api.computation_types import Type
from tensorflow_federated.python.core.api.computations import check_returns_type
from tensorflow_federated.python.core.api.computations import federated_computation
from tensorflow_federated.python.core.api.computations import tf_computation
from tensorflow_federated.python.core.api.intrinsics import federated_aggregate
from tensorflow_federated.python.core.api.intrinsics import federated_apply
from tensorflow_federated.python.core.api.intrinsics import federated_broadcast
from tensorflow_federated.python.core.api.intrinsics import federated_collect
from tensorflow_federated.python.core.api.intrinsics import federated_eval
from tensorflow_federated.python.core.api.intrinsics import federated_map
from tensorflow_federated.python.core.api.intrinsics import federated_mean
from tensorflow_federated.python.core.api.intrinsics import federated_reduce
from tensorflow_federated.python.core.api.intrinsics import federated_secure_sum
from tensorflow_federated.python.core.api.intrinsics import federated_sum
from tensorflow_federated.python.core.api.intrinsics import federated_value
from tensorflow_federated.python.core.api.intrinsics import federated_zip
from tensorflow_federated.python.core.api.intrinsics import sequence_map
from tensorflow_federated.python.core.api.intrinsics import sequence_reduce
from tensorflow_federated.python.core.api.intrinsics import sequence_sum
from tensorflow_federated.python.core.api.placements import CLIENTS
from tensorflow_federated.python.core.api.placements import SERVER
from tensorflow_federated.python.core.api.typed_object import TypedObject
from tensorflow_federated.python.core.api.value_base import Value
from tensorflow_federated.python.core.api.values import to_value
if sys.version_info < (3, 6):
  raise Exception('TFF only supports Python versions 3.6 or later.')
# Initialize a default execution context. This is implicitly executed the
# first time a module in the `core` package is imported.
backends.native.set_local_execution_context()
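# Quick smoke test of the default context (illustrative; needs TensorFlow):
#
#     import tensorflow as tf
#
#     @tf_computation(tf.int32)
#     def add_one(x):
#       return x + 1
#
#     assert add_one(2) == 3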
| [
"[email protected]"
] | |
abada1167457df8faaf71d0f85057c37fcd5b748 | 929fc8dd47b91c963c8c2f81d88e3d995a9dfc7c | /src/subject/Tree.py | 3ebe30494ae72e4da3574a67bea453247420b88b | [] | no_license | 1325052669/leetcode | fe7571a9201f4ef54089c2e078810dad11205b14 | dca40686c6a280bd394feb8e6e78d40eecf854b9 | refs/heads/master | 2023-04-01T17:53:30.605822 | 2021-04-10T15:17:45 | 2021-04-10T15:17:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,284 | py | class TreeNode:
def __init__(self,val):
self.val=val
self.left =None
self.right = None
class Tree:
def __init__(self):
pass
def pre_order_traverse(self,root):
if not root:return []
res = []
def dfs(node,res):
if not node:return
res.append(node.val)
dfs(node.left,res)
dfs(node.right,res)
dfs(root,res)
return res
def pre_order_iterative(self, root):
if not root:return []
stack = [root]
res =[]
while stack:
node = stack.pop()
res.append(node.val)
if node.right:
stack.append(node.right)
if node.left:
stack.append(node.left)
return res
def pre_order_divide_conquer(self,node):
if not node:return []
res=[node.val]
left = self.pre_order_divide_conquer(node.left)
right = self.pre_order_divide_conquer(node.right)
res.extend(left)
res.extend(right)
return res
def in_order_traverse(self,root):
if not root:return []
res = []
def dfs(node,res):
if not node:return
dfs(node.left,res)
res.append(node.val)
dfs(node.right,res)
dfs(root,res)
return res
def in_order_iterative(self,root):
if not root:return []
stack = []
res = []
cur = root
while stack or cur:
if cur:
stack.append(cur)
cur = cur.left
else:
node = stack.pop()
res.append(node.val)
cur = node.right
return res
def in_order_divide_conqur(self,root):
if not root:return []
res =[]
left = self.in_order_divide_conqur(root.left)
res += left
res.append(root.val)
right = self.in_order_divide_conqur(root.right)
res+=right
return res
def post_order_traverse(self,root):
if not root:return []
def dfs(node,res):
if not node:return
dfs(node.left,res)
dfs(node.right,res)
res.append(node.val)
res=[]
dfs(root,res)
return res
def post_order_divide_conqur(self,node):
if not node:return []
res = []
left = self.post_order_divide_conqur(node.left)
right = self.post_order_divide_conqur(node.right)
res+=left
res+=right
res.append(node.val)
return res
def main():
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.left = TreeNode(4)
root.left.right = TreeNode(5)
root.right.left = TreeNode(6)
root.right.right = TreeNode(7)
# print(Tree().pre_order_traverse(root))
# print(Tree().pre_order_iterative(root))
# print(Tree().pre_order_divide_conquer(root))
# print(Tree().in_order_traverse(root))
# print(Tree().in_order_iterative(root))
# print(Tree().in_order_divide_conqur(root))
print(Tree().post_order_traverse(root))
print(Tree().post_order_divide_conqur(root))
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
eb96064b42e96778d4d8b0bdffaf9669ba512f73 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2178/60692/285974.py | d49a0e281bfcaae701b49245a014c2d5ce39431b | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | from collections import defaultdict
'''
n = int(input())
list1 = input().split(" ")
count = 0
s1 = ''
res = []
dic1 = defaultdict(int)
for i in range(n):
count += i + 1
if i == 0:
s1 = list1[i]
else:
s1 += list1[i]
if list1[i] == list1[i - 1]:
dic1[list1[i]] += 1
if dic1[list1[i]] > 1:
count += (dic1[list1[i]] - 1) * dic1[list1[i]] // 2
count -= dic1[list1[i]] * (dic1[list1[i]] + 1) // 2
elif s1[0:i].count(list1[i]) and s1.index(list1[i]) != i - 1:
count -= 1
j = i - 1
t = s1[j:]
while s1[0:j].count(t):
count -= 1
j -= 1
t = s1[j:]
res.append(count)
for j in res:
print(j)
'''
n = int(input())
print(input()) | [
"[email protected]"
] | |
5a68d169b1831d85bb68d490f987e3d2d2cbac5a | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil979.py | e80fc8be8c4791851baaa5a6a9e04a24ad913cfd | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,043 | py | # qubit number=5
# total number=43
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += H(1) # number=4
prog += H(2) # number=5
prog += H(3) # number=6
prog += RX(-0.1602212253330796,1) # number=36
prog += H(4) # number=21
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += X(0) # number=9
prog += CNOT(0,1) # number=28
prog += H(4) # number=31
prog += X(1) # number=29
prog += CNOT(0,1) # number=30
prog += CNOT(0,2) # number=22
prog += CNOT(0,2) # number=25
prog += X(2) # number=26
prog += CNOT(0,2) # number=27
prog += CNOT(0,2) # number=24
prog += X(3) # number=12
prog += X(0) # number=13
prog += X(1) # number=14
prog += X(2) # number=15
prog += X(3) # number=16
prog += H(0) # number=17
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(3) # number=20
prog += H(0) # number=37
prog += CZ(1,0) # number=38
prog += H(0) # number=39
prog += Z(1) # number=34
prog += H(0) # number=40
prog += CZ(1,0) # number=41
prog += H(0) # number=42
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
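# A more idiomatic equivalent (sketch): collections.Counter(bitstrings)
# produces the same bitstring -> count mapping as summrise_results().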
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil979.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"[email protected]"
] | |
becca70bff7e7bf31f995812875dc8047fd6f021 | 177455bdf1fece221eef93b50f16253d342aa6a6 | /alerta/api/v2/views.py | b3627e1aa8dedf3635fb8cf886a08f541b70a809 | [
"Apache-2.0"
] | permissive | ntoll/alerta | c42630d91bf16cb649b43b69ae798abe60f39ed6 | 8122526b1791a0ff0d1aa26061129892b7e86f00 | refs/heads/master | 2021-01-18T05:18:30.062671 | 2013-03-03T23:17:10 | 2013-03-03T23:17:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,637 | py | from flask import jsonify, request, current_app
from functools import wraps
from alerta.api.v2 import app, mongo
import datetime
import pytz
# TODO(nsatterl): put these constants somewhere appropriate
MAX_HISTORY = -10 # 10 most recent
# TODO(nsatterl): use @before_request and @after_request to attach a unique request id
@app.before_first_request
def before_request():
# print "load config file with warning message"
pass
# TODO(nsatterl): fix JSON-P
def jsonp(func):
"""Wraps JSONified output for JSONP requests."""
@wraps(func)
def decorated_function(*args, **kwargs):
callback = request.args.get('callback', False)
if callback:
data = str(func(*args, **kwargs).data)
content = str(callback) + '(' + data + ')'
mimetype = 'application/javascript'
return current_app.response_class(content, mimetype=mimetype)
else:
return func(*args, **kwargs)
return decorated_function
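# Usage sketch (hypothetical request, once a view is wrapped with @jsonp):
# GET /alerta/api/v2/alerts?callback=cb returns cb({...}) with an
# application/javascript mimetype instead of plain JSON.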
@app.route('/alerta/api/v2/alerts/alert/<alertid>')
def get_alert(alertid):
alert = mongo.db.alerts.find_one({'_id': alertid})
if alert:
fix_id(alert)
return jsonify(response={'alert': alert, 'status': 'ok', 'total': 1})
else:
# TODO(nsatterl): include error message as 'message': 'not found' etc.
return jsonify(response={"alert": None, "status": "error", "message": "not found", "total": 0})
@app.route('/alerta/api/v2/alerts')
def get_alerts():
hide_details = request.args.get('hide-alert-details', False, bool)
hide_alert_repeats = request.args.getlist('hide-alert-repeats')
# TODO(nsatterl): support comma-separated fields eg. fields=event,summary
fields = dict((k, 1) for k in request.args.getlist('fields'))
# NOTE: if filtering on fields still always include severity and status in response
if fields:
fields['severity'] = 1
fields['status'] = 1
if request.args.get('hide-alert-history', False, bool):
fields['history'] = 0
else:
        fields['history'] = {'$slice': MAX_HISTORY}
alert_limit = request.args.get('limit', 0, int)
query = dict()
query_time = datetime.datetime.utcnow()
from_date = request.args.get('from-date')
if from_date:
from_date = datetime.datetime.strptime(from_date, '%Y-%m-%dT%H:%M:%S.%fZ')
from_date = from_date.replace(tzinfo=pytz.utc)
to_date = query_time
to_date = to_date.replace(tzinfo=pytz.utc)
query['lastReceiveTime'] = {'$gt': from_date, '$lte': to_date}
sort_by = list()
for s in request.args.getlist('sort-by'):
if s in ['createTime', 'receiveTime', 'lastReceiveTime']:
sort_by.append((s, -1)) # sort by newest first
else:
sort_by.append((s, 1)) # alpha-numeric sort
if not sort_by:
sort_by.append(('lastReceiveTime', -1))
return jsonify(details=hide_details, repeats=hide_alert_repeats, fields=fields)
@app.route('/alerta/api/v1/alerts/alert.json', methods=['POST', 'PUT'])
def create_alert():
pass
@app.route('/alerta/api/v2/alerts/alert/<alertid>', methods=['POST', 'PUT'])
def modify_alert(alertid):
pass
@app.route('/alerta/api/v2/alerts/alert/<alertid>/tag', methods=['POST', 'PUT'])
def tag_alert(alertid):
pass
@app.route('/alerta/api/v2/alerts/alert/<alertid>', methods=['DELETE'])
def delete_alert(alertid):
pass
@app.route('/alerta/api/v2/resources')
def get_resources():
pass
def fix_id(alert):
if '_id' in alert:
alert['id'] = alert['_id']
del alert['_id']
return alert
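# Example (hypothetical document): {'_id': 'abc', 'event': 'ping'}
# becomes {'id': 'abc', 'event': 'ping'} so clients get a JSON-friendly key.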
| [
"[email protected]"
] | |
49ef83378fcd0ea9e5514661358c72f05e5b41ae | d37bac0cca5a3fce2eaeded5ab8262f3ec215b85 | /backend/home/migrations/0002_load_initial_data.py | 7de06fbaafa1d57c815551632a91d73c08613ed1 | [] | no_license | crowdbotics-apps/m-18-nov-dev-15260 | 52ada15c3d64dc0ba8fdc83a0887e830268ff02c | 4e951ccfe3ab16025f995ef8fea500522e0470e0 | refs/heads/master | 2023-01-16T06:03:20.219329 | 2020-11-18T06:47:21 | 2020-11-18T06:47:21 | 313,847,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "m 18 nov"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">m 18 nov</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "m-18-nov-dev-15260.botics.co"
site_params = {
"name": "m 18 nov",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
a85b227b221113c684d0bdf1520dd764534526b4 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/EDataServer/OAuth2ServicesClass.py | 0ce66dc147beaec73e1fb4e0fb2dfd8bde750bc9 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,857 | py | # encoding: utf-8
# module gi.repository.EDataServer
# from /usr/lib64/girepository-1.0/EDataServer-1.2.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gio as __gi_repository_Gio
import gi.repository.GObject as __gi_repository_GObject
import gi.repository.Soup as __gi_repository_Soup
import gobject as __gobject
class OAuth2ServicesClass(__gi.Struct):
"""
:Constructors:
::
OAuth2ServicesClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
reserved = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(OAuth2ServicesClass), '__module__': 'gi.repository.EDataServer', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'OAuth2ServicesClass' objects>, '__weakref__': <attribute '__weakref__' of 'OAuth2ServicesClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7f626e908ef0>, 'reserved': <property object at 0x7f626e909040>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(OAuth2ServicesClass)
| [
"[email protected]"
] | |
a20ffd93c0dcbfea4dfc93f1a9c4a64f1c8d25aa | 36de14c6b188886df6a284ee9ce4a464a5ded433 | /Solutions/0838/0838.py | ca1246e69387c77941ed2610ee370d69c953d1e0 | [] | no_license | washing1127/LeetCode | 0dca0f3caa5fddd72b299e6e8f59b5f2bf76ddd8 | b910ddf32c7e727373449266c9e3167c21485167 | refs/heads/main | 2023-03-04T23:46:40.617866 | 2023-02-21T03:00:04 | 2023-02-21T03:00:04 | 319,191,720 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | # -*- coding:utf-8 -*-
# Author: washing
# DateTime: 2022/2/21 11:16
# File: 0838.py
# Desc:
class Solution:
def pushDominoes(self, dominoes: str) -> str:
l = list(dominoes)
status_c = l[0]
status_id = 0
for i in range(1,len(l)):
c = l[i]
if c == '.': continue
elif c == 'L':
if status_c == 'R': # 之前和当前相对,向中间靠拢
idl = status_id; idr = i
while idl < idr:
l[idl] = 'R'
l[idr] = 'L'
idl += 1
idr -= 1
status_id = i
status_c = 'L'
else: # 当前向左,之前为空或向左,中间全向左
for idx in range(status_id,i): l[idx] = 'L'
status_id = i
else:
if status_c == 'R': # 之前向右,当前向右,中间全向右
for idx in range(status_id,i): l[idx] = 'R'
status_id = i
else: # 之前向左或为空,当前向右,中间不变
status_c = 'R'
status_id = i
if l[-1] == '.' and status_c == 'R':
| [
"[email protected]"
] | |
af5e890ed0bb583636307a1cf2b0d3b8d7d1c779 | 6b66e499e7c2c6246c114029b83ae6ed3a4daa27 | /barista/kinematicplots_Bu.py | 01e0803f97f8df228c08c070882104a36adfa5fd | [] | no_license | DryRun/boffea | d837723eee13650306ede501a6e9fe1c5a9c610b | 433fdb92f3b60b6f140c0a0a3b2761d812b7044e | refs/heads/master | 2023-09-01T17:41:04.451388 | 2023-08-24T21:55:18 | 2023-08-24T21:55:18 | 232,651,843 | 1 | 0 | null | 2022-06-22T04:50:57 | 2020-01-08T20:24:05 | Python | UTF-8 | Python | false | false | 6,047 | py | import sys
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
from coffea import hist, util
from pprint import pprint
import glob
import mplhep
plt.style.use(mplhep.style.ROOT)
plt.tight_layout()
from brazil.aguapreta import *
figure_directory = "/home/dryu/BFrag/data/kinematic/"
input_files = {
"data": glob.glob("/home/dryu/BFrag/data/histograms/Run*coffea"),
"Bu": ["/home/dryu/BFrag/boffea/barista/Bu2KJpsi2KMuMu/MCEfficiencyHistograms.coffea"],
"Bd": ["/home/dryu/BFrag/boffea/barista/Bd2KsJpsi2KPiMuMu/MCEfficiencyHistograms.coffea"],
"Bs": ["/home/dryu/BFrag/boffea/barista/Bs2PhiJpsi2KKMuMu/MCEfficiencyHistograms.coffea"],
}
coffea = {}
for what in input_files.keys():
for f in input_files[what]:
coffea_tmp = util.load(f)
# Delete Bcands trees
for key in list(coffea_tmp.keys()):
if "Bcands" in key or "cutflow" in key:
del coffea_tmp[key]
# For data, combine subjobs
#if what == "data":
# subjobs = [x.name for x in coffea_tmp["BuToKMuMu_fit_pt_absy_mass"].axis("dataset").identifiers()]
# print(subjobs)
# for key in list(coffea_tmp.keys()):
# if type(coffea_tmp[key]).__name__ == "Hist":
# if "dataset" in [x.name for x in coffea_tmp[key].axes()]:
# print("DEBUG : Attempting to group axes.")
# print("DEBUG : Input identifiers = ")
# print(coffea_tmp[key].axis("dataset").identifiers())
# print("DEBUG : attempt to group")
# print(subjobs)
# coffea_tmp[key] = coffea_tmp[key].group("dataset",
# hist.Cat("dataset", "Primary dataset"),
# {"Run2018": subjobs})
# Persistify
if not what in coffea:
coffea[what] = coffea_tmp
else:
coffea[what].add(coffea_tmp)
print(coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"].axes())
print(coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"].axis("dataset").identifiers())
plot_index = {
"Bu": {
"fit_pt":{
"hist_mc": coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"]\
.group("dataset", hist.Cat("dataset", "Primary dataset"), {"mc": ["Bu2KJpsi2KMuMu_probefilter"]})\
.integrate("fit_mass")\
.integrate("fit_absy", slice(0., 2.25))\
.rebin("fit_pt", hist.Bin("pt", r"$p_{T}$ [GeV]", 50, 0., 50.)),
"hist_data": coffea["data"]["BuToKMuMu_fit_pt_absy_mass"]\
.integrate("fit_mass")\
.integrate("fit_absy", slice(0., 2.25))\
.rebin("fit_pt", hist.Bin("pt", r"$p_{T}$ [GeV]", 50, 0., 50.)),
"xlim": [0., 50.],
"xscale": "linear",
"xlabel": r"$p_{T}$ [GeV]",
"ylim": "auto",
"yscale": "log",
"ylabel": "Events",
},
"fit_absy":{
"hist_mc": coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"]\
.group("dataset", hist.Cat("dataset", "Primary dataset"), {"mc": ["Bu2KJpsi2KMuMu_probefilter"]})\
.integrate("fit_mass")\
.integrate("fit_pt", slice(0., 30.))\
.rebin("fit_absy", hist.Bin("absy", r"|y|$", 10, 0., 2.5)),
"hist_data": coffea["data"]["BuToKMuMu_fit_pt_absy_mass"]\
.integrate("fit_mass")\
.integrate("fit_pt", slice(0., 30.))\
.rebin("fit_absy", hist.Bin("absy", r"|y|$", 10, 0., 2.5)),
"xlim": [0., 3.0],
"xscale": "linear",
"xlabel": r"$|y|$",
"ylim": "auto",
"yscale": "log",
"ylabel": "Events",
},
"fit_mass":{
"hist_mc": coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"]\
.group("dataset", hist.Cat("dataset", "Primary dataset"), {"mc": ["Bu2KJpsi2KMuMu_probefilter"]})\
.integrate("fit_absy", slice(0., 2.25))\
.integrate("fit_pt", slice(0., 30.)),
"hist_data": coffea["data"]["BuToKMuMu_fit_pt_absy_mass"]\
.integrate("fit_absy", slice(0., 2.25))\
.integrate("fit_pt", slice(0., 30.)),
"xlim": [5.05, 5.5],
"xscale": "linear",
"xlabel": r"Fitted $B_{u}$ mass [GeV]",
"ylim": "auto",
"yscale": "log",
"ylabel": "Events",
}
}
}
figure_directory = "/home/dryu/BFrag/data/kinematic"
def plot(hist_mc=None, hist_data=None, xlim=[], xscale="", xlabel="", ylim=[], yscale="", ylabel="", data_selection="", mc_selection="", savetag=""):
hist_mc = hist_mc.integrate("selection", mc_selection)
print(hist_data.axis("selection").identifiers())
hist_data = hist_data.integrate("selection", data_selection)
# Normalize MC to data
print(hist_data)
print(hist_data.values())
    data_norm = sum(arr.sum() for arr in hist_data.values().values())  # dict of arrays; currently unused
hist_all = copy.deepcopy(hist_data).add(hist_mc)
    fig, ax = plt.subplots(2, 1, figsize=(10, 12))
hist.plot1d(hist_all, overlay="dataset", ax=ax[0])
ax[0].set_xlim(xlim)
ax[0].set_xscale(xscale)
ax[0].set_xlabel(xlabel)
ax[0].set_ylim(ylim)
ax[0].set_yscale(yscale)
ax[0].set_ylabel(ylabel)
hist.plotratio(
num=hist_all.integrate("dataset", "Run2018"),
den=hist_all.integrate("dataset", "Bu2KJpsi2KMuMu_probefilter"),
unc='num',
ax=ax[1])
ax[1].set_xlim(xlim)
ax[1].set_xscale(xscale)
ax[1].set_xlabel(xlabel)
ax[1].set_ylabel("Data / MC")
fig.savefig(f"{figure_directory}/{savetag}.png")
if __name__ == "__main__":
mc_selection = "recomatch_HLT_Mu9_IP5"
data_selection = "recotrig_HLT_Mu9_IP5"
for btype in ["Bu"]:
for plot_name, metadata in plot_index[btype].items():
plot(**metadata, savetag=f"{plot_name}_reco", mc_selection=mc_selection, data_selection=data_selection) | [
"[email protected]"
] | |
88c304f224ab60062582abbfa1146a651e1233e6 | f21814f3b4c8217e830af48b427de0b24dc398d4 | /missing_value_count_and_percent.py | aed47ea11574bbab9b091a7ff7b5448c8d28d997 | [] | no_license | CaraFJ/Utility | 2d1dbc3f09c33d9d92bf1e602f1a01b0f3ba656e | f032e6b376d65a05fe9d25fca31794c1302ec7ed | refs/heads/master | 2021-09-08T16:47:26.173366 | 2021-09-08T04:51:04 | 2021-09-08T04:52:05 | 248,438,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | def missing_value_count_and_percent(df):
"""
Return the number and percent of missing values for each column.
Args:
df (Dataframe): A dataframe with many columns
Return:
        df (Dataframe): A dataframe with one column of missing-value counts and one column of missing-value fractions rounded to 4 decimal places
"""
    df = pd.concat({'num_missing_values': df.isnull().sum(), 'pct_missing_values': df.isnull().mean().round(4)}, axis=1)
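    # Minimal usage sketch (hypothetical frame; assumes pandas is installed):
    #   demo = pd.DataFrame({'a': [1, None, 3], 'b': [None, None, 6]})
    #   missing_value_count_and_percent(demo)
    #   -> num_missing_values: a=1, b=2; pct_missing_values: a=0.3333, b=0.6667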
return df | [
"[email protected]"
] | |
b9515ac414e5682d45d712d99903e92145a5210f | 72319a157507386e7ef9210de3b1e1ab673dcd8c | /activities/jawbone/__init__.py | e5aa8a881b2b208cab7cfcd8a09df8c1327e542b | [
"MIT"
] | permissive | mcescalante/open-humans | 9ddd2b9c89094f05492cb10eebfd9994ecffbc95 | fe474530c8492ad9925e91f72e7736406b7e42e6 | refs/heads/master | 2021-04-29T18:00:54.049868 | 2018-03-05T06:02:20 | 2018-03-05T06:02:20 | 121,683,595 | 0 | 0 | null | 2018-02-15T21:14:39 | 2018-02-15T21:14:38 | null | UTF-8 | Python | false | false | 93 | py | default_app_config = '{}.apps.JawboneConfig'.format(__name__)
label = __name__.split('.')[1]
| [
"[email protected]"
] | |
27b94c9d7849b71176bca1cb1da910235230ce4d | c087e0bbeeac080335240c05255bd682cfea100e | /remap_reads_consensus.py | f0decb7ac4e13e3cab2add1986e43a77371c997a | [] | no_license | ifiddes/notch2nl_10x | f537481da544ec5e3c62a2899b713b4cb68e7285 | 35cfd95b0e7563bad0c5d2354fd7be526bc3a39d | refs/heads/master | 2021-01-10T10:18:59.098115 | 2016-03-24T17:43:03 | 2016-03-24T17:43:03 | 50,366,711 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,718 | py | """
Run the traditional WGS-SUN based pipeline on 10x data to compare to the results
"""
import pysam
import sys
import vcf
import string
import itertools
import numpy as np
import argparse
import tempfile
import os
import subprocess
from pyfasta import Fasta
from operator import itemgetter
from itertools import groupby
from collections import Counter, defaultdict
sys.path.append("/hive/users/ifiddes/pycbio")
from pycbio.sys.procOps import runProc, callProc
from pycbio.sys.fileOps import tmpFileGet
from pycbio.sys.mathOps import format_ratio
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('inBam', help='(10x) bamfile to remap')
parser.add_argument('outPdf', help='path to write plot to')
parser.add_argument('--outBam', default=None, help='path to write consensus aligned bam to')
parser.add_argument('--consensusVcf', default='/hive/users/ifiddes/notch2nl_suns/Notch2NL_SUN_UniqueIndels_ConsensusRef.vcf.gz')
parser.add_argument('--consensusRef', default='/hive/users/ifiddes/notch2nl_suns/notch2_aligned_consensus.fasta')
return parser.parse_args()
regions = [['chr1', 119990189, 120163923, 'Notch2'],
['chr1', 146149601, 146329308, 'Notch2NL-A'],
['chr1', 148597945, 148786127, 'Notch2NL-B'],
['chr1', 149349476, 149477855, 'Notch2NL-C'],
['chr1', 120706154, 120801963, 'Notch2NL-D']]
def extract_reads(bam, offset=50000):
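    # Pulls reads overlapping the NOTCH2/NOTCH2NL regions (padded by `offset`),
    # shuffles with `samtools bamshuf` so mates end up adjacent, and emits
    # interleaved FASTQ for remapping.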
tmp_reads = tmpFileGet(suffix='reads.fq')
tmp_shuf = tmpFileGet()
region_strs = ['{}:{}-{}'.format(chrom, start - offset, stop + offset) for chrom, start, stop, para in regions]
view_cmd = ['samtools', 'view', '-b', bam]
view_cmd.extend(region_strs)
cmd = [view_cmd,
['samtools', 'bamshuf', '-Ou', '-', tmp_shuf],
['samtools', 'bam2fq', '-']]
    with open(tmp_reads, 'w') as tmp_reads_h:
        runProc(cmd, stdout=tmp_reads_h)
return tmp_reads
def remap_reads(tmp_reads, index, out_bam):
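    # `bwa mem -p` enables smart pairing, treating the interleaved FASTQ as
    # paired-end reads; output is piped into a coordinate-sorted BAM, then indexed.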
sort_tmp = tmpFileGet()
cmd = [['bwa', 'mem', '-p', index, tmp_reads],
['samtools', 'view', '-b', '-'],
['samtools', 'sort', '-T', sort_tmp, '-O', 'bam', '-']]
with open(out_bam, 'w') as f_h:
runProc(cmd, stdout=f_h)
cmd = ['samtools', 'index', out_bam]
runProc(cmd)
def build_remapped_bam(in_bam, consensus_ref, out_bam):
tmp_reads = extract_reads(in_bam)
remap_reads(tmp_reads, consensus_ref, out_bam)
os.remove(tmp_reads)
def pileup(out_bam, vcf_path):
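    # For each SUN (paralog-distinguishing SNV) in the VCF, mpileup the remapped
    # BAM and record the fraction of the pileup column matching each paralog's
    # base, scaled by the fraction of paralogs genotyped at the site (a rough
    # normalization for missing alleles).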
bases = {"A", "T", "G", "C", "a", "t", "g", "c"}
vcf_handle = vcf.Reader(open(vcf_path))
wgs_results = defaultdict(list)
for vcf_rec in vcf_handle:
if vcf_rec.is_indel:
continue
pos_str = "{0}:{1}-{1}".format(vcf_rec.CHROM, vcf_rec.POS)
cmd = ['samtools', 'mpileup', '-r', pos_str, out_bam]
mpileup_rec = callProc(cmd).split()
pile_up_result = Counter(x.upper() for x in mpileup_rec[4] if x in bases)
sample_dict = {s.sample: s.gt_bases for s in vcf_rec.samples}
for s in vcf_rec.samples:
if len([x for x in sample_dict.itervalues() if x == s.gt_bases]) != 1:
continue
if s.gt_bases is None:
continue
c = 1.0 * pile_up_result[s.gt_bases] / len(mpileup_rec[4])
c *= 1.0 * len([x for x in sample_dict.itervalues() if x is not None]) / len(sample_dict)
wgs_results[s.sample].append([vcf_rec.POS, c])
return wgs_results
def plot_results(wgs_results, out_pdf, aln_size):
paralogs = ['Notch2', 'Notch2NL-A', 'Notch2NL-B', 'Notch2NL-C', 'Notch2NL-D']
fig, plots = plt.subplots(5, sharey=True, sharex=True)
plt.yticks((0, 0.1, 0.2, 0.3, 0.4))
plt.ylim((0, 0.4))
xticks = range(0, int(round(aln_size / 10000.0) * 10000.0), 10000)
plt.xticks(xticks, rotation='vertical')
plt.xlim((0, aln_size))
plt.xlabel("Alignment position")
for i, (p, para) in enumerate(zip(plots, paralogs)):
p.set_title(para)
wgs = wgs_results[para]
xvals, yvals = zip(*wgs)
p.vlines(xvals, np.zeros(len(xvals)), yvals, color=sns.color_palette()[0], alpha=0.7, linewidth=0.8)
# mark the zeros
zero_wgs = [[x, y + 0.02] for x, y in wgs if y == 0]
if len(zero_wgs) > 0:
z_xvals, z_yvals = zip(*zero_wgs)
p.vlines(z_xvals, np.zeros(len(z_xvals)), z_yvals, color=sns.color_palette()[2], alpha=0.7, linewidth=0.8)
plt.tight_layout(pad=2.5, h_pad=0.25)
zero_line = matplotlib.lines.Line2D([], [], color=sns.color_palette()[2])
reg_line = matplotlib.lines.Line2D([], [], color=sns.color_palette()[0])
fig.legend(handles=(reg_line, zero_line), labels=["WGS SUN Fraction", "WGS Missing SUN"], loc="upper right")
fig.text(0.01, 0.5, 'SUN fraction of reads', va='center', rotation='vertical')
plt.savefig(out_pdf, format="pdf")
plt.close()
def get_aln_size(consensus_ref):
f = Fasta(consensus_ref)
assert len(f) == 1
return len(f[f.keys()[0]])
def main():
args = parse_args()
if args.outBam is None:
out_bam = tmpFileGet(suffix='merged.sorted.bam')
else:
out_bam = args.outBam
build_remapped_bam(args.inBam, args.consensusRef, out_bam)
wgs_results = pileup(out_bam, args.consensusVcf)
aln_size = get_aln_size(args.consensusRef)
plot_results(wgs_results, args.outPdf, aln_size)
if args.outBam is None:
os.remove(out_bam)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f59db1371af75f94b82190561a99278bcd02b079 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/balancer_active_window.py | aeea9247f02f3b36a9f8fd0019a8e52731f28dcd | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,518 | py | # coding: utf-8
import pprint
import re
import six
class BalancerActiveWindow:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'start_time': 'str',
'stop_time': 'str'
}
attribute_map = {
'start_time': 'start_time',
'stop_time': 'stop_time'
}
def __init__(self, start_time=None, stop_time=None):
"""BalancerActiveWindow - a model defined in huaweicloud sdk"""
self._start_time = None
self._stop_time = None
self.discriminator = None
self.start_time = start_time
self.stop_time = stop_time
@property
def start_time(self):
"""Gets the start_time of this BalancerActiveWindow.
        Start time of the activity time window.
:return: The start_time of this BalancerActiveWindow.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this BalancerActiveWindow.
        Start time of the activity time window.
:param start_time: The start_time of this BalancerActiveWindow.
:type: str
"""
self._start_time = start_time
@property
def stop_time(self):
"""Gets the stop_time of this BalancerActiveWindow.
        End time of the activity time window.
:return: The stop_time of this BalancerActiveWindow.
:rtype: str
"""
return self._stop_time
@stop_time.setter
def stop_time(self, stop_time):
"""Sets the stop_time of this BalancerActiveWindow.
        End time of the activity time window.
:param stop_time: The stop_time of this BalancerActiveWindow.
:type: str
"""
self._stop_time = stop_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
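    # Note: any attribute named in sensitive_list is masked as "****" by to_dict().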
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BalancerActiveWindow):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
07cfd1607796d3ca94ad028d3b8c573a0d32cc3b | 4f998e9798b5d72a508a62013d8179e58d94b8bb | /home/migrations/0001_load_initial_data.py | 592ecd278b5ad39a6095474d97880b4060026301 | [] | no_license | crowdbotics-apps/testcerabc-27781 | 72437420dc97964cfd2c882f723f6e8dc4177fe8 | a58dc42415d0c2c7a523a8b9566f3a64b20a6164 | refs/heads/master | 2023-05-12T14:34:46.264425 | 2021-06-06T18:47:08 | 2021-06-06T18:47:08 | 374,438,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "testcerabc-27781.botics.co"
site_params = {
"name": "testcerabc",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
4d905cd191f636da17e610812a9398e3eae689d3 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-4255.py | e4efe63d4df69c3a17f2b7294ac199c649e5d2fb | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,757 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
    items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
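    # Doubling keeps appends amortized O(1) until capacity reaches doubling_limit;
    # past that, growth is one slot per resize and appends degrade to O(n) each.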
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
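# As the name hints, this is trial division rather than a true sieve: each
# surviving value k filters the rest of the vector by divisibility, so the
# overall cost is closer to O(n^2) than to O(n log log n).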
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
01aa0cb5b3fc74abb677dc0ee9eb917630e512c3 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20210301/security_partner_provider.py | 4e4a82226720393cf0603bb544fee7d22d602c3a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,291 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SecurityPartnerProviderArgs', 'SecurityPartnerProvider']
@pulumi.input_type
class SecurityPartnerProviderArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
security_partner_provider_name: Optional[pulumi.Input[str]] = None,
security_provider_name: Optional[pulumi.Input[Union[str, 'SecurityProviderName']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_hub: Optional[pulumi.Input['SubResourceArgs']] = None):
"""
The set of arguments for constructing a SecurityPartnerProvider resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] security_partner_provider_name: The name of the Security Partner Provider.
:param pulumi.Input[Union[str, 'SecurityProviderName']] security_provider_name: The security provider name.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input['SubResourceArgs'] virtual_hub: The virtualHub to which the Security Partner Provider belongs.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if security_partner_provider_name is not None:
pulumi.set(__self__, "security_partner_provider_name", security_partner_provider_name)
if security_provider_name is not None:
pulumi.set(__self__, "security_provider_name", security_provider_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_hub is not None:
pulumi.set(__self__, "virtual_hub", virtual_hub)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="securityPartnerProviderName")
def security_partner_provider_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Security Partner Provider.
"""
return pulumi.get(self, "security_partner_provider_name")
@security_partner_provider_name.setter
def security_partner_provider_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_partner_provider_name", value)
@property
@pulumi.getter(name="securityProviderName")
def security_provider_name(self) -> Optional[pulumi.Input[Union[str, 'SecurityProviderName']]]:
"""
The security provider name.
"""
return pulumi.get(self, "security_provider_name")
@security_provider_name.setter
def security_provider_name(self, value: Optional[pulumi.Input[Union[str, 'SecurityProviderName']]]):
pulumi.set(self, "security_provider_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
The virtualHub to which the Security Partner Provider belongs.
"""
return pulumi.get(self, "virtual_hub")
@virtual_hub.setter
def virtual_hub(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "virtual_hub", value)
class SecurityPartnerProvider(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
security_partner_provider_name: Optional[pulumi.Input[str]] = None,
security_provider_name: Optional[pulumi.Input[Union[str, 'SecurityProviderName']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
__props__=None):
"""
Security Partner Provider resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] security_partner_provider_name: The name of the Security Partner Provider.
:param pulumi.Input[Union[str, 'SecurityProviderName']] security_provider_name: The security provider name.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The virtualHub to which the Security Partner Provider belongs.
"""
...
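    # Illustrative provisioning sketch (the resource name, group name, "ZScaler"
    # and `hub` below are assumptions, not taken from this file):
    #   provider = SecurityPartnerProvider(
    #       "examplePartnerProvider",
    #       resource_group_name="example-rg",
    #       security_provider_name="ZScaler",
    #       virtual_hub=SubResourceArgs(id=hub.id))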
@overload
def __init__(__self__,
resource_name: str,
args: SecurityPartnerProviderArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Security Partner Provider resource.
:param str resource_name: The name of the resource.
:param SecurityPartnerProviderArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecurityPartnerProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
security_partner_provider_name: Optional[pulumi.Input[str]] = None,
security_provider_name: Optional[pulumi.Input[Union[str, 'SecurityProviderName']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecurityPartnerProviderArgs.__new__(SecurityPartnerProviderArgs)
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["security_partner_provider_name"] = security_partner_provider_name
__props__.__dict__["security_provider_name"] = security_provider_name
__props__.__dict__["tags"] = tags
__props__.__dict__["virtual_hub"] = virtual_hub
__props__.__dict__["connection_status"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:network:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200301:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200401:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200501:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200601:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200701:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200801:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20201101:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20210201:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20210501:SecurityPartnerProvider")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SecurityPartnerProvider, __self__).__init__(
'azure-native:network/v20210301:SecurityPartnerProvider',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SecurityPartnerProvider':
"""
Get an existing SecurityPartnerProvider resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SecurityPartnerProviderArgs.__new__(SecurityPartnerProviderArgs)
__props__.__dict__["connection_status"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["security_provider_name"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_hub"] = None
return SecurityPartnerProvider(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="connectionStatus")
def connection_status(self) -> pulumi.Output[str]:
"""
The connection status with the Security Partner Provider.
"""
return pulumi.get(self, "connection_status")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the Security Partner Provider resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="securityProviderName")
def security_provider_name(self) -> pulumi.Output[Optional[str]]:
"""
The security provider name.
"""
return pulumi.get(self, "security_provider_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The virtualHub to which the Security Partner Provider belongs.
"""
return pulumi.get(self, "virtual_hub")
| [
"[email protected]"
] | |
b3b8eb91fa66a2775490954f8c3ff2b4d06a219f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_critics.py | daa20c16b7ffcfd31864b2f9e82bd272a677bdae | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.nouns._critic import _CRITIC
# class header
class _CRITICS(_CRITIC):

def __init__(self,):
_CRITIC.__init__(self)
self.name = "CRITICS"
self.specie = 'nouns'
self.basic = "critic"
self.jsondata = {}
| [
"[email protected]"
] | |
91b1725adfaa4f3636377b6571089cf7925ad856 | 05e634a232574f676434dfa8e4183f3d0a1a4bc9 | /tutorials/pp-series/HRNet-Keypoint/lib/metrics/json_results.py | 9e0ceea69b6c0e57ed0f0224ad12a02078870de0 | [
"Apache-2.0"
] | permissive | PaddlePaddle/models | 67ac00d93c5255ac64a9d80ae5be2e8927e47cee | 8042c21b690ffc0162095e749a41b94dd38732da | refs/heads/release/2.4 | 2023-09-04T15:23:59.543625 | 2023-07-20T11:54:16 | 2023-07-20T11:54:16 | 88,868,842 | 7,633 | 3,597 | Apache-2.0 | 2023-09-05T23:23:54 | 2017-04-20T13:30:15 | Python | UTF-8 | Python | false | false | 5,121 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import numpy as np
__all__ = [
'get_det_res', 'get_det_poly_res', 'get_seg_res', 'get_solov2_segm_res',
'get_keypoint_res'
]
def get_det_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
det_res = []
k = 0
for i in range(len(bbox_nums)):
cur_image_id = int(image_id[i][0])
det_nums = bbox_nums[i]
for j in range(det_nums):
dt = bboxes[k]
k = k + 1
num_id, score, xmin, ymin, xmax, ymax = dt.tolist()
if int(num_id) < 0:
continue
category_id = label_to_cat_id_map[int(num_id)]
w = xmax - xmin + bias
h = ymax - ymin + bias
bbox = [xmin, ymin, w, h]
dt_res = {
'image_id': cur_image_id,
'category_id': category_id,
'bbox': bbox,
'score': score
}
det_res.append(dt_res)
return det_res
def get_det_poly_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):
det_res = []
k = 0
for i in range(len(bbox_nums)):
cur_image_id = int(image_id[i][0])
det_nums = bbox_nums[i]
for j in range(det_nums):
dt = bboxes[k]
k = k + 1
num_id, score, x1, y1, x2, y2, x3, y3, x4, y4 = dt.tolist()
if int(num_id) < 0:
continue
category_id = label_to_cat_id_map[int(num_id)]
rbox = [x1, y1, x2, y2, x3, y3, x4, y4]
dt_res = {
'image_id': cur_image_id,
'category_id': category_id,
'bbox': rbox,
'score': score
}
det_res.append(dt_res)
return det_res
def get_seg_res(masks, bboxes, mask_nums, image_id, label_to_cat_id_map):
import pycocotools.mask as mask_util
seg_res = []
k = 0
for i in range(len(mask_nums)):
cur_image_id = int(image_id[i][0])
det_nums = mask_nums[i]
for j in range(det_nums):
mask = masks[k].astype(np.uint8)
score = float(bboxes[k][1])
label = int(bboxes[k][0])
k = k + 1
if label == -1:
continue
cat_id = label_to_cat_id_map[label]
rle = mask_util.encode(
np.array(
mask[:, :, None], order="F", dtype="uint8"))[0]
if six.PY3:
if 'counts' in rle:
rle['counts'] = rle['counts'].decode("utf8")
sg_res = {
'image_id': cur_image_id,
'category_id': cat_id,
'segmentation': rle,
'score': score
}
seg_res.append(sg_res)
return seg_res
def get_solov2_segm_res(results, image_id, num_id_to_cat_id_map):
import pycocotools.mask as mask_util
segm_res = []
# for each batch
segms = results['segm'].astype(np.uint8)
clsid_labels = results['cate_label']
clsid_scores = results['cate_score']
lengths = segms.shape[0]
im_id = int(image_id[0][0])
if lengths == 0 or segms is None:
return None
# for each sample
for i in range(lengths - 1):
clsid = int(clsid_labels[i])
catid = num_id_to_cat_id_map[clsid]
score = float(clsid_scores[i])
mask = segms[i]
segm = mask_util.encode(np.array(mask[:, :, np.newaxis], order='F'))[0]
segm['counts'] = segm['counts'].decode('utf8')
coco_res = {
'image_id': im_id,
'category_id': catid,
'segmentation': segm,
'score': score
}
segm_res.append(coco_res)
return segm_res
def get_keypoint_res(results, im_id):
anns = []
preds = results['keypoint']
for idx in range(im_id.shape[0]):
image_id = im_id[idx].item()
kpts, scores = preds[idx]
for kpt, score in zip(kpts, scores):
kpt = kpt.flatten()
ann = {
'image_id': image_id,
'category_id': 1, # XXX hard code
'keypoints': kpt.tolist(),
'score': float(score)
}
x = kpt[0::3]
y = kpt[1::3]
x0, x1, y0, y1 = np.min(x).item(), np.max(x).item(), np.min(
y).item(), np.max(y).item()
ann['area'] = (x1 - x0) * (y1 - y0)
ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]
anns.append(ann)
return anns
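# A usage sketch (illustrative; the values below are made up): convert one fake
# detection to a COCO-style record. Each row of `bboxes` is
# [label, score, xmin, ymin, xmax, ymax], and `label_to_cat_id_map` maps the
# model's label index to a COCO category id.
if __name__ == '__main__':
    fake_bboxes = np.array([[0.0, 0.9, 10.0, 20.0, 110.0, 220.0]])
    fake_image_id = np.array([[42]])
    print(get_det_res(fake_bboxes, [1], fake_image_id, {0: 1}))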
| [
"[email protected]"
] | |
c22c2d6937f2f8e7d0605c8690d553ce6add5b2e | 2aac13d0048f12ac877af92a93f73c4ef1311d6e | /mrchunks/process.py | 49a1ec417764052f224f2e231e044e3ae6be2ef8 | [] | no_license | victorpoluceno/mrchunks | 18250e2bf0be375de48e01b2a42976285d556e85 | 8328ed3d836144ccc563b135d78f59e50ff4104b | refs/heads/master | 2021-01-15T22:18:39.091832 | 2015-11-22T23:00:53 | 2015-11-22T23:00:53 | 32,928,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,760 | py | import zmq
from mrchunks.concurrent import Engine, switch
from mrchunks.mailbox import Mailbox
from mrchunks.serializer import decode, encode
class Server:
def __init__(self):
self._context = zmq.Context()
def __call__(self, *args, **kwargs):
self.listen(kwargs['address'])
while True:
envelop = self.get()
self.send(envelop)
def get(self):
while True:
socks = dict(self._poller.poll(100))
if socks:
if socks.get(self._socket) != zmq.POLLIN:
switch()
continue
data = self._socket.recv()
                # FIXME: maybe we need to ack only after sending to the ipc socket
self._socket.send(b"OK+")
break
else:
switch()
return decode(data)
def send(self, envelop):
sender, recipient, message = envelop
_, _, p = recipient
socket = self._context.socket(zmq.REQ)
socket.connect("ipc:///tmp/%d" % (p,))
socket.send(encode(envelop), zmq.NOBLOCK)
def listen(self, address):
self._socket = self._context.socket(zmq.REP)
address, port = address
self._socket.bind("tcp://*:%s" % (port,))
self._poller = zmq.Poller()
self._poller.register(self._socket, zmq.POLLIN)
class Arbiter:
def __init__(self, address, number_of_workers=1):
self._next_pid = 0
self._address = address
self._engine = Engine(number_of_workers)
self._listen()
def _get_next_pid(self):
pid = self._next_pid
self._next_pid += 1
return self._address + (pid,)
def _listen(self):
server = Server()
self._engine.apply(server, address=self._address)
def spawn(self, start, *args, **kwargs):
pid = self._get_next_pid()
process = Process(pid, start)
self._engine.apply(process, *args, **kwargs)
return pid
def run(self, forever=True):
self._engine.run(forever)
def get_arbiter(*args, **kwargs):
return Arbiter(*args, **kwargs)
class Process(object):
def __init__(self, pid, start):
self.pid = pid
self._start = start
def __call__(self, *args, **kwargs):
self._mailbox = Mailbox()
self._mailbox.run(self.pid)
self._start(self, *args, **kwargs)
def send(self, recipient, message):
print('Sending message: {} from: {} to: {}'.format(message, self.pid,
recipient))
self._mailbox.send(recipient, message)
def receive(self):
print('Receiving...')
envelop = self._mailbox.receive()
return envelop
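# A usage sketch (illustrative; the address, port and handler are assumptions):
#   def start(process):
#       print('running as', process.pid)
#   arbiter = get_arbiter(('127.0.0.1', 5555))
#   pid = arbiter.spawn(start)
#   arbiter.run()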
| [
"[email protected]"
] | |
5bcb5224d286df6b18619cd81e4a38ee67d7c03a | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.2/tests/modeltests/user_commands/__init__.py | f026fbc3a9ca470bd92710829f6f0b97924d31b1 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.2/tests/modeltests/user_commands/__init__.py | [
"[email protected]"
] | |
530ae96e854fca34aa8899b13ba869d5d6b1f658 | 019fd2c29b8239d7b0a3906cfbdddfd440362417 | /asset/google/cloud/asset_v1beta1/gapic/asset_service_client_config.py | 340e89de38b2510f4f5c219239170706bfdfdc83 | [
"Apache-2.0"
] | permissive | tswast/google-cloud-python | 1334d26cdb994293f307d889251d7daef5fcb826 | d897d56bce03d1fda98b79afb08264e51d46c421 | refs/heads/master | 2021-06-10T17:40:06.968584 | 2020-01-11T17:41:29 | 2020-01-11T17:41:29 | 58,775,221 | 1 | 1 | Apache-2.0 | 2019-04-10T17:09:46 | 2016-05-13T22:06:37 | Python | UTF-8 | Python | false | false | 1,179 | py | config = {
"interfaces": {
"google.cloud.asset.v1beta1.AssetService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000,
}
},
"methods": {
"ExportAssets": {
"timeout_millis": 600000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"BatchGetAssetsHistory": {
"timeout_millis": 600000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
},
}
}
}
| [
"[email protected]"
] | |
91c46ed6861438fb001bf94fe1fa600cd41ad2c9 | 423f9cbe3b39e431b7eca2ba6ad15b2fd70ef56b | /EditDistance.py | d1cb6dc2cf6a49df28a308e6019e0e55bb7329c4 | [] | no_license | SerChirag/Dynamic-Programming | 8f7e6f23fd76c8d99fb8eb23b4324e1eb8e5b790 | 672bf3cb726cea302ce72ad7183d7f684b2788f0 | refs/heads/master | 2021-10-10T17:29:16.346125 | 2019-01-14T18:00:59 | 2019-01-14T18:00:59 | 115,982,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | def edit(str1,str2):
    # Levenshtein distance DP: edit[i][j] is the minimum number of single-character
    # insertions, deletions and substitutions needed to turn str1[:i] into str2[:j].
    edit = [[0 for j in range(len(str2)+1)] for i in range(len(str1)+1)]
    for i in range(len(str1)+1):
        edit[i][0] = i  # delete all i characters of str1
    for j in range(len(str2)+1):
        edit[0][j] = j  # insert all j characters of str2
    for i in range(1, len(str1)+1):
        for j in range(1, len(str2)+1):
            if str1[i-1] == str2[j-1]:
                edit[i][j] = edit[i-1][j-1]
            else:
                edit[i][j] = 1 + min(edit[i-1][j-1],  # substitution
                                     edit[i][j-1],    # insertion
                                     edit[i-1][j])    # deletion
    return edit[len(str1)][len(str2)]
str1 = "ABCDGH"
str2 = "AEDFHR"
print(edit(str1, str2))  # 4: substitute B->E, delete C, substitute G->F, append R
| [
"[email protected]"
] | |
9947c254c93bcc92b396bff46d6f0321e70fe555 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adverbs/_bleakly.py | 28b608df7c1220eda261a778a410c245be7da9ea | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py |
#calss header
class _BLEAKLY():
def __init__(self,):
self.name = "BLEAKLY"
self.definitions = [u'in a way that suggests a lack of hope: ', u'in a way that is cold and empty: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adverbs'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"[email protected]"
] | |
79f2687bc8f4a9add7c0fbbba2af25d1ce45be2a | 4fcb2e797ba83b310fe05461d48f02931ea5a427 | /2017/day-19/solution.py | b22d5be009cad3f1c42f831b1e093a846f34a4d9 | [] | no_license | BrentChesny/AdventOfCode | 5a642d081505563f7518c5244bb814e9e4dfc5de | dad5224961539149bed5757bbae0ccc35a3a293d | refs/heads/master | 2022-12-11T19:51:22.138655 | 2022-12-04T21:46:29 | 2022-12-04T21:46:29 | 47,266,210 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | def parse_input():
return [list(line.strip('\n')) for line in open('input.txt').readlines()]
def solve_part_one():
grid = parse_input()
pos_r, pos_c = 0, grid[0].index('|')
dir_r, dir_c = 1, 0
result = ''
while grid[pos_r+dir_r][pos_c+dir_c] != ' ':
pos_r, pos_c = pos_r + dir_r, pos_c + dir_c
if grid[pos_r][pos_c].isupper():
result += grid[pos_r][pos_c]
if grid[pos_r][pos_c] == '+':
dir_r, dir_c = find_new_direction(grid, (pos_r, pos_c), (dir_r, dir_c))
return result
def solve_part_two():
grid = parse_input()
pos_r, pos_c = 0, grid[0].index('|')
dir_r, dir_c = 1, 0
result = 1
while grid[pos_r+dir_r][pos_c+dir_c] != ' ':
result += 1
pos_r, pos_c = pos_r + dir_r, pos_c + dir_c
if grid[pos_r][pos_c] == '+':
dir_r, dir_c = find_new_direction(grid, (pos_r, pos_c), (dir_r, dir_c))
return result
def find_new_direction(grid, pos, old_dir):
pos_r, pos_c = pos
if grid[pos_r-1][pos_c] == '|' and old_dir != (1, 0):
return -1, 0
if grid[pos_r+1][pos_c] == '|' and old_dir != (-1, 0):
return 1, 0
if grid[pos_r][pos_c-1] == '-' and old_dir != (0, 1):
return 0, -1
if grid[pos_r][pos_c+1] == '-' and old_dir != (0, -1):
return 0, 1
def main():
print 'Part one: ', solve_part_one()
print 'Part two: ', solve_part_two()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
128d601cae05a0f318b0a90ac3ac53d97636fa48 | e0980f704a573894350e285f66f4cf390837238e | /.history/home/models_20201026174905.py | 36accded34a74330fba536c865386a721c5957a3 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import FieldPanel, PageChooserPanel, StreamFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.snippets.blocks import SnippetChooserBlock
from streams import blocks
class HomePage(Page):
lead_text = models.CharField(
max_length = 140,
blank = True,
help_text = 'Podtytuł pod tytułem banera'
)
button = models.ForeignKey(
'wagtailcore.Page',
blank = True,
null = True,
related_name = '+',
help_text = 'Wybierz opcjonalną stronę, do której chcesz utworzyć łącze',
on_delete = models.SET_NULL,
)
button_text = models.CharField(
max_length = 50,
default = 'Czytaj więcej',
blank = False,
help_text = 'Przycisk tekstowy'
)
banner_background_image = models.ForeignKey(
'wagtailimages.Image',
blank = False,
null =True,
related_name = '+',
help_text = 'Obraz tła baneru',
on_delete = models.SET_NULL,
)
body = StreamField([
('title', blocks.TitleBlock()),
('cards', blocks.CardsBlock()),
('image_and_text', blocks.ImageAndTextBlock()),
('cta', blocks.CallToActionBlock()),
        ('testimonial', SnippetChooserBlock(target_model='testimonials.Testimonial')),  # target model is an assumption; this snapshot left it blank
], null=True, blank=True)
content_panels = Page.content_panels + [
FieldPanel('lead_text'),
PageChooserPanel('button'),
FieldPanel('button_text'),
ImageChooserPanel('banner_background_image'),
StreamFieldPanel('body'),
]
| [
"[email protected]"
] | |
77d4308ef4f478de26c6fc394155d3854b2ea2a6 | 058e5be6d77df6448197708c1b12fd5aca5616e1 | /scripts/dualtor_neighbor_check.py | 161177008a20df1c0e6dcb6bdbda625234238344 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | Junchao-Mellanox/sonic-utilities | d958c8699032cc01155802c29d7174e3fb79c2d8 | 8cb7320e4b9b364da110b7b737eeaf991665b300 | refs/heads/master | 2023-08-31T21:00:17.622141 | 2023-08-16T22:27:23 | 2023-08-16T22:27:23 | 242,450,146 | 0 | 0 | NOASSERTION | 2023-08-31T08:37:57 | 2020-02-23T03:30:43 | Python | UTF-8 | Python | false | false | 18,441 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
dualtor_neighbor_check.py
This tool verifies that, on dual-ToR SONiC devices, neighbors learnt from
mux ports have the correct neighbor/route entries programmed in the ASIC.
"""
import argparse
import enum
import functools
import ipaddress
import json
import logging
import shlex
import sys
import syslog
import subprocess
import tabulate
from natsort import natsorted
from swsscommon import swsscommon
from sonic_py_common import daemon_base
try:
from swsssdk import port_util
except ImportError:
from sonic_py_common import port_util
DB_READ_SCRIPT = """
-- This script reads the required tables from the DB:
-- APPL_DB:
-- - MUX_CABLE_TABLE
-- - HW_MUX_CABLE_TABLE
-- - NEIGH_TABLE
-- ASIC_DB:
-- - ASIC_STATE
--
-- KEYS - None
-- ARGV[1] - APPL_DB db index
-- ARGV[2] - APPL_DB separator
-- ARGV[3] - APPL_DB neighbor table name
-- ARGV[4] - APPL_DB mux cable table name
-- ARGV[5] - APPL_DB hardware mux cable table name
-- ARGV[6] - ASIC_DB db index
-- ARGV[7] - ASIC_DB separator
-- ARGV[8] - ASIC_DB asic state table name
local APPL_DB = 0
local APPL_DB_SEPARATOR = ':'
local neighbor_table_name = 'NEIGH_TABLE'
local mux_state_table_name = 'MUX_CABLE_TABLE'
local hw_mux_state_table_name = 'HW_MUX_CABLE_TABLE'
local ASIC_DB = 1
local ASIC_DB_SEPARATOR = ':'
local asic_state_table_name = 'ASIC_STATE'
local asic_route_key_prefix = 'SAI_OBJECT_TYPE_ROUTE_ENTRY'
local asic_neigh_key_prefix = 'SAI_OBJECT_TYPE_NEIGHBOR_ENTRY'
local asic_fdb_key_prefix = 'SAI_OBJECT_TYPE_FDB_ENTRY'
if table.getn(ARGV) == 8 then
APPL_DB = ARGV[1]
APPL_DB_SEPARATOR = ARGV[2]
neighbor_table_name = ARGV[3]
mux_state_table_name = ARGV[4]
hw_mux_state_table_name = ARGV[5]
ASIC_DB = ARGV[6]
ASIC_DB_SEPARATOR = ARGV[7]
asic_state_table_name = ARGV[8]
end
local neighbors = {}
local mux_states = {}
local hw_mux_states = {}
local asic_fdb = {}
local asic_route_table = {}
local asic_neighbor_table = {}
-- read from APPL_DB
redis.call('SELECT', APPL_DB)
-- read neighbors learnt from Vlan devices
local neighbor_table_vlan_prefix = neighbor_table_name .. APPL_DB_SEPARATOR .. 'Vlan'
local neighbor_keys = redis.call('KEYS', neighbor_table_vlan_prefix .. '*')
for i, neighbor_key in ipairs(neighbor_keys) do
local second_separator_index = string.find(neighbor_key, APPL_DB_SEPARATOR, string.len(neighbor_table_vlan_prefix), true)
if second_separator_index ~= nil then
local neighbor_ip = string.sub(neighbor_key, second_separator_index + 1)
local mac = string.lower(redis.call('HGET', neighbor_key, 'neigh'))
neighbors[neighbor_ip] = mac
end
end
-- read mux states
local mux_state_table_prefix = mux_state_table_name .. APPL_DB_SEPARATOR
local mux_cables = redis.call('KEYS', mux_state_table_prefix .. '*')
for i, mux_cable_key in ipairs(mux_cables) do
local port_name = string.sub(mux_cable_key, string.len(mux_state_table_prefix) + 1)
local mux_state = redis.call('HGET', mux_cable_key, 'state')
if mux_state ~= nil then
mux_states[port_name] = mux_state
end
end
local hw_mux_state_table_prefix = hw_mux_state_table_name .. APPL_DB_SEPARATOR
local hw_mux_cables = redis.call('KEYS', hw_mux_state_table_prefix .. '*')
for i, hw_mux_cable_key in ipairs(hw_mux_cables) do
local port_name = string.sub(hw_mux_cable_key, string.len(hw_mux_state_table_prefix) + 1)
local mux_state = redis.call('HGET', hw_mux_cable_key, 'state')
if mux_state ~= nil then
hw_mux_states[port_name] = mux_state
end
end
-- read from ASIC_DB
redis.call('SELECT', ASIC_DB)
-- read ASIC fdb entries
local fdb_prefix = asic_state_table_name .. ASIC_DB_SEPARATOR .. asic_fdb_key_prefix
local fdb_entries = redis.call('KEYS', fdb_prefix .. '*')
for i, fdb_entry in ipairs(fdb_entries) do
local bridge_port_id = redis.call('HGET', fdb_entry, 'SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID')
local fdb_details = cjson.decode(string.sub(fdb_entry, string.len(fdb_prefix) + 2))
local mac = string.lower(fdb_details['mac'])
asic_fdb[mac] = bridge_port_id
end
-- read ASIC route table
local route_prefix = asic_state_table_name .. ASIC_DB_SEPARATOR .. asic_route_key_prefix
local route_entries = redis.call('KEYS', route_prefix .. '*')
for i, route_entry in ipairs(route_entries) do
local route_details = string.sub(route_entry, string.len(route_prefix) + 2)
table.insert(asic_route_table, route_details)
end
-- read ASIC neigh table
local neighbor_prefix = asic_state_table_name .. ASIC_DB_SEPARATOR .. asic_neigh_key_prefix
local neighbor_entries = redis.call('KEYS', neighbor_prefix .. '*')
for i, neighbor_entry in ipairs(neighbor_entries) do
local neighbor_details = string.sub(neighbor_entry, string.len(neighbor_prefix) + 2)
table.insert(asic_neighbor_table, neighbor_details)
end
local result = {}
result['neighbors'] = neighbors
result['mux_states'] = mux_states
result['hw_mux_states'] = hw_mux_states
result['asic_fdb'] = asic_fdb
result['asic_route_table'] = asic_route_table
result['asic_neigh_table'] = asic_neighbor_table
return redis.status_reply(cjson.encode(result))
"""
DB_READ_SCRIPT_CONFIG_DB_KEY = "_DUALTOR_NEIGHBOR_CHECK_SCRIPT_SHA1"
ZERO_MAC = "00:00:00:00:00:00"
NEIGHBOR_ATTRIBUTES = ["NEIGHBOR", "MAC", "PORT", "MUX_STATE", "IN_MUX_TOGGLE", "NEIGHBOR_IN_ASIC", "TUNNEL_IN_ASIC", "HWSTATUS"]
NOT_AVAILABLE = "N/A"
class LogOutput(enum.Enum):
"""Enum to represent log output."""
SYSLOG = "SYSLOG"
STDOUT = "STDOUT"
def __str__(self):
return self.value
class SyslogLevel(enum.IntEnum):
"""Enum to represent syslog level."""
ERROR = 3
NOTICE = 5
INFO = 6
DEBUG = 7
def __str__(self):
return self.name
SYSLOG_LEVEL = SyslogLevel.INFO
WRITE_LOG_ERROR = None
WRITE_LOG_WARN = None
WRITE_LOG_INFO = None
WRITE_LOG_DEBUG = None
def parse_args():
parser = argparse.ArgumentParser(
description="Verify neighbors state is consistent with mux state."
)
parser.add_argument(
"-o",
"--log-output",
type=LogOutput,
choices=list(LogOutput),
default=LogOutput.STDOUT,
help="log output"
)
parser.add_argument(
"-s",
"--syslog-level",
choices=["ERROR", "NOTICE", "INFO", "DEBUG"],
default=None,
help="syslog level"
)
parser.add_argument(
"-l",
"--log-level",
choices=["ERROR", "WARNING", "INFO", "DEBUG"],
default=None,
help="stdout log level"
)
args = parser.parse_args()
if args.log_output == LogOutput.STDOUT:
if args.log_level is None:
args.log_level = logging.WARNING
else:
args.log_level = logging.getLevelName(args.log_level)
if args.syslog_level is not None:
parser.error("Received syslog level with log output to stdout.")
if args.log_output == LogOutput.SYSLOG:
if args.syslog_level is None:
args.syslog_level = SyslogLevel.NOTICE
else:
args.syslog_level = SyslogLevel[args.syslog_level]
if args.log_level is not None:
parser.error("Received stdout log level with log output to syslog.")
return args
def write_syslog(level, message, *args):
if level > SYSLOG_LEVEL:
return
if args:
message %= args
if level == SyslogLevel.ERROR:
syslog.syslog(syslog.LOG_ERR, message)
elif level == SyslogLevel.NOTICE:
syslog.syslog(syslog.LOG_NOTICE, message)
elif level == SyslogLevel.INFO:
syslog.syslog(syslog.LOG_INFO, message)
elif level == SyslogLevel.DEBUG:
syslog.syslog(syslog.LOG_DEBUG, message)
else:
syslog.syslog(syslog.LOG_DEBUG, message)
def config_logging(args):
"""Configures logging based on arguments."""
global SYSLOG_LEVEL
global WRITE_LOG_ERROR
global WRITE_LOG_WARN
global WRITE_LOG_INFO
global WRITE_LOG_DEBUG
if args.log_output == LogOutput.STDOUT:
logging.basicConfig(
stream=sys.stdout,
level=args.log_level,
format="%(message)s"
)
WRITE_LOG_ERROR = logging.error
WRITE_LOG_WARN = logging.warning
WRITE_LOG_INFO = logging.info
WRITE_LOG_DEBUG = logging.debug
elif args.log_output == LogOutput.SYSLOG:
SYSLOG_LEVEL = args.syslog_level
WRITE_LOG_ERROR = functools.partial(write_syslog, SyslogLevel.ERROR)
WRITE_LOG_WARN = functools.partial(write_syslog, SyslogLevel.NOTICE)
WRITE_LOG_INFO = functools.partial(write_syslog, SyslogLevel.INFO)
WRITE_LOG_DEBUG = functools.partial(write_syslog, SyslogLevel.DEBUG)
def run_command(cmd):
"""Runs a command and returns its output."""
WRITE_LOG_DEBUG("Running command: %s", cmd)
try:
p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(output, _) = p.communicate()
except Exception as details:
raise RuntimeError("Failed to run command: %s", details)
WRITE_LOG_DEBUG("Command output: %s", output)
WRITE_LOG_DEBUG("Command return code: %s", p.returncode)
if p.returncode != 0:
raise RuntimeError("Command failed with return code %s: %s" % (p.returncode, output))
return output.decode()
def redis_cli(redis_cmd):
"""Call a redis command with return error check."""
run_cmd = "sudo redis-cli %s" % redis_cmd
result = run_command(run_cmd).strip()
if "error" in result or "ERR" in result:
raise RuntimeError("Redis command '%s' failed: %s" % (redis_cmd, result))
return result
def read_tables_from_db(appl_db):
"""Reads required tables from db."""
# NOTE: let's cache the db read script sha1 in APPL_DB under
# key "_DUALTOR_NEIGHBOR_CHECK_SCRIPT_SHA1"
db_read_script_sha1 = appl_db.get(DB_READ_SCRIPT_CONFIG_DB_KEY)
if not db_read_script_sha1:
redis_load_cmd = "SCRIPT LOAD \"%s\"" % DB_READ_SCRIPT
db_read_script_sha1 = redis_cli(redis_load_cmd).strip()
WRITE_LOG_INFO("loaded script sha1: %s", db_read_script_sha1)
appl_db.set(DB_READ_SCRIPT_CONFIG_DB_KEY, db_read_script_sha1)
redis_run_cmd = "EVALSHA %s 0" % db_read_script_sha1
result = redis_cli(redis_run_cmd).strip()
tables = json.loads(result)
neighbors = tables["neighbors"]
mux_states = tables["mux_states"]
hw_mux_states = tables["hw_mux_states"]
asic_fdb = {k: v.lstrip("oid:0x") for k, v in tables["asic_fdb"].items()}
asic_route_table = tables["asic_route_table"]
asic_neigh_table = tables["asic_neigh_table"]
WRITE_LOG_DEBUG("neighbors: %s", json.dumps(neighbors, indent=4))
WRITE_LOG_DEBUG("mux states: %s", json.dumps(mux_states, indent=4))
WRITE_LOG_DEBUG("hw mux states: %s", json.dumps(hw_mux_states, indent=4))
WRITE_LOG_DEBUG("ASIC FDB: %s", json.dumps(asic_fdb, indent=4))
WRITE_LOG_DEBUG("ASIC route table: %s", json.dumps(asic_route_table, indent=4))
WRITE_LOG_DEBUG("ASIC neigh table: %s", json.dumps(asic_neigh_table, indent=4))
return neighbors, mux_states, hw_mux_states, asic_fdb, asic_route_table, asic_neigh_table
def get_if_br_oid_to_port_name_map():
"""Return port bridge oid to port name map."""
db = swsscommon.SonicV2Connector(host="127.0.0.1")
try:
port_name_map = port_util.get_interface_oid_map(db)[1]
except IndexError:
port_name_map = {}
if_br_oid_map = port_util.get_bridge_port_map(db)
if_br_oid_to_port_name_map = {}
for if_br_oid, if_oid in if_br_oid_map.items():
if if_oid in port_name_map:
if_br_oid_to_port_name_map[if_br_oid] = port_name_map[if_oid]
return if_br_oid_to_port_name_map
def is_dualtor(config_db):
"""Check if it is a dualtor device."""
device_metadata = config_db.get_table('DEVICE_METADATA')
return ("localhost" in device_metadata and
"subtype" in device_metadata['localhost'] and
device_metadata['localhost']['subtype'].lower() == 'dualtor')
def get_mux_cable_config(config_db):
"""Return mux cable config from CONFIG_DB."""
return config_db.get_table("MUX_CABLE")
def get_mux_server_to_port_map(mux_cables):
"""Return mux server ip to port name map."""
mux_server_to_port_map = {}
for port, mux_details in mux_cables.items():
if "server_ipv4" in mux_details:
server_ipv4 = str(ipaddress.ip_interface(mux_details["server_ipv4"]).ip)
mux_server_to_port_map[server_ipv4] = port
if "server_ipv6" in mux_details:
server_ipv6 = str(ipaddress.ip_interface(mux_details["server_ipv6"]).ip)
mux_server_to_port_map[server_ipv6] = port
return mux_server_to_port_map
def get_mac_to_port_name_map(asic_fdb, if_oid_to_port_name_map):
"""Return mac to port name map."""
mac_to_port_name_map = {}
for mac, port_br_oid in asic_fdb.items():
if port_br_oid in if_oid_to_port_name_map:
mac_to_port_name_map[mac] = if_oid_to_port_name_map[port_br_oid]
return mac_to_port_name_map
def check_neighbor_consistency(neighbors, mux_states, hw_mux_states, mac_to_port_name_map,
asic_route_table, asic_neigh_table, mux_server_to_port_map):
"""Checks if neighbors are consistent with mux states."""
asic_route_destinations = set(json.loads(_)["dest"].split("/")[0] for _ in asic_route_table)
asic_neighs = set(json.loads(_)["ip"] for _ in asic_neigh_table)
check_results = []
for neighbor_ip in natsorted(list(neighbors.keys())):
mac = neighbors[neighbor_ip]
check_result = {attr: NOT_AVAILABLE for attr in NEIGHBOR_ATTRIBUTES}
check_result["NEIGHBOR"] = neighbor_ip
check_result["MAC"] = mac
is_zero_mac = (mac == ZERO_MAC)
if mac not in mac_to_port_name_map and not is_zero_mac:
check_results.append(check_result)
continue
check_result["NEIGHBOR_IN_ASIC"] = neighbor_ip in asic_neighs
check_result["TUNNERL_IN_ASIC"] = neighbor_ip in asic_route_destinations
if is_zero_mac:
check_result["HWSTATUS"] = ((not check_result["NEIGHBOR_IN_ASIC"]) and check_result["TUNNERL_IN_ASIC"])
else:
port_name = mac_to_port_name_map[mac]
# NOTE: mux server ips are always fixed to the mux port
if neighbor_ip in mux_server_to_port_map:
port_name = mux_server_to_port_map[neighbor_ip]
mux_state = mux_states[port_name]
hw_mux_state = hw_mux_states[port_name]
check_result["PORT"] = port_name
check_result["MUX_STATE"] = mux_state
check_result["IN_MUX_TOGGLE"] = mux_state != hw_mux_state
if mux_state == "active":
check_result["HWSTATUS"] = (check_result["NEIGHBOR_IN_ASIC"] and (not check_result["TUNNERL_IN_ASIC"]))
elif mux_state == "standby":
check_result["HWSTATUS"] = ((not check_result["NEIGHBOR_IN_ASIC"]) and check_result["TUNNERL_IN_ASIC"])
else:
# skip as unknown mux state
continue
check_results.append(check_result)
return check_results
def parse_check_results(check_results):
"""Parse the check results to see if there are neighbors that are inconsistent with mux state."""
failed_neighbors = []
bool_to_yes_no = ("no", "yes")
bool_to_consistency = ("inconsistent", "consistent")
for check_result in check_results:
port = check_result["PORT"]
is_zero_mac = check_result["MAC"] == ZERO_MAC
if port == NOT_AVAILABLE and not is_zero_mac:
continue
in_toggle = check_result["IN_MUX_TOGGLE"]
hwstatus = check_result["HWSTATUS"]
if not is_zero_mac:
check_result["IN_MUX_TOGGLE"] = bool_to_yes_no[in_toggle]
check_result["NEIGHBOR_IN_ASIC"] = bool_to_yes_no[check_result["NEIGHBOR_IN_ASIC"]]
check_result["TUNNERL_IN_ASIC"] = bool_to_yes_no[check_result["TUNNERL_IN_ASIC"]]
check_result["HWSTATUS"] = bool_to_consistency[hwstatus]
if (not hwstatus):
if is_zero_mac:
failed_neighbors.append(check_result)
elif not in_toggle:
failed_neighbors.append(check_result)
output_lines = tabulate.tabulate(
[[check_result[attr] for attr in NEIGHBOR_ATTRIBUTES] for check_result in check_results],
headers=NEIGHBOR_ATTRIBUTES,
tablefmt="simple"
)
for output_line in output_lines.split("\n"):
WRITE_LOG_WARN(output_line)
if failed_neighbors:
WRITE_LOG_ERROR("Found neighbors that are inconsistent with mux states: %s", [_["NEIGHBOR"] for _ in failed_neighbors])
err_output_lines = tabulate.tabulate(
[[neighbor[attr] for attr in NEIGHBOR_ATTRIBUTES] for neighbor in failed_neighbors],
headers=NEIGHBOR_ATTRIBUTES,
tablefmt="simple"
)
for output_line in err_output_lines.split("\n"):
WRITE_LOG_ERROR(output_line)
return False
return True
if __name__ == "__main__":
args = parse_args()
config_logging(args)
config_db = swsscommon.ConfigDBConnector(use_unix_socket_path=False)
config_db.connect()
appl_db = daemon_base.db_connect("APPL_DB")
mux_cables = get_mux_cable_config(config_db)
if not is_dualtor(config_db) or not mux_cables:
WRITE_LOG_DEBUG("Not a valid dualtor setup, skip the check.")
sys.exit(0)
mux_server_to_port_map = get_mux_server_to_port_map(mux_cables)
if_oid_to_port_name_map = get_if_br_oid_to_port_name_map()
neighbors, mux_states, hw_mux_states, asic_fdb, asic_route_table, asic_neigh_table = read_tables_from_db(appl_db)
mac_to_port_name_map = get_mac_to_port_name_map(asic_fdb, if_oid_to_port_name_map)
check_results = check_neighbor_consistency(
neighbors,
mux_states,
hw_mux_states,
mac_to_port_name_map,
asic_route_table,
asic_neigh_table,
mux_server_to_port_map
)
res = parse_check_results(check_results)
sys.exit(0 if res else 1)
| [
"[email protected]"
] | |
0f3a08eb19415e6839f084ef6b5fd54d9bb6cee3 | 6019b48f027b1f62de8474a834f52157fc8faf2c | /src/ch3/cv2io/negaposi.py | 7aa3463cf05aee3a2932641dbca8b3d908f3f44e | [] | no_license | kujirahand/book-mlearn-gyomu | d540aebf96af84d5c271fa11f31bf18417c16f34 | b1d5f04a69777fb3896b28144ecb18d49a744c25 | refs/heads/master | 2023-07-04T01:14:39.673001 | 2023-04-05T13:27:53 | 2023-04-05T13:27:53 | 135,913,708 | 127 | 113 | null | 2020-08-10T23:16:30 | 2018-06-03T14:56:59 | Jupyter Notebook | UTF-8 | Python | false | false | 212 | py | import matplotlib.pyplot as plt
import cv2
# Load the image
img = cv2.imread("test.jpg")
# Negative-positive inversion
img = 255 - img
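# Note: for uint8 images, cv2.bitwise_not(img) performs the same inversion.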
# Display the image
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
| [
"[email protected]"
] | |
3ac431c651416db7e8b8bd8732d24001a67016a2 | 5e8d86f6ddfd516b9768e8617ced0baca8112f4c | /core-python/Core_Python/loop/ForLoopDominoEx.py | 811f9121f51aa999d3f8922e54f65f566aac9aab | [
"MIT"
] | permissive | bharat-kadchha/tutorials | 0a96ce5a3da1a0ceb39a0d464c8f3e2ff397da7c | cd77b0373c270eab923a6db5b9f34c52543b8664 | refs/heads/master | 2022-12-23T11:49:34.042820 | 2020-10-06T03:51:20 | 2020-10-06T03:51:20 | 272,891,375 | 1 | 0 | MIT | 2020-06-17T06:04:33 | 2020-06-17T06:04:33 | null | UTF-8 | Python | false | false | 123 | py | for left in range(1,7):
for right in range(left,7):
print("["+str(left)+"|"+str(right)+"]",end=" ")
print() | [
"[email protected]"
] | |
7d7f982e88dc574bb2ed1b7b4f0f6c36f495a5a7 | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/addons/archipack/presets/archipack_stair/l_wood_over_concrete.py | d4fc1344a54ccd723bdcb01aad8a5764c427b8b4 | [
"Unlicense",
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"PSF-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-2-Clause"
] | permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 5,867 | py | import bpy
d = bpy.context.active_object.data.archipack_stair[0]
d.steps_type = 'CLOSED'
d.handrail_slice_right = True
d.total_angle = 6.2831854820251465
d.user_defined_subs_enable = True
d.string_z = 0.30000001192092896
d.nose_z = 0.029999999329447746
d.user_defined_subs = ''
d.idmat_step_side = '3'
d.handrail_x = 0.03999999910593033
d.right_post = True
d.left_post = True
d.width = 1.5
d.subs_offset_x = 0.0
d.rail_mat.clear()
item_sub_1 = d.rail_mat.add()
item_sub_1.name = ''
item_sub_1.index = '4'
d.step_depth = 0.30000001192092896
d.rail_z = (0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806)
d.right_subs = False
d.left_panel = True
d.idmat_handrail = '3'
d.da = 1.5707963705062866
d.post_alt = 0.0
d.left_subs = False
d.n_parts = 3
d.user_defined_post_enable = True
d.handrail_slice_left = True
d.handrail_profil = 'SQUARE'
d.handrail_expand = False
d.panel_alt = 0.25
d.post_expand = False
d.subs_z = 1.0
d.rail_alt = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
d.panel_dist = 0.05000000074505806
d.panel_expand = False
d.x_offset = 0.0
d.subs_expand = False
d.idmat_post = '4'
d.left_string = False
d.string_alt = -0.03999999910593033
d.handrail_y = 0.03999999910593033
d.radius = 1.0
d.string_expand = False
d.post_z = 1.0
d.idmat_top = '3'
d.idmat_bottom = '1'
d.parts.clear()
item_sub_1 = d.parts.add()
item_sub_1.name = ''
item_sub_1.manipulators.clear()
item_sub_2 = item_sub_1.manipulators.add()
item_sub_2.name = ''
item_sub_2.p0 = (0.0, 0.0, 1.4040000438690186)
item_sub_2.prop1_name = 'length'
item_sub_2.p2 = (1.0, 0.0, 0.0)
item_sub_2.normal = (0.0, 0.0, 1.0)
item_sub_2.pts_mode = 'SIZE'
item_sub_2.p1 = (0.0, 4.0, 1.4040000438690186)
item_sub_2.prop2_name = ''
item_sub_2.type_key = 'SIZE'
item_sub_1.right_shape = 'RECTANGLE'
item_sub_1.radius = 0.699999988079071
item_sub_1.type = 'S_STAIR'
item_sub_1.length = 4.0
item_sub_1.left_shape = 'RECTANGLE'
item_sub_1.da = 1.5707963705062866
item_sub_1 = d.parts.add()
item_sub_1.name = ''
item_sub_1.manipulators.clear()
item_sub_2 = item_sub_1.manipulators.add()
item_sub_2.name = ''
item_sub_2.p0 = (-1.0, 4.0, 1.944000005722046)
item_sub_2.prop1_name = 'da'
item_sub_2.p2 = (0.0, 1.0, 0.0)
item_sub_2.normal = (0.0, 0.0, 1.0)
item_sub_2.pts_mode = 'RADIUS'
item_sub_2.p1 = (1.0, 0.0, 0.0)
item_sub_2.prop2_name = 'radius'
item_sub_2.type_key = 'ARC_ANGLE_RADIUS'
item_sub_1.right_shape = 'RECTANGLE'
item_sub_1.radius = 0.699999988079071
item_sub_1.type = 'C_STAIR'
item_sub_1.length = 2.0
item_sub_1.left_shape = 'RECTANGLE'
item_sub_1.da = 1.5707963705062866
item_sub_1 = d.parts.add()
item_sub_1.name = ''
item_sub_1.manipulators.clear()
item_sub_2 = item_sub_1.manipulators.add()
item_sub_2.name = ''
item_sub_2.p0 = (-1.0, 5.0, 2.700000047683716)
item_sub_2.prop1_name = 'length'
item_sub_2.p2 = (1.0, 0.0, 0.0)
item_sub_2.normal = (0.0, 0.0, 1.0)
item_sub_2.pts_mode = 'SIZE'
item_sub_2.p1 = (-3.0, 5.0, 2.700000047683716)
item_sub_2.prop2_name = ''
item_sub_2.type_key = 'SIZE'
item_sub_1.right_shape = 'RECTANGLE'
item_sub_1.radius = 0.699999988079071
item_sub_1.type = 'S_STAIR'
item_sub_1.length = 2.0
item_sub_1.left_shape = 'RECTANGLE'
item_sub_1.da = 1.5707963705062866
d.subs_bottom = 'STEP'
d.user_defined_post = ''
d.panel_offset_x = 0.0
d.idmat_side = '1'
d.right_string = False
d.idmat_raise = '1'
d.left_rail = False
d.parts_expand = False
d.panel_z = 0.6000000238418579
d.bottom_z = 0.029999999329447746
d.z_mode = 'STANDARD'
d.panel_x = 0.009999999776482582
d.post_x = 0.03999999910593033
d.presets = 'STAIR_L'
d.steps_expand = True
d.subs_x = 0.019999999552965164
d.subs_spacing = 0.10000000149011612
d.left_handrail = True
d.handrail_offset = 0.0
d.right_rail = False
d.idmat_panel = '5'
d.post_offset_x = 0.019999999552965164
d.idmat_step_front = '3'
d.rail_n = 1
d.string_offset = 0.0
d.subs_y = 0.019999999552965164
d.handrail_alt = 1.0
d.post_corners = False
d.rail_expand = False
d.rail_offset = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
d.rail_x = (0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806, 0.05000000074505806)
d.left_shape = 'RECTANGLE'
d.nose_y = 0.019999999552965164
d.nose_type = 'STRAIGHT'
d.handrail_extend = 0.10000000149011612
d.idmat_string = '3'
d.post_y = 0.03999999910593033
d.subs_alt = 0.0
d.right_handrail = True
d.idmats_expand = False
d.right_shape = 'RECTANGLE'
d.idmat_subs = '4'
d.handrail_radius = 0.019999999552965164
d.right_panel = True
d.post_spacing = 1.0
d.string_x = 0.019999999552965164
d.height = 2.700000047683716
| [
"[email protected]"
] | |
c3f1f40c430acf8791af7d15a9c634c03815ed76 | 3b7b6648b72910046b6a227db30f71aeee2cba9c | /2021-03-08-SimpleRNN/StockReturnPredictionWithLSTM.py | f0ad401fbb2df1790b2b25eb955c0d967a9b1a7c | [] | no_license | ken2190/deep-learning-study | f2abeb1cd302e405a15bbb52188ae44ffb414e2f | f2998be89d0c931176f158ae5f48ca562786e171 | refs/heads/main | 2023-04-02T05:07:08.504212 | 2021-04-11T15:11:22 | 2021-04-11T15:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | from tensorflow.keras.layers import Input, SimpleRNN, GRU, Dropout, LSTM, Dense, Flatten, Softmax
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam
from sklearn.preprocessing import LabelBinarizer, StandardScaler
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
df = pd.read_csv('https://raw.githubusercontent.com/lazyprogrammer/machine_learning_examples/master/tf2.0/sbux.csv')
series = df["close"].values.reshape(-1, 1)
scaler = StandardScaler()
scaler.fit(series[:len(series) // 2])  # fit on the first half only, to avoid lookahead
series = scaler.transform(series).flatten()
df["prevClose"] = df["close"].shift(1)
df["Return"] = (df["close"] - df["prevClose"])/df["prevClose"]
df["Return"].hist()
u = np.array([1, 2])
v = np.array([3, 4])
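# A minimal sketch of the LSTM regressor these imports point toward
# (illustrative; T, the look-back window, and the layer sizes are assumptions):
#   T = 10
#   i = Input(shape=(T, 1))
#   x = LSTM(5)(i)
#   x = Dense(1)(x)
#   model = Model(i, x)
#   model.compile(loss='mse', optimizer=Adam(learning_rate=0.01))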
| [
"[email protected]"
] | |
14e14add80032e04c6e82d148372cd9e1ea89a4a | dbe7731552d8e6d1e63cc0f2e27d3810cc61f350 | /hyper_paras/hp_a2c.py | 1265b7c895a914b699bf58d2d2719a54eb9e5c15 | [] | no_license | ZhangRui111/rl_breakout_tf | 6bb3f57f2b1d52f196323916393234e8abb990ac | 04f259cd3c32eaffbad87fe1035b0f87c96127b0 | refs/heads/master | 2020-04-08T19:24:16.018734 | 2018-12-18T02:42:56 | 2018-12-18T02:42:56 | 159,653,713 | 1 | 1 | null | 2018-12-18T02:42:57 | 2018-11-29T11:12:04 | Python | UTF-8 | Python | false | false | 356 | py | from hyper_paras.base_hyper_paras import BaseHyperparameters
class Hyperparameters(BaseHyperparameters):
def __init__(self):
super().__init__()
self.model = 'A2C'
self.MAX_EPISODES = 50001 # 50001 : 500
self.LEARNING_RATE_ACTOR = 0.00005
self.LEARNING_RATE_CRITIC = 0.0001
self.DISCOUNT_FACTOR = 0.9
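# A usage sketch (illustrative; the trainer that consumes these fields lives
# elsewhere in the repo):
if __name__ == '__main__':
    hp = Hyperparameters()
    print(hp.model, hp.MAX_EPISODES, hp.LEARNING_RATE_ACTOR, hp.DISCOUNT_FACTOR)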
| [
"[email protected]"
] | |
7e11fd6bffade16b50990049c688e90b29754bf0 | 282769509af68245596dc73de42f552cfd73cd21 | /autoindex/watcher.py | d560ceaf60985c133ac610de4bc2a6e3972819c7 | [] | no_license | brutasse-archive/autoindex | 1130173d22c1d996a7cb38fcd59b51d07c0b8068 | cc5cfc414325aff133c684257e8c2bfdc9aaa672 | refs/heads/master | 2021-01-19T14:34:18.472167 | 2012-07-17T21:31:27 | 2012-07-17T21:31:27 | 5,048,409 | 15 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | import logging
import os
import signal
from pip.download import is_archive_file
from pyinotify import WatchManager, Notifier, ProcessEvent, EventsCodes
from .indexer import index
logger = logging.getLogger(__name__)
class IndexProcess(ProcessEvent):
def __init__(self, wm, mask):
self.wm = wm
self.mask = mask
self.queue = set()
def update_watch(self, directory):
self.wm.add_watch(directory, mask=self.mask)
def process_IN_CREATE(self, event):
logger.debug("Created {0}".format(event.pathname))
if os.path.isdir(event.pathname):
self.update_watch(event.pathname)
else:
self.index_alarm(event)
def process_IN_MODIFY(self, event):
logger.debug("Modified {0}".format(event.pathname))
self.index_alarm(event)
def process_IN_DELETE(self, event):
logger.debug("Deleted {0}".format(event.pathname))
self.index_alarm(event)
def index_alarm(self, event):
if is_archive_file(event.pathname):
logger.debug("Queuing indexing")
self.queue.add(os.path.dirname(event.pathname))
signal.setitimer(signal.ITIMER_REAL, 5)
def watch(directory):
logger.info("Watching {0}".format(directory))
flags = EventsCodes.ALL_FLAGS
mask = flags['IN_CREATE'] | flags['IN_MODIFY'] | flags['IN_DELETE']
wm = WatchManager()
wm.add_watch(directory, mask, rec=True)
process = IndexProcess(wm, mask)
notifier = Notifier(wm, process)
def update_index(*args):
while process.queue:
            # This is slightly sub-optimal; it would be better to pop all
            # elements at once, but that operation would need to be atomic.
dist_dir = process.queue.pop()
index(directory, only=[dist_dir])
signal.signal(signal.SIGALRM, update_index)
notifier.loop()
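# A usage sketch (illustrative; the directory is an assumption):
#   watch('/srv/package-index')   # blocks in notifier.loop()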
| [
"[email protected]"
] | |
e1a6d1b6a7f2d662c54225f864327197af261dea | 2b6fa34dac030ec1f2918b1377956bf791219d22 | /leetcode/medium/unique-paths.py | ec4b4d43fdfd54d17af687e347baacf85881da50 | [
"MIT"
] | permissive | rainzhop/cumulus-tank | aa13fb8f14c27893838a67d2eb69fdd2ac3d6450 | 09ebc7858ea53630e30606945adfea856a80faa3 | refs/heads/master | 2020-06-06T23:24:37.498966 | 2020-01-06T09:52:16 | 2020-01-06T09:52:16 | 192,874,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | # https://leetcode.com/problems/unique-paths/
#
# A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
#
# The robot can only move either down or right at any point in time.
# The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
#
# How many possible unique paths are there?
#
# S * * * * * *
# * * * * * * *
# * * * * * * F
#
# Above is a 3 x 7 grid. How many possible unique paths are there?
#
# Note: m and n will be at most 100.
class Solution(object):
    def __init__(self):
        # memo: path[(m, n)] caches the number of unique paths in an m x n grid
        self.path = {}
    def uniquePaths(self, m, n):
        """
        :type m: int
        :type n: int
        :rtype: int
        """
        if m == 1 or n == 1: return 1
        if (m, n) not in self.path:
            self.path[(m, n)] = self.uniquePaths(m-1, n) + self.uniquePaths(m, n-1)
        return self.path[(m, n)]
if __name__ == '__main__':
s = Solution()
print s.uniquePaths(3,3)
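# For comparison, the same count via bottom-up DP over the recurrence
# grid[i][j] = grid[i-1][j] + grid[i][j-1] (illustrative, kept separate from
# the class above):
def unique_paths_dp(m, n):
    grid = [[1] * n for _ in range(m)]
    for i in range(1, m):
        for j in range(1, n):
            grid[i][j] = grid[i-1][j] + grid[i][j-1]
    return grid[m-1][n-1]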
| [
"[email protected]"
] | |
7ec19266d0afa42c553c1b841b452ceb46a5277f | 8148be8c475ff2b5ae2a55ef1c05d4c2d5626e71 | /test_booking/settings.py | f9de63467db7e3150a7cb59ad754bc5dc9e9bda3 | [] | no_license | dimka1993kh/test_booking | 91836b51e4e005c4ad5f732f56f6f3e5593a63ec | ee5bbeb60317cac8627ce949c76640b16d4688a8 | refs/heads/master | 2023-03-21T11:02:03.611810 | 2021-03-07T12:37:05 | 2021-03-07T12:37:05 | 344,217,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,478 | py | """
Django settings for test_booking project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# import dj_database_url
# db_from_env = dj_database_url.config()
# DATABASES['dafault'].update(db_from_env)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jnqp(t&fi6&2=%0758f)+6+rnjc(4c#zyix7@r84_y%g+y0+-='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
'accounts.apps.AccountsConfig',
'choose_workplace',
'bootstrap_datepicker_plus',
'bootstrap4'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_booking.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_booking.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = 'main'
LOGOUT_REDIRECT_URL = 'main'
BOOTSTRAP4 = {
    'include_jquery': True,
} | [
"[email protected]"
] | |
7b505f33af2491c87a3f1c1bec2cb2ef5c788ad5 | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/GetSecureTokenRequest.py | 9d569e099197463fe544d1b9af836d4aecbe0764 | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class GetSecureTokenRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'GetSecureToken')
self.set_uri_pattern('/pop/v5/secure_token')
self.set_method('GET')
def get_NamespaceId(self):
return self.get_query_params().get('NamespaceId')
def set_NamespaceId(self,NamespaceId):
self.add_query_param('NamespaceId',NamespaceId) | [
"[email protected]"
] | |
0134e66eaf291387bdcb920be040ffff6e4875bd | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/grpc/src/src/python/grpcio_reflection/grpc_version.py | 6dcf1062815aab517877c456458f2dc0678f019e | [
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause",
"MPL-2.0"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 705 | py | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!!
VERSION = '1.54.0.dev0'
| [
"[email protected]"
] | |
ac1ad02284692842c69e7ab3e57e4d92cd325310 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/aio/operations/_load_balancer_backend_address_pools_operations.py | 1f45b31465fa0acc9e5bab5cd50ec9f8f0a5fe55 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 8,822 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations:
"""LoadBalancerBackendAddressPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["_models.LoadBalancerBackendAddressPoolListResult"]:
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerBackendAddressPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs
) -> "_models.BackendAddressPool":
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendAddressPool, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_10_01.models.BackendAddressPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
| [
"[email protected]"
] | |
4729a3d9e08865cacd04820127685a2d0a867ff4 | aa3f670fcc2b43d8a5eb8a131082510bed2eb4d8 | /nagios/check_raster.py | 92fd2d22b4429549a4571011b62d3ee9c259b62b | [
"MIT"
] | permissive | jamayfieldjr/iem | e0d496311d82790ad518c600c2fcffe44e834da1 | 275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a | refs/heads/master | 2020-08-07T11:55:56.256857 | 2019-10-04T04:22:36 | 2019-10-04T04:22:36 | 213,439,554 | 1 | 0 | MIT | 2019-10-07T17:01:20 | 2019-10-07T17:01:20 | null | UTF-8 | Python | false | false | 782 | py | """Check a raster file and count the number of non-zero values."""
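# Nagios plugin contract, for context: the exit status maps to the service
# state (0 = OK, 1 = WARNING, 2 = CRITICAL) and everything after '|' in the
# output is perfdata of the form label=value;warn;crit;min;max (trailing
# fields optional).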
from __future__ import print_function
import sys
from osgeo import gdal
import numpy
def main():
"""Go Main Go."""
ntp = gdal.Open('/home/ldm/data/gis/images/4326/USCOMP/ntp_0.png')
data = ntp.ReadAsArray()
count = numpy.sum(numpy.where(data > 0, 1, 0))
sz = data.shape[0] * data.shape[1]
if count > 1000:
print('OK - %s/%s|count=%s;100;500;1000' % (count, sz, count))
status = 0
elif count > 500:
print('WARNING - %s/%s|count=%s;100;500;1000' % (count, sz, count))
status = 1
else:
print('CRITICAL - %s/%s|count=%s;100;500;1000' % (count, sz, count))
status = 2
return status
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
53241e5667493e3b22a78779f524d5b575342228 | 2fb755e1d23267495345d1a94f4b79a1356657e7 | /black_box_tests/mapper_example.py | 45461118a2357b70b83703ecf1eaf2fdcd10696d | [
"MIT"
] | permissive | daringer/lollygag | 66bc86c7bea7943fd713cd5e463d911552b4d979 | 27da172cfa769ef7b850de517f778059068badca | refs/heads/master | 2021-05-16T03:24:15.691274 | 2017-10-11T12:45:45 | 2017-10-11T12:45:45 | 105,471,520 | 0 | 0 | null | 2017-10-01T20:19:20 | 2017-10-01T20:19:20 | null | UTF-8 | Python | false | false | 849 | py | #!/usr/bin/python
from lollygag import run
from lollygag.services import Services
from lollygag.dependency_injection.inject import Inject
from lollygag.core.crawlers.mapper_crawler import MapperCrawler
import json
def on_finish(log_service, crawler):
def callback(*args):
log_service.important("-------------Yeah boiiii, done-----------------")
result = crawler.make_map()
result = json.dumps(result, indent=4)
with open("result.json", "w+") as f:
f.write(result)
log_service.important("------------Done processing the tree-----------")
return callback
def main():
    Services.crawler_factory = MapperCrawler
    # Build the crawler explicitly so the finish callback can map it
    # (assumes the factory takes no constructor arguments).
    crawler = Services.crawler_factory()
    run(subscribe={'on_finish': on_finish(Services.log_service(), crawler)})
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
43dde8d0256d76c729723e64d08000466a23902b | d3055f3eedfdb124084f092c0f4540aa82a0f04d | /texture_tool/describe.py | 62e6307f6e97bb0cf9de3478bdc4598cdf08df36 | [] | no_license | podgorskiy/texture-tool | a90ec9adee2c8d19b21cdf42b714d8d4917c9612 | f8973871ee2ce72b4d4756796276b07be06e42dd | refs/heads/master | 2022-04-17T13:36:05.448525 | 2020-04-08T18:03:36 | 2020-04-08T18:03:36 | 253,153,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | # Copyright 2020 Stanislav Pidhorskyi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import texture_tool
def describe(self):
assert isinstance(self, texture_tool.PVRTexture)
s = '<' + '\n'
members = [attr for attr in dir(self) if not callable(getattr(self, attr)) and not attr.startswith("__")]
for attr in members:
s += '\t' + attr + ': ' + str(getattr(self, attr)) + '\n'
s += '\t' + str('Flipped X: ' + str(self.get_orientation(texture_tool.Axis.x))) + '\n'
s += '\t' + str('Flipped Y: ' + str(self.get_orientation(texture_tool.Axis.y))) + '\n'
s += '\t' + str('Width: ' + str(self.get_width())) + '\n'
s += '\t' + str('Height: ' + str(self.get_height())) + '\n'
s += '\t' + str('Depth: ' + str(self.get_depth())) + '\n'
s += '\t' + str('dtype: ' + str(self.dtype)) + '\n'
s += '>'
return s
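# Example usage (a sketch; ``load_texture`` is a hypothetical loader name,
# not something this module provides):
#   tex = load_texture("brick.pvr")  # hypothetical helper returning a PVRTexture
#   print(describe(tex))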
| [
"[email protected]"
] | |
b29df2eab12bee0ea732b5953df4904701e18f95 | c34380b64145b4ce26df9b27c34139d08de27515 | /highest_scoring_word.py | d6718e1ecce87a61b07dea1aab9b93f1d03c0fe1 | [] | no_license | codeandrew/python-algorithms | 531bc1574700cb7d822904f1e1ead9a596a85d29 | c71b0941f14825fcaa3fbb1429365ca1f28a3018 | refs/heads/master | 2023-04-28T23:56:01.283434 | 2023-04-05T03:06:22 | 2023-04-05T03:06:22 | 169,078,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | """
Given a string of words, you need to find the highest scoring word.
Each letter of a word scores points according to its position in the alphabet: a = 1, b = 2, c = 3 etc.
You need to return the highest scoring word as a string.
If two words score the same, return the word that appears earliest in the original string.
All letters will be lowercase and all inputs will be valid.
"""
def high(x):
    words = x.split()
    scores = [sum(ord(ch) - 96 for ch in word) for word in words]
    # index(max(...)) resolves ties to the first occurrence, i.e. the earliest word
    return words[scores.index(max(scores))]
"""
Other Options
"""
def high(x):
words=x.split(' ')
list = []
for i in words:
scores = [sum([ord(char) - 96 for char in i])]
list.append(scores)
return words[list.index(max(list))]
def high(words):
return max(words.split(), key=lambda word: sum(ord(c) - ord('a') + 1 for c in word.lower()))
| [
"[email protected]"
] | |
75dc35285e4cc28b0a0071cdf2c074aa2ea6f960 | 37fd103f6b0de68512e3cb6098d0abb9220f5a7d | /Python from scratch/014spectogram_waveform.py | fb3fad05fc153b831ee682fc1949eb029c556f40 | [] | no_license | FlyingMedusa/PythonELTIT | 720d48089738b7e629cad888f0032df3a4ccea2c | 36ab01fc9d42337e3c76c59c383d7b1a6142f9b9 | refs/heads/master | 2020-09-11T18:17:17.825390 | 2020-04-21T16:38:03 | 2020-04-21T16:38:03 | 222,150,066 | 0 | 0 | null | 2020-04-21T16:38:04 | 2019-11-16T19:37:33 | Python | UTF-8 | Python | false | false | 562 | py | from scipy.io import wavfile
import matplotlib.pyplot as pyplot
sampling_frequency, signal_data = wavfile.read('sample_for_task_013.wav')
# duration = len(signal_data)/ sampling_frequency
pyplot.subplot(311) # three rows, one col,1st plot
pyplot.specgram(signal_data, Fs = sampling_frequency)
pyplot.title('Some spectogram')
pyplot.xlabel('duration (s)')
pyplot.ylabel('Frequency (Hz)')
pyplot.subplot(313) # three rows, one col,3rd plot
pyplot.plot(signal_data)
pyplot.title('Some waveform')
pyplot.xlabel('duration')
pyplot.ylabel('intensity')
pyplot.show()
| [
"[email protected]"
] | |
1088e21e565a1e3657d113b966546a1b0eb98ac8 | 5679731cee36c537615d285ed72810f4c6b17380 | /167_TwoSumII_InputArrayIsSorted.py | 4ea08c7abe24681955be0a656cf106fb19e4146e | [] | no_license | manofmountain/LeetCode | 6b76105190a9b62df65a7b56b6def4120498b9fa | 718f688b3d316e8c10ef680d9c21ecd518d062f8 | refs/heads/master | 2021-01-12T03:41:48.318116 | 2017-07-18T12:35:58 | 2017-07-18T12:35:58 | 78,252,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py |
##43.90%
class Solution(object):
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
if len(numbers) < 2:
return []
left, right = 0, len(numbers) - 1
while left < right:
sum = numbers[left] + numbers[right]
if sum < target:
left += 1
elif sum > target:
right -= 1
else:
return [left + 1, right + 1]
return [] | [
"[email protected]"
] | |
ce63be621dd2fa160d3e9198752579ac7e8f9b18 | 364b36d699d0a6b5ddeb43ecc6f1123fde4eb051 | /_downloads_1ed/fig_fft_text_example.py | 78f8d57d71630eb3e61ff1ec81dc25ae5256806e | [] | no_license | astroML/astroml.github.com | eae3bfd93ee2f8bc8b5129e98dadf815310ee0ca | 70f96d04dfabcd5528978b69c217d3a9a8bc370b | refs/heads/master | 2022-02-27T15:31:29.560052 | 2022-02-08T21:00:35 | 2022-02-08T21:00:35 | 5,871,703 | 2 | 5 | null | 2022-02-08T21:00:36 | 2012-09-19T12:55:23 | HTML | UTF-8 | Python | false | false | 2,376 | py | """
Example of a Fourier Transform
------------------------------
Figure E.1
An example of approximating the continuous Fourier transform of a function
using the fast Fourier transform.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
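# For context: the DFT only approximates the continuous transform after a
# scale and phase correction. Up to sign/normalization conventions,
#   H(f_k) ~ dt * exp(-2j*pi*f_k*t[0]) * FFT(h)[k],  with  f_k = k / (N*dt),
# which is the relation FT_continuous is expected to implement on the time
# grid constructed below.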
import numpy as np
from matplotlib import pyplot as plt
from scipy import fftpack
from astroML.fourier import FT_continuous, sinegauss, sinegauss_FT
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Choose parameters for the wavelet
N = 10000
t0 = 5
f0 = 2
Q = 2
#------------------------------------------------------------
# Compute the wavelet on a grid of times
Dt = 0.01
t = t0 + Dt * (np.arange(N) - N / 2)
h = sinegauss(t, t0, f0, Q)
#------------------------------------------------------------
# Approximate the continuous Fourier Transform
f, H = FT_continuous(t, h)
rms_err = np.sqrt(np.mean(abs(H - sinegauss_FT(f, t0, f0, Q)) ** 2))
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(hspace=0.25)
# plot the wavelet
ax = fig.add_subplot(211)
ax.plot(t, h.real, '-', c='black', label='$Re[h]$', lw=1)
ax.plot(t, h.imag, ':', c='black', label='$Im[h]$', lw=1)
ax.legend()
ax.set_xlim(2, 8)
ax.set_ylim(-1.2, 1.2)
ax.set_xlabel('$t$')
ax.set_ylabel('$h(t)$')
# plot the Fourier transform
ax = fig.add_subplot(212)
ax.plot(f, H.real, '-', c='black', label='$Re[H]$', lw=1)
ax.plot(f, H.imag, ':', c='black', label='$Im[H]$', lw=1)
ax.text(0.55, 1.5, "RMS Error = %.2g" % rms_err)
ax.legend()
ax.set_xlim(0.5, 3.5)
ax.set_ylim(-1.9, 1.9)
ax.set_xlabel('$f$')
ax.set_ylabel('$H(f)$')
plt.show()
| [
"[email protected]"
] | |
cfbb540e6dfba1237f2ee80097afe65bc324da40 | 177df2b442866474377498a8b85f3d58410d0193 | /create_glidein_tarball.py | 45b070a821e819c0b6f139301c0d4fe04e8cab66 | [] | no_license | briedel/pyglidein | 6c19f2d310bd15a85df50eb384e8d2f186aaff50 | 835c458e4f7f0dc0dcf785120da31ffa9425f0bd | refs/heads/master | 2020-12-11T03:35:27.540075 | 2017-03-24T14:28:47 | 2017-03-24T14:28:47 | 49,531,789 | 0 | 0 | null | 2016-03-18T17:26:32 | 2016-01-12T22:02:49 | Python | UTF-8 | Python | false | false | 7,707 | py | """
Create a glidein tarball by downloading the source, building it, then
copying what is needed into the tarball.
"""
import sys
import os
import shutil
import subprocess
import tarfile
import tempfile
if sys.version_info[0] < 3 and sys.version_info[1] < 7:
raise Exception('requires python 2.7+')
def libuuid_download(version='1.0.3'):
url = 'http://downloads.sourceforge.net/project/libuuid/libuuid-'+version+'.tar.gz'
subprocess.check_call(['wget', url])
subprocess.check_call(['tar', '-zxf', 'libuuid-'+version+'.tar.gz'])
return 'libuuid-'+version
def libuuid_build():
"""Build uuid statically"""
dirname = libuuid_download()
initial_dir = os.getcwd()
os.chdir(dirname)
try:
if os.path.exists('release_dir'):
shutil.rmtree('release_dir')
os.mkdir('release_dir')
options = ['--enable-static',
'--disable-shared',
'--prefix',os.path.join(os.getcwd(),'release_dir'),
]
subprocess.check_call(['./configure']+options)
subprocess.check_call(['make'])
subprocess.check_call(['make','install'])
return os.path.join(initial_dir,dirname,'release_dir')
finally:
os.chdir(initial_dir)
def cvmfs_download():
url = 'https://github.com/cvmfs/cvmfs/archive/libcvmfs-stable.tar.gz'
subprocess.check_call(['wget', url])
subprocess.check_call(['tar', '-zxf', 'libcvmfs-stable.tar.gz'])
return 'cvmfs-libcvmfs-stable'
def cvmfs_build():
libuuid = libuuid_build()
dirname = cvmfs_download()
initial_dir = os.getcwd()
os.chdir(dirname)
try:
if os.path.exists('release_dir'):
shutil.rmtree('release_dir')
os.mkdir('release_dir')
options = ['-Wno-dev',
'-DINSTALL_MOUNT_SCRIPTS=OFF',
'-DBUILD_SERVER=OFF',
'-DBUILD_CVMFS=OFF',
'-DBUILD_LIBCVMFS=ON',
'-DINSTALL_BASH_COMPLETION=OFF',
'-DUUID_LIBRARY:FILE='+os.path.join(libuuid,'lib','libuuid.a'),
'-DUUID_INCLUDE_DIR:PATH='+os.path.join(libuuid,'include'),
'-DCMAKE_INSTALL_PREFIX='+os.path.join(os.getcwd(),'release_dir'),
]
subprocess.check_call(['cmake']+options)
subprocess.check_call(['make','libpacparser'])
os.chdir('cvmfs')
subprocess.check_call(['make'])
subprocess.check_call(['make','install'])
return os.path.join(initial_dir,dirname,'release_dir')
finally:
os.chdir(initial_dir)
def parrot_download(version):
url = 'http://ccl.cse.nd.edu/software/files/cctools-'+version+'-source.tar.gz'
subprocess.check_call(['wget', url])
subprocess.check_call(['tar', '-zxf', 'cctools-'+version+'-source.tar.gz'])
return 'cctools-'+version+'-source'
def parrot_build(version='6.0.14'):
cvmfs = cvmfs_build()
dirname = parrot_download(version)
initial_dir = os.getcwd()
os.chdir(dirname)
try:
if os.path.exists('release_dir'):
shutil.rmtree('release_dir')
os.mkdir('release_dir')
options = ['--without-system-sand',
'--without-system-allpairs',
'--without-system-wavefront',
'--without-system-makeflow',
# '--without-system-ftp-lite',
# '--without-system-chirp',
'--without-system-umbrella',
'--without-system-resource_monitor',
'--without-system-doc',
'--with-cvmfs-path',cvmfs,
'--prefix',os.path.join(os.getcwd(),'release_dir'),
]
subprocess.check_call(['./configure']+options)
subprocess.check_call(['make'])
subprocess.check_call(['make','install'])
return os.path.join(initial_dir,dirname,'release_dir')
finally:
os.chdir(initial_dir)
def condor_download(version):
version = version.replace('.','_')
url = 'https://github.com/htcondor/htcondor/archive/V'+version+'.tar.gz'
subprocess.check_call(['wget', url])
subprocess.check_call(['tar', '-zxf', 'V'+version+'.tar.gz'])
return 'htcondor-'+version
def condor_build(version='8.6.1'):
dirname = condor_download(version)
initial_dir = os.getcwd()
os.chdir(dirname)
try:
if os.path.exists('release_dir'):
shutil.rmtree('release_dir')
os.mkdir('release_dir')
options = [
'-DHAVE_BACKFILL=OFF',
'-DHAVE_BOINC=OFF',
'-DHAVE_HIBERNATION=OFF',
'-DHAVE_KBDD=OFF',
'-DWANT_GLEXEC=OFF',
'-DWANT_FULL_DEPLOYMENT=OFF',
'-DWITH_BOINC=OFF',
'-DWITH_BOSCO=OFF',
'-DWITH_CAMPUSFACTORY=OFF',
'-DWITH_BLAHP=OFF',
'-DWITH_CURL=OFF',
'-DWITH_COREDUMPER=OFF',
'-DWITH_CREAM=OFF',
'-DWITH_GANGLIA=OFF',
'-DWITH_GLOBUS=OFF',
'-DWITH_GSOAP=OFF',
'-DWITH_LIBDELTACLOUD=OFF',
'-DWITH_LIBVIRT=OFF',
'-DWITH_PYTHON_BINDINGS=OFF',
'-DWITH_UNICOREGAHP=OFF',
'-DWITH_VOMS=OFF',
]
if version > '8.5.2':
options.append('-DWITH_KRB5=OFF')
subprocess.check_call(['cmake','-DCMAKE_INSTALL_PREFIX:PATH='+os.getcwd()+'/release_dir']
+options+['.'])
subprocess.check_call(['make'])
subprocess.check_call(['make','install'])
return os.path.join(initial_dir,dirname,'release_dir')
finally:
os.chdir(initial_dir)
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--template-dir',dest='template',default='glidein_template',
help='Location of template directory')
parser.add_option('--htcondor-version',dest='condor',default=None,
help='HTCondor version to use')
parser.add_option('--parrot-version',dest='parrot',default=None,
help='Parrot (cctools) version to use')
parser.add_option('-o','--output',dest='output',default='glidein.tar.gz',
help='output tarball name')
(options, args) = parser.parse_args()
if not options.template:
raise Exception('need a template directory')
options.template = os.path.abspath(options.template)
curdir = os.getcwd()
d = tempfile.mkdtemp(dir=os.getcwd())
tarfile_name = os.path.abspath(os.path.expandvars(os.path.expanduser(options.output)))
try:
os.chdir(d)
parrot_opts = {}
if options.parrot:
parrot_opts['version'] = options.parrot
parrot_path = parrot_build(**parrot_opts)
condor_opts = {}
if options.condor:
condor_opts['version'] = options.condor
condor_path = condor_build(**condor_opts)
with tarfile.open(tarfile_name,'w:gz') as tar:
for f in os.listdir(options.template):
tar.add(os.path.join(options.template,f),arcname=f)
tar.add('.',arcname='glideinExec',recursive=False)
for f in os.listdir(condor_path):
tar.add(os.path.join(condor_path,f),arcname=os.path.join('glideinExec',f))
tar.add(os.path.join(parrot_path,'bin','parrot_run'),arcname=os.path.join('GLIDEIN_PARROT','parrot_run'))
tar.add(os.path.join(parrot_path,'lib','libparrot_helper.so'),arcname=os.path.join('GLIDEIN_PARROT','libparrot_helper.so'))
finally:
os.chdir(curdir)
shutil.rmtree(d)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f1fdec782a19b71a749c643458ec9d0408978d66 | 053221e1d90b365f68701dbd5b6466f30d1f6fd7 | /Day4/vd2.py | d2624b1ae91bd834e7c6b6d1c9a499d95af8c68b | [] | no_license | pytutorial/py2011E | eceb4d563cc807294b08b818edadd521ed8da488 | 306437369b0bfe55a2fa827b098283856242e731 | refs/heads/main | 2023-02-28T23:57:32.851536 | 2021-01-30T14:56:12 | 2021-01-30T14:56:12 | 318,186,117 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # vd2.py
# Read in a person's full name
# Print out the person's surname (Họ), middle name (tên đệm) and given name (tên)
ho_ten = input('Họ và tên:')
items = ho_ten.split()
ho = items[0]
ten = items[-1]
ten_dem = ' '.join(items[1:-1])
print('Họ: ', ho)
print('Tên đệm:', ten_dem)
print('Tên: ', ten)
| [
"[email protected]"
] | |
6859b7420def17cbc91c49bd229e6028b100e87d | bf3a87fd7725ad4e7e85492509f3e5aa68709fd0 | /chat/.history/Cliente_20191106204840.py | 8b51d56c2ef6c7a8b2f56ce7b17b3a47b7f38cdd | [] | no_license | slalbertojesus/merixo-grpc | f468b4f6349b4367ad6064f175cef7c3e49d829f | 182569a89cad605fd81b095861fd58390729c720 | refs/heads/master | 2020-09-04T21:39:53.488701 | 2019-12-25T02:07:24 | 2019-12-25T02:07:24 | 219,899,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | import grpc
import uuid
import chat_pb2 as structure
import chat_pb2_grpc as grpc_chat
from Usuario import Usuario
class Cliente():
def IniciarCliente(self):
id = uuid.uuid1()
print(id)
channel = grpc.insecure_channel('localhost:50051')
conn = grpc_chat.ChatAdminStub(channel)
        # Protobuf messages are constructed by instantiating the generated
        # class; assigning to class attributes does not build a request.
        request = structure.Usuario(id=id.hex, usuario="Choco", activo=True)
confirmacion = conn.Subscribirse(request)
print(confirmacion)
if __name__ == '__main__':
cliente = Cliente()
cliente.IniciarCliente() | [
"[email protected]"
] | |
e9689e0654946bbe442befd797dac771d63f7c28 | 4e30d990963870478ed248567e432795f519e1cc | /tests/models/validators/v3_1_1/jsd_a4d5b5da6a50bfaaecc180543fd952.py | 24d54b381f25a25dd8d542827e9372430e636dad | [
"MIT"
] | permissive | CiscoISE/ciscoisesdk | 84074a57bf1042a735e3fc6eb7876555150d2b51 | f468c54998ec1ad85435ea28988922f0573bfee8 | refs/heads/main | 2023-09-04T23:56:32.232035 | 2023-08-25T17:31:49 | 2023-08-25T17:31:49 | 365,359,531 | 48 | 9 | MIT | 2023-08-25T17:31:51 | 2021-05-07T21:43:52 | Python | UTF-8 | Python | false | false | 7,787 | py | # -*- coding: utf-8 -*-
"""Identity Services Engine createDeviceAdminTimeCondition data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from builtins import *
import fastjsonschema
from ciscoisesdk.exceptions import MalformedRequest
class JSONSchemaValidatorA4D5B5Da6A50BfAaecC180543Fd952(object):
"""createDeviceAdminTimeCondition request schema definition."""
def __init__(self):
super(JSONSchemaValidatorA4D5B5Da6A50BfAaecC180543Fd952, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"allOf": [
{
"properties": {
"conditionType": {
"enum": [
"ConditionAndBlock",
"ConditionAttributes",
"ConditionOrBlock",
"ConditionReference",
"LibraryConditionAndBlock",
"LibraryConditionAttributes",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"isNegate": {
"type": "boolean"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"required": [
"href"
],
"type": "object"
}
},
"required": [
"conditionType"
],
"type": "object"
},
{
"properties": {
"conditionType": {
"enum": [
"ConditionAndBlock",
"ConditionAttributes",
"ConditionOrBlock",
"ConditionReference",
"LibraryConditionAndBlock",
"LibraryConditionAttributes",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"datesRange": {
"properties": {
"endDate": {
"type": "string"
},
"startDate": {
"type": "string"
}
},
"required": [
"endDate",
"startDate"
],
"type": "object"
},
"datesRangeException": {
"properties": {
"endDate": {
"type": "string"
},
"startDate": {
"type": "string"
}
},
"required": [
"endDate",
"startDate"
],
"type": "object"
},
"description":
{
"type": "string"
},
"hoursRange": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"required": [
"endTime",
"startTime"
],
"type": "object"
},
"hoursRangeException": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"required": [
"endTime",
"startTime"
],
"type": "object"
},
"id": {
"type": "string"
},
"isNegate": {
"type": "boolean"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"required": [
"href"
],
"type": "object"
},
"name": {
"type": "string"
},
"weekDays": {
"items": {
"enum": [
"Friday",
"Monday",
"Saturday",
"Sunday",
"Thursday",
"Tuesday",
"Wednesday"
],
"type": "string"
},
"type": "array"
},
"weekDaysException": {
"items": {
"enum": [
"Friday",
"Monday",
"Saturday",
"Sunday",
"Thursday",
"Tuesday",
"Wednesday"
],
"type": "string"
},
"type": "array"
}
},
"required": [
"conditionType",
"name"
],
"type": "object"
}
]
},
"version": {
"type": "string"
}
},
"required": [
"response",
"version"
],
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
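# Example (a sketch): a minimal payload accepted by the schema above pairs a
# time-and-date condition with a version string.
#   v = JSONSchemaValidatorA4D5B5Da6A50BfAaecC180543Fd952()
#   v.validate({"response": {"conditionType": "TimeAndDateCondition",
#                            "name": "WorkHours"}, "version": "1.0"})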
| [
"[email protected]"
] | |
405a1959f9d4f85a7a2f446f5fc40e3adc4d2834 | f89cd667200844f019dbf2c93798e7fee96b89e2 | /dynamic-programming/exercises/ugly-numbers.py | ab24762e2184774dfc0008339825acefc4170efc | [] | no_license | radomirbrkovic/algorithms | 575f4540c7aab2daf3e55d0df99030e440ee2060 | 621d0f82e0e4cd253afc0e07772a201b019f7889 | refs/heads/master | 2023-07-15T23:59:29.725946 | 2021-09-01T19:47:08 | 2021-09-01T19:47:08 | 250,455,390 | 0 | 0 | null | 2021-09-01T19:47:09 | 2020-03-27T06:12:52 | Python | UTF-8 | Python | false | false | 596 | py | # Ugly Numbers https://www.geeksforgeeks.org/ugly-numbers/
def maxDivide(a, b):
while a % b == 0:
        a = a // b  # exact integer division; a % b == 0 here
return a
def isUgly(no):
no = maxDivide(no, 2)
no = maxDivide(no, 3)
no = maxDivide(no, 5)
return 1 if no == 1 else 0
# Function to get the nth ugly number
def getNthUglyNo(n):
i = 1
# ugly number count
count = 1
    # Check for all integers until
# ugly count becomes n
while n > count:
i += 1
if isUgly(i):
count += 1
return i
print("150th ugly number is ", getNthUglyNo(150)) | [
"[email protected]"
] | |
b97fb55d1d42347a1c75e55752aa6e6c1587cce1 | 342cd75882fbe61c97c8e6abe68baabac058f89b | /xalpha/misc.py | d134fe8d7993fd847215b4f942a940b9c6b4c474 | [
"MIT"
] | permissive | refraction-ray/xalpha | c8b787dd88810fa32e5e2e223854fd7dbe4e3060 | ad5c9d91942bbcba5f4e27af2b26abdb83056b5d | refs/heads/master | 2023-08-05T10:38:14.014019 | 2023-07-24T11:30:06 | 2023-07-24T11:30:06 | 143,284,193 | 1,851 | 384 | MIT | 2022-02-20T14:03:09 | 2018-08-02T11:12:10 | Python | UTF-8 | Python | false | false | 9,598 | py | # -*- coding: utf-8 -*-
"""
modules for misc crawlers without a unified API
"""
import re
import pandas as pd
import datetime as dt
import logging
import numpy as np
from bs4 import BeautifulSoup
from functools import lru_cache
logger = logging.getLogger(__name__)
from xalpha.cons import (
rget,
rpost,
rget_json,
rpost_json,
today_obj,
region_trans,
holidays,
_float,
)
from xalpha.universal import lru_cache_time
from xalpha.exceptions import ParserFailure
# This module only collects miscellaneous crawler helpers; their interfaces are very unstable, have no docs or tests, and may be added or removed at any time. Use with caution!
@lru_cache_time(ttl=600, maxsize=64)
def get_ri_status(suburl=None):
"""
broken due to the website redesign
"""
if not suburl:
suburl = "m=cb&a=cb_all" # 可转债
# url = "http://www.richvest.com/index.php?"
url = "http://www.ninwin.cn/index.php?"
url += suburl
r = rget(url, headers={"user-agent": "Mozilla/5.0"})
b = BeautifulSoup(r.text, "lxml")
cl = []
for c in b.findAll("th"):
cl.append(c.text)
nocl = len(cl)
rl = []
for i, c in enumerate(b.findAll("td")):
if i % nocl == 0:
r = []
r.append(c.text)
if i % nocl == nocl - 1:
rl.append(r)
return pd.DataFrame(rl, columns=cl)
@lru_cache_time(ttl=120)
def get_jsl_cb_status():
url = "https://www.jisilu.cn/data/cbnew/cb_list/?___jsl=LST___t=%s" % (
int(dt.datetime.now().timestamp() * 100)
)
r = rpost_json(url)
return [item["cell"] for item in r["rows"]]
@lru_cache_time(ttl=7200, maxsize=512)
def get_sh_status(category="cb", date=None):
url = "http://query.sse.com.cn/commonQuery.do?jsonCallBack=&"
if category in ["cb", "kzz"]:
url += "isPagination=false&sqlId=COMMON_BOND_KZZFLZ_ALL&KZZ=1"
elif category in ["fund", "fs"]:
if not date:
date = today_obj().strftime("%Y%m%d")
date = date.replace("/", "").replace("-", "")
url += "&sqlId=COMMON_SSE_FUND_LOF_SCALE_CX_S&pageHelp.pageSize=10000&FILEDATE={date}".format(
date=date
)
else:
raise ParserFailure("unrecoginzed category %s" % category)
r = rget_json(
url,
headers={
"user-agent": "Mozilla/5.0",
"Host": "query.sse.com.cn",
"Referer": "http://www.sse.com.cn/market/bonddata/data/convertible/",
},
)
return pd.DataFrame(r["result"])
@lru_cache_time(ttl=7200, maxsize=512)
def get_sz_status(category="cb", date=None):
if not date:
date = today_obj().strftime("%Y%m%d")
date = date.replace("/", "").replace("-", "")
date = date[:4] + "-" + date[4:6] + "-" + date[6:]
url = "http://www.szse.cn/api/report/ShowReport/data?"
if category in ["cb", "kzz"]:
pageno = 1
data = []
while True:
suburl = "SHOWTYPE=JSON&CATALOGID=1277&TABKEY=tab1&PAGENO={pageno}&txtDate={date}".format(
date=date, pageno=pageno
)
r = rget_json(url + suburl)
if r[0]["data"]:
data.extend(r[0]["data"])
pageno += 1
else:
break
# df = pd.DataFrame(r[0]["data"])
df = pd.DataFrame(data)
if len(df) == 0:
return
pcode = re.compile(r".*&DM=([\d]*)&.*")
pname = re.compile(r"^([^&]*)&.*")
df["证券代码"] = df["kzjcurl"].apply(lambda s: re.match(pcode, s).groups()[0])
df["证券简称"] = df["kzjcurl"].apply(lambda s: re.match(pname, s).groups()[0])
df["上市日期"] = pd.to_datetime(df["ssrq"])
df["发行量"] = df["fxlnew"]
df["换股价格"] = df["kzjg"]
df["未转股数量"] = df["kzsl"]
df["未转股比例"] = df["kzbl"]
df["转股截止日期"] = pd.to_datetime(df["kzzzrq"])
df = df[["证券代码", "证券简称", "上市日期", "发行量", "换股价格", "未转股数量", "未转股比例", "转股截止日期"]]
return df
@lru_cache_time(ttl=7200, maxsize=512)
def get_sz_fs(code):
url = "http://www.szse.cn/api/report/ShowReport/data?SHOWTYPE=JSON&\
CATALOGID=1945_LOF&txtQueryKeyAndJC={code}".format(
code=code
)
r = rget_json(url)
return _float(r[0]["data"][0]["dqgm"]) * 1e4
def get_tdx_holidays(holidays=None, format="%Y-%m-%d"):
r = rget("https://www.tdx.com.cn/url/holiday/")
r.encoding = "gbk"
b = BeautifulSoup(r.text, "lxml")
l = b.find("textarea").string.split("\n")
if not holidays:
holidays = {}
for item in l:
if item.strip():
c = item.split("|")
if c[2] in region_trans:
rg = region_trans[c[2]]
tobj = dt.datetime.strptime(c[0], "%Y%m%d")
tstr = tobj.strftime(format)
if rg not in holidays:
holidays[rg] = [tstr]
elif tstr not in holidays[rg]:
holidays[rg].append(tstr)
return holidays
def get_163_fundamentals(code, category="lrb"):
# category xjllb zcfzb
url = "http://quotes.money.163.com/service/{category}_{code}.html".format(
category=category, code=code
)
logger.debug("Fetching from %s . in `get_163_fundamentals`" % url)
df = pd.read_csv(url, encoding="gbk")
df = df.set_index("报告日期")
return df.T
@lru_cache()
def get_ttjj_suggestions(keyword):
url = "http://fundsuggest.eastmoney.com/FundSearch/api/FundSearchAPI.ashx?callback=&m=1&key={key}".format(
key=keyword
)
r = rget_json(url)
return r["Datas"]
def get_cb_historical_from_ttjj(code):
if code.startswith("SH") or code.startswith("SZ"):
code = code[2:]
params = {
"type": "RPTA_WEB_KZZ_LS",
"sty": "ALL",
"source": "WEB",
"p": "1",
"ps": "8000",
"st": "date",
"sr": "1",
"filter": "(zcode={code})".format(code=code),
}
url = "http://datacenter.eastmoney.com/api/data/get"
data = []
r = rget_json(url, params=params)
data.extend(r["result"]["data"])
if int(r["result"]["pages"]) > 1:
for i in range(2, int(r["result"]["pages"]) + 1):
params["p"] = str(i)
r = rget_json(url, params=params)
data.extend(r["result"]["data"])
df = pd.DataFrame(data)
df["date"] = pd.to_datetime(df["DATE"])
df["bond_value"] = df["PUREBONDVALUE"]
df["swap_value"] = df["SWAPVALUE"]
df["close"] = df["FCLOSE"]
return df[["date", "close", "bond_value", "swap_value"]]
@lru_cache()
def get_fund_list(ft):
# hh, zq, zs, gp, qdii, fof
r = rget(
"http://fund.eastmoney.com/data/FundGuideapi.aspx?\
dt=0&ft={ft}&sd=&ed=&sc=z&st=desc&pi=1&pn=10000&zf=diy&sh=list".format(
ft=ft
),
headers={
"Host": "fund.eastmoney.com",
"Referer": "http://fund.eastmoney.com/daogou/",
},
)
d = eval(r.text.split("=")[1].replace("null", "None"))
return [code.split(",")[0] for code in d["datas"] if code.strip()]
def update_caldate(path, year, path_out=None):
"""
Update caldate.csv based on ``cons.holidays["CN"]``
"""
r = {"cal_date": [], "is_open": []}
for d in pd.date_range(str(year) + "-01-01", str(year) + "-12-31"):
r["cal_date"].append(d.strftime("%Y-%m-%d"))
if d.weekday() in [5, 6]:
r["is_open"].append(0)
elif d.strftime("%Y-%m-%d") in holidays["CN"]:
r["is_open"].append(0)
else:
r["is_open"].append(1)
ncal = pd.DataFrame(r)
cal = pd.read_csv(path)
if int(year) <= int(cal.iloc[-1]["cal_date"][:4]):
raise ValueError("We already have cal date for year %s" % year)
tcal = pd.concat([cal, ncal], ignore_index=True)
if path_out is None:
path_out = path
tcal.to_csv(path_out, index=False)
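# Example (a sketch; the csv path depends on the local checkout):
#   update_caldate("xalpha/caldate.csv", 2021)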
## Lists of commonly tracked instruments, handy for joint analysis; contributions welcome :)
# closed-end funds with strategic placement allocations (战略配售封基)
zlps = ["SZ160142", "SZ161131", "SZ161728", "SH501186", "SH501188", "SH501189"]
# STAR Market closed-end funds (科创封基)
kcfj = [
"SH501073",
"SH501075",
"SH501076",
"SH501078",
"SH501079",
"SH501080",
"SH501081",
"SH501082",
"SH501082",
"SH501085",
]
# hybrid / mixed-allocation funds (混合基)
hh_cand = [
"001500",
"001278",
"001103",
"519697",
"001182",
"001510",
"001508",
"519700",
"519732",
"519056",
"213001",
"161606",
"519091",
"000717",
"000878",
"000452",
]
## some small tools and calculators below
def summary_cb(df, l=None, cutoff=5):
# not functional since richinvest change
for c in ["转债代码"]:
df[c] = df[c].apply(lambda s: s.strip())
for c in ["老式双低", "转债价格", "股票市值", "转债余额"]:
df[c] = df[c].apply(_float)
for c in ["转股溢价率", "价值溢价", "税后收益率"]:
df[c] = df[c].apply(lambda s: float(str(s).strip("%")))
if l is not None:
df = df[df["转债代码"].isin(l)]
d = {}
for c in ["老式双低", "转债价格", "转股溢价率", "价值溢价", "税后收益率", "股票市值"]:
if cutoff == 0:
yj = sorted(df[c])
else:
yj = sorted(df[c])[cutoff:-cutoff]
d[c + "中位数"] = yj[int(len(yj) / 2)]
d[c + "均值"] = round(np.mean(yj), 3)
d["破面值转债数目"] = len([v for v in df["转债价格"] if v < 100])
d["总转债余额"] = round(np.sum(df["转债余额"]), 0)
return d
| [
"[email protected]"
] | |
31cdbe882af4808f510d60c5303fc71448bad50f | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/es2016/Symbol.keyFor.spec | 4405fe814db693ef8c40840d1d430431bc104824 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 396 | spec | 1. If Type(_sym_) is not Symbol, throw a *TypeError* exception.
1. For each element _e_ of the GlobalSymbolRegistry List (see <emu-xref href="#sec-symbol.for"></emu-xref>),
1. If SameValue(_e_.[[Symbol]], _sym_) is *true*, return _e_.[[Key]].
1. Assert: GlobalSymbolRegistry does not currently contain an entry for _sym_.
1. Return *undefined*. | [
"[email protected]"
] | |
b27373bc38eff28a67ebaad6b5aa01a01e97f5e3 | a884039e1a8b0ab516b80c2186e0e3bad28d5147 | /Livros/Livro-Desenvolvimento web com Flask/Capitulo02/Nível 02/exemplo07a.py | 0129fa2065636c4e62560194d3ba20d2e016d1d8 | [
"MIT"
] | permissive | ramonvaleriano/python- | 6e744e8bcd58d07f05cd31d42a5092e58091e9f0 | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | refs/heads/main | 2023-04-10T14:04:24.497256 | 2021-04-22T18:49:11 | 2021-04-22T18:49:11 | 340,360,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Program: exemplo07a.py
# Author: Ramon R. Valeriano
# Description: Programa do Capítulo 2, para melhorar a fixação
# Developed: 02/03/2020 - 16:29
from flask import Flask, make_response
app = Flask(__name__)
@app.route('/')
def index():
response = make_response('<h1>Este documento esta sendo carregado em um cookie.</h1>')
response.set_cookie('answer', '42')
return response
app.run() | [
"[email protected]"
] | |
eb14ad9cc026342ecb88f0372c9d46218bb7bf1c | 584db1be8b6bdedaa56d186692ad72da5ee07164 | /patron/cells/weights/__init__.py | d83f31e1ab2fdb889a4e774c5b82817b6dad2c51 | [
"Apache-2.0"
] | permissive | casbin/openstack-patron | 66006f57725cf1c3d735cd5529d3459fd77384c8 | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | refs/heads/master | 2023-05-31T05:23:37.721768 | 2015-12-31T12:18:17 | 2015-12-31T12:18:17 | 382,054,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | # Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell Scheduler weights
"""
from patron import weights
class WeightedCell(weights.WeighedObject):
def __repr__(self):
return "WeightedCell [cell: %s, weight: %s]" % (
self.obj.name, self.weight)
class BaseCellWeigher(weights.BaseWeigher):
"""Base class for cell weights."""
pass
class CellWeightHandler(weights.BaseWeightHandler):
object_class = WeightedCell
def __init__(self):
super(CellWeightHandler, self).__init__(BaseCellWeigher)
def all_weighers():
"""Return a list of weight plugin classes found in this directory."""
return CellWeightHandler().get_all_classes()
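# Example weigher (a sketch; assumes the nova-style ``_weigh_object`` hook on
# weights.BaseWeigher, which this module does not show):
#   class RamFreeWeigher(BaseCellWeigher):
#       def _weigh_object(self, cell, weight_properties):
#           return cell.capacities.get('ram_free', 0)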
| [
"[email protected]"
] | |
d999acb14a4258c765255569ad0349f26990ecdc | 38bf7e24a2150983f482a6749dc661ed4c4a4439 | /docs/source/conf.py | 914308cfb0a62a3b79401f3a79e53ff0e90b1f3c | [] | no_license | guoweikuang/flask_v2ex | 15b6247d979146ada57fe2e6dd7c93f7708297ff | d84c14b1d90be78e634677dee332a63bca69c7fc | refs/heads/master | 2022-12-17T19:36:57.945884 | 2019-10-23T13:25:44 | 2019-10-23T13:25:44 | 116,472,843 | 20 | 5 | null | 2022-11-22T02:08:35 | 2018-01-06T10:09:07 | JavaScript | UTF-8 | Python | false | false | 4,776 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'V2Ex'
copyright = '2018, guoweikuang'
author = 'guoweikuang'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = 'v1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme

# Use the Read the Docs theme (this overrides Sphinx's default 'alabaster').
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'V2Exdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'V2Ex.tex', 'V2Ex Documentation',
'guoweikuang', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'v2ex', 'V2Ex Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'V2Ex', 'V2Ex Documentation',
author, 'V2Ex', 'One line description of project.',
'Miscellaneous'),
] | [
"[email protected]"
] | |
35191925acafc83ea20ead8135b3732eb249d9f9 | e6132244015942c5ec75c8eff4f90cd0e9302470 | /src/wshop/apps/shipping/__init__.py | 15be34284e0e5d483fe1421db9999cd651d83f84 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | vituocgia/wshop-core | d3173f603861685b523f6b66af502b9e94b7b0c2 | 5f6d1ec9e9158f13aab136c5bd901c41e69a1dba | refs/heads/master | 2020-03-18T08:25:14.669538 | 2018-05-23T05:55:56 | 2018-05-23T05:55:56 | 134,508,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | default_app_config = 'wshop.apps.shipping.config.ShippingConfig'
| [
"[email protected]"
] | |
9bf3d2c051c29082aa33cfeceab377e3427f85ff | 05abb78c60a69422ae3e00a542bbd4573faf8174 | python-para-zumbis/lista2/exercicio1.py | 9918795b5836c2bd55e4644ea40ede511eb2e42b | [] | no_license | xuting1108/Programas-de-estudo | 72b812d52f5b130a95103c38dbe9e471dc5aa6f9 | 01fe21097055d69c2115cff3da2199429e87dead | refs/heads/master | 2022-10-20T17:06:14.517643 | 2019-04-08T11:16:12 | 2019-04-08T11:16:12 | 179,678,721 | 0 | 1 | null | 2022-10-09T13:13:57 | 2019-04-05T12:38:23 | Python | UTF-8 | Python | false | false | 600 | py | # Write a program that asks for the three sides of a triangle. The program should report whether the values can form a triangle.
# If the sides do form a triangle, indicate whether it is: equilateral, isosceles or scalene.
lado_a = float(input('Enter the length of side a: '))
lado_b = float(input('Enter the length of side b: '))
lado_c = float(input('Enter the length of side c: '))
# triangle inequality: every side must be shorter than the sum of the other two
if lado_a >= lado_b + lado_c or lado_b >= lado_a + lado_c or lado_c >= lado_a + lado_b:
    print('the values cannot form a triangle')
elif lado_a == lado_b == lado_c:
    print('the triangle is equilateral')
elif lado_a == lado_b or lado_a == lado_c or lado_b == lado_c:
    print('the triangle is isosceles')
else:
    print('the triangle is scalene') | [
"[email protected]"
] | |
b9bbeafefaafd8ff7661334198c1365cd73e36d1 | f73bcada5ab8432d2af07b5cb7fd7a38109d3e3a | /.history/parser_20201108170616.py | c0517d7feb1c60b713329f35cfcf547572ddba48 | [] | no_license | mariajbp/gedcomparser | 837bf4ae5628a81e535d233c7c35313c6d86d78c | 6fc55899e5a82c4071991ab94a344b64c014b84d | refs/heads/master | 2023-01-23T09:01:27.459597 | 2020-11-19T23:58:53 | 2020-11-19T23:58:53 | 310,900,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | #!/usr/bin/python3
#python3 parser.py input/bible.gedcom > test.txt
import sys
from re import *
filename = sys.argv[1].split('/')[1]
assetPath = "assets"
indPath = "individuals"
famPath = "families"
cssPath = "assets/gedcom.css"
def createFamily(fk,fi):
f = open('assets/families/'+fk+'.html', 'w')
    f.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="../index.css"></head>\n')
    f.write('<h4> <a href="../index.html"> return to index </a> </h4>')
    f.write('<h1> Código da familia: ' + fk + '</h1>')
    f.write('<ul>\n')
    for key, value in fi.items():
        f.write('<li>' + str(key) + ': ' + str(value) + '</li>\n')
    f.write('</ul>')
f.close()
def createIndex(fam,indi):
f = open("assets/index.html", 'w')
f.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="index.css"></head>\n')
f.write('<h1> Ficheiro: ' + filename + '</h1>')
f.write('<div class="row"><div class="column"><h2>Familias</h2>')
for keyf in fam:
f.write('<li> <a href=\"'+famPath+'/'+keyf+'.html\">'+keyf+'</a></li>\n')
f.write('</ul> </div>')
f.write('<div class="column"><h2>Individuos</h2>')
for keyi in indi:
f.write('<li> <a href=\"'+indPath+'/'+keyi+'.html\">'+keyi+'</a></li>\n')
f.write('</ul></div></div>')
f.close()
BG = {}
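# BG: individual id -> {'name': ..., 'fams': [ids of families where this person is a spouse]}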
def procIndi(s,i):
indi = {}
v = search(r'\bNAME\s+(.*)', i)
if v:
indi['name']= v.group(1)
v = findall (r'\bFAMS\s+@(.*)@',i)
indi['fams'] = v
BG[s] = indi
BF = {}
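# BF: family id -> {'husb': id, 'wife': id, 'child': [ids]}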
def procFam(f,i):
fam={}
h = search(r'\bHUSB\s+@(.*)@',i)
if h:
fam['husb'] = h.group(1)
w = search(r'\bWIFE\s+@(.*)@',i)
if w:
fam['wife'] = w.group(1)
fam['child'] = findall (r'\bCHIL\s+@(.*)@',i)
BF[f] = fam
def process(t):
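    # a level-0 GEDCOM line starts a new record, so splitting on "\n0" gives one chunk per individual/family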
items = split(r'\n0',t)
for i in items:
z = search(r'@(I\d+)@ *INDI', i) #procura todos os individuos
if z:
procIndi(z.group(1),i)
f = search(r'@(F\d+)@ *FAM', i) #procura todas as familias
if f:
procFam(f.group(1),i)
with open(sys.argv[1], 'r') as f :
gedcom = f.read()
process(gedcom)
createIndex(BF.keys(), BG.keys())
for k,v in BF.items():
createFamily(k,v)
| [
"[email protected]"
] | |
4a5d3fe945019ad4717eef5286af1768dc05b083 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_vicarage.py | 3cfe8615692b2c6a7f3f67bc930f9033fcdd2e06 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py |
# class header
class _VICARAGE():
def __init__(self,):
self.name = "VICARAGE"
self.definitions = [u'the house in which a vicar lives']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
4c9ec16df1b6a85b34a767c4e8a4d46e53d950f7 | 82256eb259bf5fa75a8f15500a6b5a1306a07034 | /addintegers3.py | f6d586ed9a6c80d2002f3850a12e20180a03404d | [] | no_license | dennisnderitu254/Andela-Exercises | 1c0d2c309b6ea113a4d812e313ded867f6dea9a4 | edb17f0ed867a4436478a8d9bf5690a749155781 | refs/heads/master | 2021-05-05T13:38:06.658363 | 2017-10-31T14:35:38 | 2017-10-31T14:35:38 | 105,002,996 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | # Exhibiting functional composition
num1 = int(input('Please enter an integer value: '))
num2 = int(input('Please enter another integer value: '))
print(num1, '+', num2, '=', num1 + num2) | [
"[email protected]"
] | |
a972d8916751e7929616031a929acb51c7a7b956 | 3e2447737acc8e6bef6728b1a8e5f1d5e6db2968 | /opennem/pipelines/wem/balancing_summary.py | fd431bcabfb48da0aacae2723bb3de03f7e58e17 | [
"MIT"
] | permissive | gaslitbytech/opennem | 5a5197003662725ccd2f82d790cdb1495a975a07 | deec3e2079db9d9d84171010fd0c239170d1e7ce | refs/heads/master | 2023-07-23T14:08:28.949054 | 2020-10-09T03:53:20 | 2020-10-09T03:53:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,148 | py | import csv
import logging
from sqlalchemy.dialects.postgresql import insert
from opennem.db.models.opennem import BalancingSummary
from opennem.pipelines import DatabaseStoreBase
from opennem.schema.network import NetworkWEM
from opennem.utils.dates import parse_date
from opennem.utils.pipelines import check_spider_pipeline
logger = logging.getLogger(__name__)
class WemStoreBalancingSummary(DatabaseStoreBase):
@check_spider_pipeline
def process_item(self, item, spider=None):
s = self.session()
csvreader = csv.DictReader(item["content"].split("\n"))
records_to_store = []
for record in csvreader:
trading_interval = parse_date(
record["Trading Interval"], dayfirst=True, network=NetworkWEM
)
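            # skip rows whose trading interval cannot be parsed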
if not trading_interval:
continue
records_to_store.append(
{
"network_id": "WEM",
"network_region": "WEM",
"trading_interval": trading_interval,
"forecast_load": record["Load Forecast (MW)"],
"generation_scheduled": record[
"Scheduled Generation (MW)"
],
"generation_non_scheduled": record[
"Non-Scheduled Generation (MW)"
],
"generation_total": record["Total Generation (MW)"],
"price": record["Final Price ($/MWh)"],
}
)
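        # upsert: insert every interval, updating price and generation_total when the primary key already exists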
stmt = insert(BalancingSummary).values(records_to_store)
stmt.bind = self.engine
stmt = stmt.on_conflict_do_update(
constraint="balancing_summary_pkey",
set_={
"price": stmt.excluded.price,
"generation_total": stmt.excluded.generation_total,
},
)
try:
r = s.execute(stmt)
s.commit()
except Exception as e:
logger.error("Error inserting records")
logger.error(e)
finally:
s.close()
return len(records_to_store)
| [
"[email protected]"
] | |
c54cbc847e347a11beaa33ad2bd3cb4e97c48277 | 28cd350c10e5fe3542f2913e1833f5725aa56fd5 | /prepare_VehicleID.py | 17adc3f1c349e6a19d4ae965ba534f591054547c | [
"MIT"
] | permissive | layumi/Person_reID_baseline_pytorch | dffeb79f25f2fe1b83646746bbb295f2df36bad4 | 4dae9cdf42f71c72a44a64fb23bfc470c501085f | refs/heads/master | 2023-09-03T14:34:04.082508 | 2023-08-17T04:12:26 | 2023-08-17T04:12:26 | 115,712,649 | 4,042 | 1,132 | MIT | 2023-06-19T08:29:17 | 2017-12-29T10:22:41 | Python | UTF-8 | Python | false | false | 2,992 | py | import os
from shutil import copyfile
def copy_file(s, t):
for root, dirs, files in os.walk(s):
for name in files:
copyfile(root+'/'+name,t+'/'+name)
# You only need to change this line to your dataset download path
download_path = './data/VehicleID_V1.0/'
if not os.path.isdir(download_path):
print('please change the download_path')
#---------------------------------------
#train_all
train_path = download_path + '/image'
train_save_path = download_path + '/pytorch/train_test'
if not os.path.isdir(train_save_path):
os.mkdir(train_save_path)
fname = './data/VehicleID_V1.0/attribute/img2vid.txt'
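# each line of img2vid.txt is "<image name> <vehicle id>"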
with open(fname) as fp:
for i, line in enumerate(fp):
name, label = line.split(' ')
name = name + '.jpg'
ID = int(label)
src_path = train_path + '/' + name
dst_path = train_save_path + '/p%d'%ID
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
print(src_path, dst_path)
copyfile( src_path, dst_path+'/'+name)
#---------------------------------------
#train
train_list = []
train_only_save_path = download_path + '/pytorch/train'
if not os.path.isdir(train_only_save_path):
os.mkdir(train_only_save_path)
with open(download_path+'train_test_split/train_list.txt', 'r') as f:
for name in f:
name = name.replace('\n','')
train_ID = name.split(' ')
train_ID = int(train_ID[1])
if not train_ID in train_list:
train_list.append(train_ID)
print(len(train_list))
for ID in train_list:
os.system('rsync -r %s/p%d %s'%( train_save_path, ID, train_only_save_path))
#---------------------------------------
# build query/gallery splits for the 800 / 1600 / 2400 test protocols
for num in [800,1600,2400]:
val_list = []
query_save_path = download_path + '/pytorch/query%d'%num
gallery_save_path = download_path + '/pytorch/gallery%d'%num
if not os.path.isdir(query_save_path):
os.mkdir(query_save_path)
os.mkdir(gallery_save_path)
with open(download_path+'train_test_split/test_list_%d.txt'%num, 'r') as f:
for name in f:
name = name.replace('\n','')
val_ID = name.split(' ')
val_name = val_ID[0] + '.jpg'
val_ID = int(val_ID[1])
src_path = train_path + '/' + val_name
if val_ID not in val_list:
val_list.append(val_ID)
                dst_path = gallery_save_path + '/p%d'%val_ID  # first image of each ID goes to gallery; the rest go to query, so the query set is larger
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
copyfile( src_path, dst_path+'/'+val_name)
else:
dst_path = query_save_path + '/p%d'%val_ID
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
copyfile( src_path, dst_path+'/'+val_name)
| [
"[email protected]"
] | |
16ee84d5d1b6441baaf6dbf58d95f65b16fd49cb | e1b3816615cce62ebe2b6c59b0eb3fbd3693d73b | /solutions/167-two-sum-ii-input-array-is-sorted/two-sum-ii-input-array-is-sorted.py | 60d0a04a154052849aad48a3e763a43ca3bebcba | [] | no_license | fagan2888/leetcode-6 | 1fb18979ffacb82d5db77988b38ecd7371b428b9 | 14176f1752e2bb94dec51bd90dfd412896ed84de | refs/heads/master | 2022-01-10T03:27:51.388066 | 2019-06-15T14:13:48 | 2019-06-15T14:13:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | # -*- coding:utf-8 -*-
# Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
#
# The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
#
# Note:
#
#
# Your returned answers (both index1 and index2) are not zero-based.
# You may assume that each input would have exactly one solution and you may not use the same element twice.
#
#
# Example:
#
#
# Input: numbers = [2,7,11,15], target = 9
# Output: [1,2]
# Explanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2.
#
class Solution(object):
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
if len(numbers) <= 1:
return None
buffer_dict = {}
for i in range(len(numbers)):
if numbers[i] in buffer_dict:
return [buffer_dict[numbers[i]], i+1]
else: buffer_dict[target - numbers[i]] = i+1
| [
"[email protected]"
] | |
8852e9dcd8cde183a336da575c9de3ddf255095c | 15a2a8c612545e61dab18a5d0673b1cef95a9638 | /Part/神龙天女.py | a09f4df92188101b24dd402950e0a0ce29b7c469 | [] | no_license | YICHENG-LAI/DNFCalculating | 6fa10b692580dad119446307508a3bf32ff46d1a | 426375e4e0034e435a8f38974ce81323c8ea7f9c | refs/heads/master | 2022-11-17T00:18:06.650791 | 2020-07-05T07:28:50 | 2020-07-05T07:28:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,360 | py | from PublicReference.base import *
class 神龙天女主动技能(主动技能):
def 等效CD(self, 武器类型):
return round(self.CD / self.恢复 * 1.05, 1)
    # the 1.05 factor is the cooldown-recovery modifier of the rosary (念珠) weapon
class 神龙天女技能0(神龙天女主动技能):
名称 = '罪业加身'
所在等级 = 10
等级上限 = 60
基础等级 = 48
基础 = 2014 - 204.4468
成长 = 204.4468
CD = 6.0
TP成长 = 0.08
TP上限 = 7
class 神龙天女技能1(神龙天女主动技能):
名称 = '唤雷符'
所在等级 = 15
等级上限 = 60
基础等级 = 46
基础 = 1721 - 174.644
成长 = 174.644
CD = 5.0
TP成长 = 0.08
TP上限 = 7
class 神龙天女技能2(神龙天女主动技能):
名称 = '念珠连射'
备注 = '(TP为基础精通)'
所在等级 = 15
等级上限 = 1
基础等级 = 1
基础 = 9195.58 / 9.362
成长 = 0
CD = 1.0
TP成长 = 0.1
TP上限 = 5
class 神龙天女技能3(神龙天女主动技能):
名称 = '木槵子经'
所在等级 = 15
等级上限 = 60
基础等级 = 46
基础 = 1602 - 163.6
成长 = 163.6
CD = 4.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能4(神龙天女主动技能):
名称 = '束灵符'
所在等级 = 20
等级上限 = 60
基础等级 = 43
基础 = 2052 - 208.214
成长 = 208.214
CD = 7.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能5(神龙天女主动技能):
名称 = '驱邪咒'
所在等级 = 25
等级上限 = 60
基础等级 = 41
基础 = 5100 - 519
成长 = 519
CD = 12.0
TP上限 = 5
TP倍率 = [1, 1.125, 1.228, 1.330, 1.433, 1.535]
def 等效百分比(self, 武器类型):
if self.等级 == 0:
return 0
else:
return int((self.基础 + self.成长 * self.等级)* self.TP倍率[self.TP等级] * self.倍率)
class 神龙天女技能6(被动技能):
名称 = '祈雨祭'
所在等级 = 25
等级上限 = 20
基础等级 = 10
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.00 + 0.02 * self.等级, 5)
class 神龙天女技能7(被动技能):
名称 = '神术强化'
所在等级 = 30
等级上限 = 20
基础等级 = 10
def 加成倍率(self, 武器类型):
if self.等级 <= 10:
return round(1.05 + 0.015 * self.等级, 5)
else:
return round(1.00 + 0.02 * self.等级, 5)
class 神龙天女技能8(神龙天女主动技能):
名称 = '和合之玉'
所在等级 = 30
等级上限 = 60
基础等级 = 38
基础 = 5233 - 531.108
成长 = 531.108
CD = 15.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能9(神龙天女主动技能):
名称 = '聚魂吸星符'
所在等级 = 35
等级上限 = 60
基础等级 = 36
基础 = 6004 - 609.629
成长 = 609.629
CD = 15.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.14
self.CD *= 0.95
class 神龙天女技能10(神龙天女主动技能):
名称 = '龙魂之怒'
所在等级 = 40
等级上限 = 60
基础等级 = 33
基础 = 8116 - 823.406
成长 = 823.406
CD = 20.0
TP成长 = 0.1
TP上限 = 7
class 神龙天女技能11(神龙天女主动技能):
名称 = '百八念珠'
所在等级 = 40
等级上限 = 60
基础等级 = 33
基础 = 13060 - 1326.25
成长 = 1326.25
CD = 25.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.18
self.CD *= 0.83
class 神龙天女技能12(神龙天女主动技能):
名称 = '不动珠箔阵'
所在等级 = 45
等级上限 = 60
基础等级 = 31
基础 = 16138 - 1635.567
成长 = 1635.567
CD = 45.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.09
self.CD *= 0.9
class 神龙天女技能13(神龙天女主动技能):
名称 = '神龙如意珠'
备注 = '(1次)'
是否主动 = 0
所在等级 = 48
等级上限 = 40
基础等级 = 20
基础 = 526 - 83.947
成长 = 83.947
CD = 0.5
关联技能 = ['所有']
def 等效CD(self, 武器类型):
return 0.5
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.115 + 0.015 * self.等级, 5)
class 神龙天女技能14(神龙天女主动技能):
名称 = '神谕:神龙雷雨祭'
所在等级 = 50
等级上限 = 40
基础等级 = 12
基础 = 45113 - 10407
成长 = 10407
CD = 140
class 神龙天女技能15(神龙天女主动技能):
名称 = '因果业火符'
所在等级 = 60
等级上限 = 40
基础等级 = 23
基础 = 13346 - 1354.864
成长 = 1354.864
CD = 30.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.24
class 神龙天女技能16(神龙天女主动技能):
名称 = '夺命大念阵'
所在等级 = 70
等级上限 = 40
基础等级 = 18
基础 = 24291 - 2464.235
成长 = 2464.235
CD = 50.0
TP成长 = 0.1
TP上限 = 7
是否有护石 = 1
def 装备护石(self):
self.倍率 *= 1.24
class 神龙天女技能17(被动技能):
名称 = '龙神之力'
所在等级 = 75
等级上限 = 40
基础等级 = 11
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.23 + 0.02 * self.等级, 5)
class 神龙天女技能18(神龙天女主动技能):
名称 = '退魔阴阳符'
所在等级 = 75
等级上限 = 40
基础等级 = 16
基础 = 42399 - 4303.067
成长 = 4303.067
CD = 40.0
class 神龙天女技能19(神龙天女主动技能):
名称 = '天坠阴阳玉'
所在等级 = 80
等级上限 = 40
基础等级 = 13
基础 = 40585 - 4117.917
成长 = 4117.917
CD = 45.0
class 神龙天女技能20(神龙天女主动技能):
名称 = '龙威如狱·龙恩如海'
所在等级 = 85
等级上限 = 40
基础等级 = 5
基础 = 92783 - 21518
成长 = 21518
CD = 180.0
class 神龙天女技能21(被动技能):
名称 = '卓越之力'
所在等级 = 95
等级上限 = 40
基础等级 = 4
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.18 + 0.02 * self.等级, 5)
class 神龙天女技能22(被动技能):
名称 = '超卓之心'
所在等级 = 95
等级上限 = 11
基础等级 = 1
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.045 + 0.005 * self.等级, 5)
class 神龙天女技能23(被动技能):
名称 = '觉醒之抉择'
所在等级 = 100
等级上限 = 40
基础等级 = 2
关联技能 = ['无']
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(1.10 + 0.05 * self.等级, 5)
class 神龙天女技能24(被动技能):
名称 = '基础精通'
所在等级 = 1
等级上限 = 200
基础等级 = 100
关联技能 = ['念珠连射']
def 加成倍率(self, 武器类型):
if self.等级 == 0:
return 1.0
else:
return round(0.463 + 0.089 * self.等级, 5)
神龙天女技能列表 = []
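# instantiate 神龙天女技能0, 神龙天女技能1, ... in order; the loop ends at the first missing class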
i = 0
while i >= 0:
try:
exec('神龙天女技能列表.append(神龙天女技能'+str(i)+'())')
i += 1
    except NameError:
i = -1
神龙天女技能序号 = dict()
for i in range(len(神龙天女技能列表)):
神龙天女技能序号[神龙天女技能列表[i].名称] = i
神龙天女一觉序号 = 0
神龙天女二觉序号 = 0
神龙天女三觉序号 = 0
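# locate the awakening skills by unlock level: 50 (1st), 85 (2nd), 100 (3rd)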
for i in 神龙天女技能列表:
if i.所在等级 == 50:
神龙天女一觉序号 = 神龙天女技能序号[i.名称]
if i.所在等级 == 85:
神龙天女二觉序号 = 神龙天女技能序号[i.名称]
if i.所在等级 == 100:
神龙天女三觉序号 = 神龙天女技能序号[i.名称]
神龙天女护石选项 = ['无']
for i in 神龙天女技能列表:
if i.是否有伤害 == 1 and i.是否有护石 == 1:
神龙天女护石选项.append(i.名称)
神龙天女符文选项 = ['无']
for i in 神龙天女技能列表:
if i.所在等级 >= 20 and i.所在等级 <= 80 and i.所在等级 != 50 and i.是否有伤害 == 1:
神龙天女符文选项.append(i.名称)
class 神龙天女角色属性(角色属性):
职业名称 = '神龙天女'
武器选项 = ['念珠']
    # damage type options: '物理百分比' (physical percent), '魔法百分比' (magic percent), '物理固伤' (physical fixed), '魔法固伤' (magic fixed)
    伤害类型选择 = ['魔法百分比']
    # default damage type
    伤害类型 = '魔法百分比'
防具类型 = '布甲'
防具精通属性 = ['智力']
主BUFF = 2.08
    # base stats (including awakening passives)
    基础力量 = 793.0
    基础智力 = 952.0
    # effective stats after system support ("奶") bonuses are applied
    力量 = 基础力量
    智力 = 基础智力
    # character base + awakening
物理攻击力 = 65.0
魔法攻击力 = 65.0
独立攻击力 = 1045.0
火属性强化 = 13
冰属性强化 = 13
光属性强化 = 13
暗属性强化 = 13
远古记忆 = 0
def __init__(self):
self.技能栏= deepcopy(神龙天女技能列表)
self.技能序号= deepcopy(神龙天女技能序号)
class 神龙天女(角色窗口):
def 窗口属性输入(self):
self.初始属性 = 神龙天女角色属性()
self.角色属性A = 神龙天女角色属性()
self.角色属性B = 神龙天女角色属性()
self.一觉序号 = 神龙天女一觉序号
self.二觉序号 = 神龙天女二觉序号
self.三觉序号 = 神龙天女三觉序号
self.护石选项 = deepcopy(神龙天女护石选项)
self.符文选项 = deepcopy(神龙天女符文选项) | [
"[email protected]"
] |