| column | dtype | range / classes |
|---|---|---|
| blob_id | string | lengths 40..40 |
| directory_id | string | lengths 40..40 |
| path | string | lengths 3..616 |
| content_id | string | lengths 40..40 |
| detected_licenses | sequence | lengths 0..112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5..115 |
| snapshot_id | string | lengths 40..40 |
| revision_id | string | lengths 40..40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 .. 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 .. 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 .. 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k..681M (nullable) |
| star_events_count | int64 | 0..209k |
| fork_events_count | int64 | 0..110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 .. 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 .. 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3..10.2M |
| extension | string | 188 classes |
| content | string | lengths 3..10.2M |
| authors | sequence | lengths 1..1 |
| author_id | string | lengths 1..132 |

Each data row below lists these fields pipe-separated, with the full source file inlined in the `content` field.
4693784784bb42b021025f1ca712c9ce4534686e | 50957651c54cfb3cba809eb84cf56c0cb2e2621d | /tests/cpydiff/modules_sys_stdassign.py | 096af430e4f571587577ec543a3dfb426aa26dbd | [
"MIT"
] | permissive | whyengineer/micropython-esp32 | 94d11e1f5171ea526ac5f97de60e34560b656435 | ab95d9cb19fc8cda42bf3fdecd76625ff9929c4e | refs/heads/esp32 | 2020-12-02T18:20:19.929696 | 2017-07-07T10:21:24 | 2017-07-07T10:21:24 | 96,515,880 | 4 | 0 | null | 2017-07-07T08:14:40 | 2017-07-07T08:14:39 | null | UTF-8 | Python | false | false | 223 | py | """
categories: Modules,sys
description: Override sys.stdin, sys.stdout and sys.stderr. Impossible as they are stored in read-only memory.
cause: Unknown
workaround: Unknown
"""
import sys
sys.stdin = None
print(sys.stdin)
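# Under CPython the assignment succeeds and this prints ``None``; under
# MicroPython it fails, since (as the description above notes) sys.stdin is
# stored in read-only memory.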
| [
"[email protected]"
] | |
20cc519f37b4cc8a0dbe3cb2c7440dd9e4437f7b | 22d6db28f14ea809fffb3afb187a1b484474713f | /azext_keyvault/mgmt/keyvault/models/__init__.py | e561ff7983d0c7bb82557cb61856036e4c53e434 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-keyvault-cli-extension | 631322637f2311b6833bc9664ef92fd77e1eade6 | a9b4a1f8a1f8e2433f83a81efe6068e3bf4537ef | refs/heads/master | 2023-06-09T18:56:46.388527 | 2023-06-02T16:18:23 | 2023-06-02T16:18:23 | 130,276,163 | 3 | 8 | MIT | 2023-06-02T16:18:24 | 2018-04-19T21:49:23 | Python | UTF-8 | Python | false | false | 4,353 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .sku_py3 import Sku
from .permissions_py3 import Permissions
from .access_policy_entry_py3 import AccessPolicyEntry
from .ip_rule_py3 import IPRule
from .virtual_network_rule_py3 import VirtualNetworkRule
from .network_rule_set_py3 import NetworkRuleSet
from .vault_properties_py3 import VaultProperties
from .vault_patch_properties_py3 import VaultPatchProperties
from .vault_access_policy_properties_py3 import VaultAccessPolicyProperties
from .deleted_vault_properties_py3 import DeletedVaultProperties
from .vault_create_or_update_parameters_py3 import VaultCreateOrUpdateParameters
from .vault_patch_parameters_py3 import VaultPatchParameters
from .vault_access_policy_parameters_py3 import VaultAccessPolicyParameters
from .vault_py3 import Vault
from .deleted_vault_py3 import DeletedVault
from .resource_py3 import Resource
from .vault_check_name_availability_parameters_py3 import VaultCheckNameAvailabilityParameters
from .check_name_availability_result_py3 import CheckNameAvailabilityResult
from .operation_display_py3 import OperationDisplay
from .log_specification_py3 import LogSpecification
from .service_specification_py3 import ServiceSpecification
from .operation_py3 import Operation
except (SyntaxError, ImportError):
from .sku import Sku
from .permissions import Permissions
from .access_policy_entry import AccessPolicyEntry
from .ip_rule import IPRule
from .virtual_network_rule import VirtualNetworkRule
from .network_rule_set import NetworkRuleSet
from .vault_properties import VaultProperties
from .vault_patch_properties import VaultPatchProperties
from .vault_access_policy_properties import VaultAccessPolicyProperties
from .deleted_vault_properties import DeletedVaultProperties
from .vault_create_or_update_parameters import VaultCreateOrUpdateParameters
from .vault_patch_parameters import VaultPatchParameters
from .vault_access_policy_parameters import VaultAccessPolicyParameters
from .vault import Vault
from .deleted_vault import DeletedVault
from .resource import Resource
from .vault_check_name_availability_parameters import VaultCheckNameAvailabilityParameters
from .check_name_availability_result import CheckNameAvailabilityResult
from .operation_display import OperationDisplay
from .log_specification import LogSpecification
from .service_specification import ServiceSpecification
from .operation import Operation
from .vault_paged import VaultPaged
from .deleted_vault_paged import DeletedVaultPaged
from .resource_paged import ResourcePaged
from .operation_paged import OperationPaged
from .key_vault_management_client_enums import (
SkuName,
KeyPermissions,
SecretPermissions,
CertificatePermissions,
StoragePermissions,
CreateMode,
NetworkRuleBypassOptions,
NetworkRuleAction,
Reason,
AccessPolicyUpdateKind,
)
__all__ = [
'Sku',
'Permissions',
'AccessPolicyEntry',
'IPRule',
'VirtualNetworkRule',
'NetworkRuleSet',
'VaultProperties',
'VaultPatchProperties',
'VaultAccessPolicyProperties',
'DeletedVaultProperties',
'VaultCreateOrUpdateParameters',
'VaultPatchParameters',
'VaultAccessPolicyParameters',
'Vault',
'DeletedVault',
'Resource',
'VaultCheckNameAvailabilityParameters',
'CheckNameAvailabilityResult',
'OperationDisplay',
'LogSpecification',
'ServiceSpecification',
'Operation',
'VaultPaged',
'DeletedVaultPaged',
'ResourcePaged',
'OperationPaged',
'SkuName',
'KeyPermissions',
'SecretPermissions',
'CertificatePermissions',
'StoragePermissions',
'CreateMode',
'NetworkRuleBypassOptions',
'NetworkRuleAction',
'Reason',
'AccessPolicyUpdateKind',
]
| [
"[email protected]"
] | |
2c0dc72ca231da4f98c7a53bddff61f3cebb751f | c1ef1f1fa94b5dbecff2ec09e94ae29a9094d82a | /study/backjoon/backjoon_2231.py | 0561b5dae7d63b7ff43bdca2e4d945e638375f74 | [] | no_license | MONKEYZ9/algorithm | cd6039a2232615e9bd40f63e2509fddf7edcede7 | 4ffde1ac47294af87152ed740962db600e0b9755 | refs/heads/main | 2023-08-14T17:01:54.792376 | 2021-10-01T06:14:55 | 2021-10-01T06:14:55 | 380,917,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | N = input()
max_N = 9*len(N)+1  # a number with len(N) digits has digit sum at most 9*len(N)
ans = []
for i in range(1, max_N):
temp = 0
if int(N)-i > 0:
if len(str(int(N)-i)) >= 2:
for j in list(str(int(N)-i)):
temp += int(j)
if i == temp:
ans.append(int(N)-i)
else:
if ((int(N)-i)*2) == int(N):
ans.append(int(N)-i)
if len(ans) == 0:
print(0)
else:
print(min(ans))
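# Example: N = 216 -> prints 198, the smallest generator (198 + 1 + 9 + 8 = 216).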
| [
"[email protected]"
] | |
5a7e2ba68068192502f574cba81b2619a076de0a | 258f6619c909be6295078d34639f4ffa171257b3 | /src/edb/model/experiment/__init__.py | 6bf85c797e89e61f8a4009925a8e3d711c405148 | [] | no_license | aidanheerdegen/experimentdb | a12a168c50517c72028ab7ba231a27bda88fc05d | 8a5e77b2b489c4cba8766c8071c238586c11c0a3 | refs/heads/main | 2023-07-30T00:15:44.168950 | 2021-09-10T06:35:32 | 2021-09-10T06:35:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | from .base import Experiment
from ...utils import all_subclasses
# Import the subclasses so they're loaded
from . import accesscm, um, generic, payu
def experiment_factory(type: str, path: str) -> Experiment:
"""
Try to create a 'type' Experiment at 'path'
"""
# Each Experiment subclass has the type it's associated with as the parameter
# 'type'. If it's an abstract class the type should be None
types = {e.type: e for e in all_subclasses(Experiment) if e.type is not None}
return types[type](path)
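# Illustrative usage (assumes "payu" is one of the registered experiment types
# imported above, and that the path exists):
#     exp = experiment_factory("payu", "/scratch/experiments/my_run")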
| [
"[email protected]"
] | |
4bded312dca334a10d59f07a72b4fc7556ae4dc3 | 83316f8e2be55b19d81ccee935c9cfa09ac7b0b3 | /deepaudio/speaker/models/clovaai_resnetse34l/configurations.py | 48705f3a4bafd38013ba99080910d6b4c6daef84 | [] | no_license | TrendingTechnology/deepaudio-speaker | 5769b3ed851c721a57fcc4983c5905401d50f85e | 46f4edef5957e0211b5fe82146e5ce48b1744e15 | refs/heads/main | 2023-07-05T03:39:55.224562 | 2021-08-08T08:02:12 | 2021-08-08T08:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | from dataclasses import dataclass, field
from deepaudio.speaker.dataclass.configurations import DeepMMDataclass
@dataclass
class ClovaaiResnetse34lConfigs(DeepMMDataclass):
name: str = field(
default="clovaai_ecapa", metadata={"help": "Model name"}
)
embed_dim: int = field(
default=256, metadata={"help": "Dimension of embedding."}
)
encoder_type: str = field(
default="SAP", metadata={"help": "Encoder type."}
)
optimizer: str = field(
default="adam", metadata={"help": "Optimizer for training."}
)
min_num_frames: int = field(
default=200, metadata={"help": "Min num frames."}
)
max_num_frames: int = field(
default=400, metadata={"help": "Max num frames."}
) | [
"[email protected]"
] | |
1897130bdb9a24f6cada979c2535f4bc3279dedf | d066f7fe739fb78f74ec2de8ccbfefdd4270f60f | /appimagebuilder/modules/generate/package_managers/apt/__init__.py | 7ec1d298ae9776e5b17bc702c58ca69a36ebfb00 | [
"MIT"
] | permissive | AppImageCrafters/appimage-builder | 666e75363a74f615cdb3673b3ca9d51a6d292a49 | f38699ef3644fa5409a5a262b7b6d99d6fb85db9 | refs/heads/main | 2023-08-17T06:34:54.029664 | 2023-06-03T17:51:04 | 2023-06-03T17:51:04 | 218,847,680 | 270 | 54 | MIT | 2023-09-06T17:04:18 | 2019-10-31T19:44:17 | Python | UTF-8 | Python | false | false | 751 | py | # Copyright 2021 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
from .file_package_resolver import FilePackageResolver
from .package_repository_resolver import PackageRepositoryResolver
| [
"[email protected]"
] | |
8a0a9d68892f0dbac3b8e55eb69e82f1788cc05e | ba2d449486c58578581b8de7b2b6f21074be6274 | /02 Linked Lists/2-4-Partition.py | 382b61d69de5bcd3a2a3495d4d3dfa4e66b26e1c | [] | no_license | theoliao1998/Cracking-the-Coding-Interview | 4e0abef8659a0abf33e09ee78ce2f445f8b5d591 | 814b9163f68795238d17aad5b91327fbceadf49e | refs/heads/master | 2020-12-09T12:46:10.845579 | 2020-07-25T05:39:19 | 2020-07-25T05:39:19 | 233,306,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,801 | py | # Partition: Write code to partition a linked list around a value x, such that all nodes less than x come
# before all nodes greater than or equal to x. If x is contained within the list, the values of x only need
# to be after the elements less than x (see below). The partition element x can appear anywhere in the
# "right partition"; it does not need to appear between the left and right partitions.
# EXAMPLE
# Input:
# Output:
# 3 -> 5 -> 8 -> 5 -> 10 -> 2 -> 1 [partition= 5]
# 3 -> 1 -> 2 -> 10 -> 5 -> 5 -> 8
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def append(self, x):
n = self
while n.next:
n = n.next
n.next = ListNode(x)
# recursion, time O(n), unstable
def partition1(n,x):
if not n.next:
return n,n
    first, end = partition1(n.next, x)
if n.val < x:
n.next = first
first = n
else:
end.next = n
n.next = None
end = n
return first, end
# maintain two lists and combine, O(n), stable
def partition2(n,x):
small = None
big = None
first = None
mid = None
while n:
if n.val < x:
if small:
small.next = n
small = small.next
else:
small = n
first = n
else:
if big:
big.next = n
big = big.next
else:
big = n
mid = n
n = n.next
    if small:  # guard: the list may contain no node < x
        small.next = mid
    else:
        first = mid  # every node is >= x
    if big:  # guard: the list may contain no node >= x
        big.next = None
    return first, big
# n = ListNode(3)
# n.append(5)
# n.append(8)
# n.append(5)
# n.append(10)
# n.append(2)
# n.append(1)
# n,_ = partition2(n,5)
# while n:
# print(n.val)
# n = n.next
| [
"[email protected]"
] | |
92a0f711757a1bedc1524c74b2a79606503bc2e9 | b77cc1448ae2c68589c5ee24e1a0b1e53499e606 | /env/Lib/site-packages/celery/signals.py | a9d74096a187bdc77fcb044d3bc0c5991ad6c1e0 | [] | no_license | PregTech-c/Hrp_system | a5514cf6b4c778bf7cc58e8a6e8120ac7048a0a7 | 11d8dd3221497c536dd7df9028b9991632055b21 | refs/heads/master | 2022-10-09T07:54:49.538270 | 2018-08-21T11:12:04 | 2018-08-21T11:12:04 | 145,424,954 | 1 | 1 | null | 2022-10-01T09:48:53 | 2018-08-20T13:58:31 | JavaScript | UTF-8 | Python | false | false | 4,252 | py | # -*- coding: utf-8 -*-
"""Celery Signals.
This module defines the signals (Observer pattern) sent by
both workers and clients.
Functions can be connected to these signals, and connected
functions are called whenever a signal is called.
.. seealso::
:ref:`signals` for more information.
"""
from __future__ import absolute_import, unicode_literals
from .utils.dispatch import Signal
__all__ = [
'before_task_publish', 'after_task_publish',
'task_prerun', 'task_postrun', 'task_success',
'task_retry', 'task_failure', 'task_revoked', 'celeryd_init',
'celeryd_after_setup', 'worker_init', 'worker_process_init',
'worker_ready', 'worker_shutdown', 'setup_logging',
'after_setup_logger', 'after_setup_task_logger',
'beat_init', 'beat_embedded_init', 'heartbeat_sent',
'eventlet_pool_started', 'eventlet_pool_preshutdown',
'eventlet_pool_postshutdown', 'eventlet_pool_apply',
]
# - Task
before_task_publish = Signal(
name='before_task_publish',
providing_args={
'body', 'exchange', 'routing_key', 'headers',
'properties', 'declare', 'retry_policy',
},
)
after_task_publish = Signal(
name='after_task_publish',
providing_args={'body', 'exchange', 'routing_key'},
)
task_prerun = Signal(
name='task_prerun',
providing_args={'task_id', 'task', 'args', 'kwargs'},
)
task_postrun = Signal(
name='task_postrun',
providing_args={'task_id', 'task', 'args', 'kwargs', 'retval'},
)
task_success = Signal(
name='task_success',
providing_args={'result'},
)
task_retry = Signal(
name='task_retry',
providing_args={'request', 'reason', 'einfo'},
)
task_failure = Signal(
name='task_failure',
providing_args={
'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo',
},
)
task_revoked = Signal(
name='task_revoked',
providing_args={
'request', 'terminated', 'signum', 'expired',
},
)
task_rejected = Signal(
name='task_rejected',
providing_args={'message', 'exc'},
)
task_unknown = Signal(
name='task_unknown',
providing_args={'message', 'exc', 'name', 'id'},
)
#: Deprecated, use after_task_publish instead.
task_sent = Signal(
name='task_sent',
providing_args={
'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset',
},
)
# - Program: `celery worker`
celeryd_init = Signal(
name='celeryd_init',
providing_args={'instance', 'conf', 'options'},
use_caching=False,
)
celeryd_after_setup = Signal(
name='celeryd_after_setup',
providing_args={'instance', 'conf'},
use_caching=False,
)
# - Worker
import_modules = Signal(name='import_modules')
worker_init = Signal(name='worker_init', use_caching=False)
# use_caching must be false when sender is None.
worker_process_init = Signal(
name='worker_process_init',
use_caching=False,
)
worker_process_shutdown = Signal(
name='worker_process_shutdown',
use_caching=False,
)
worker_ready = Signal(name='worker_ready', use_caching=False)
worker_shutdown = Signal(name='worker_shutdown', use_caching=False)
heartbeat_sent = Signal(name='heartbeat_sent')
# - Logging
setup_logging = Signal(
name='setup_logging',
providing_args={
'loglevel', 'logfile', 'format', 'colorize',
},
use_caching=False,
)
after_setup_logger = Signal(
name='after_setup_logger',
providing_args={
'logger', 'loglevel', 'logfile', 'format', 'colorize',
},
use_caching=False,
)
after_setup_task_logger = Signal(
name='after_setup_task_logger',
providing_args={
'logger', 'loglevel', 'logfile', 'format', 'colorize',
},
use_caching=False,
)
# - Beat
beat_init = Signal(name='beat_init', use_caching=False)
beat_embedded_init = Signal(name='beat_embedded_init', use_caching=False)
# - Eventlet
eventlet_pool_started = Signal(name='eventlet_pool_started')
eventlet_pool_preshutdown = Signal(name='eventlet_pool_preshutdown')
eventlet_pool_postshutdown = Signal(name='eventlet_pool_postshutdown')
eventlet_pool_apply = Signal(
name='eventlet_pool_apply',
providing_args={'target', 'args', 'kwargs'},
)
# - Programs
user_preload_options = Signal(
name='user_preload_options',
providing_args={'app', 'options'},
use_caching=False,
)
| [
"[email protected]"
] | |
1770f233133bfac4134d3c943a64c6377601bf89 | 5d0e76e3c741adc120ce753bacda1e723550f7ac | /804. Unique Morse Code Words.py | 32ec17fb4276bfed3e16e772c733fbe1447b419f | [] | no_license | GoldF15h/LeetCode | d8d9d5dedca3cce59f068b94e2edf986424efdbf | 56fcbede20e12473eaf09c9d170c86fdfefe7f87 | refs/heads/main | 2023-08-25T12:31:08.436640 | 2021-10-20T04:36:23 | 2021-10-20T04:36:23 | 392,336,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | class Solution:
def uniqueMorseRepresentations(self, words: List[str]) -> int:
morse = [".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
l = []
for cur in words :
tmp = ''
for i in cur :
tmp += morse[ord(i)-ord('a')]
l.append(tmp)
        return len(set(l))  # count distinct Morse transformations
"[email protected]"
] | |
f99716fda4cb4563b8a60f98be2ac6d07ada0747 | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/reaching-points/412272224.py | f83c584abb244333b9df14020e18ff100cbbc336 | [] | no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # title: reaching-points
# detail: https://leetcode.com/submissions/detail/412272224/
# datetime: Fri Oct 23 22:45:10 2020
# runtime: 20 ms
# memory: 14 MB
class Solution:
    def reachingPoints(self, sx: int, sy: int, tx: int, ty: int) -> bool:
        # Work backwards from (tx, ty): repeatedly undo additions of the
        # smaller coordinate into the larger one using the modulo operator.
if sx == tx and sy == ty:
return True
while tx >= sx and ty >= sy:
if tx > ty:
tx = tx % ty
if sy == ty and sx % sy == tx:
return True
elif tx < ty:
ty = ty % tx
if sx == tx and sy % sx == ty:
return True
else:
return False
return False
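# Example: reachingPoints(1, 1, 3, 5) -> True, since (1,1) -> (1,2) -> (3,2) -> (3,5).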
| [
"[email protected]"
] | |
111b23c3006355c235c2d5856e279a4634f63d1d | 761e133170e1c34a2360d488ddca43fa40107b96 | /src/tools/MaxMa.py | c04d37033046bb1d752839b484a050412fa19f2c | [] | no_license | bjzz/StockParser | c85b7180eea7ac5fa79b320fe1ad8934513c0482 | a2dc1d2de06b78055786b956de940548bca75054 | refs/heads/master | 2023-03-18T04:54:31.487171 | 2019-09-27T09:48:28 | 2019-09-27T09:48:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py | #coding:utf-8
#!/usr/bin/env python
import os
import re
import requests,time
import shutil
import sys
import threading
import time
import datetime
reload(sys)
sys.setdefaultencoding('utf-8')
rootPath = sys.path[0][0:sys.path[0].index('StockParser')]+'/StockParser'
sys.path.append(rootPath+'/src')
from common import Tools
from parsers import BaseParser
'''
Compute the maximum MA5 using "tomorrow's limit-up price" (the last close times 1.1).
'''
def getParams():
code = False if (len(sys.argv) <= 1) else sys.argv[1]
parseDay = time.strftime('%Y-%m-%d',time.localtime(time.time())) if (len(sys.argv) <= 2) else sys.argv[2]
return (code,parseDay)
def getRes(code,parseDay):
parser = BaseParser.BaseParser(parseDay)
priceFile = Tools.getPriceDirPath()+'/'+str(code)
res = open(priceFile,'r').read()
return res
def compute(code,parseDay):
res = getRes(code,parseDay)
parser = BaseParser.BaseParser(parseDay)
dayList = parser.getPastTradingDayList(parseDay,4)
print dayList
e1 = parser.getEndPriceOfDay(res,dayList[0])
e2 = parser.getEndPriceOfDay(res,dayList[1])
e3 = parser.getEndPriceOfDay(res,dayList[2])
e4 = parser.getEndPriceOfDay(res,dayList[3])
e5 = e4 * 1.1
print e1,e2,e3,e4,e5
if 0 == e1*e2*e3*e4:
print 'End Price Error !'
else:
ma3 = (e3+e4+e5)/3.0
ma5 = (e1+e2+e3+e4+e5)/5.0
print 'MA3 = ' + str(ma3)+',MA5 = ' + str(ma5)
if __name__ == '__main__':
(code,parseDay) = getParams()
print code,parseDay
compute(code,parseDay)
| [
"[email protected]"
] | |
1e581d0645442ece0090ccefed1d44c58c5b6f27 | 1f98ccf9ef52d3adab704676480c85fe22c9542d | /simpledb/index/planner/IndexUpdatePlanner.py | 08656dd3f154a4650f863d9049ab5e54285bdf67 | [] | no_license | 61515/simpleDB_Python | 234c671cbbf57f3e8fc5489ec4c292365085b7a8 | b6846da4a78369838f5b3c7a704de704e18f7be7 | refs/heads/master | 2023-02-22T14:07:52.660633 | 2021-01-24T02:25:40 | 2021-01-24T02:25:40 | 332,343,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,129 | py | #
# * A modification of the basic update planner.
# * It dispatches each update statement to the corresponding
# * index planner.
# * @author Edward Sciore
#
from simpledb.plan.SelectPlan import SelectPlan
from simpledb.plan.TablePlan import TablePlan
from simpledb.plan.UpdatePlanner import UpdatePlanner
class IndexUpdatePlanner(UpdatePlanner):
def __init__(self, mdm):
super(IndexUpdatePlanner, self).__init__()
self.mdm = mdm
def executeInsert(self, data, tx):
tblname = data.tableName()
p = TablePlan(tx, tblname, self.mdm)
# first, insert the record
s = p.open()
s.insert()
rid = s.getRid()
# then modify each field, inserting an index record if appropriate
indexes = self.mdm.getIndexInfo(tblname, tx)
valIter = data.vals().__iter__()
for fldname in data.fields():
val = valIter.__next__()
s.setVal(fldname, val)
ii = indexes.get(fldname)
if ii is not None:
idx = ii.open()
idx.insert(val, rid)
idx.close()
s.close()
return 1
def executeDelete(self, data, tx):
tblname = data.tableName()
p = TablePlan(tx, tblname, self.mdm)
p = SelectPlan(p, data.pred())
indexes = self.mdm.getIndexInfo(tblname, tx)
s = p.open()
count = 0
while s.next():
# first, delete the record's RID from every index
rid = s.getRid()
for fldname in indexes.keys():
val = s.getVal(fldname)
idx = indexes.get(fldname).open()
idx.delete(val, rid)
idx.close()
# then delete the record
s.delete()
count += 1
s.close()
return count
def executeModify(self, data, tx):
tblname = data.tableName()
fldname = data.targetField()
p = TablePlan(tx, tblname, self.mdm)
p = SelectPlan(p, data.pred())
ii = self.mdm.getIndexInfo(tblname, tx).get(fldname)
idx = None if (ii is None) else ii.open()
s = p.open()
count = 0
while s.next():
# first, update the record
newval = data.newValue().evaluate(s)
oldval = s.getVal(fldname)
s.setVal(data.targetField(), newval)
# then update the appropriate index, if it exists
if idx is not None:
rid = s.getRid()
idx.delete(oldval, rid)
idx.insert(newval, rid)
count += 1
if idx is not None:
idx.close()
s.close()
return count
def executeCreateTable(self, data, tx):
self.mdm.createTable(data.tableName(), data.newSchema(), tx)
return 0
def executeCreateView(self, data, tx):
self.mdm.createView(data.viewName(), data.viewDef(), tx)
return 0
def executeCreateIndex(self, data, tx):
self.mdm.createIndex(data.indexName(), data.tableName(), data.fieldName(), tx)
return 0
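# Illustrative usage (object names are assumptions): given a metadata manager
# `mdm` and an open transaction `tx`,
#     planner = IndexUpdatePlanner(mdm)
#     rows_deleted = planner.executeDelete(delete_data, tx)
# keeps every index on the table in sync with the deleted records.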
| [
"[email protected]"
] | |
3da2984b78ac4b9d85c60ceb621e0e1f35020a67 | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/paddleLT/donotuse/debug/jit_export_CSPResNet.py | 223881fbcff5703f345d459931528af5ae9a9e8b | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 785 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
ocr rec_srn_head
"""
import copy
import numpy as np
import paddle
import ppdet
paddle.seed(33)
np.random.seed(33)
def randtool(dtype, low, high, shape):
"""
np random tools
"""
if dtype == "int":
return np.random.randint(low, high, shape)
elif dtype == "float":
return low + (high - low) * np.random.random(shape)
def main():
"""main"""
input = {"image": paddle.to_tensor(randtool("float", -1, 1, shape=[4, 3, 224, 224]).astype("float32"))}
net = ppdet.modeling.backbones.cspresnet.CSPResNet()
# net = paddle.jit.to_static(net)
print(net.out_shape)
net(inputs=input)
# paddle.jit.save(net, path='CSPResNet')
main()
| [
"[email protected]"
] | |
ba7a076e824d53bf5d8d6cbbcd4e609c30366bbc | 650e1bea7cec90b3c88ad84a80f5134434920c68 | /larflow/Reco/test/vis_clusters.py | adebb2eeb34637224859b0f832b8b388f6ac7d34 | [] | no_license | NuTufts/larflow | 7698329f50ec7d0db2f0a715e5a9f6dc09998f55 | 1ba2b426f191704a141bb72d7675d9746538eed4 | refs/heads/master | 2023-08-31T04:35:10.251625 | 2020-09-01T01:49:33 | 2020-09-01T01:49:33 | 136,974,430 | 1 | 1 | null | 2020-09-03T03:00:40 | 2018-06-11T19:56:04 | C++ | UTF-8 | Python | false | false | 6,996 | py | from __future__ import print_function
import os,sys,argparse,json
parser = argparse.ArgumentParser("Plot Reco Clusters for Inspection")
parser.add_argument("-ll","--input-larlite",required=True,type=str,help="kpsrecomanager larlite output file")
args = parser.parse_args()
import numpy as np
import ROOT as rt
from larlite import larlite
from larcv import larcv
from larflow import larflow
larcv.SetPyUtil()
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import lardly
color_by_options = ["larmatch","keypoint"]
colorscale = "Viridis"
option_dict = []
for opt in color_by_options:
option_dict.append( {"label":opt,"value":opt} )
# OPEN LARLITE FILE
io = larlite.storage_manager( larlite.storage_manager.kREAD )
io.add_in_filename( args.input_larlite )
io.open()
nentries = io.get_entries()
CURRENT_EVENT = None
print("NENTRIES: ",nentries)
def make_figures(entry,clustername):
"""
if clustername is None return all clusters.
else if string, return specific cluster
"""
from larcv import larcv
larcv.load_pyutil()
detdata = lardly.DetectorOutline()
from larflow import larflow
larcv.SetPyUtil()
print("making figures for entry={} cluster={}".format(entry,clustername))
global io
global kpsanatree
io.go_to(entry)
traces_v = []
cluster_list = []
plot_producer = None
plot_index = None
if clustername != "all":
plot_producer = clustername.split(":")[0]
plot_index = int(clustername.split(":")[1])
# PLOT TRACK PCA-CLUSTERS: FULL/COSMIC
clusters = [("cosmic","trackprojsplit_full","rgb(150,150,150)",0.15,False),
("wctrack","trackprojsplit_wcfilter","rgb(125,200,125)",1.0,True),
("wcshower","showergoodhit","rgb(200,125,125)",0.5,False)]
for (name,producer,rgbcolor,opa,drawme) in clusters:
if not drawme:
continue
ev_trackcluster = io.get_data(larlite.data.kLArFlowCluster, producer )
ev_pcacluster = io.get_data(larlite.data.kPCAxis, producer )
for icluster in range(ev_trackcluster.size()):
lfcluster = ev_trackcluster.at( icluster )
cluster_trace = lardly.data.visualize_larlite_larflowhits( lfcluster, name="%s[%d]"%(name,icluster) )
clabel = "%s:%d (%d hits)"%(producer,icluster,lfcluster.size())
cvalue = "%s:%d"%(producer,icluster)
cluster_list.append( {"label":clabel,"value":cvalue} )
if clustername!="all":
cluster_trace["marker"]["color"] = "rgb(50,50,50)"
else:
r3 = np.random.randint(255,size=3)
rand_color = "rgb(%d,%d,%d)"%( r3[0], r3[1], r3[2] )
cluster_trace["marker"]["color"] = rand_color
cluster_trace["marker"]["opacity"] = opa
cluster_trace["marker"]["width"] = 5.0
pcaxis = ev_pcacluster.at( icluster )
pcatrace = lardly.data.visualize_pcaxis( pcaxis )
pcatrace["name"] = "%s-pca[%d]"%(name,icluster)
pcatrace["line"]["color"] = "rgb(0,0,0)"
pcatrace["line"]["width"] = 1
pcatrace["line"]["opacity"] = 1.0
if plot_producer is not None and plot_producer==producer and plot_index==icluster:
cluster_trace["marker"]["color"] = rgbcolor
traces_v.append(cluster_trace)
traces_v.append( pcatrace )
# add detector outline
traces_v += detdata.getlines(color=(10,10,10))
print("Number of clusters in event: ",len(cluster_list))
return traces_v,cluster_list
def test():
pass
app = dash.Dash(
__name__,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
)
server = app.server
# 3D PLOT WINDOW
axis_template = {
"showbackground": True,
#"backgroundcolor": "#141414", # black
#"gridcolor": "rgba(255, 255, 255)",
#"zerolinecolor": "rgba(255, 255, 255)",
"backgroundcolor": "rgba(100, 100, 100,0.5)",
"gridcolor": "rgb(50, 50, 50)",
"zerolinecolor": "rgb(0, 0, 0)",
}
plot_layout = {
"title": "",
"height":800,
"margin": {"t": 0, "b": 0, "l": 0, "r": 0},
"font": {"size": 12, "color": "black"},
"showlegend": False,
#"plot_bgcolor": "#141414",
#"paper_bgcolor": "#141414",
"plot_bgcolor": "#ffffff",
"paper_bgcolor": "#ffffff",
"scene": {
"xaxis": axis_template,
"yaxis": axis_template,
"zaxis": axis_template,
"aspectratio": {"x": 1, "y": 1, "z": 3},
"camera": {"eye": {"x": 1, "y": 1, "z": 1},
"up":dict(x=0, y=1, z=0)},
"annotations": [],
},
}
# INPUT FORM: EVENT NUM
eventinput = dcc.Input(
id="input_event",
type="number",
placeholder="Input Event")
# INPUT FORM: CLUSTER LIST
plotcluster = dcc.Dropdown(
options=[
{'label':'all','value':'all'},
],
value='all',
id='plotcluster',
)
# PAGE LAYOUT
app.layout = html.Div( [
html.Div( [ eventinput,
plotcluster,
html.Button("Plot",id="plot")
] ),
html.Hr(),
html.Div( [
dcc.Graph(
id="det3d",
figure={
"data": [],
"layout": plot_layout,
},
config={"editable": True, "scrollZoom": False},
)],
className="graph__container"),
html.Div(id="out")
] )
@app.callback(
[Output("det3d","figure"),
Output("plotcluster","options"),
Output("plotcluster","value"),
Output("out","children")],
[Input("plot","n_clicks")],
[State("input_event","value"),
State("plotcluster","value"),
State("det3d","figure")],
)
def cb_render(*vals):
"""
runs when plot button is clicked
"""
global EVENT_DATA
global UNMATCHED_CLUSTERS
global io
global CURRENT_EVENT
if vals[1] is None:
print("Input event is none")
raise PreventUpdate
if vals[1]>=nentries or vals[1]<0:
print("Input event is out of range")
raise PreventUpdate
clustername = vals[2]
entry = int(vals[1])
if entry!=CURRENT_EVENT:
# first time we access an entry, we default to the "all" view of the vertices
CURRENT_EVENT = entry
clustername = "all"
cluster_traces_v,cluster_options = make_figures(int(vals[1]),clustername)
cluster_options.append( {'label':"all",'value':"all"} )
# update the figure's traces
vals[-1]["data"] = cluster_traces_v
return vals[-1],cluster_options,clustername,"event requested: {}; cluster: {}".format(vals[1],vals[2])
if __name__ == "__main__":
app.run_server(debug=True)
| [
"[email protected]"
] | |
1115dc0f1240a03cd10c4c47d711092e5ac14e36 | 64764cbae8641d051c2e26c0c2283e8e626d88fb | /ecf/tbl/GLBCNO.py | 86a5dd5680a44379898071b551f23ffb7ba4d13b | [] | no_license | jazlee/csp-accounting | eb801ce902170337121a6dbe2b1382be4089ecca | 85f50f9d8defbf52e6c85f5c0fc0464101a01d03 | refs/heads/master | 2021-01-25T14:11:18.700456 | 2018-03-03T06:34:57 | 2018-03-03T06:34:57 | 123,666,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | from elixir import *
#
# G/L Batch Numbering Option
#
class GLBCNO(Entity):
GLBCNOID = Field(String(3), primary_key=True)
GLBCNONM = Field(String(32))
GLBCMINO = Field(Integer)
GLBCMXNO = Field(Integer)
GLBCLSNO = Field(Integer)
GLBCAUDT = Field(Numeric(8, 0))
GLBCAUTM = Field(Numeric(6, 0))
GLBCAUUS = Field(String(24))
def getLSNO(cls, noid):
if noid in (None, ''):
            raise Exception('Default batch option has not been set up properly')
q = GLBCNO.query
q = q.filter_by(GLBCNOID = noid)
obj = q.first()
if not obj:
raise Exception('Batch option %s does not exist' % noid)
ret = None
if (obj.GLBCMINO > obj.GLBCLSNO):
ret = obj.GLBCMINO
else:
ret = obj.GLBCLSNO + 1
if ret > obj.GLBCMXNO:
raise Exception('Maximum number batch has been reached')
obj.GLBCLSNO = ret
session.update(obj)
return ret
getLSNO = classmethod(getLSNO)
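# Illustrative usage (assumes a batch-option row with id 'STD' exists and an
# elixir session is active):
#     next_batch_no = GLBCNO.getLSNO('STD')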
| [
"[email protected]"
] | |
46fe70a671b7a3d75410988284962ba930d7a7ae | 9f59d55bd8466f6f50c5bbec4725c8a073b964bd | /base/urls.py | 2533140c3f85ca26346f0e0d8f03a059d330063f | [] | no_license | youngsoul/django-todo-list | 9a88e14ba2bf305844058d6db94ffc8e11b36e5f | f1d687b72fd066a44b29d8974e6e49a094572a6d | refs/heads/master | 2023-04-11T15:08:54.400185 | 2021-04-26T00:57:14 | 2021-04-26T00:57:14 | 361,575,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | from django.urls import path
from .views import TaskList, TaskDetail, TaskCreate, TaskUpdate, TaskDelete, CustomLoginView, RegisterPage
from django.contrib.auth.views import LogoutView
urlpatterns = [
path('login/', CustomLoginView.as_view(), name='login'),
path('logout/', LogoutView.as_view(next_page='login'), name='logout'),
path('register/', RegisterPage.as_view(), name='register'),
path('', TaskList.as_view(), name='tasks'),
path('task/<int:pk>', TaskDetail.as_view(), name='task'),
path('task-create/', TaskCreate.as_view(), name='task-create'),
path('task-update/<int:pk>', TaskUpdate.as_view(), name='task-update'),
path('task-delete/<int:pk>', TaskDelete.as_view(), name='task-delete'),
] | [
"[email protected]"
] | |
e472a596c694aca6cb4500d419d1493f0e53bcfa | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/impl/gen/view_models/views/lobby/mode_selector/mode_selector_wt_widget_model.py | 2378eac9b0d2e89a56f1349cd4960f6589d3ef65 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 1,185 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/mode_selector/mode_selector_wt_widget_model.py
from gui.impl.gen.view_models.views.lobby.mode_selector.mode_selector_base_widget_model import ModeSelectorBaseWidgetModel
class ModeSelectorWtWidgetModel(ModeSelectorBaseWidgetModel):
__slots__ = ()
def __init__(self, properties=4, commands=0):
super(ModeSelectorWtWidgetModel, self).__init__(properties=properties, commands=commands)
def getCurrentProgress(self):
return self._getNumber(1)
def setCurrentProgress(self, value):
self._setNumber(1, value)
def getTotalCount(self):
return self._getNumber(2)
def setTotalCount(self, value):
self._setNumber(2, value)
def getTicketCount(self):
return self._getNumber(3)
def setTicketCount(self, value):
self._setNumber(3, value)
def _initialize(self):
super(ModeSelectorWtWidgetModel, self)._initialize()
self._addNumberProperty('currentProgress', 0)
self._addNumberProperty('totalCount', 0)
self._addNumberProperty('ticketCount', 0)
| [
"[email protected]"
] | |
f4021e8727f0afecf7a0bdc8479df954272d1dde | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/131/usersdata/172/37508/submittedfiles/al10.py | 8b72b71b334d840e9babcac59072316f61d2bb92 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | # -*- coding: utf-8 -*-
#DO NOT DELETE THE LINE ABOVE. START BELOW THIS LINE
n = int(input('enter a value: '))  # interpreted here as the number of product terms (assumption)
i = 2
d = 3
soma = 4.0  # running product; the series appears intended to approximate pi (Wallis-style)
while 0 < n:
    soma = soma * (i/d) * ((i+2)/d)
    i = i + 2
    d = d + 2
    n = n - 1
print('%.5f' % soma)
"[email protected]"
] | |
40960e8b4d96b9073c43ba86fde89699ce912374 | 993c6595f2d7cb2c4efae2c5264fb09008b9c7d4 | /pychallenge/pychallenge/users/migrations/0003_auto_20190123_1757.py | b1783f138089146c10d8deef5cf7e3a8b036e0c6 | [] | no_license | triump0870/pychallenge | 985af46268a0a83cb3c8a891d3ff0faf01570ef5 | c6c117b41bf981efc0acce814a5b17eec49903c6 | refs/heads/master | 2022-12-15T11:58:39.045942 | 2019-01-23T19:17:33 | 2019-01-23T19:17:33 | 167,192,119 | 1 | 0 | null | 2022-12-08T01:36:26 | 2019-01-23T13:57:47 | CSS | UTF-8 | Python | false | false | 1,032 | py | # Generated by Django 2.1.5 on 2019-01-23 17:57
from django.db import migrations, models
import django.utils.timezone
import markdownx.models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20190123_1746'),
]
operations = [
migrations.AddField(
model_name='about',
name='edited',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='about',
name='status',
field=models.CharField(choices=[('D', 'Draft'), ('P', 'Published')], default='D', max_length=1),
),
migrations.AddField(
model_name='about',
name='timestamp',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AlterField(
model_name='about',
name='content',
field=markdownx.models.MarkdownxField(),
),
]
| [
"[email protected]"
] | |
3068573ee1705acd83f26145b16387a3fb624f9f | 0bde5f7f09aa537ed1f4828d4e5ebee66475918f | /h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_table.py | 64286297b4600561a077e8c45d8e6733baddfdb9 | [
"Apache-2.0"
] | permissive | Winfredemalx54/h2o-3 | d69f1c07e1f5d2540cb0ce5e6073415fa0780d32 | dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7 | refs/heads/master | 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 | Apache-2.0 | 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null | UTF-8 | Python | false | false | 784 | py | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2o_H2OFrame_table():
"""
Python API test: h2o.frame.H2OFrame.table(data2=None, dense=True)
Copied from pyunit_table.py
"""
df = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate_cat.csv"))
tableFrame = df[['DPROS','RACE']].table(data2=None, dense=True)
assert_is_type(tableFrame, H2OFrame)
assert tableFrame.sum(axis=0).sum(axis=1).flatten()==df.nrow, \
"h2o.H2OFrame.table() command is not working."
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2o_H2OFrame_table)  # pass the test function itself, not its return value
else:
h2o_H2OFrame_table()
| [
"[email protected]"
] | |
d3897c6338e630a1d5c705c0bc9eafc08f859249 | 0e383ccac5fdf21dc5059502b9aae26412fd6a88 | /sheaths.icmes/src/extract.py | d3de533033d78081270a6c72c86f666f948d5acf | [
"MIT"
] | permissive | jimsrc/seatos | 63c8ad99f2b5d4ae5f203cdc8f8e061948f257f4 | e775dba1a2a96ff44b837cf8d85101ccfef302b1 | refs/heads/master | 2021-01-02T08:38:51.349670 | 2017-09-01T01:59:35 | 2017-09-01T01:59:35 | 99,040,968 | 0 | 1 | null | 2017-09-01T01:59:36 | 2017-08-01T20:33:55 | Python | UTF-8 | Python | false | false | 5,572 | py | #!/usr/bin/env ipython
from pylab import *
from numpy import *
from scipy.io.netcdf import netcdf_file
from datetime import datetime, time, timedelta
#------------ shared libraries:
"""
--- before modifying anything, keep in mind the bugs listed in:
'../../shared_lib/COMENTARIOS.txt'
"""
import sys
sys.path.append('../../shared_lib')
from shared_funcs import * #c_funcs import *
#------------------------------
#from read_NewTable import tshck, tini_icme, tend_icme, tini_mc, tend_mc, n_icmes, MCsig
from ShiftTimes import *
import numpy as np
from z_expansion_gulisano import z as z_exp
import console_colors as ccl
import read_NewTable as tb
class boundaries:
def __init__(self):
name = 'name'
HOME = os.environ['HOME']
PAO = os.environ['PAO']
gral = general()
day = 86400.
#---- input files
gral.fnames = fnames = {}
fnames['ACE'] = '%s/data_ace/64sec_mag-swepam/ace.1998-2014.nc' % HOME
fnames['McMurdo'] = '%s/actividad_solar/neutron_monitors/mcmurdo/mcmurdo_utc_correg.dat' % HOME
fnames['table_richardson'] = '%s/ASOC_ICME-FD/icmes_richardson/data/rich_events_ace.nc' % HOME
fnames['Auger'] = '%s/data_auger/estudios_AoP/data/unir_con_presion/data_final_2006-2013.h5' % PAO
#---- output directories
gral.dirs = dirs = {}
dirs['dir_plots'] = '../plots'
dirs['dir_ascii'] = '../ascii'
dirs['suffix'] = '_auger.data_' # suffix for the directory where these
                                # figures will be saved
#-------------------------------------------------------------
#------- select MCs by catalog label (lepping=2, etc.)
MCwant = {'flags': ('0', '1', '2', '2H'),
          'alias': '0.1.2.2H'} # to tag the figure name/path
#MCwant = {'flags': ('1', '2', '2H'),
#          'alias': '1.2.2H'} # to tag the figure name/path
#MCwant = {'flags': ('2', '2H'),
#          'alias': '2.2H'} # to tag the figure name/path
#MCwant = {'flags': ('2',),
#          'alias': '2'} # to tag the figure name/path
FILTER = {}
FILTER['Mcmultiple'] = False # True para incluir eventos multi-MC
FILTER['CorrShift'] = False
FILTER['wang'] = False #False #True
FILTER['vsw_filter'] = False
FILTER['z_filter_on'] = False
FILTER['MCwant'] = MCwant
FILTER['B_filter'] = False
FILTER['filter_dR.icme'] = False #True
FILTER['choose_1998-2006'] = False # False: do not restrict to the 1998-2006 period
CUTS = {}
CUTS['ThetaThres'] = 90.0 # all events with theta>ThetaThres
CUTS['dTday'] = 0.0
CUTS['v_lo'] = 550.0
CUTS['v_hi'] = 3000.0
CUTS['z_lo'] = -50.0
CUTS['z_hi'] = 0.65
nBin = {}
nBin['before'] = 2
nBin['after'] = 4
nBin['bins_per_utime'] = 50 # bins per unit of time
nBin['total'] = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime']
fgap = 0.2
#--- structure boundaries
bounds = boundaries()
bounds.tini = tb.tshck #tb.tini_mc #tb.tshck
bounds.tend = tb.tini_icme #tb.tend_mc #tb.tini_mc
#+++++++++++++++++++++++++++++++++++++++++++++++++++
gral.data_name = 'McMurdo' #'ACE'
FILTER['vsw_filter'] = False
emgr = events_mgr(gral, FILTER, CUTS, bounds, nBin, fgap, tb, z_exp)
#emgr.run_all()
#+++++++++++++++++++++++++++++++++++++++++++++++++++
emgr.data_name = 'Auger' #'McMurdo'
emgr.FILTER['vsw_filter'] = False
#emgr.run_all()
emgr.filter_events()
emgr.load_data_and_timeshift()
emgr.collect_data()
#+++++++++++++++++++++++++++++++++++++++++++++++++++
"""
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
gral.data_name = 'Auger' #'ACE'
FILTER['vsw_filter'] = False
emgr = events_mgr(gral, FILTER, CUTS, bounds, nBin, fgap, tb, z_exp)
#emgr.run_all()
#++++ limits
LOW, MID1, MID2, TOP = 100.0, 450.0, 550.0, 3000.0
emgr.FILTER['vsw_filter'] = True
emgr.CUTS['v_lo'], emgr.CUTS['v_hi'] = MID2, TOP #MID1, MID2 #LOW, MID1 #
#emgr.run_all()
emgr.filter_events()
emgr.load_data_and_timeshift()
emgr.collect_data()
"""
# save to file
#---- dest directory
dir_dst = '../ascii/MCflag%s' % FILTER['MCwant']['alias']
if FILTER['CorrShift']:
dir_dst += '/wShiftCorr/events_data'
else:
dir_dst += '/woShiftCorr/events_data'
if not(os.path.isdir(dir_dst)):
print "\n ### ERROR ### --> does NOT exist: " + dir_dst
raise SystemExit
#-------------------
events = emgr.out['events_data'].keys()
n_evnts = len(events)
evd = emgr.out['events_data']
"""
for id, i in zip(events, range(n_evnts)):
t = emgr.out['events_data'][id]['t_days']
ndata = len(t)
data_out = np.nan*np.ones((ndata, 3))
data_out[:,0] = t
B = emgr.out['events_data'][id]['B'] # B-data from 'id' event
rmsB = emgr.out['events_data'][id]['rmsB'] # data from 'id' event
data_out[:,1] = B
data_out[:,2] = rmsB
fname_out = '%s/event.data_vlo.%04d_vhi.%04d_id.%s.txt' % (dir_dst, emgr.CUTS['v_lo'], emgr.CUTS['v_hi'], id[3:])
np.savetxt(fname_out, data_out, fmt='%g')
# append a legend
f = open(fname_out, 'a') # append to file
dtsh = emgr.dt_sh[int(id[3:])] # [days] sheath duration
dtmc = emgr.dt_mc[int(id[3:])] # [days] MC duration
COMMS = '# dt_sheath [days]: %g' % dtsh
COMMS += '\n# dt_MC [days]: %g' % dtmc
f.write(COMMS)
f.close()
"""
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
##
| [
"[email protected]"
] | |
a375cf4c71b31dc7efc2a8c11c5da14977bbb778 | 31596558d227ca10911a776023aeef1a7eb0d2c9 | /Other groups/Kaleido/Tate no Yuusha no Nariagari [BD]/tate_06.py | c8cc3bbeb20d34468924be6849d7da32b9be1241 | [
"MIT"
] | permissive | Ichunjo/encode-scripts | 914a113093670f7d07e652ef50f6f04d09cc58b1 | 389a9f497e637eaade6f99acee816636856961d4 | refs/heads/master | 2022-05-03T00:48:54.543905 | 2022-03-17T17:43:02 | 2022-03-17T17:43:02 | 220,536,158 | 45 | 8 | null | 2019-11-08T21:25:59 | 2019-11-08T19:41:14 | Python | UTF-8 | Python | false | false | 10,086 | py | """Tate no Yuusha script"""
__author__ = 'Vardë'
import sys
from pathlib import Path
from typing import NamedTuple, List
from functools import partial
from pymkv import MKVFile, MKVTrack
from acsuite import eztrim
from vsutil import core, vs, depth, get_y
import vardefunc as vdf
import debandshit as dbs
import mvsfunc as mvf
import modfunc as mdf
import placebo
import lvsfunc as lvf
class InfosBD(NamedTuple):
path: str
src: str
src_clip: vs.VideoNode
frame_start: int
frame_end: int
src_cut: vs.VideoNode
a_src: str
a_src_cut: str
a_enc_cut: str
name: str
qpfile: str
output: str
chapter: str
output_final: str
class MaskCredit(NamedTuple):
mask: vs.VideoNode
start_frame: int
end_frame: int
def infos_bd(path, frame_start, frame_end) -> InfosBD:
src = path + '.m2ts'
src_clip = lvf.src(path + '.dgi')
src_cut = src_clip[frame_start:frame_end]
a_src = path + '.mka'
a_src_cut = path + '_cut_track_{}.wav'
a_enc_cut = path + '_track_{}.m4a'
name = sys.argv[0][:-3]
qpfile = name + '_qpfile.log'
output = name + '.264'
chapter = 'chapters/tate_' + name[-2:] + '.txt'
output_final = name + '.mkv'
return InfosBD(path, src, src_clip, frame_start, frame_end, src_cut, a_src, a_src_cut, a_enc_cut,
name, qpfile, output, chapter, output_final)
JPBD = infos_bd(r'[BDMV][190424][Tate no Yuusha no Nariagari][Vol.1]\TATE_1_2\BDMV\STREAM\00011', 0, -24)
JPBD_NCOP = infos_bd(r'[BDMV][190424][Tate no Yuusha no Nariagari][Vol.1]\TATE_1_2\BDMV\STREAM\00013', 24, -24)
USBD = infos_bd(r'[BDMV] The Rising of the Shield Hero S01 Part 1\[BDMV] Rising_Shield_Hero_S1P1_D1\BDMV\STREAM\00019', 24, -24)
X264 = r'C:\Encode Stuff\x264_tmod_Broadwell_r3000\mcf\x264_x64.exe'
X264_ARGS = dict(
qpfile=JPBD.qpfile, threads=18, ref=16, trellis=2, bframes=16, b_adapt=2,
direct='auto', deblock='-2:-2', me='umh', subme=10, psy_rd='0.95:0.00', merange=32,
keyint=360, min_keyint=23, rc_lookahead=60, crf=14, qcomp=0.7, aq_mode=3, aq_strength=1.0
)
def do_filter():
"""Vapoursynth filtering"""
def _sraa(clip: vs.VideoNode, nnargs: dict, eeargs: dict) -> vs.VideoNode:
def _nnedi3(clip):
return clip.nnedi3.nnedi3(0, False, **nnargs)
def _eedi3(clip, sclip):
return clip.eedi3m.EEDI3(0, False, **eeargs, sclip=sclip)
clip = _eedi3(clip, _nnedi3(clip)).std.Transpose()
clip = _eedi3(clip, _nnedi3(clip)).std.Transpose()
return clip
def _nnedi3(clip: vs.VideoNode, factor: float, args: dict) -> vs.VideoNode:
upscale = clip.std.Transpose().nnedi3.nnedi3(0, True, **args) \
.std.Transpose().nnedi3.nnedi3(0, True, **args)
return core.resize.Spline36(
upscale, clip.width * factor, clip.height * factor,
src_top=.5, src_left=.5)
def _line_mask(clip: vs.VideoNode, thr: int) -> vs.VideoNode:
mask = core.std.Prewitt(clip)
mask = core.std.Expr(mask, 'x 2 *').std.Median()
mask = core.std.Expr(mask, f'x {thr} < x x 3 * ?')
return mask.std.Inflate().std.Deflate()
def _ssharp(clip: vs.VideoNode, strength: float, width: int, height: int,
factor: float = 2, b: float = -1, c: float = 6) -> vs.VideoNode:
source = clip
sharp = core.resize.Bicubic(clip, clip.width*factor, clip.height*factor, \
filter_param_a=b, filter_param_b=c).resize.Lanczos(width, height)
source = core.resize.Spline64(source, sharp.width, sharp.height)
sharp = core.rgvs.Repair(sharp, source, 13)
sharp = mvf.LimitFilter(source, sharp, thrc=0.5, elast=6, brighten_thr=0.5, planes=0)
final = core.std.Expr([sharp, source], f'x {strength} * y 1 {strength} - * +')
return final
def to_gray(clip: vs.VideoNode, ref: vs.VideoNode) -> vs.VideoNode:
clip = core.std.AssumeFPS(clip, ref)
return core.resize.Point(clip, format=vs.GRAY16, matrix_s=mvf.GetMatrix(ref))
def _perform_masks_credit(path: Path) -> List[MaskCredit]:
return [MaskCredit(lvf.src(str(mask)), int(str(mask.stem).split('_')[2]),
int(str(mask.stem).split('_')[3]))
for mask in path.glob('*')]
def _w2x(clip: vs.VideoNode) -> vs.VideoNode:
waifu2x = core.w2xc.Waifu2x(mvf.ToRGB(clip, depth=32), noise=2, scale=2) \
.resize.Bicubic(clip.width, clip.height)
return mvf.ToYUV(waifu2x, css='420', depth=16)
def _perform_filtering_ending(clip: vs.VideoNode, adapt_mask: vs.VideoNode) -> vs.VideoNode:
luma = get_y(clip)
denoise_a = mvf.BM3D(luma, 2.25, 1)
denoise_b = mvf.BM3D(luma, 1.25, 1)
denoise = core.std.MaskedMerge(denoise_a, denoise_b, adapt_mask)
grain = core.grain.Add(denoise, 0.3, constant=True)
return core.std.MaskedMerge(denoise, grain, adapt_mask)
# pylint: disable=unused-argument
def _diff(n: int, f: vs.VideoFrame, new: vs.VideoNode, adapt: vs.VideoNode) -> vs.VideoNode:
psa = f.props['PlaneStatsAverage']
if psa > 0.5:
clip = new
elif psa < 0.4:
clip = adapt
else:
weight = (psa - 0.4) * 10
clip = core.std.Merge(adapt, new, weight)
return clip
opstart, opend = 1200, 3357
edstart, edend = 31770, 33927
src = JPBD.src_cut
src = depth(src, 16)
src = core.std.FreezeFrames(src, opstart+2132, opend, opstart+2132)
denoise = mdf.hybriddenoise_mod(src, 0.55, 2.25)
diff = core.std.MakeDiff(src, denoise, [0, 1, 2])
luma = get_y(denoise)
upscale = _nnedi3(luma, 1.5, dict(nsize=0, nns=3, qual=1, pscrn=1))
sraa = _sraa(upscale, dict(nsize=0, nns=3, qual=1, pscrn=1),
dict(alpha=0.2, beta=0.5, gamma=80, nrad=3, mdis=18))
sraa = core.rgvs.Repair(sraa, upscale, 3)
rescale = _ssharp(sraa, 0.55, src.width, src.height, 2)
artefacts_mask = core.std.BlankClip(rescale, color=(256 << 8) - 1)
artefacts_mask = vdf.region_mask(artefacts_mask, 2, 2, 2, 2).std.Inflate()
rescale = core.std.MaskedMerge(luma, rescale, artefacts_mask)
lineart_mask = _line_mask(luma, 8000)
antialias = core.std.MaskedMerge(luma, rescale, lineart_mask)
antialias_merged = vdf.merge_chroma(antialias, denoise)
src_c, src_ncop = [c.knlm.KNLMeansCL(a=6, h=20, d=0, device_type='gpu')
for c in [src, JPBD_NCOP.src_cut[:opend-opstart+1]]]
credit_mask = vdf.dcm(src, src_c[opstart:opend+1], src_ncop, opstart, opend, 2, 2).std.Deflate()
credit = core.std.MaskedMerge(antialias_merged, denoise, credit_mask)
masks_credit_ = _perform_masks_credit(Path('masks_' + JPBD.name[-2:] + '/'))
for mask in masks_credit_:
credit = lvf.rfs(credit, core.std.MaskedMerge(credit, denoise, to_gray(mask.mask, src).std.Deflate()),
[(mask.start_frame, mask.end_frame)])
deband_mask = lvf.denoise.detail_mask(credit, brz_a=3000, brz_b=1500)
deband_a = dbs.f3kpf(credit, 18, 36, 36)
deband_b = dbs.f3kpf(credit, 18, 42, 42)
deband_c = placebo.Deband(credit, radius=16, threshold=4, iterations=1, grain=0)
deband_d = placebo.Deband(deband_b, radius=20, threshold=5, iterations=1, grain=0)
deband = lvf.rfs(deband_a, deband_b, [(opstart, opstart+146)])
deband = lvf.rfs(deband, deband_c, [(opstart+1225, opstart+1238)])
deband = lvf.rfs(deband, deband_d, [(opstart+970, opstart+984)])
deband = core.std.MaskedMerge(deband, credit, deband_mask)
grain_original = core.std.MergeDiff(deband, diff, [0, 1, 2])
grain_new = core.neo_f3kdb.Deband(deband, preset='depth', grainy=32, grainc=32)
avg = core.std.PlaneStats(deband)
adapt_mask = core.adg.Mask(get_y(avg), 28)
grain_adapt = core.std.MaskedMerge(grain_new, grain_original, adapt_mask)
grain = core.std.FrameEval(deband, partial(_diff, new=grain_new, adapt=grain_adapt), avg)
grain = lvf.rfs(grain, grain_new, [(opstart+147, opstart+496), (opstart+575, opstart+644),
(opstart+702, opstart+969), (opstart+1076, opstart+1117),
(opstart+1428, opstart+1461), (opstart+1859, opstart+2035)])
w2x = _w2x(denoise).grain.Add(1, 0.5, constant=True)
w2x = lvf.rfs(grain, w2x, [(opstart+1211, opstart+1224)])
ending = _perform_filtering_ending(src, adapt_mask)
ending = vdf.merge_chroma(ending, denoise)
final = lvf.rfs(w2x, ending, [(edstart, edend)])
return depth(final, 10)
def do_encode(filtered):
"""Compression with x264"""
print('Qpfile generating')
vdf.gk(JPBD.src_cut, JPBD.qpfile)
print('\n\n\nVideo encoding')
vdf.encode(filtered, X264, JPBD.output, **X264_ARGS)
print('\n\n\nAudio extraction')
track_01 = USBD.a_src + '_eng.w64'
track_02 = USBD.a_src + '_jpn.w64'
eac3to_args = ['eac3to', USBD.src, '3:', track_01, '4:', track_02, '-log=NUL']
vdf.subprocess.run(eac3to_args, text=True, check=True, encoding='utf-8')
mka = MKVFile()
mka.add_track(MKVTrack(track_01, 0))
mka.add_track(MKVTrack(track_02, 0))
mka.mux(USBD.a_src)
print('\n\n\nAudio cutting')
eztrim(USBD.src_clip, (USBD.frame_start, USBD.frame_end), USBD.a_src, mkvextract_path='mkvextract')
print('\n\n\nAudio encoding')
for i in range(1, len(mka.tracks) + 1):
qaac_args = ['qaac64', USBD.a_src_cut.format(i), '-V', '127', '--no-delay', '-o', USBD.a_enc_cut.format(i)]
vdf.subprocess.run(qaac_args, text=True, check=True, encoding='utf-8')
print('\nFinal muxing')
mkv = MKVFile()
mkv.add_track(MKVTrack(JPBD.output, language='jpn', default_track=True))
mkv.add_track(MKVTrack(USBD.a_enc_cut.format(2), language='jpn', default_track=True))
mkv.add_track(MKVTrack(USBD.a_enc_cut.format(1), language='eng', default_track=False))
mkv.chapters(JPBD.chapter, 'jpn')
mkv.mux(JPBD.output_final)
if __name__ == '__main__':
FILTERED = do_filter()
do_encode(FILTERED)
| [
"[email protected]"
] | |
df49c2e8770f6ff76b848c7878d8f60d0083ec8f | 84566b23a26a3eeadc3d99e6ada39296759da3e6 | /ptero_auth/implementation/models/scopes.py | 3ca8e29c1c981954ec1c7c2c872f8a68d35e51f6 | [] | no_license | iferguson90/ptero-auth | 2cfd28c11add633c78ef768fede2ff04e2fe064b | 97047466387df71a8cb8ae29d955f6471540ebfe | refs/heads/master | 2021-01-22T14:39:45.335063 | 2014-08-29T20:56:25 | 2014-08-29T20:56:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | from .base import Base
from sqlalchemy import Column, ForeignKey, Integer, Text
from sqlalchemy import Table, PrimaryKeyConstraint
from sqlalchemy.orm import relationship
__all__ = ['Scope']
class Scope(Base):
__tablename__ = 'scope'
scope_pk = Column(Integer, primary_key=True)
value = Column(Text, index=True, unique=True, nullable=False)
allowed_scope_table = Table('allowed_scope_bridge', Base.metadata,
Column('client_pk', Integer, ForeignKey('client.client_pk')),
Column('scope_pk', Integer, ForeignKey('scope.scope_pk')),
PrimaryKeyConstraint('client_pk', 'scope_pk')
)
default_scope_table = Table('default_scope_bridge', Base.metadata,
Column('client_pk', Integer, ForeignKey('client.client_pk')),
Column('scope_pk', Integer, ForeignKey('scope.scope_pk')),
PrimaryKeyConstraint('client_pk', 'scope_pk')
)
scope_audience_table = Table('scope_audience_bridge', Base.metadata,
Column('client_pk', Integer, ForeignKey('client.client_pk')),
Column('scope_pk', Integer, ForeignKey('scope.scope_pk')),
PrimaryKeyConstraint('client_pk', 'scope_pk')
)
grant_scope_table = Table('grant_scope_bridge', Base.metadata,
Column('grant_pk', Integer, ForeignKey('grant.grant_pk')),
Column('scope_pk', Integer, ForeignKey('scope.scope_pk')),
PrimaryKeyConstraint('grant_pk', 'scope_pk')
)
| [
"[email protected]"
] | |
b3ba9c0c8676bbdaab744f0fb4b34f6b0a7ce4d7 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/aio/operations/_usages_operations.py | 18d28a6f9d9f1909b20d7b52d7da854475fcec24 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 5,201 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations:
"""UsagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["_models.UsagesListResult"]:
"""List network usages for a subscription.
:param location: The location where resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsagesListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.UsagesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsagesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._ ]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('UsagesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages'} # type: ignore
| [
"[email protected]"
] | |
7adedccffebabc3cee9a05501b6ab85fe7b4b3e1 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/FXJSMM/YW_FXJSMM_SZSJ_292.py | 06e684a9bd632dff4ab6408ce3316867e466936b | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,070 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_FXJSMM_SZSJ_292(xtp_test_case):
# YW_FXJSMM_SZSJ_292
def test_YW_FXJSMM_SZSJ_292(self):
        title = '深圳A股股票交易日本方最优卖——错误的数量(数量<0)'  # Shenzhen A-share, forward-best sell on a trading day - invalid quantity (quantity < 0)
        # Define the expected values for the current test case
        # Expected status: initial / not traded / partially traded / fully traded / partial-cancel reported / partially cancelled / reported, pending cancel / cancelled / rejected / cancel rejected / internally cancelled
        # xtp_ID and cancel_xtpID default to 0 and need no change
case_goal = {
'期望状态': '废单',
'errorID': 10210301,
'errorMSG': queryOrderErrorMsg(10210301),
'是否生成报单': '否',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameter information ------------------------------------------
        # Parameters: ticker, market, security type, security status, trading status, side (B=buy, S=sell), expected status, Api
stkparm = QueryStkPriceQty('999999', '2', '0', '0', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
'price': stkparm['涨停价'],
'quantity': -100,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
4c6342b8a903410e1a5d3185e85c44b88365c921 | b1e52f926116286c138890ca0d86bf74433e8ee4 | /lib/SpriteLoader.py | a1063b8adea2fc0d10c23c115df455528f5701c7 | [] | no_license | topherCantrell/pixel-sign | 22f35b84bbaaf98fb143229f2df6afe0911e1bb0 | b8f1c1723f81259fc3dc3e91b275aea0215802df | refs/heads/master | 2021-07-11T16:51:31.799934 | 2020-07-22T17:36:14 | 2020-07-22T17:36:14 | 160,055,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,822 | py |
class SpriteLoader:
def __init__(self):
self.sprites = {}
with open("Sprites.txt") as f:
lines = f.readlines()
currentName = None
currentSprite = []
for line in lines:
line = line.strip()
if len(line)==0:
continue
if line[0]=='-':
if currentName != None:
self.sprites[currentName] = currentSprite
currentName = line[1:]
currentSprite = []
continue
currentSprite.append(line)
if currentName != None:
self.sprites[currentName] = currentSprite
def colorSprite(self,name,colorMap):
sprite = self.sprites[name]
ret = [[[] for _ in range(len(sprite[0]))] for _ in range(len(sprite))]
for y in range(0,len(sprite)):
s = sprite[y]
for x in range(len(s)):
c = s[x]
v = 0
if c=='.' or c==' ':
v = 0
else:
for z in range(0,len(colorMap),2):
if colorMap[z]==c:
v = colorMap[z+1]
break
ret[y][x] = v
return ret
def doubler(self,colorSprite):
ret =[]
for y in range(0,len(colorSprite)):
drow = []
for x in range(0,len(colorSprite[y])):
drow.append(colorSprite[y][x])
drow.append(colorSprite[y][x])
ret.append(drow)
ret.append(drow)
return ret
def flipLeftRight(self,sprite):
'''
int [][] ret = new int[colorSprite.length][colorSprite[0].length];
for(int y=0;y<colorSprite.length;++y) {
for(int x=0;x<colorSprite[y].length;++x) {
ret[y][colorSprite[y].length-x-1] = colorSprite[y][x];
}
}
return ret;
'''
ret = [[[] for _ in range(len(sprite[0]))] for _ in range(len(sprite))]
for y in range(0,len(sprite)):
for x in range(0,len(sprite[y])):
ret[y][len(sprite[y])-x-1] = sprite[y][x]
return ret
    def flipUpDown(self,colorSprite):
        '''
        int [][] ret = new int[colorSprite.length][];
        int i = ret.length-1;
        for(int x=0;x<colorSprite.length;++x) {
            ret[i] = colorSprite[x];
            --i;
        }
        return ret;
        '''
        # Python port of the Java sketch above: reverse the row order
        ret = []
        for row in reversed(colorSprite):
            ret.append(row)
        return ret
if __name__ == '__main__':
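    # Usage sketch (hypothetical sprite name; Sprites.txt must define it;
    # the color map is a flat [char, value, char, value, ...] list):
    #   sp = SpriteLoader()
    #   ship = sp.colorSprite('ship', ['x', 1, 'o', 2])
    #   big = sp.doubler(ship)
    #   mirrored = sp.flipLeftRight(ship)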
sp = SpriteLoader() | [
"[email protected]"
] | |
12ea1bd995b23ab1c185e7562c2fbb73ddf63694 | f8f8651ab604acc4937f8725caadaca1fb97a5e8 | /src/pytorch_lightning/plugins/precision/native_amp.py | 4df1b166ca8dd31d4fbea638d32dc81110659018 | [
"Apache-2.0"
] | permissive | neptune-ai/pytorch-lightning | ac59e746a486e07e21abae426b28e5d72812ac98 | 702014418e2ec0437e67d8bf97809edef686a02c | refs/heads/master | 2022-09-28T09:34:07.653729 | 2022-09-12T11:13:48 | 2022-09-12T11:13:48 | 229,063,811 | 1 | 1 | Apache-2.0 | 2022-09-26T03:29:49 | 2019-12-19T13:48:16 | Python | UTF-8 | Python | false | false | 5,106 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from typing import Any, Callable, Dict, Generator, Optional, Union
import torch
from torch import Tensor
from torch.nn import Module
from torch.optim import LBFGS, Optimizer
import pytorch_lightning as pl
from pytorch_lightning.plugins.precision.mixed import MixedPrecisionPlugin
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_10, AMPType
from pytorch_lightning.utilities.exceptions import MisconfigurationException
if _TORCH_GREATER_EQUAL_1_10:
from torch import autocast as new_autocast
else:
from torch.cuda.amp import autocast as old_autocast
class NativeMixedPrecisionPlugin(MixedPrecisionPlugin):
"""Plugin for Native Mixed Precision (AMP) training with ``torch.autocast``.
Args:
precision: Whether to use ``torch.float16`` (``16``) or ``torch.bfloat16`` (``'bf16'``).
device: The device for ``torch.autocast``.
scaler: An optional :class:`torch.cuda.amp.GradScaler` to use.
"""
backend = AMPType.NATIVE
def __init__(
self, precision: Union[str, int], device: str, scaler: Optional[torch.cuda.amp.GradScaler] = None
) -> None:
super().__init__()
if precision == "bf16" and not _TORCH_GREATER_EQUAL_1_10:
raise MisconfigurationException(
"To use bfloat16 with native amp you must install torch greater or equal to 1.10."
)
if scaler is None and precision == 16:
scaler = torch.cuda.amp.GradScaler()
if scaler is not None and precision == "bf16":
raise MisconfigurationException(f"`precision='bf16'` does not use a scaler, found {scaler}.")
self.precision = precision
self.device = device
self.scaler = scaler
def pre_backward(self, model: "pl.LightningModule", closure_loss: Tensor) -> Tensor:
if self.scaler is not None:
closure_loss = self.scaler.scale(closure_loss)
return super().pre_backward(model, closure_loss)
def _run_backward(self, tensor: Tensor, model: Optional[Module], *args: Any, **kwargs: Any) -> None:
if self.scaler is not None:
tensor = self.scaler.scale(tensor)
super()._run_backward(tensor, model, *args, **kwargs)
def optimizer_step(
self,
model: Optional[Union["pl.LightningModule", Module]],
optimizer: Optimizer,
optimizer_idx: int,
closure: Callable[[], Any],
**kwargs: Any,
) -> Any:
if self.scaler is None:
# skip scaler logic, as bfloat16 does not require scaler
return super().optimizer_step(model, optimizer, optimizer_idx, closure, **kwargs)
if isinstance(optimizer, LBFGS):
raise MisconfigurationException(
f"Native AMP and the LBFGS optimizer are not compatible (optimizer {optimizer_idx})."
)
closure_result = closure()
# `unscale` after the closure is executed but before the `on_before_optimizer_step` hook.
self.scaler.unscale_(optimizer)
self._after_closure(model, optimizer, optimizer_idx)
skipped_backward = closure_result is None
# in manual optimization, the closure does not return a value
if not isinstance(model, pl.LightningModule) or not model.automatic_optimization or not skipped_backward:
# note: the scaler will skip the `optimizer.step` if nonfinite gradients are found
step_output = self.scaler.step(optimizer, **kwargs)
self.scaler.update()
return step_output
return closure_result
def autocast_context_manager(self) -> Union["old_autocast", "new_autocast"]:
if _TORCH_GREATER_EQUAL_1_10:
# the dtype could be automatically inferred but we need to manually set it due to a bug upstream
# https://github.com/pytorch/pytorch/issues/67233
return new_autocast(self.device, dtype=torch.bfloat16 if self.precision == "bf16" else torch.half)
return old_autocast()
@contextmanager
def forward_context(self) -> Generator[None, None, None]:
"""Enable autocast context."""
with self.autocast_context_manager():
yield
def state_dict(self) -> Dict[str, Any]:
if self.scaler is not None:
return self.scaler.state_dict()
return {}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
if self.scaler is not None:
self.scaler.load_state_dict(state_dict)
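
# Illustrative construction (assumes a CUDA device; with precision=16 the plugin
# builds a torch.cuda.amp.GradScaler itself, while precision='bf16' runs without one):
# plugin = NativeMixedPrecisionPlugin(16, "cuda")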
| [
"[email protected]"
] | |
a7ecf43316deac0104330b55d0d997358e4b3b58 | 19bb4caf8a06868498c5b7c35c46e5e1da188548 | /simpleorm.py | 2349a3ba2db1ac9933b2255e4e709ce279b1f82a | [] | no_license | MUIC-CS/summer2017-week2-tabkeeper | 3585924534972d354c04007845e18a9b6868a48f | 68b1b989f3d8a8100b599b9a8cdb5251639aa14d | refs/heads/master | 2021-01-15T12:37:37.513496 | 2017-08-09T19:12:16 | 2017-08-09T19:12:16 | 99,653,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,478 | py | from db import get_cursor
class SimpleBean:
columns = ()
def __init__(self, *args, **kwds):
# TODO: make it set
tmp = {k: v for k, v in zip(self.columns, args)}
tmp.update(kwds)
self.__dict__ = self.filter_params(tmp)
@classmethod
def filter_params(cls, d):
ret = {}
for col in cls.columns:
if(col not in d):
ret[col] = None
else:
ret[col] = d[col]
return ret
@classmethod
def from_dict(cls, d):
dd = {k: v for k, v in d.items() if k in cls.columns}
        return cls(**cls.filter_params(dd))
@classmethod
def id_col(cls):
return cls.columns[0]
def tuple_values(self, with_id=False):
start = 0 if with_id else 1
return tuple(self.__dict__[col] for col in self.columns[start:])
def __repr__(self):
values = ['{c}={v}'.format(c=col, v=self.__dict__[col])
for col in self.columns]
vals = ', '.join(values)
classname = self.__class__.__name__
return '<{classname} {vals}>'.format(classname=classname, vals=vals)
class SimpleRepo:
table_name = ''
bean_class = None
create_query = ''
@classmethod
def create_table(cls, drop=False):
if drop:
cls.drop_table()
with get_cursor() as cur:
cur.execute(cls.create_query.format(table_name=cls.table_name))
cur.connection.commit()
@classmethod
def find_by_col(cls, col, value):
with get_cursor() as cur:
cur.execute(
"""
SELECT * from {table_name} where {col}=%s
""".format(table_name=cls.table_name, col=col),
(value,)
)
rs = cur.fetchone()
            return cls.bean_class.from_dict(rs)
@classmethod
def find_all(cls):
with get_cursor() as cur:
cur.execute(
"""
SELECT * from {table_name}
""".format(table_name=cls.table_name)
)
return [cls.bean_class.from_dict(d) for d in cur.fetchall()]
@classmethod
def find_by_id(cls, value):
        return cls.find_by_col(cls.bean_class.id_col(), value)
@classmethod
def delete_by_id(cls, id):
with get_cursor() as cur:
cur.execute(
"""
DELETE FROM {table_name} where {id_col}=%s
""".format(
table_name=cls.table_name,
                    id_col=cls.bean_class.id_col()),
(id,)
)
cur.connection.commit()
@classmethod
def add(cls, obj):
col_tuple = ', '.join(cls.bean_class.columns[1:])
ph = ', '.join(['%s'] * (len(cls.bean_class.columns) - 1))
id_col = cls.bean_class.id_col()
print obj.tuple_values()
with get_cursor() as cur:
cur.execute(
"""
INSERT INTO {table_name}({col_tuple})
VALUES ({ph}) RETURNING {id_col}
""".format(table_name=cls.table_name,
col_tuple=col_tuple,
ph=ph,
id_col=id_col),
obj.tuple_values()
)
id = cur.fetchone()[id_col]
cur.connection.commit()
obj.id = id
return obj
@classmethod
def add_all(cls, objs):
return [cls.add(obj) for obj in objs]
@classmethod
def drop_table(cls):
with get_cursor() as cur:
cur.execute(
"""
DROP TABLE IF EXISTS {table_name}
""".format(table_name=cls.table_name)
)
cur.connection.commit()
@classmethod
def delete_table(cls):
with get_cursor() as cur:
cur.execute(
"""
DELETE FROM {table_name}
""".format(table_name=cls.table_name)
)
cur.connection.commit()
@classmethod
def fetch_by_condition(cls, cond, args):
with get_cursor() as cur:
cur.execute(
"""SELECT *
FROM {table_name}
WHERE {cond}
""".format(cond=cond, table_name=cls.table_name),
args
)
rs = cur.fetchall()
return [cls.bean_class.from_dict(row) for row in rs]
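# Minimal usage sketch (hypothetical table and columns; assumes get_cursor()
# yields a dict-style Postgres cursor and the serial id column comes first):
# class Tab(SimpleBean):
#     columns = ('id', 'name', 'amount')
# class TabRepo(SimpleRepo):
#     table_name = 'tab'
#     bean_class = Tab
#     create_query = 'CREATE TABLE {table_name} (id serial PRIMARY KEY, name text, amount numeric)'
# TabRepo.create_table(drop=True)
# saved = TabRepo.add(Tab(name='lunch', amount=12.5))  # returns the bean with id filled in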
| [
"[email protected]"
] | |
970ed11ee3587e2517db5c3285c8aa9aff4724f4 | f36d2e601b9aa0498c68951e8a081b6ce2036116 | /modules/ip_commands.py | dfea39dfb7b8e1745dfedf9f473a43782d70fc07 | [] | no_license | jaconsta/rpi_notify | 16de79acfb916646cb0ebd4cb8bbb3a7def87c31 | e0aac512dc96eb66fb61ac13560a59e4b1929b89 | refs/heads/master | 2021-01-11T20:48:57.761601 | 2017-01-17T05:05:21 | 2017-01-17T05:05:21 | 79,190,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | """
Obtain current local IP address.
"""
import platform
import socket
import subprocess
def run_command(cmd):
"""
    Execute this OS command and return the formatted response.
"""
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return err if err else out.decode('UTF-8')
def get_ip():
"""
Get the local IP.
"""
    if platform.system() == 'Linux':
        # note: returns the raw `ip route` output, not a bare address
        ip = run_command(['ip', 'route'])
    elif platform.system() == 'Windows':
        # note: returns the raw `ipconfig` output, not a bare address
        ip = run_command(['ipconfig'])
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 0))
ip = s.getsockname()[0]
except:
ip = '127.0.0.1'
finally:
s.close()
return ip
whoami = socket.gethostname()
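# Usage sketch (hypothetical output):
#   print(get_ip())   # e.g. '192.168.0.12' via the socket fallback,
#                     # or the raw `ip route`/`ipconfig` text on Linux/Windows
#   print(whoami)     # local hostname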
| [
"[email protected]"
] | |
5223dc88f870d415144c5bdd6c0ed2fe056f2db6 | b9efab6b369cd8e9fec0cbd1e73c45fe1be354c7 | /backend/api/migrations/0187_auto_20190923_1556.py | a9371906a6795185204e1f6f83756c4582ab97d1 | [
"Apache-2.0"
] | permissive | WadeBarnes/tfrs | 646e0aed2bf48d5100f331edbb2d80418adee4b6 | d62f58019dfa0164ce788912099862e35098fbb1 | refs/heads/master | 2021-02-10T21:37:20.021758 | 2020-02-25T17:01:56 | 2020-02-25T17:01:55 | 244,421,600 | 0 | 0 | Apache-2.0 | 2020-03-02T16:40:09 | 2020-03-02T16:40:08 | null | UTF-8 | Python | false | false | 6,643 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-09-23 15:56
from __future__ import unicode_literals
import api.notifications.notification_types
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0186_compliancereport_nickname'),
]
operations = [
migrations.AddField(
model_name='notificationmessage',
name='related_report',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='api.ComplianceReport'),
),
migrations.AlterField(
model_name='notificationsubscription',
name='notification_type',
field=models.CharField(choices=[(api.notifications.notification_types.NotificationType('Credit Transfer Proposal Created'), 'Credit Transfer Proposal Created'), (api.notifications.notification_types.NotificationType('Credit Transfer Proposal Signed 1/2'), 'Credit Transfer Proposal Signed 1/2'), (api.notifications.notification_types.NotificationType('Credit Transfer Proposal Signed 2/2'), 'Credit Transfer Proposal Signed 2/2'), (api.notifications.notification_types.NotificationType('Credit Transfer Proposal Refused'), 'Credit Transfer Proposal Refused'), (api.notifications.notification_types.NotificationType('Credit Transfer Proposal Accepted'), 'Credit Transfer Proposal Accepted'), (api.notifications.notification_types.NotificationType('Credit Transfer Proposal Recommended For Approval'), 'Credit Transfer Proposal Recommended For Approval'), (api.notifications.notification_types.NotificationType('Credit Transfer Proposal Recommended For Declination'), 'Credit Transfer Proposal Recommended For Declination'), (api.notifications.notification_types.NotificationType('Credit Transfer Proposal Declined'), 'Credit Transfer Proposal Declined'), (api.notifications.notification_types.NotificationType('Credit Transfer Proposal Approved'), 'Credit Transfer Proposal Approved'), (api.notifications.notification_types.NotificationType('Credit Transfer Proposal Rescinded'), 'Credit Transfer Proposal Rescinded'), (api.notifications.notification_types.NotificationType('Credit Transfer Proposal Comment Created Or Updated'), 'Credit Transfer Proposal Comment Created Or Updated'), (api.notifications.notification_types.NotificationType('Credit Transfer Proposal Internal Comment Created Or Updated'), 'Credit Transfer Proposal Internal Comment Created Or Updated'), (api.notifications.notification_types.NotificationType('PVR Created'), 'PVR Created'), (api.notifications.notification_types.NotificationType('PVR Recommended For Approval'), 'PVR Recommended For Approval'), (api.notifications.notification_types.NotificationType('PVR Rescinded'), 'PVR Rescinded'), (api.notifications.notification_types.NotificationType('PVR Pulled Back'), 'PVR Pulled Back'), (api.notifications.notification_types.NotificationType('PVR Declined'), 'PVR Declined'), (api.notifications.notification_types.NotificationType('PVR Approved'), 'PVR Approved'), (api.notifications.notification_types.NotificationType('PVR Comment Created Or Updated'), 'PVR Comment Created Or Updated'), (api.notifications.notification_types.NotificationType('PVR Internal Comment Created Or Updated'), 'PVR Internal Comment Created Or Updated'), (api.notifications.notification_types.NotificationType('PVR Returned to Analyst'), 'PVR Returned to Analyst'), (api.notifications.notification_types.NotificationType('Document Pending Submission'), 'Document Pending Submission'), (api.notifications.notification_types.NotificationType('Document Submitted'), 'Document Submitted'), (api.notifications.notification_types.NotificationType('Document Security Scan Failed'), 'Document Security Scan Failed'), (api.notifications.notification_types.NotificationType('Document Received'), 'Document Received'), (api.notifications.notification_types.NotificationType('Document Archived'), 'Document Archived'), (api.notifications.notification_types.NotificationType('Compliance Report Draft Saved'), 'Compliance Report Draft Saved'), (api.notifications.notification_types.NotificationType('Compliance Report Submitted'), 'Compliance Report Submitted'), 
(api.notifications.notification_types.NotificationType('Compliance Report Recommended for Acceptance - Analyst'), 'Compliance Report Recommended for Acceptance - Analyst'), (api.notifications.notification_types.NotificationType('Compliance Report Recommended for Rejection - Analyst'), 'Compliance Report Recommended for Rejection - Analyst'), (api.notifications.notification_types.NotificationType('Compliance Report Recommended for Acceptance - Manager'), 'Compliance Report Recommended for Acceptance - Manager'), (api.notifications.notification_types.NotificationType('Compliance Report Recommended for Rejection - Manager'), 'Compliance Report Recommended for Rejection - Manager'), (api.notifications.notification_types.NotificationType('Compliance Report Accepted'), 'Compliance Report Accepted'), (api.notifications.notification_types.NotificationType('Compliance Report Rejected'), 'Compliance Report Rejected'), (api.notifications.notification_types.NotificationType('Compliance Report Requested Supplemental'), 'Compliance Report Requested Supplemental'), (api.notifications.notification_types.NotificationType('Exclusion Report Draft Saved'), 'Exclusion Report Draft Saved'), (api.notifications.notification_types.NotificationType('Exclusion Report Submitted'), 'Exclusion Report Submitted'), (api.notifications.notification_types.NotificationType('Exclusion Report Recommended for Acceptance - Analyst'), 'Exclusion Report Recommended for Acceptance - Analyst'), (api.notifications.notification_types.NotificationType('Exclusion Report Recommended for Rejection - Analyst'), 'Exclusion Report Recommended for Rejection - Analyst'), (api.notifications.notification_types.NotificationType('Exclusion Report Recommended for Acceptance - Manager'), 'Exclusion Report Recommended for Acceptance - Manager'), (api.notifications.notification_types.NotificationType('Exclusion Report Recommended for Rejection - Manager'), 'Exclusion Report Recommended for Rejection - Manager'), (api.notifications.notification_types.NotificationType('Exclusion Report Accepted'), 'Exclusion Report Accepted'), (api.notifications.notification_types.NotificationType('Exclusion Report Rejected'), 'Exclusion Report Rejected'), (api.notifications.notification_types.NotificationType('Exclusion Report Requested Supplemental'), 'Exclusion Report Requested Supplemental')], max_length=128),
),
]
| [
"[email protected]"
] | |
7a49fe717f07a258ac1b369ccb182ffb5f4c7d15 | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve/client/script/parklife/dungeonHelper.py | b24b4cf64431365e64c28ad45bd6745612c62980 | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,371 | py | #Embedded file name: eve/client/script/parklife\dungeonHelper.py
"""
The file provides helper functions to perform dungeon editing operations.
Each function here should be able to handle the case of performing the operation through
Jessica if the client is launched with it, and default to remote server calls otherwise.
"""
import blue
import dungeonEditorTools
import util
import math
from eve.common.script.util.eveCommonUtils import ComputeRadiusFromQuantity, ComputeQuantityFromRadius
import trinity
import geo2
def BatchStart():
if '/jessica' in blue.pyos.GetArg():
sm.StartService('BSD').TransactionStart()
def BatchEnd():
if '/jessica' in blue.pyos.GetArg():
sm.StartService('BSD').TransactionEnd()
def IsObjectLocked(objectID):
if '/jessica' in blue.pyos.GetArg():
import dungeon
dunObject = dungeon.Object.Get(objectID, _getDeleted=True)
if dunObject is None:
return (True, [])
return dunObject.IsLocked()
return sm.RemoteSvc('dungeon').IsObjectLocked(objectID)
def SetObjectPosition(objectID, x = None, y = None, z = None):
scenario = sm.StartService('scenario')
targetBall, slimItem = scenario.GetBallAndSlimItemFromObjectID(objectID)
if slimItem is None:
raise RuntimeError('No slim item?')
dX = 0
if x is not None:
dX = x - slimItem.dunX
slimItem.dunX = x
dY = 0
if y is not None:
dY = y - slimItem.dunY
slimItem.dunY = y
dZ = 0
if z is not None:
dZ = z - slimItem.dunZ
slimItem.dunZ = z
targetModel = getattr(targetBall, 'model', None)
if targetModel:
targetModel.translationCurve.x += dX
targetModel.translationCurve.y += dY
targetModel.translationCurve.z += dZ
scenario.UpdateUnsavedObjectChanges(slimItem.itemID, dungeonEditorTools.CHANGE_TRANSLATION)
def SetObjectRotation(objectID, yaw = None, pitch = None, roll = None):
scenario = sm.StartService('scenario')
targetBall, slimItem = scenario.GetBallAndSlimItemFromObjectID(objectID)
if slimItem is None:
raise RuntimeError('No slim item?')
targetModel = getattr(targetBall, 'model', None)
if not targetModel:
return
try:
mYaw, mPitch, mRoll = geo2.QuaternionRotationGetYawPitchRoll(targetModel.rotationCurve.value)
except:
mYaw, mPitch, mRoll = targetBall.yaw, targetBall.pitch, targetBall.roll
if yaw is None:
yaw = mYaw
if pitch is None:
pitch = mPitch
if roll is None:
roll = mRoll
targetBall.typeData['dunRotation'] = (yaw, pitch, roll)
targetBall.SetStaticRotation()
scenario.UpdateUnsavedObjectChanges(slimItem.itemID, dungeonEditorTools.CHANGE_ROTATION)
def SetObjectRadius(objectID, radius):
scenario = sm.StartService('scenario')
targetBall, slimItem = scenario.GetBallAndSlimItemFromObjectID(objectID)
if slimItem is None:
raise RuntimeError('No slim item?')
if slimItem.categoryID == const.categoryAsteroid or slimItem.groupID in (const.groupHarvestableCloud, const.groupCloud):
godma = sm.GetService('godma')
computedQuantity = ComputeQuantityFromRadius(slimItem.categoryID, slimItem.groupID, slimItem.typeID, radius, godma)
SetObjectQuantity(objectID, computedQuantity)
def SetObjectQuantity(objectID, quantity):
scenario = sm.StartService('scenario')
targetBall, slimItem = scenario.GetBallAndSlimItemFromObjectID(objectID)
if slimItem is None:
raise RuntimeError('No slim item?')
targetModel = getattr(targetBall, 'model', None)
if not targetModel:
return
if slimItem.categoryID == const.categoryAsteroid or slimItem.groupID in (const.groupHarvestableCloud, const.groupCloud):
godma = sm.GetService('godma')
computedRadius = ComputeRadiusFromQuantity(slimItem.categoryID, slimItem.groupID, slimItem.typeID, quantity, godma)
if hasattr(targetModel, 'modelScale'):
targetModel.modelScale = computedRadius
elif hasattr(targetModel, 'scaling'):
scaleVector = trinity.TriVector(computedRadius, computedRadius, computedRadius)
targetModel.scaling = scaleVector
else:
raise RuntimeError('Model has neither modelScale nor scaling')
slimItem.dunRadius = quantity
scenario.UpdateUnsavedObjectChanges(slimItem.itemID, dungeonEditorTools.CHANGE_SCALE)
else:
raise RuntimeError("Can't scale type %d" % slimItem.categoryID)
def SaveObjectPosition(objectID, x = None, y = None, z = None):
if '/jessica' in blue.pyos.GetArg():
import dungeon
dunObject = dungeon.Object.Get(objectID)
dunObject.SetPosition(x=x, y=y, z=z)
else:
sm.RemoteSvc('dungeon').EditObjectXYZ(objectID=objectID, x=x, y=y, z=z)
def SaveObjectRotation(objectID, yaw = None, pitch = None, roll = None):
if '/jessica' in blue.pyos.GetArg():
import dungeon
dunObject = dungeon.Object.Get(objectID)
dunObject.SetRotation(yaw=yaw, pitch=pitch, roll=roll)
else:
sm.RemoteSvc('dungeon').EditObjectYawPitchRoll(objectID=objectID, yaw=yaw, pitch=pitch, roll=roll)
def SaveObjectRadius(objectID, radius):
if '/jessica' in blue.pyos.GetArg():
import dungeon
dunObject = dungeon.Object.Get(objectID)
dunObject.SetRadius(radius)
else:
sm.RemoteSvc('dungeon').EditObjectRadius(objectID=objectID, radius=radius)
def CopyObject(objectID, roomID, offsetX = 0.0, offsetY = 0.0, offsetZ = 0.0):
if '/jessica' in blue.pyos.GetArg():
import dungeon
dunObject = dungeon.Object.Get(objectID)
newObjectID = dunObject.Copy(roomID, offsetX, offsetY, offsetZ).objectID
else:
newObjectID = sm.RemoteSvc('dungeon').CopyObject(objectID, roomID, offsetX, offsetY, offsetZ)
return newObjectID
def GetObjectPosition(objectID):
scenario = sm.StartService('scenario')
targetBall, slimItem = scenario.GetBallAndSlimItemFromObjectID(objectID)
if slimItem is None:
raise RuntimeError('No slim item?')
return (slimItem.dunX, slimItem.dunY, slimItem.dunZ)
def GetObjectRotation(objectID):
scenario = sm.StartService('scenario')
targetBall, slimItem = scenario.GetBallAndSlimItemFromObjectID(objectID)
if slimItem is None:
raise RuntimeError('No slim item?')
targetModel = getattr(targetBall, 'model', None)
if not targetModel or not targetModel.rotationCurve or not hasattr(targetModel.rotationCurve, 'value'):
return (None, None, None)
    # Note: yields yaw/pitch/roll in degrees (as a generator, unlike the tuple above)
    return (x * 180.0 / math.pi for x in geo2.QuaternionRotationGetYawPitchRoll(targetModel.rotationCurve.value))
def GetObjectQuantity(objectID):
scenario = sm.StartService('scenario')
targetBall, slimItem = scenario.GetBallAndSlimItemFromObjectID(objectID)
if slimItem is None:
raise RuntimeError('No slim item?')
targetModel = getattr(targetBall, 'model', None)
if not targetModel:
return
if hasattr(targetModel, 'scaling') or hasattr(targetModel, 'modelScale'):
if not getattr(slimItem, 'dunRadius', None):
slimItem.dunRadius = targetBall.radius
if slimItem.categoryID == const.categoryAsteroid:
return slimItem.dunRadius
if slimItem.groupID in (const.groupHarvestableCloud, const.groupCloud):
return slimItem.dunRadius
def GetObjectRadius(objectID):
scenario = sm.StartService('scenario')
targetBall, slimItem = scenario.GetBallAndSlimItemFromObjectID(objectID)
if slimItem is None:
raise RuntimeError('No slim item?')
targetModel = getattr(targetBall, 'model', None)
if not targetModel:
return
if hasattr(targetModel, 'scaling') or hasattr(targetModel, 'modelScale'):
godma = sm.GetService('godma')
if not getattr(slimItem, 'dunRadius', None):
slimItem.dunRadius = ComputeQuantityFromRadius(slimItem.categoryID, slimItem.groupID, slimItem.typeID, targetBall.radius, godma)
if slimItem.categoryID == const.categoryAsteroid:
return ComputeRadiusFromQuantity(slimItem.categoryID, slimItem.groupID, slimItem.typeID, slimItem.dunRadius, godma)
if slimItem.groupID in (const.groupHarvestableCloud, const.groupCloud):
return ComputeRadiusFromQuantity(slimItem.categoryID, slimItem.groupID, slimItem.typeID, slimItem.dunRadius, godma)
def CreateObject(roomID, typeID, objectName, x, y, z, yaw, pitch, roll, radius):
if '/jessica' in blue.pyos.GetArg():
import dungeon
newObject = dungeon.Object.Create(roomID=roomID, typeID=typeID, objectName=objectName, x=x, y=y, z=z, yaw=yaw, pitch=pitch, roll=roll, radius=radius)
newObjectID = newObject.objectID
else:
newObjectID, revisionID = sm.RemoteSvc('dungeon').AddObject(roomID, typeID, x, y, z, yaw, pitch, roll, radius)
if objectName:
sm.RemoteSvc('dungeon').EditObjectName(newObjectID, objectName)
return newObjectID
def DeleteObject(objectID):
if '/jessica' in blue.pyos.GetArg():
import dungeon
dungeon.Object.Get(objectID).Delete()
else:
sm.RemoteSvc('dungeon').RemoveObject(objectID)
import carbon.common.script.util.autoexport as autoexport
exports = autoexport.AutoExports('dungeonHelper', locals())
| [
"[email protected]"
] | |
dfc79ab2926282ff85f93564e287333570fd498b | b8ef1a5cd3856a8e9134c3313a4e23522f199df7 | /Programmers/84325_직업군 추천하기/84325_직업군 추천하기.py | 2d65ae9cecc92be4f0db236416f553f114f185dc | [] | no_license | scl2589/Algorithm_problem_solving | 910623d9675ae0219320abfd1fefc7d576027544 | 80db697cdd0180a7d4dbcfae4944d4a54191bddf | refs/heads/master | 2023-07-29T10:56:38.225206 | 2021-09-11T13:50:46 | 2021-09-11T13:50:46 | 235,363,353 | 0 | 0 | null | 2021-03-04T15:39:41 | 2020-01-21T14:36:41 | Python | UTF-8 | Python | false | false | 489 | py | def solution(table, languages, preference):
answer_list = []
for t in table:
lst = t.split(' ')
        # Reverse the five languages so that a language's index in `new`
        # equals its score (1..5); the job name stays at index 0
        new = [lst[0]]
        new.extend(lst[5:0:-1])
total = 0
for idx, language in enumerate(languages):
if language in new:
total += new.index(language) * preference[idx]
answer_list.append([total, new[0]])
answer_list = sorted(answer_list, key = lambda x : (-x[0], x[1]))
return answer_list[0][1] | [
"[email protected]"
] | |
9c887215d379b9c8833646b6b49a952b26fd42a0 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/DataQuality/DataQualityUtils/scripts/DeMoLib.py | 5efa69354aadd20a9db26ed338260551c407de18 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,586 | py | # Copyright (C) 2002-2018 CERN for the benefit of the ATLAS collaboration
# Author : Benjamin Trocme (LPSC - Grenoble) - 2017
# Auxiliary library used by DeMoUpdate, DeMoStatus and DeMoScan
##################################################################
from ROOT import THStack
from ROOT import TCanvas,TLegend
from ROOT import kYellow,kOrange,kRed,kBlue,kPink,kMagenta,kGreen,kSpring,kViolet,kAzure,kCyan,kTeal,kBlack
#import gb
from gb import MakeLegend,MakeTH1
########################################################################
# Return a string with the luminosity in a human readable way
# If the unit is %%, this is a percentage
def strLumi(lumi,unit="ub",latex = True,floatNumber = False):
if (unit == "%%"):
string0 = "%.2f%%"%(lumi)
else:
if (unit == "pb" or unit == "pb^{-1}" or unit == "pb-1"):
lumi = lumi*1e6
if lumi < 1e3:
if latex:
if (floatNumber):string0 = "%.2f #mub"%(lumi)
else:string0 = "%.0f #mub"%(lumi)
else:
if (floatNumber):string0 = "%.2f ub"%(lumi)
else:string0 = "%.0f ub"%(lumi)
elif lumi<1e6:
if (floatNumber):string0 = "%.2f nb"%(lumi/1e3)
else: string0 = "%.0f nb"%(lumi/1e3)
elif lumi<1e9:
if (floatNumber):string0 = "%.2f pb"%(lumi/1e6)
else:string0 = "%.0f pb"%(lumi/1e6)
else:
if (floatNumber):string0 = "%.3f fb"%(lumi/1e9)
else:string0 = "%.1f fb"%(lumi/1e9)
if latex:
string0= string0+"^{-1}"
else:
string0= string0+"-1"
return string0
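# Examples derived from the rules above (inputs in ub unless stated otherwise):
#   strLumi(500.,"ub")   -> '500 #mub^{-1}'
#   strLumi(2.5e9,"ub")  -> '2.5 fb^{-1}'
#   strLumi(42.,"%%")    -> '42.00%'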
########################################################################
def plotStack(name,histo,index,indexName,histoIntLumi,lumiBool,resStack,resCanvas,resLegend,recovBool = True,compBool = False,approvedPlots = False):
# name: Mainly an index of the output. Also used to define TAxis Title
# histo: dict of histograms or TProfile to be displayed
# index: list of keys of histo to be displayed
  # indexName: dict of names of index used for the TLegend
# histoIntLumi : integrated lumi with the same x binning as histo
  # lumiBool : display results in terms of lumi and not percent
# resStack, resCanvas, resLegend: dict of (stacks, canvas, legend) outputs
  # recovBool: display the recoverable histograms (referenced as <defect>__recov in histo)
  # compBool: this is a 2-yearTag plot. Write all numbers in the TLegend and not only the last bin (that is not meaningful with yearTag)
  # unit is the main unit; unitAux is the complementary one used only in the TLegend
if (lumiBool):
unit = "pb^{-1}"
unitAux = "%%"
else:
unit = "%%"
unitAux = "pb^{-1}"
nameSplitted = name.split("--") # Assume that the name is "Veto/defect (y axis) - Year/Run (x axis)- Dataset name"
xAxisTitle = nameSplitted[1]
if unit == "%%":
yAxisTitle = "Lost luminosity due to %s [%%]"%(nameSplitted[0])
else:
yAxisTitle = "Lost luminosity due to %s [%s]"%(nameSplitted[0],unit)
legendHeader = nameSplitted[2]
resCanvas[name] = TCanvas(name,"%s - %s"%(yAxisTitle,xAxisTitle),200, 10, 1150, 500)
resCanvas[name].SetLeftMargin(0.08)
resCanvas[name].SetRightMargin(0.28)
resCanvas[name].SetGridy(1)
resStack[name] = THStack("%s_stack"%name,"")
  resLegend[name] = MakeLegend(0.725,0.8,0.98,0.95) # Y1 will be redefined later according to the number of entries
first = True
totalIneff = 0.
totalIneff1 = 0.
nBinsX = histoIntLumi.GetNbinsX()
totalIntegratedLumi = histoIntLumi.GetBinContent(nBinsX)
if lumiBool:
auxScaleFactor = 100./totalIntegratedLumi
else:
auxScaleFactor = totalIntegratedLumi/100.
for iIndex in sorted(index,reverse=True):
if first: # Create a recoverable histograms just in case of
resStack["%s__recov"%name] = MakeTH1("h1_%s__recovTotal"%(name),"Recoverable","",-0.5,-0.5+nBinsX,nBinsX,1)
resStack["%s__recov"%name].SetMarkerStyle(23)
first = False
iIndexName = iIndex.split("_")[0]
# Fill histo["%s_toStack"] the main histo
# and histo["%s_aux"] the complementary one used only for TLegend
if (histo[iIndex]).IsA().InheritsFrom("TProfile"):
histo['%s_toStack'%iIndex] = histo[iIndex].ProjectionX()
histo['%s_toStack'%iIndex].SetFillColor(histo[iIndex].GetFillColor())
else:
histo['%s_toStack'%iIndex] = histo[iIndex]
if lumiBool:
histo['%s_toStack'%iIndex].Multiply(histo['%s_toStack'%iIndex],histoIntLumi,0.01)
resStack[name].Add(histo['%s_toStack'%iIndex])
entryNb = 0
for iIndex in sorted(index): # Reverse order to have the TLegend ordered as the stacks
iIndexName = iIndex.split("__")[0] # Trick needed to get the defect name for the recoverable defect histogram
baseEntry = "%s"%(strLumi(histo['%s_toStack'%iIndex].GetBinContent(nBinsX),unit))
auxEntry = "%s"%(strLumi(histo['%s_toStack'%iIndex].GetBinContent(nBinsX)*auxScaleFactor,unitAux))
if (recovBool and "%s__recov"%iIndex in histo.keys() and histo["%s__recov"%iIndex].GetBinContent(nBinsX) != 0.):
baseEntryRecov = "%s"%(strLumi(histo["%s__recov"%iIndex].GetBinContent(nBinsX),unit))
entry = "#splitline{%s}{%s(recov:%s) / %s}"%(indexName[iIndexName],baseEntry,baseEntryRecov,auxEntry) # Second part of Legend to fix
for iBin in range(nBinsX+1):
resStack["%s__recov"%name].Fill(iBin-1,histo["%s__recov"%iIndex].GetBinContent(iBin))
else:
entry = "#splitline{%s}{%s / %s}"%(indexName[iIndex],baseEntry,auxEntry)
if (compBool): # This is a >=2 yearTag histograms
if histo[iIndex].GetNbinsX() == 2:
entry = "#splitline{%s}{%s / %s }"%(indexName[iIndexName],strLumi(histo[iIndex].GetBinContent(1),unit),strLumi(histo[iIndex].GetBinContent(2),unit))
elif histo[iIndex].GetNbinsX() == 3:
entry = "#splitline{%s}{%s / %s / %s}"%(indexName[iIndexName],strLumi(histo[iIndex].GetBinContent(1),unit),strLumi(histo[iIndex].GetBinContent(2),unit),strLumi(histo[iIndex].GetBinContent(3),unit))
else:
entry = "%s"%(indexName[iIndexName])
if (histo[iIndex].GetMaximum() != 0):
resLegend[name].AddEntry(histo[iIndex],entry,"f")
entryNb = entryNb+1
totalIneff = totalIneff + histo['%s_toStack'%iIndex].GetBinContent(nBinsX)
if compBool:
totalIneff1 = totalIneff1 + histo['%s_toStack'%iIndex].GetBinContent(1)
mx = resStack[name].GetMaximum()*1.2
resStack[name].SetMaximum(mx)
resStack[name].Draw("hist")
resStack[name].GetXaxis().SetTitle("%s"%xAxisTitle)
resStack[name].GetYaxis().SetTitle("%s"%yAxisTitle)
resStack[name].GetYaxis().SetTitleOffset(0.7)
if resStack[name].GetMaximum()>10.:
resCanvas[name].SetLogy(1)
if compBool:
resLegend[name].SetHeader("#splitline{%s}{Total loss: %s / %s}"%(legendHeader,strLumi(totalIneff1,unit),strLumi(totalIneff,unit)))
else:
totalIneffAux = totalIneff*auxScaleFactor
if (approvedPlots):
resLegend[name].SetHeader("#splitline{%s}{Total loss: %s / %s}"%(legendHeader,strLumi(totalIneff,unit),strLumi(totalIneffAux,unitAux)))
else:
resLegend[name].SetHeader("#splitline{%s (%s)}{Total loss: %s / %s}"%(legendHeader,strLumi(totalIntegratedLumi,"pb"),strLumi(totalIneff,unit),strLumi(totalIneffAux,unitAux)))
if resStack["%s__recov"%name].GetEntries() != 0.:
resStack["%s__recov"%name].SetMarkerStyle(20)
resStack["%s__recov"%name].SetMarkerColor(kAzure+8)
resStack["%s__recov"%name].Draw("PSAME HIST")
resLegend[name].AddEntry(resStack["%s__recov"%name],"#splitline{Recoverable}{total: %.2f%%}"%(resStack["%s__recov"%name].GetBinContent(nBinsX)),"p")
entryNb = entryNb + 1
resLegend[name].SetY1(max(0.83-entryNb*0.12,0.05))
resLegend[name].Draw()
resCanvas[name].Update()
return totalIneff # totalIneff is used only with the savePage1 option in DeMoStatus
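# Hypothetical call sketch (all names illustrative; histo values are ROOT
# TH1/TProfile objects keyed by defect name, as prepared by DeMoStatus):
#   loss = plotStack("LAr--Run--2017 pp",histos,["SEVNOISEBURST"],
#                    {"SEVNOISEBURST":"noise burst"},hIntLumi,False,
#                    stackRes,canvasRes,legendRes)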
#########################################################################################
#########################################################################################
def initialize(system,yearTag,partitions,defects0,defectVeto,veto,signOff,year = "2017"):
# Description used in TLegend and TAxis
yearTag["description"] = {"Tier0_2015":"/Tier0",
"Reproc_2016":"/2016 reproc.",
"Tier0_2016":"/Tier0",
"Reproc_2017":"/2017 reproc.",
"Tier0_2017":"/Tier0",
"Reproc_2018":"/2017 reproc.",
"Tier0_2018":"/Tier0"}
# DB tag for the defect database - Common to all systems
yearTag["defect"] = {"Tier0_2015":"DetStatus-v73-pro19-08",
"Reproc_2016":"DetStatus-v75-repro20-01",
"Tier0_2016":"DetStatus-v84-pro20-16",
"Reproc_2017":"DetStatus-v89-pro21-01",
"Tier0_2017":"DetStatus-v97-pro21-13",
"Reproc_2018":"HEAD",
"Tier0_2018":"HEAD"}
# Condition tag for the veto database - defined later per system when relevant
yearTag["veto"] = {}
# Tags below are derived from the regular ones.
# They have the same defect/veto tags but have different options/GRL
# The defect/veto tags are derived from the ones defined in the standard tags given before "."
  # These tags are common to all systems but some additional ones may be defined later.
# This is why the defect/veto tags are defined after the system definitions
similarTags = {"Tier0_2015.onlDelivNorm":"/Tier0 (onl. deliv. lumi.)",
"Reproc_2016.onlDelivNorm":"/2016 reproc. (onl. deliv. lumi.)",
"Tier0_2016.onlDelivNorm":"/Tier0 (onl. deliv. lumi.)",
"Reproc_2017.onlDelivNorm":"/2017 reproc. (onl. deliv. lumi.)",
"Tier0_2017.onlDelivNorm":"/Tier0 (onl. deliv. lumi.)",
"Reproc_2018.onlDelivNorm":"/2018 reproc. (onl. deliv. lumi.)",
"Tier0_2018.onlDelivNorm":"/Tier0 (onl. deliv. lumi.)"}
yearTag["offlineLumiTag"] = {"preliminary":"OflPrefLumi-RUN2-UPD4-10","grl":"OflLumi-13TeV-001"}
#################################### NEWSYSTEM defects
### if system == "NEWSYSTEM":
### partitions["color"] = {}
### partitions["list"] = partitions["color"].keys()
###
### defects0["prefix"] = ["NEWSYSTEM"]
###    # Partition intolerable and tolerable defects - Order determines which defect takes precedence
### defects0["partIntol"] = []
### defects0["partTol"] = []
### # Global intolerable and tolerable defects
### defects0["globIntol"] = [""]
### defects0["globTol"] = []
###
### veto["all"] = [] # Veto name as defined in the COOL database
### veto["COOL"] = {} # Veto name as defined in the COOL database
###
### defectVeto["description"] = {"":""}
###
### signOff["EXPR."] = ["NEWSYSTEM_UNCHECKED"]
### signOff["BULK"] = ["NEWSYSTEM_BULK_UNCHECKED"]
### signOff["FINAL"] = []
#################################### Pixel defects
if system == "Pixel":
partitions["color"] = {'IBL':kYellow-9,'LAYER0':kYellow,'BARREL':kOrange,'ENDCAPC':kOrange-3,'ENDCAPA':kRed-3}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["PIXEL"]
    # Partition intolerable and tolerable defects - Order determines which defect takes precedence
defects0["partIntol"] = ["DISABLED",
"GT30pct_NOTREADY",
"READOUT_PROBLEM",
"HVSCAN","TIMING",
"STANDBY"]
defects0["partTol"] = []
# Global intolerable and tolerable defects
defects0["globIntol"] = []
defects0["globTol"] = []
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
defectVeto["description"] = {"DISABLED":"One layer disabled",
"GT30pct_NOTREADY":">30% modules in error",
"READOUT_PROBLEM":"Readout problem",
"HVSCAN":"HV scan",
"TIMING":"Timing scan",
"STANDBY":"Standby"}
signOff["EXPR."] = ["PIXEL_UNCHECKED"]
signOff["BULK"] = ["PIXEL_BULK_UNCHECKED"]
signOff["FINAL"] = []
################################# SCT defects
# https://twiki.cern.ch/twiki/bin/view/Atlas/SCTOfflineMonitoringShifts#List_of_Defects
if system == "SCT":
partitions["color"] = {}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["SCT"]
    # Partition intolerable and tolerable defects - Order determines which defect takes precedence
defects0["partIntol"] = []
defects0["partTol"] = []
# Global intolerable and tolerable defects
defects0["globIntol"] = ["GLOBAL_STANDBY",
"CRATE_OUT",
"ROD_OUT_MAJOR",
"PERIOD_ERR_MAJOR",
"GLOBAL_DESYNC",
"GLOBAL_RECONFIG",
"GLOBAL_UNKNOWN"]
defects0["globTol"] = ["MOD_OUT_GT40",
"MOD_ERR_GT40",
"MOD_NOISE_GT40",
"PERIOD_ERR_GT40",
"ROD_OUT",
"EFF_LT99",
"NOTNOMINAL_HV",
"NOTNOMINAL_THRESHOLD",
"NOTNOMINAL_TIMING",
"COOLINGLOOP_OUT_1"]
defectVeto["description"] = {"GLOBAL_STANDBY":"Standby (HV < 1.5kV)", # Intolerable defects
"CRATE_OUT":">=1 crate out",
"ROD_OUT_MAJOR":"Large inefficency (ROD)",
"PERIOD_ERR_MAJOR":"Large inefficiency (PERIOD)",
"GLOBAL_DESYNC":"Global desync",
"GLOBAL_RECONFIG":"Global reconfig",
"GLOBAL_UNKNOWN":"Unknown",
"MOD_OUT_GT40":"More than 40 modules excluded in DAQ in addition to the permanent disabled modules (37 modules as of June 2017)", # Tolerable defects
"MOD_ERR_GT40":"More than 40 modules with bytestream errors ",
"MOD_NOISE_GT40":"More than 40 noisy modules",
"PERIOD_ERR_GT40":"More than 80 links with errors in a short period of time (fine for the rest of the run), corresponding to about 40 modules",
"ROD_OUT":"One or more ROD(s) excluded from readout, however less than 5% region in eta-phi plane have masked link errors among more than two layers.",
"EFF_LT99":"Less than 99% efficiency for 1st BC and less than 98% efficiency for all bunches in one or more DQ regions",
"NOTNOMINAL_HV":"SCT neither at 150 V nor at 50 V",
"NOTNOMINAL_THRESHOLD":"SCT threshold not at 1 fC",
"NOTNOMINAL_TIMING":"Unusual timing settings, e.g. timing scan",
"COOLINGLOOP_OUT_1":"Loss of a single cooling loop"}
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
signOff["EXPR."] = ["SCT_UNCHECKED"]
signOff["BULK"] = ["SCT_BULK_UNCHECKED"]
signOff["FINAL"] = []
################################# TRT defects
# https://twiki.cern.ch/twiki/bin/view/Atlas/TRTDQDefects
if system == "TRT":
partitions["color"] = {}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["TRT"]
    # Partition intolerable and tolerable defects - Order determines which defect takes precedence
defects0["partIntol"] = []
defects0["partTol"] = []
# Global intolerable and tolerable defects
defects0["globIntol"] = ["BADCALIBRATION",
"BADSTRAWLIST",
"BADGAS",
"DAQPROBLEMS_OTHER",
"DESYNC",
"NODATA_06"]
defects0["globTol"] = ["BADCALIBRATION_MINOR",
"BADGAS_MINOR",
"BYTESTREAM_BITFLIPS",
"DAQPROBLEMS_OTHER_MINOR",
"NODATA_01",
"NONNOMINAL_HT",
"NONNOMINAL_LT"]
# Some defects may not exist in past years. Remove them to avoid crashes
# WARNING: this fix does not work with multiple year plot
defectVeto["description"] = {"BADCALIBRATION":"Bad calibration",
"BADSTRAWLIST":"Bad dead stram list",
"BADGAS":"Bad gas mixture",
"DAQPROBLEMS_OTHER":"DAQ problems",
"DESYNC":"Desynchronisation",
"NODATA_06":"Large part of TRT off"}
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
signOff["EXPR."] = ["TRT_UNCHECKED"]
signOff["BULK"] = ["TRT_BULK_UNCHECKED"]
signOff["FINAL"] = []
################################# LAr defects
# DB tag for the (veto) condition database
# So far, only LAr use event veto
# can be found with the twiki: https://twiki.cern.ch/twiki/bin/viewauth/AtlasComputing/CoolProdTags#Tags_for_RUN_2_Bulk_Data_Process
if system == "LAr":
yearTag["veto"] = {"Tier0_2015":"LARBadChannelsOflEventVeto-RUN2-UPD4-04",
"Reproc_2016":"LARBadChannelsOflEventVeto-RUN2-UPD4-04",
"Tier0_2016":"LARBadChannelsOflEventVeto-RUN2-UPD4-04",
"Reproc_2017":"LARBadChannelsOflEventVeto-RUN2-UPD4-06",
"Tier0_2017":"LARBadChannelsOflEventVeto-RUN2-UPD4-06",
"Reproc_2018":"LARBadChannelsOflEventVeto-RUN2-UPD4-08",
"Tier0_2018":"LARBadChannelsOflEventVeto-RUN2-UPD4-10"
}
# Additional similar tags specific to LAr
similarTags["Reproc_2018.roughVeto"]="/2018 Reproc. (rough veto)"
partitions["color"] = { 'EMBA':kYellow-9,'EMBC':kYellow,'EMECA':kOrange,'EMECC':kOrange-3,'HECA':kRed-3,'HECC':kRed+2,'FCALA':kBlue-3,'FCALC':kBlue+2}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["LAR","CALO_ONLINEDB"]
    # Partition intolerable and tolerable defects - Order determines which defect takes precedence
defects0["partIntol"] = ["HVTRIP","SEVNOISEBURST","SEVCOVERAGE","HVNONNOMINAL","SEVNOISYCHANNEL","SEVMISCALIB","SEVUNKNOWN"] # LAR Prefix - LAR_[PART]_[NAME]
defects0["partTol"] = ["COVERAGE","HVNONNOM_CORRECTED"]#["NOISEBURST"]
# Global intolerable and tolerable defects
defects0["globIntol"] = ["DATACORRUPT","RECOCORRUPT","SPECIALSTUDIES","BADTIMING", # LAR Prefix - LAR_[NAME]
"ONLINEDB_LOWMUCONFIG_IN_HIGHMU_EmergencyMeasures"] # CALO Prefix - CALO_[NAME]
defects0["globTol"] = []
# Some defects may not exist in past years. Remove them to avoid crashes
# WARNING: this fix does not work with multiple year plot
if ("2016" in year or "2015" in year):
defects0["globIntol"].remove("SPECIALSTUDIES")
defects0["globIntol"].remove("BADTIMING")
defectVeto["description"] = {"HVTRIP":"high voltage trip", # First per partition LAr defects
"NOISEBURST":"noise bursts (before veto)",
"HVNONNOMINAL":"HV non nominal",
"SEVNOISEBURST":"noise burst",
"SEVNOISYCHANNEL":"noisy channels",
"SEVCOVERAGE":"coverage",
"SEVMISCALIB":"global miscalibration",
"SEVUNKNOWN":"unknown reason",
"DATACORRUPT":"data corruption", # Then global LAr defects
"RECOCORRUPT":"corrupted reconstruction",
"SPECIALSTUDIES":"Special studies (on purpose)",
"BADTIMING":"Bad timing",
"COVERAGE":"coverage (tolerable)",
"ONLINEDB_LOWMUCONFIG_IN_HIGHMU_EmergencyMeasures":"trigger misconfiguration", # And the global CALO defects
"noiseBurst":"noise burst", # And finally the LAr veto
"miniNoiseBurst":"mini noise burst",
"corruption":"data corruption"}
veto["all"] = ["noiseBurst","miniNoiseBurst","corruption"] # Veto name as defined in the COOL database
veto["COOL"] = {"noiseBurst":"allNoise",
"miniNoiseBurst":"MNBNoise",
"corruption":"allCorruption"} # Veto name as defined in the COOL database
defectVeto["color"]= {"HVTRIP":kBlue-4, # First per partition LAr defects
"HVNONNOMINAL":kOrange-7,
"SEVNOISEBURST":kOrange,
"SEVNOISYCHANNEL":kRed+1,
"SEVCOVERAGE":kMagenta+2,
"SEVMISCALIB":kPink-3,
"SEVUNKNOWN":kYellow+1,
"DATACORRUPT":kGreen-2,# Then global LAr defects
"RECOCORRUPT":kSpring-6,
"SPECIALSTUDIES":kViolet-4,
"BADTIMING":kAzure-8,
"ONLINEDB_LOWMUCONFIG_IN_HIGHMU_EmergencyMeasures":kCyan+1, # And the global CALO defects
"noiseBurst":kBlue-2, # And finally the LAr veto
"miniNoiseBurst":kMagenta+1,
"corruption":kAzure+9} # Color only for intolerable defects and veto
signOff["EXPR."] = ["LAR_UNCHECKED"]
signOff["BULK"] = ["LAR_BULK_UNCHECKED"]
signOff["FINAL"] = ["LAR_UNCHECKED_FINAL"]
################################# Tile defects
# https://twiki.cern.ch/twiki/bin/view/Atlas/TileDQLeaderManual#Global_Tile_Defects
if system == "Tile":
partitions["color"] = { 'EBA':kYellow-9,'EBC':kYellow,'LBA':kOrange,'LBC':kOrange-3}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["TILE"]
    # Partition intolerable and tolerable defects - Order determines which defect takes precedence
defects0["partIntol"] = ["BAD_COVER",
"DAQ_PRB",
"DB_SEVERE",
"TIMING_SEVERE",
"UNSPECIFIED_SEVERE"]
defects0["partTol"] = ["DB_MINOR","TIMING_MINOR","TRIP","UNSPECIFIED_MINOR"]
# Global intolerable and tolerable defects
defects0["globIntol"] = ["LOWSTAT"]
defects0["globTol"] = ["TIMEJUMPS_UNDEFINED"]
# Some defects may not exist in past years. Remove them to avoid crashes
# WARNING: this fix does not work with multiple year plot
defectVeto["description"] = {"BAD_COVER":"Coverage",
"DAQ_PRB":"DAQ problem",
"DB_SEVERE":"DB issue",
"TIMING_SEVERE":"Timing issue",
"UNSPECIFIED_SEVERE":"Severe unspecified",
"LOWSTAT":"Low stats"}
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
signOff["EXPR."] = ["TILE_UNCHECKED"]
signOff["BULK"] = ["TILE_UNCHECKED"]
signOff["FINAL"] = []
#################################### MUON-CSC defects
# https://twiki.cern.ch/twiki/bin/view/Atlas/MuonMCPDefectList
if system == "CSC":
partitions["color"] = {"EA":kYellow-9,'EC':kRed-3}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["MS_CSC"]
    # Partition intolerable and tolerable defects - Order determines which defect takes precedence
defects0["partIntol"] = ["STANDBY_HV",
"PROBLEM",
"ROD_DISABLED"]
defects0["partTol"] = ["DISCONNECTED2","LATENCY_MINOR","THRESHOLD"]
# Global intolerable and tolerable defects
defects0["globIntol"] = []
defects0["globTol"] = []
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
defectVeto["description"] = {"STANDBY_HV":"Standby HV",
"PROBLEM":"Coverage loss > 10%%",
"ROD_DISABLED":">=1 ROD not readout"}
signOff["EXPR."] = ["MS_UNCHECKED"]
signOff["BULK"] = ["MS_BULK_UNCHECKED"]
signOff["FINAL"] = []
#################################### MUON-MDT defects
# https://twiki.cern.ch/twiki/bin/view/Atlas/MuonMCPDefectList
if system == "MDT":
partitions["color"] = {"EA":kYellow-9,'EC':kRed-3,'BA':kBlue-3,'BC':kOrange-3}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["MS_MDT"]
    # Partition intolerable and tolerable defects - Order determines which defect takes precedence
defects0["partIntol"] = ["STANDBY_HV",
"PROBLEM",
"ROD_PROBLEM_5orMore"]
defects0["partTol"] = ["ROD_PROBLEM_1","ROD_PROBLEM_2to4"]
# Global intolerable and tolerable defects
defects0["globIntol"] = []
defects0["globTol"] = []
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
defectVeto["description"] = {"STANDBY_HV":"Standby HV",
"PROBLEM":"Coverage loss > 10%%",
"ROD_PROBLEM_5orMore":">=5 RODs not readout"}
signOff["EXPR."] = ["MS_UNCHECKED"]
signOff["BULK"] = ["MS_BULK_UNCHECKED"]
signOff["FINAL"] = []
#################################### MUON-RPC defects
# https://twiki.cern.ch/twiki/bin/view/Atlas/MuonMCPDefectList
if system == "RPC":
partitions["color"] = {'BA':kBlue-3,'BC':kOrange-3}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["MS_RPC"]
    # Partition intolerable and tolerable defects - Order determines which defect is prominent
defects0["partIntol"] = ["STANDBY_HV",
"PROBLEM",
"PROBLEM_10to15percent",
"PROBLEM_MoreThan15percent",
"OutOfSync_3orMore",
"LowEfficiency_MoreThan10percent"]
defects0["partTol"] = ["LowEfficiency_5to10percent","OutOfSync_2","OutOfSync_1","PROBLEM_1","PROBLEM_5to10percent"]
# Global intolerable and tolerable defects
defects0["globIntol"] = []
defects0["globTol"] = []
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
defectVeto["description"] = {"STANDBY_HV":"Standby HV",
"PROBLEM":"Coverage loss > 10%%",
"PROBLEM_10to15percent":"Coverage loss > 10%%",
"PROBLEM_MoreThan15percent":"Coverage loss > 15%%",
"OutOfSync_3orMore":">3 Out of sync",
"LowEfficiency_MoreThan10percent":"Low efficiency > 10%%"}
signOff["EXPR."] = ["MS_UNCHECKED"]
signOff["BULK"] = ["MS_BULK_UNCHECKED"]
signOff["FINAL"] = []
#################################### MUON-TGC defects
# https://twiki.cern.ch/twiki/bin/view/Atlas/MuonMCPDefectList
if system == "TGC":
partitions["color"] = {"EA":kYellow-9,'EC':kRed-3}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["MS_TGC"]
    # Partition intolerable and tolerable defects - Order determines which defect is prominent
defects0["partIntol"] = ["STANDBY_HV",
"PROBLEM",
"ROD_PROBLEM-2orMore"]
defects0["partTol"] = ["PROBLEM_1"]
# Global intolerable and tolerable defects
defects0["globIntol"] = []
defects0["globTol"] = []
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
defectVeto["description"] = {"STANDBY_HV":"Standby HV",
"PROBLEM":"Coverage loss > 10%%",
"ROD_PROBLEM_2orMore":">=2 RODs not readout"}
signOff["EXPR."] = ["MS_UNCHECKED"]
signOff["BULK"] = ["MS_BULK_UNCHECKED"]
signOff["FINAL"] = []
#################################### MUON CP defects
# https://twiki.cern.ch/twiki/bin/view/Atlas/MuonMCPDefectList
if system == "MuonCP":
defects0["prefix"] = ["MCP"]
    # Partition intolerable and tolerable defects - Order determines which defect is prominent
defects0["partIntol"] = []
defects0["partTol"] = []
# Global intolerable and tolerable defects
defects0["globIntol"] = ["ALIGN_GEO",
"LOW_EFFICIENCY_MAJOR"]
defects0["globTol"] = ["CHI2_PROBLEM",
"ID_PROBLEM",
"LOW_EFFICIENCY_MINOR",
"MS_PROBLEM"]
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
defectVeto["description"] = {"ALIGN_GEO":"[MCP] Bad alignment/geometry)",
"LOW_EFFICIENCY_MAJOR":"[MCP] Low reconstruction efficiency" }
signOff["EXPR."] = ["MS_UNCHECKED"]
signOff["BULK"] = ["MS_BULK_UNCHECKED"]
signOff["FINAL"] = []
#################################### ID defects
if system == "IDGlobal":
partitions["color"] = {}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["ID"]
    # Partition intolerable and tolerable defects - Order determines which defect is prominent
defects0["partIntol"] = []
defects0["partTol"] = []
# Global intolerable and tolerable defects
defects0["globIntol"] = ["NOTRACKS","x_TRACKCOVERAGE_SEVERE","UNKNOWN","BS_RUNAVERAGE","BS_PARAMETERSTEP","BS_NOTNOMINAL"]
defects0["globTol"] = ["ALIGN_DEGRADED","LOWSTAT","x_TRACKCOVERAGE","VERTEXBUG"]
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
defectVeto["description"] = {"NOTRACKS":"No tracks",
"x_TRACKCOVERAGE_SEVERE":"> 10 %% coverage loss",
"UNKNOWN":"Unknown",
"BS_RUNAVERAGE":"Problematic BS determination",
"BS_PARAMETERSTEP":"Large changes in BS",
"BS_NOTNOMINAL":"Sizable modulation in d0 vs phi",
"ALIGN_DEGRADED":"Degarded alignment",
"LOWSTAT":"Low statistics",
"x_TRACKCOVERAGE":"Significant change in coverage, but not severe (between 5-10% coverage loss)",
"VERTEXBUG":"Problems in the determination of the primary vertex"}
signOff["EXPR."] = ["ID_UNCHECKED"]
signOff["BULK"] = ["ID_BULK_UNCHECKED"]
signOff["FINAL"] = []
################################# Jet/MET/EGamma/Tau/CaloGlobal defects
# https://twiki.cern.ch/twiki/bin/view/AtlasProtected/EgammaShifts
# https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/JetEtMissDataQuality2016#Jets_defects
# https://twiki.cern.ch/twiki/bin/view/Atlas/TauDataQualityMonitoringRun2#Frequent_problems_or_defects
if system == "CaloCP":
partitions["color"] = { 'BARREL':kYellow-9,'CRACK':kRed-3,'ENDCAP':kBlue-3, # EGamma partitions
'B':kYellow-9,'CR':kRed-3,'E':kBlue-3, # Tau partitions
'CALB':kYellow-9,'CALEA':kRed-3,'CALC':kBlue-3} # CaloGlobal partitions
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["JET","EGAMMA","MET","TAU","CALO_"]
    # Partition intolerable and tolerable defects - Order determines which defect is prominent
defects0["partIntol"] = ["LARNOISE","ENERGY", # EGAMMA Prefix - EGAMMA_[NAME]_[PART]
"CALO","TRK","KIN","ID", # TAU Prefix- TAU_[PART]_[NAME]
"TopoClusterNoiseSevere"] # CaloGlobal Prefix - CALO_[PART]_[NAME]
defects0["partTol"] = []
# Global intolerable and tolerable defects
defects0["globIntol"] = ["NOJETSREGION","SEVERE_HOTSPOT", # JET Prefix - JET_[NAME]
"BEAMSPOT",# EGAMMA Prefix - EGAMMA_[NAME]
"Ex_largeshift","Ey_largeshift","SumEt_largeshift"] # MET Prefix - MET_[NAME]
defects0["globTol"] = ["ETAPHI_SPIKES"]
defectVeto["description"] = {"NOJETSREGION":"[Jet] cold region",
"SEVERE_HOTSPOT":"[Jet] hot region",
"LARNOISE":"[EGamma] noise in LAr",
"ENERGY":"[EGamma] problem in energy",
"BEAMSPOT":"[EGamma] problem in beam spot",
"ETAPHI_SPIKES":"[Egamma] eta/phi spikes",
"Ex_largeshift":"[MEt] Ex large shift",
"Ey_largeshift":"[MEt] EY large shift",
"SumEt_largeshift":"[MEt] SumEt large shift",
"CALO":"[Tau] calo problem",
"TRK":"[Tau] track problem",
"KIN":"[Tau] kinetic problem",
"ID":"[Tau] identification problem",
"TopoClusterNoiseSevere":"[TopoCluster] hot spot"}
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
signOff["EXPR."] = ['CALO_UNCHECKED','EGAMMA_UNCHECKED','JET_UNCHECKED','MET_UNCHECKED','TAU_UNCHECKED']
signOff["BULK"] = ['CALO_BULK_UNCHECKED','EGAMMA_BULK_UNCHECKED','JET_BULK_UNCHECKED','MET_BULK_UNCHECKED','TAU_BULK_UNCHECKED']
signOff["FINAL"] = []
################################# BTAG defects
# https://twiki.cern.ch/twiki/bin/view/Atlas/FlavourTaggingDataQualityMonitoringShifterInstructions#Run_signoff
if system == "BTag":
partitions["color"] = { } # No partition needed
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["BTAG"]
    # Partition intolerable and tolerable defects - Order determines which defect is prominent
defects0["partIntol"] = [] # No partition defect
defects0["partTol"] = []
# Global intolerable and tolerable defects
defects0["globIntol"] = ["BLAYER_SERIOUS_PROBLEM",
"BTAG_SCT_SERIOUS_PROBLEM",
"BTAG_TRT_SERIOUS_PROBLEM",
"BTAG_JET_SEVHOTSPOT"
]
defects0["globTol"] = ["BEAMSPOT_SHIFT",
"BTAG_BLAYER_PROBLEM",
"BTAG_SCT_PROBLEM",
"BTAG_TRT_PROBLEM",
"NOJETS"]
defectVeto["description"] = {"BLAYER_SERIOUS_PROBLEM":"B layer problem",
"BTAG_SCT_SERIOUS_PROBLEM":"SCT problem",
"BTAG_TRT_SERIOUS_PROBLEM":"TRT problem",
"BTAG_JET_SEVHOTSPOT":"Jet hot spot",
"BEAMSPOT_SHIFT":"Beamspot shift",
"BTAG_NOJETS":"No jets in monitoring"}
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
signOff["EXPR."] = ['BTAG_UNCHECKED']
signOff["BULK"] = ['BTAG_BULK_UNCHECKED']
signOff["FINAL"] = []
#################################### TRIG_L1 defects
# https://twiki.cern.ch/twiki/bin/view/Atlas/DataQualityTriggerDefects
if system == "Trig_L1":
partitions["color"] = {}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["TRIG_L1"]
    # Partition intolerable and tolerable defects - Order determines which defect is prominent
defects0["partIntol"] = []
defects0["partTol"] = []
# Global intolerable and tolerable defects
defects0["globIntol"] = ["CAL_coverage","CAL_misconf_calib","CAL_misconf_electronics","CAL_misconf_satBCID","CAL_off","MUB_busy","MUB_coverage","MUB_failed_electronics","MUB_lost_sync","MUB_misconf_electronics","MUE_busy","MUE_coverage","MUE_misconf_electronics","MUE_pt15GeV","MUE_pt20GeV","CTP_CTP_MuCTPI_bcid","CTP_CTP_ROD_bcid","CTP_CTPsim","CTP_TAPnoTBP","CTP_TAVnoTAP","CTP_UNKNOWN","CTP_bcid","CTP_bcidrange","CTP_candnumber","CTP_clock","CTP_counter","CTP_lumiblockrange","CTP_lumiblocktime","CTP_multpt","CTP_nanosectime","CTP_prescale_error","CTP_roiCand","CTP_roiNum","CTP_wrong_BGK","CTP_CTPIN_MU","CTP_CTPIN_JET2","TOPO_inputs","TOPO_outputs","TOPO_misconf_calib","TOPO_misconf_electronics","TOPO_off","TOPO_readout"]
defects0["globTol"] = ["CAL_coverage_tolerable","CAL_misconf_calib_tolerable","CAL_misconf_electronics_tolerable","CAL_misconf_satBCID_tolerable","CAL_misconf_tile_drawer","CAL_readout_cpjep_tolerable","CAL_readout_pp_tolerable","CAL_mistimed_larTBB_SEU_tolerable","CTP_NONSTANDARD_CONFIG","MUB_lost_sync_tolerable","MUB_failed_electronics_tolerable","MUB_coverage_tolerable","MUB_misconf_electronics_tolerable","MUB_LOWER_EFFICIENCY_TOLERABLE","MUE_coverage_tolerable","MUE_pt15GeV_tolerable","MUE_pt20GeV_tolerable","MUE_FakeBurst","TOPO_inputs_tolerable","TOPO_outputs_tolerable","TOPO_misconf_calib_tolerable","TOPO_misconf_electronics_tolerable","TOPO_readout_tolerable","TOPO_readout_roib","TOPO_not_good_for_physics"]
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
defectVeto["description"] = {"CAL_coverage":"",
"CAL_misconf_calib":"",
"CAL_misconf_electronics":"",
"CAL_misconf_satBCID":"",
"CAL_off":"",
"CAL_mistimed_larTBB_SEU_tolerable":"LAr TBB SEU leading to mistiming",
"MUB_busy":"MUB - busy",
"MUB_coverage":"MUB - coverage",
"MUB_failed_electronics":"MUB - electronics failure",
"MUB_lost_sync":"MUB - lost sync",
"MUB_misconf_electronics":"MUB - electronics misconfig",
"MUE_busy":"MUE - busy",
"MUE_coverage":"MUE - coverage",
"MUE_misconf_electronics":"MUB - electronics misconfig",
"MUE_pt15GeV":"MUE - PT15GeV",
"MUE_pt20GeV":"MUE - PT20GeV"}
signOff["EXPR."] = ["TRIG_L1_CAL_UNCHECKED","TRIG_L1_CTP_UNCHECKED","TRIG_L1_MUB_UNCHECKED","TRIG_L1_MUE_UNCHECKED","TRIG_L1_TOPO_UNCHECKED"]
signOff["BULK"] = []
signOff["FINAL"] = []
#################################### Trig_HLT defects
if system == "Trig_HLT":
partitions["color"] = {}
partitions["list"] = partitions["color"].keys()
defects0["prefix"] = ["TRIG_HLT"]
    # Partition intolerable and tolerable defects - Order determines which defect is prominent
defects0["partIntol"] = []
defects0["partTol"] = []
# Global intolerable and tolerable defects
defects0["globIntol"] = ["BJT_beam_spot_flag","BJT_no_secvtx","BJT_no_tracking","BJT_INACCURATE_ONLINE_BEAMSPOT","BJT_ONLINE_BEAMSPOT_GT1p6MM","BJT_ONLINE_BEAMSPOT_GT2MM","BJT_NO_MULTIBJET","BPH_no_muon","BPH_no_tracking","CAL_LAR_SourceMajor","CAL_TILE_SourceMajor","CAL_missing_data","ELE_no_clustering","ELE_no_tracking","ELE_primary_chain_misconfigured","ELE_unknown","ELE_tracking_issue","GAM_no_clustering","GAM_partial_clustering","GAM_primary_chain_misconfigured","GAM_unknown","GENERAL_debugstream","GENERAL_no_primaries","GENERAL_prescale_problem","GENERAL_standby","GENERAL_xpu_misconf","IDT_EF_FAIL","IDT_IDS_FAIL","IDT_SIT_FAIL","IDT_PRIVX_INEFF","JET_algo_problem","JET_menu_misconf","JET_unknown","MBI_no_tracking","MET_missing_data","MUO_Upstream_Barrel_problem","MUO_Upstream_Endcap_problem","TAU_misconf","TAU_caloIssue","TAU_nocalo","TAU_no_tracking","TRG_HLT_TAU_tracking_issue","TAU_dbIssue_BeamSpot"]
defects0["globTol"] = ["BJT_partial_tracking","BJT_unknown","BPH_algcrash","BPH_misconf","BPH_partial_muon","BPH_partial_tracking","BPH_unknown","CAL_LAR_SourceMinor","CAL_ROI_EXCESS","CAL_TILE_SourceMinor","CAL_partial_missing_data","CAL_spike","CAL_incorrect_BCID_correction","CAL_unknown","ELE_chain_misconfigured","ELE_clustering_issue","ELE_lowEfficiency_all_electrons","ELE_non_primary_poor_performance_e15_HLTtighter","ELE_non_primary_poor_performance_e15_tight","ELE_nonprimary_misconfigured","ELE_partial_clustering","ELE_partial_tracking","ELE_primary_poor_performance_e20_medium1","ELE_primary_poor_performance_e22_medium1","ELE_tracking_issue_Tolerable","GAM_chain_misconfigured","GAM_clustering_issue","GAM_nonprimary_misconfigured","GENERAL_streaming","GENERAL_tolerableDebugstream","GENERAL_no_1e34_primaries","GENERAL_no_12e33_primaries","GENERAL_no_15e33_primaries","GENERAL_no_17e33_primaries","IDT_BSPOT_FAILUR","IDT_BSPOT_INVALID_STATUS","IDT_BSPOT_INVALIDATOR_PROBLEM","IDT_EFT_FAIL","IDT_LOWSTAT","IDT_SCT_OUTOFTIMEHITS","IDT_TRT_DATA_LOST","IDT_TRT_OUTOFTIMEHITS","IDT_TSF_FAIL","IDT_unknown","JET_calib_issue","JET_energy_excess","JET_GSC_BEAMSPOT_PROBLEM","JET_hotspot","JET_partialscan_issue","MBI_HI_time_shift_mbts","MBI_partial_tracking","MBI_unknown","MBI_spacepoint_noise","MET_XS_Triggers_OFF","MET_missingEt_spike","MET_partial_missing_data","MET_phi_spike","MET_sumEt_spike","MET_unknown","MUO_EFMSonly_problem","MUO_Fullscan_problem","MUO_L2Iso_problem","MUO_L2muonSA_problem","MUO_MSonly_Barrel_problem","MUO_MSonly_Endcapl_problem","MUO_MuComb_problem","MUO_MuGirl_problem","MUO_Multi_Muon_problemchains","MUO_MuonEFTrackIso_problem","MUO_MuonEF_problem","MUO_Slow_problem","MUO_unknown","MUO_chain_misconfigured","TAU_unknown","TAU_dbIssue_mu","TAU_tracking_issue_Tolerable"]
veto["all"] = [] # Veto name as defined in the COOL database
veto["COOL"] = {} # Veto name as defined in the COOL database
defectVeto["description"] = {"":""}
signOff["EXPR."] = ["TRIG_HLT_BJT_UNCHECKED","TRIG_HLT_BPH_UNCHECKED","TRIG_HLT_CAL_UNCHECKED","TRIG_HLT_ELE_UNCHECKED","TRIG_HLT_GAM_UNCHECKED","TRIG_HLT_IDT_UNCHECKED","TRIG_HLT_JET_UNCHECKED","TRIG_HLT_MBI_UNCHECKED","TRIG_HLT_MET_UNCHECKED","TRIG_HLT_MUO_UNCHECKED","TRIG_HLT_TAU_UNCHECKED"]
signOff["BULK"] = []
signOff["FINAL"] = []
#########################################################################################
################ Definitions common to all systems
defects0["part"] = defects0["partIntol"] + defects0["partTol"]
defects0["glob"] = defects0["globIntol"] + defects0["globTol"]
defects0["intol"] = defects0["globIntol"] + defects0["partIntol"]
defects0["tol"] = defects0["globTol"] + defects0["partTol"]
defects0["partIntol_recov"] = []
defects0["globIntol_recov"] = []
for idef in defects0["partIntol"]: # Create a duplicated list of intol defect to monitor the recoverability
defects0["partIntol_recov"].append("%s__recov"%idef)
for idef in defects0["globIntol"]: # Create a duplicated list of intol defect to monitor the recoverability
defects0["globIntol_recov"].append("%s__recov"%idef)
defects0["intol_recov"] = defects0["partIntol_recov"] + defects0["globIntol_recov"]
# If the description is not available, define it with the defect name
for iDef in defects0["intol"]+defects0["tol"]:
if iDef not in defectVeto["description"].keys():
defectVeto["description"][iDef] = iDef
# Define color if not yet done
if not (defectVeto.has_key("color")):
colors = [kBlue-4,kOrange-7,kTeal+1,kRed+1,kMagenta+2,kPink-3,kYellow+1,kGreen-2,kSpring-6,kViolet-4,kAzure-8,kCyan+1,
kBlue-2,kOrange+1,kTeal+7,kRed+3,kMagenta-2,kPink+1,kYellow-1,kGreen+4,kSpring-2,kViolet+1,kAzure-2,kCyan-5,
kBlue+2,kOrange+5,kTeal-4,kRed-5,kMagenta-6,kPink+6,kYellow-5,kGreen-6,kSpring+4,kViolet+6,kAzure+4,kCyan+4,]
defectVeto["color"] = {}
for iColor,iDefectVeto in enumerate(defectVeto["description"].keys()):
if (iColor<len(colors)):
defectVeto["color"][iDefectVeto] = colors[iColor]
else:
defectVeto["color"][iDefectVeto] = kBlack
# Similar tags
for iSimilar in similarTags.keys():
baseTag = iSimilar.split(".")[0]
yearTag["description"][iSimilar] = similarTags[iSimilar]
yearTag["defect"][iSimilar] = yearTag["defect"][baseTag]
if (yearTag["veto"].has_key(baseTag)):
yearTag["veto"][iSimilar] = yearTag["veto"][baseTag]
return True
| [
"[email protected]"
] | |
7c83619bbc76cacaee8fce5c49099d93ca880d70 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5631572862566400_0/Python/icedingo/bff.py | 6f6a5da5564dfe239f4a6b0dd80a606d45e35cb2 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,118 | py | T = int(input())
class Pair(object):
def __init__(self, a, b, ap, bp):
self.a = a
self.b = b
self.ap = ap
self.bp = bp
def get_longest(self, pairs, rev, seen):
#print('Getting longest for', self)
alen = 0
for p in self.ap:
if p == self.b:
continue
seena = set()
nex = [p]
_alen = 0
while nex:
_alen += 1
_nex = []
for n in nex:
seena.add(n)
for c in rev[n]:
if c not in seena:
_nex.append(c)
nex = _nex
if _alen > alen:
alen = _alen
blen = 0
for p in self.bp:
if p == self.a:
continue
seenb = set()
nex = [p]
_blen = 0
while nex:
_blen += 1
_nex = []
for n in nex:
seenb.add(n)
for c in rev[n]:
if c not in seenb:
_nex.append(c)
nex = _nex
if _blen > blen:
blen = _blen
#print(' A chain', alen)
#print(' B chain', blen)
seen.add(self)
submax = 0
for p in pairs:
#print(' Checking', p)
if p in seen:
#print(' -- NAH')
continue
_submax = p.get_longest(pairs, rev, seen)
if _submax > submax:
submax = _submax
seen.remove(self)
#print('ret!')
if seen:
return 2 + max(submax, alen, blen)
else:
return 2 + max(submax + alen, submax + blen, alen + blen)
def __str__(self):
return 'Pair<{}, {}>'.format(self.a, self.b)
for t in range(1, T+1):
N = int(input())
bffs = [int(i) - 1 for i in input().split()]
rev = [[] for i in range(N)]
for c, b in enumerate(bffs):
rev[b].append(c)
pairs = set()
pairtuples = set()
max_len = 0
#print(bffs)
for n in range(N):
current = bffs[n]
seen = set()
while current not in seen:
seen.add(current)
#print(current, 'bff of', end = ' ')
current = bffs[current]
#print(current)
if n not in seen:
#print(n, 'not in cycle :(')
continue
lseen = len(seen)
if lseen == 2:
#print(seen, 'are a pair!')
ptuple = tuple(sorted(seen))
if ptuple not in pairtuples:
a = seen.pop()
b = seen.pop()
pairs.add(Pair(a, b, rev[a], rev[b]))
pairtuples.add(ptuple)
if lseen > max_len:
#print('new circle!', seen)
max_len = lseen
for p in pairs:
plen = p.get_longest(pairs, rev, set())
if plen > max_len:
max_len = plen
print('Case #{}: {}'.format(t, max_len))
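# Added commentary: the answer is the larger of (a) the longest simple BFF
# cycle, found by following bffs from each starting kid, and (b) for mutual
# pairs (2-cycles), the pair itself plus the longest inbound chains feeding
# each member -- which is what Pair.get_longest accumulates, also combining
# several disjoint pairs via its recursive scan over the other pairs.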
| [
"[email protected]"
] | |
32a13a3c4ef90bca8e60735e77b5dcd0de843596 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /homeassistant/components/plex/services.py | 0847583635de398594b64c383690af167bf32786 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 6,634 | py | """Services for the Plex integration."""
import json
import logging
from plexapi.exceptions import NotFound
import voluptuous as vol
from yarl import URL
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
DOMAIN,
PLEX_UPDATE_PLATFORMS_SIGNAL,
PLEX_URI_SCHEME,
SERVERS,
SERVICE_REFRESH_LIBRARY,
SERVICE_SCAN_CLIENTS,
)
from .errors import MediaNotFound
from .models import PlexMediaSearchResult
REFRESH_LIBRARY_SCHEMA = vol.Schema(
{vol.Optional("server_name"): str, vol.Required("library_name"): str}
)
_LOGGER = logging.getLogger(__package__)
async def async_setup_services(hass):
"""Set up services for the Plex component."""
async def async_refresh_library_service(service_call: ServiceCall) -> None:
await hass.async_add_executor_job(refresh_library, hass, service_call)
async def async_scan_clients_service(_: ServiceCall) -> None:
_LOGGER.warning(
"This service is deprecated in favor of the scan_clients button entity. "
"Service calls will still work for now but the service will be removed in a future release"
)
for server_id in hass.data[DOMAIN][SERVERS]:
async_dispatcher_send(hass, PLEX_UPDATE_PLATFORMS_SIGNAL.format(server_id))
hass.services.async_register(
DOMAIN,
SERVICE_REFRESH_LIBRARY,
async_refresh_library_service,
schema=REFRESH_LIBRARY_SCHEMA,
)
hass.services.async_register(
DOMAIN, SERVICE_SCAN_CLIENTS, async_scan_clients_service
)
return True
def refresh_library(hass: HomeAssistant, service_call: ServiceCall) -> None:
"""Scan a Plex library for new and updated media."""
plex_server_name = service_call.data.get("server_name")
library_name = service_call.data["library_name"]
plex_server = get_plex_server(hass, plex_server_name)
try:
library = plex_server.library.section(title=library_name)
except NotFound:
_LOGGER.error(
"Library with name '%s' not found in %s",
library_name,
[x.title for x in plex_server.library.sections()],
)
return
_LOGGER.debug("Scanning %s for new and updated media", library_name)
library.update()
def get_plex_server(hass, plex_server_name=None, plex_server_id=None):
"""Retrieve a configured Plex server by name."""
if DOMAIN not in hass.data:
raise HomeAssistantError("Plex integration not configured")
plex_servers = hass.data[DOMAIN][SERVERS].values()
if not plex_servers:
raise HomeAssistantError("No Plex servers available")
if plex_server_id:
return hass.data[DOMAIN][SERVERS][plex_server_id]
if plex_server_name:
plex_server = next(
(x for x in plex_servers if x.friendly_name == plex_server_name), None
)
if plex_server is not None:
return plex_server
friendly_names = [x.friendly_name for x in plex_servers]
raise HomeAssistantError(
f"Requested Plex server '{plex_server_name}' not found in {friendly_names}"
)
if len(plex_servers) == 1:
return next(iter(plex_servers))
friendly_names = [x.friendly_name for x in plex_servers]
raise HomeAssistantError(
f"Multiple Plex servers configured, choose with 'plex_server' key: {friendly_names}"
)
def process_plex_payload(
hass, content_type, content_id, default_plex_server=None, supports_playqueues=True
) -> PlexMediaSearchResult:
"""Look up Plex media using media_player.play_media service payloads."""
plex_server = default_plex_server
extra_params = {}
if content_id.startswith(PLEX_URI_SCHEME + "{"):
# Handle the special payload of 'plex://{<json>}'
content_id = content_id[len(PLEX_URI_SCHEME) :]
content = json.loads(content_id)
elif content_id.startswith(PLEX_URI_SCHEME):
# Handle standard media_browser payloads
plex_url = URL(content_id)
if plex_url.name:
if len(plex_url.parts) == 2:
if plex_url.name == "search":
content = {}
else:
content = int(plex_url.name)
else:
# For "special" items like radio stations
content = plex_url.path
server_id = plex_url.host
plex_server = get_plex_server(hass, plex_server_id=server_id)
else:
# Handle legacy payloads without server_id in URL host position
if plex_url.host == "search":
content = {}
else:
content = int(plex_url.host) # type: ignore[arg-type]
extra_params = dict(plex_url.query)
else:
content = json.loads(content_id)
if isinstance(content, dict):
if plex_server_name := content.pop("plex_server", None):
plex_server = get_plex_server(hass, plex_server_name)
if not plex_server:
plex_server = get_plex_server(hass)
if content_type == "station":
if not supports_playqueues:
raise HomeAssistantError("Plex stations are not supported on this device")
playqueue = plex_server.create_station_playqueue(content)
return PlexMediaSearchResult(playqueue)
if isinstance(content, int):
content = {"plex_key": content}
content_type = DOMAIN
content.update(extra_params)
if playqueue_id := content.pop("playqueue_id", None):
if not supports_playqueues:
raise HomeAssistantError("Plex playqueues are not supported on this device")
try:
playqueue = plex_server.get_playqueue(playqueue_id)
except NotFound as err:
raise MediaNotFound(
f"PlayQueue '{playqueue_id}' could not be found"
) from err
return PlexMediaSearchResult(playqueue, content)
search_query = content.copy()
shuffle = search_query.pop("shuffle", 0)
# Remove internal kwargs before passing copy to plexapi
for internal_key in ("resume", "offset"):
search_query.pop(internal_key, None)
media = plex_server.lookup_media(content_type, **search_query)
if supports_playqueues and (isinstance(media, list) or shuffle):
playqueue = plex_server.create_playqueue(
media, includeRelated=0, shuffle=shuffle
)
return PlexMediaSearchResult(playqueue, content)
return PlexMediaSearchResult(media, content)
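# Illustrative content_id forms handled above (the server id and media keys
# below are made-up examples, not real identifiers):
#   plex://{"library_name": "Movies", "title": "Example Movie"}   -> JSON payload
#   plex://abc123def456/57   -> server id in the host position, media key in the path
#   plex://57                -> legacy payload, media key only
#   {"plex_key": 57}         -> plain JSON string, no URI scheme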
| [
"[email protected]"
] | |
061696e1fd6d7402e0460b8c2bd8cc4d09085cb6 | cce6364dd85b62782671cd8048873eede2045137 | /high/2_mergeKLists.py | 152a70aed8a7ea6300262b0c85c3284a60f19832 | [] | no_license | gmt710/leetcode_python | ed647958440f66583b8717dae7bca49c516984da | 441623afee3713506b702c5fd462c7ba84b48442 | refs/heads/master | 2020-03-28T05:11:02.851792 | 2019-04-17T09:14:51 | 2019-04-17T09:14:51 | 147,761,046 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# Notes on heapq usage
# a is a plain list
# - heapq.heapify(a) rearranges a in place so that it satisfies the min-heap property
# - heapq.heappop(a) pops the smallest element off the min-heap
# - heapq.heappush(a, b) pushes the new element b onto the min-heap
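# Quick illustrative example (values are arbitrary):
#   h = [3, 1, 2]; heapq.heapify(h)  ->  h[0] == 1
#   heapq.heappush(h, 0)             ->  h[0] == 0
#   heapq.heappop(h)                 ->  returns 0, and h[0] == 1 again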
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
# https://blog.csdn.net/iyuanshuo/article/details/79600011
        # Idea: first take the head element of each list -- i.e. the smallest
        # element of each list, since every list is already sorted -- and put
        # these K nodes into a heap maintained as a min-heap. Each round, move
        # the top of the heap (the overall smallest element) into the result,
        # then push the next element of the list it came from into the heap,
        # keeping the min-heap property. Once every element has been consumed,
        # the result linked list holds all elements in ascending order.
        import heapq
        # The heap stores (value, tie-breaker, node) tuples; the tie-breaker
        # index keeps equal values from falling through to a ListNode
        # comparison, which would raise a TypeError on Python 3.
        heap = []
        for i, ln in enumerate(lists):
            if ln:
                # push the head of each of the K sorted lists,
                # i.e. the smallest element of that list
                heap.append((ln.val, i, ln))
        dummy = ListNode(0)
        cur = dummy
        # rearrange heap so that it satisfies the min-heap property
        heapq.heapify(heap)
        while heap:
            # pop the smallest value in the heap together with its node
            valu, i, ln_index = heapq.heappop(heap)
            cur.next = ln_index
            cur = cur.next
            # if the node just moved to the result has a successor,
            # push that successor (value and node) into the heap
            if ln_index.next:
                heapq.heappush(heap, (ln_index.next.val, i, ln_index.next))
        return dummy.next
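# --- Minimal usage sketch (illustrative addition, not part of the original
# solution). LeetCode normally supplies ListNode, so it is re-declared here
# only to make the sketch self-contained. ---
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None
def _build(values):
    # helper: turn a plain Python list into a linked list
    dummy = ListNode(0)
    cur = dummy
    for v in values:
        cur.next = ListNode(v)
        cur = cur.next
    return dummy.next
if __name__ == '__main__':
    merged = Solution().mergeKLists([_build([1, 4, 5]), _build([1, 3, 4]), _build([2, 6])])
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    print(out)  # expected: [1, 1, 2, 3, 4, 4, 5, 6]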
| [
"[email protected]"
] | |
aa712c04989960f93bbe5288b6a2119889c460a7 | 86e904c75d0140eea3e4169d216955e1c34801b3 | /python_test2/cloud/DB_update.py | ef46c6c42d6fecb4b51755d7aa89d1ae39675299 | [] | no_license | reharmony/cloudpython | d62f61749e5b5862d3b81e449d5154e188a14d21 | 98e033e537d763ba86d162f58d0fe8f64249a291 | refs/heads/master | 2020-04-29T16:58:55.281917 | 2019-05-15T12:11:43 | 2019-05-15T12:11:43 | 176,281,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | '''
Created on 2019. 4. 30.
@author: user
'''
import pymysql
from cloud.UI_Main import *
# Update the course-enrollment info in the course DB.
# Minimal usage sketch (illustrative values; the course id and price are
# hypothetical): db_process_update('course01', 45000)
def db_process_update(id, price):
    # 1. Authenticate with the DB -> connect
    con = pymysql.connect(host='localhost', user='root', password='1234', db='course')
    print("1. DB authentication -> connection succeeded...")
    print(con)
    # 2. Connection object -> cursor (the communication channel)
    cur = con.cursor()
    print()
    print("2. Connection -> cursor created successfully...")
    # 3. Build the SQL statement -> send it
    # NOTE: building SQL by string formatting/concatenation is injection-prone;
    # a parameterized query, e.g. cur.execute(sql, params), is preferable.
    sql = "update course_info set price=%d where id='" % (int(price)) + id + "'"
    cur.execute(sql)
    con.commit()
    print()
    print("3. SQL statement built -> sent successfully...")
    # 4. Close the DB connection
    con.close()
    print()
    print("4. DB connection closed successfully...")
    print("===============================")
    print() | [
"[email protected]"
] | |
4a4d2f3d7a0ca5f6cf4ad7b7d159248b6bcc7b05 | 5a7abc4537039860c49e9a80219efa759aad1b6f | /tests/providers/aws/services/shield/shield_advanced_protection_in_classic_load_balancers/shield_advanced_protection_in_classic_load_balancers_test.py | fbb53920a21e04c33a84a1413341c1b6706f8a7c | [
"Apache-2.0"
] | permissive | sec-js/prowler | d5a06c72f5d7e490bade1167966f83f7a5d7ed15 | f72be9a1e492ad593c9ac267d3ca07f626263ccd | refs/heads/master | 2023-08-31T22:48:33.983360 | 2022-12-22T16:02:28 | 2022-12-22T16:02:28 | 243,866,744 | 0 | 0 | Apache-2.0 | 2022-12-23T12:23:20 | 2020-02-28T22:37:02 | Python | UTF-8 | Python | false | false | 9,601 | py | from unittest import mock
from boto3 import client, resource, session
from moto import mock_ec2, mock_elb
from moto.core import DEFAULT_ACCOUNT_ID
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
from prowler.providers.aws.services.shield.shield_service import Protection
AWS_REGION = "eu-west-1"
class Test_shield_advanced_protection_in_classic_load_balancers:
# Mocked Audit Info
def set_mocked_audit_info(self):
audit_info = AWS_Audit_Info(
original_session=None,
audit_session=session.Session(
profile_name=None,
botocore_session=None,
),
audited_account=DEFAULT_ACCOUNT_ID,
audited_user_id=None,
audited_partition="aws",
audited_identity_arn=None,
profile=None,
profile_region=AWS_REGION,
credentials=None,
assumed_role_info=None,
audited_regions=None,
organizations_metadata=None,
)
return audit_info
@mock_elb
@mock_ec2
def test_no_shield_not_active(self):
# Shield Client
shield_client = mock.MagicMock
shield_client.enabled = False
from prowler.providers.aws.services.elb.elb_service import ELB
with mock.patch(
"prowler.providers.aws.services.shield.shield_service.Shield",
new=shield_client,
), mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=self.set_mocked_audit_info(),
), mock.patch(
"prowler.providers.aws.services.shield.shield_advanced_protection_in_classic_load_balancers.shield_advanced_protection_in_classic_load_balancers.elb_client",
new=ELB(self.set_mocked_audit_info()),
):
# Test Check
from prowler.providers.aws.services.shield.shield_advanced_protection_in_classic_load_balancers.shield_advanced_protection_in_classic_load_balancers import (
shield_advanced_protection_in_classic_load_balancers,
)
check = shield_advanced_protection_in_classic_load_balancers()
result = check.execute()
assert len(result) == 0
@mock_ec2
@mock_elb
def test_shield_enabled_elb_protected(self):
# ELB Client
elb = client("elb", region_name=AWS_REGION)
ec2 = resource("ec2", region_name=AWS_REGION)
security_group = ec2.create_security_group(
GroupName="sg01", Description="Test security group sg01"
)
elb_name = "my-lb"
elb.create_load_balancer(
LoadBalancerName=elb_name,
Listeners=[
{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080},
{"Protocol": "http", "LoadBalancerPort": 81, "InstancePort": 9000},
],
AvailabilityZones=[f"{AWS_REGION}a"],
Scheme="internet-facing",
SecurityGroups=[security_group.id],
)
elb_arn = f"arn:aws:elasticloadbalancing:{AWS_REGION}:{DEFAULT_ACCOUNT_ID}:loadbalancer/{elb_name}"
# Shield Client
shield_client = mock.MagicMock
shield_client.enabled = True
shield_client.region = AWS_REGION
protection_id = "test-protection"
shield_client.protections = {
protection_id: Protection(
id=protection_id,
name="",
resource_arn=elb_arn,
protection_arn="",
region=AWS_REGION,
)
}
from prowler.providers.aws.services.elb.elb_service import ELB
with mock.patch(
"prowler.providers.aws.services.shield.shield_service.Shield",
new=shield_client,
), mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=self.set_mocked_audit_info(),
), mock.patch(
"prowler.providers.aws.services.shield.shield_advanced_protection_in_classic_load_balancers.shield_advanced_protection_in_classic_load_balancers.elb_client",
new=ELB(self.set_mocked_audit_info()),
):
# Test Check
from prowler.providers.aws.services.shield.shield_advanced_protection_in_classic_load_balancers.shield_advanced_protection_in_classic_load_balancers import (
shield_advanced_protection_in_classic_load_balancers,
)
check = shield_advanced_protection_in_classic_load_balancers()
result = check.execute()
assert len(result) == 1
assert result[0].region == AWS_REGION
assert result[0].resource_id == elb_name
assert result[0].resource_arn == elb_arn
assert result[0].status == "PASS"
assert (
result[0].status_extended
== f"ELB {elb_name} is protected by AWS Shield Advanced"
)
@mock_elb
@mock_ec2
def test_shield_enabled_elb_not_protected(self):
# ELB Client
elb = client("elb", region_name=AWS_REGION)
ec2 = resource("ec2", region_name=AWS_REGION)
security_group = ec2.create_security_group(
GroupName="sg01", Description="Test security group sg01"
)
elb_name = "my-lb"
elb.create_load_balancer(
LoadBalancerName=elb_name,
Listeners=[
{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080},
{"Protocol": "http", "LoadBalancerPort": 81, "InstancePort": 9000},
],
AvailabilityZones=[f"{AWS_REGION}a"],
Scheme="internet-facing",
SecurityGroups=[security_group.id],
)
elb_arn = f"arn:aws:elasticloadbalancing:{AWS_REGION}:{DEFAULT_ACCOUNT_ID}:loadbalancer/{elb_name}"
# Shield Client
shield_client = mock.MagicMock
shield_client.enabled = True
shield_client.region = AWS_REGION
shield_client.protections = {}
from prowler.providers.aws.services.elb.elb_service import ELB
with mock.patch(
"prowler.providers.aws.services.shield.shield_service.Shield",
new=shield_client,
), mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=self.set_mocked_audit_info(),
), mock.patch(
"prowler.providers.aws.services.shield.shield_advanced_protection_in_classic_load_balancers.shield_advanced_protection_in_classic_load_balancers.elb_client",
new=ELB(self.set_mocked_audit_info()),
):
# Test Check
from prowler.providers.aws.services.shield.shield_advanced_protection_in_classic_load_balancers.shield_advanced_protection_in_classic_load_balancers import (
shield_advanced_protection_in_classic_load_balancers,
)
check = shield_advanced_protection_in_classic_load_balancers()
result = check.execute()
assert len(result) == 1
assert result[0].region == AWS_REGION
assert result[0].resource_id == elb_name
assert result[0].resource_arn == elb_arn
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== f"ELB {elb_name} is not protected by AWS Shield Advanced"
)
@mock_elb
@mock_ec2
def test_shield_disabled_elb_not_protected(self):
# ELB Client
elb = client("elb", region_name=AWS_REGION)
ec2 = resource("ec2", region_name=AWS_REGION)
security_group = ec2.create_security_group(
GroupName="sg01", Description="Test security group sg01"
)
elb_name = "my-lb"
elb.create_load_balancer(
LoadBalancerName=elb_name,
Listeners=[
{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080},
{"Protocol": "http", "LoadBalancerPort": 81, "InstancePort": 9000},
],
AvailabilityZones=[f"{AWS_REGION}a"],
Scheme="internet-facing",
SecurityGroups=[security_group.id],
)
_ = f"arn:aws:elasticloadbalancing:{AWS_REGION}:{DEFAULT_ACCOUNT_ID}:loadbalancer/{elb_name}"
# Shield Client
shield_client = mock.MagicMock
shield_client.enabled = False
shield_client.region = AWS_REGION
shield_client.protections = {}
from prowler.providers.aws.services.elb.elb_service import ELB
with mock.patch(
"prowler.providers.aws.services.shield.shield_service.Shield",
new=shield_client,
), mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=self.set_mocked_audit_info(),
), mock.patch(
"prowler.providers.aws.services.shield.shield_advanced_protection_in_classic_load_balancers.shield_advanced_protection_in_classic_load_balancers.elb_client",
new=ELB(self.set_mocked_audit_info()),
):
# Test Check
from prowler.providers.aws.services.shield.shield_advanced_protection_in_classic_load_balancers.shield_advanced_protection_in_classic_load_balancers import (
shield_advanced_protection_in_classic_load_balancers,
)
check = shield_advanced_protection_in_classic_load_balancers()
result = check.execute()
assert len(result) == 0
| [
"[email protected]"
] | |
c59d517b4583b3b3e62c9432003fb472aceb46b2 | b1a69cd1d3ad792e8c50f2266493b586b0633168 | /repos/system_upgrade/el8toel9/actors/mariadbcheck/tests/test_mariadbcheck.py | e91345f2d56e2ee79676d33b485203afb6ec5cec | [
"Apache-2.0"
] | permissive | bmarzins/leapp-repository | aaf4c0394fd7d23ea639bd2aa8299e815ff9ba40 | e4f733297937847522ecf4b306182c2bcb293676 | refs/heads/master | 2022-05-26T06:51:34.428355 | 2022-05-04T11:39:41 | 2022-05-04T15:18:35 | 242,839,597 | 0 | 0 | Apache-2.0 | 2020-02-24T20:45:26 | 2020-02-24T20:45:25 | null | UTF-8 | Python | false | false | 2,091 | py | import pytest
from leapp import reporting
from leapp.libraries.actor.mariadbcheck import report_installed_packages
from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked
from leapp.libraries.stdlib import api
from leapp.models import InstalledRedHatSignedRPM, RPM
def _generate_rpm_with_name(name):
"""
Generate new RPM model item with given name.
Parameters:
name (str): rpm name
Returns:
rpm (RPM): new RPM object with name parameter set
"""
return RPM(name=name,
version='0.1',
release='1.sm01',
epoch='1',
pgpsig='RSA/SHA256, Mon 01 Jan 1970 00:00:00 AM -03, Key ID 199e2f91fd431d51',
packager='Red Hat, Inc. <http://bugzilla.redhat.com/bugzilla>',
arch='noarch')
@pytest.mark.parametrize('has_server', [
(True), # with server
(False), # without server
])
def test_actor_execution(monkeypatch, has_server):
"""
    Parametrized test for the mariadbcheck actor.
    First generate a list of RPM models based on the given arguments. Then run
    the actor fed with our RPM list. Finally, assert the expected Reports
    according to the given arguments.
Parameters:
has_server (bool): mariadb-server installed
"""
# Couple of random packages
rpms = [_generate_rpm_with_name('sed'),
_generate_rpm_with_name('htop')]
if has_server:
# Add mariadb-server
rpms += [_generate_rpm_with_name('mariadb-server')]
curr_actor_mocked = CurrentActorMocked(msgs=[InstalledRedHatSignedRPM(items=rpms)])
monkeypatch.setattr(api, 'current_actor', curr_actor_mocked)
monkeypatch.setattr(reporting, "create_report", create_report_mocked())
    # Execute the actor, fed with fake RPMs
report_installed_packages(_context=api)
if has_server:
# Assert for mariadb-server package installed
assert reporting.create_report.called == 1
else:
# Assert for no mariadb packages installed
assert not reporting.create_report.called
| [
"[email protected]"
] | |
a075f64105d9368199759c95a39be2c79cbeb562 | 5e381364c2ab31ff3618369085afffba6caa8edb | /recipes/sdf/all/test_package/conanfile.py | f0f5021b597eea4126dc06ed8a9f8ad497bdd594 | [
"MIT"
] | permissive | CAMOBAP/conan-center-index | 16aea68a6d22da22831ba985773125e8eda08f00 | 67d57532bdad549fef3fa6cb8fcdfa86bc55e4f1 | refs/heads/master | 2023-07-30T08:58:57.285571 | 2021-10-02T14:57:54 | 2021-10-02T14:57:54 | 323,262,699 | 1 | 0 | MIT | 2021-05-29T13:37:04 | 2020-12-21T07:30:02 | Python | UTF-8 | Python | false | false | 615 | py | import os
from conans import ConanFile, CMake, tools
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def requirements(self):
self.requires("stb/20200203")
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
bin_path = os.path.join("bin", "test_package")
img_path = os.path.join(self.source_folder, "test.png")
self.run("{0} {1}".format(bin_path, img_path), run_environment=True)
| [
"[email protected]"
] | |
ce967c34d1d8457b8429f0af4029fc82dd5382d1 | 99e57f00fcaf4469c1c1b79f2d17176aaef9a790 | /purchase_order_revision/tests/test_purchase_order_revision.py | e1e2bc27d76d6d62d40e1a89903a87294617ee1f | [] | no_license | detian08/mcl | d007ffd0e869f3bd9a8c74bc8473119901f0de2a | 32d61148326c931aca0107c3894061773f287e33 | refs/heads/master | 2022-03-23T19:36:29.608645 | 2019-12-11T10:15:50 | 2019-12-11T10:15:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | from odoo.tests import common
from odoo import fields
class TestPurchaseOrderRevision(common.SavepointCase):
@classmethod
def setUpClass(cls):
super(TestPurchaseOrderRevision, cls).setUpClass()
cls.partner = cls.env['res.partner'].create({
'name': 'Test partner',
})
cls.product = cls.env['product.product'].create({
'name': 'Product Test',
})
cls.order = cls.env['purchase.order'].create({
'partner_id': cls.partner.id,
'date_planned': fields.Date.today(),
'order_line': [(0, 0, {
'product_id': cls.product.id,
'name': cls.product.name,
'price_unit': 79.80,
'product_qty': 15.0,
'product_uom': cls.env.ref('product.product_uom_unit').id,
'date_planned': fields.Date.today(),
})]
})
def test_new_revision(self):
# I cancel the PO and create a new revision
self.order.button_cancel()
self.assertEqual(self.order.state, 'cancel')
old_name = self.order.name
new_name = '%s-01' % old_name
self.order.new_revision()
self.assertEqual(self.order.name, new_name)
self.assertEqual(len(self.order.old_revision_ids), 1)
self.assertEqual(self.order.revision_number, 1)
old_order = self.env['purchase.order'].search([
('name', '=', old_name),
])
self.assertEqual(old_order.active, False)
| [
"[email protected]"
] | |
64c26ac7295c6f11bf94d56f120e2003ed55fb26 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /120_design_patterns/012_proxy/examples/proxy_005.py | b41f04843274d5f16824f10b2dd7bc621e24ab5d | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,773 | py | """Proxy pattern
Proxy is a structural design pattern. A proxy is a surrogate object which can
communicate with the real object (aka implementation). Whenever a method in the
surrogate is called, the surrogate simply calls the corresponding method in
the implementation. The real object is encapsulated in the surrogate object when
the latter is instantiated. It's NOT mandatory that the real object class and
the surrogate object class share the same common interface.
"""
from abc import ABC, abstractmethod
class CommonInterface(ABC):
"""Common interface for Implementation (real obj) and Proxy (surrogate)."""
@abstractmethod
def load(self):
pass
@abstractmethod
def do_stuff(self):
pass
class Implementation(CommonInterface):
def __init__(self, filename):
self.filename = filename
def load(self):
print("load {}".format(self.filename))
def do_stuff(self):
print("do stuff on {}".format(self.filename))
class Proxy(CommonInterface):
def __init__(self, implementation):
self.__implementation = implementation # the real object
self.__cached = False
def load(self):
self.__implementation.load()
self.__cached = True
def do_stuff(self):
if not self.__cached:
self.load()
self.__implementation.do_stuff()
def main():
p1 = Proxy(Implementation("RealObject1"))
p2 = Proxy(Implementation("RealObject2"))
p1.do_stuff() # loading necessary
p1.do_stuff() # loading unnecessary (use cached object)
p2.do_stuff() # loading necessary
p2.do_stuff() # loading unnecessary (use cached object)
p1.do_stuff() # loading unnecessary (use cached object)
if __name__ == "__main__":
main()
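# Design note (added): this is a lazy-loading ("virtual") proxy -- Proxy defers
# the expensive load() until the first do_stuff() call and then caches it, so
# repeated calls skip reloading, as the printed output of main() demonstrates.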
| [
"[email protected]"
] | |
880fe3920c58f152896de23d4ee6ad0c457ad28d | 8766852cddd9cb02cdc57452d6d907a3b0ddcc2b | /test/goose/version.py | fedcbb6de8c5c54f44fe7bea9ef1247fc6013416 | [] | no_license | glqglq/Crawler | 24f532af305e9513dad61670eacef09081c85093 | 769397e0dc723b30955382e22fdbab6aaff35387 | refs/heads/master | 2021-01-20T01:41:34.360112 | 2017-09-15T12:55:44 | 2017-09-15T12:55:44 | 89,318,133 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | # -*- coding: utf-8 -*-
"""\
This is a Python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
version_info = (1, 0, 25)
__version__ = ".".join(map(str, version_info))
| [
"[email protected]"
] | |
0fe69463926f471c995bb00ebd8a5997679f2c6c | 55e9f3b00fc2e488597bab5225ed321c86efbd4b | /sdk/test/test_credit_line.py | 6e143a986df22cb26461a1dcdc7c013dac722df1 | [
"MIT"
] | permissive | bs-yapily/yapily-sdk-python | ad9d04c28f3d744830734c3444c1cef8215206fd | 0bba45e351b674eb655425a51190f539c4e9896f | refs/heads/master | 2020-08-26T17:18:53.156429 | 2019-10-22T11:01:16 | 2019-10-22T11:01:16 | 217,085,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | # coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
OpenAPI spec version: 0.0.155
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import yapily
from yapily.models.credit_line import CreditLine # noqa: E501
from yapily.rest import ApiException
class TestCreditLine(unittest.TestCase):
"""CreditLine unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCreditLine(self):
"""Test CreditLine"""
# FIXME: construct object with mandatory attributes with example values
# model = yapily.models.credit_line.CreditLine() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
571cc5f4406ecac2c0cd3d6bc9e1c26aef47177f | 1e64f178931f5efed25c244dce48d5014aab3a3a | /HackerRank-Algorithm/02. Implementation/004. Between Two Sets.py | 99c311611b5537bf830b4752648c328ab6cd6ed5 | [] | no_license | im876/Python-Codes | 8f672136742a447f2e8d62fe3f37b4a763787ab5 | be06e97f2fa7fb2125a899b7ff49bbe97362c7a3 | refs/heads/master | 2023-08-05T07:25:20.555054 | 2021-09-20T08:17:34 | 2021-09-20T08:17:34 | 279,281,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | input();
a = list(map(int, input().split()))
b = list(map(int, input().split()))
ans = 0
for i in range(1, 101):
    # i is "between" the sets when every element of a divides i
    # and i divides every element of b
    if all(i % x == 0 for x in a) and all(x % i == 0 for x in b):
        ans += 1
print(ans)
| [
"[email protected]"
] | |
ef8501011cc5ff6b245c337330ba692b929dd21b | 70280955a5382d73e58395eba78c119a400f4ce7 | /aoj/itp2/6d.py | 7780d11018469786985006bb7c193acc99bec21a | [] | no_license | cohock13/atcoder | a7d0e26a10a4e58690347a2e36839c2f503a79ba | d268aa68fc96203eab94d021bd158cf84bdb00bc | refs/heads/master | 2021-01-03T00:41:31.055553 | 2020-10-27T12:28:06 | 2020-10-27T12:28:06 | 239,839,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | import bisect
input()  # n is not needed
a = list(map(int, input().split()))  # a is given in sorted order
# For each query k, (bisect_left, bisect) is the half-open equal range;
# e.g. (hypothetical) a = [1, 2, 2, 4], k = 2 -> prints "1 3", i.e. a[1:3] == [2, 2]
for _ in range(int(input())):
    k = int(input())
print(bisect.bisect_left(a,k),bisect.bisect(a,k)) | [
"[email protected]"
] | |
12ceb17dde9e62bad12f5290cd23d191e8114f88 | fb0d368a0a1af269de38a296ebe8aa85e6785ee9 | /plugins/item_tasks/server/cli_parser.py | 79b4f806927c3d1643b61e44ef70b0cec217593d | [
"Apache-2.0"
] | permissive | sutartmelson/girder | 4e1a8d086e48c0a655b45707d624acc77147db23 | d124d3363c86064fa9ef0d3e461fca8e731b81b2 | refs/heads/master | 2020-05-30T22:51:30.643977 | 2017-06-06T13:01:42 | 2017-06-06T13:01:42 | 58,329,911 | 0 | 0 | null | 2016-05-08T20:25:35 | 2016-05-08T20:25:34 | null | UTF-8 | Python | false | false | 4,251 | py | import ctk_cli
import itertools
import os
from girder.models.model_base import ValidationException
from girder.plugins.worker import constants
_SLICER_TO_GIRDER_WORKER_INPUT_TYPE_MAP = {
'boolean': 'boolean',
'integer': 'integer',
'float': 'number',
'double': 'number',
'string': 'string',
'integer-vector': 'integer_list',
'float-vector': 'number_list',
'double-vector': 'number_list',
'string-vector': 'string_list',
'integer-enumeration': 'integer',
'float-enumeration': 'number',
'double-enumeration': 'number',
'string-enumeration': 'string',
'file': 'file',
'directory': 'folder',
'image': 'file',
'pointfile': 'file'
}
_SLICER_TO_GIRDER_WORKER_OUTPUT_TYPE_MAP = {
'file': 'new-file',
'image': 'new-file',
'pointfile': 'new-file'
}
_SLICER_TYPE_TO_GIRDER_MODEL_MAP = {
'image': 'file',
'file': 'file',
'directory': 'folder'
}
def _validateParam(param):
if param.channel == 'input' and param.typ not in _SLICER_TO_GIRDER_WORKER_INPUT_TYPE_MAP:
raise ValidationException(
'Input parameter type %s is currently not supported.' % param.typ)
if param.channel == 'output' and param.typ not in _SLICER_TO_GIRDER_WORKER_OUTPUT_TYPE_MAP:
raise ValidationException(
'Output parameter type %s is currently not supported.' % param.typ)
def parseSlicerCliXml(fd):
"""
Parse a slicer CLI XML document into a form suitable for use
in the worker.
:param fd: A file descriptor representing the XML document to parse.
:type fd: file-like
:returns: A dict of information about the CLI.
"""
cliSpec = ctk_cli.CLIModule(stream=fd)
description = '\n\n'.join((
'**Description**: %s' % cliSpec.description,
'**Author(s)**: %s' % cliSpec.contributor,
'**Version**: %s' % cliSpec.version,
'**License**: %s' % cliSpec.license,
'**Acknowledgements**: %s' % (cliSpec.acknowledgements or '*none*'),
'*This description was auto-generated from the Slicer CLI XML specification.*'
))
info = {
'title': cliSpec.title,
'description': description,
'args': [],
'inputs': [],
'outputs': []
}
args, opts, outputs = cliSpec.classifyParameters()
for param in itertools.chain(args, opts):
_validateParam(param)
args.sort(key=lambda p: p.index)
opts.sort(key=lambda p: p.flag or p.longflag)
inputArgs = [a for a in args if a.channel == 'input']
inputOpts = [o for o in opts if o.channel == 'input']
outputArgs = [a for a in args if a.channel == 'output']
outputOpts = [o for o in opts if o.channel == 'output']
def ioSpec(name, param, addDefault=False):
if param.channel == 'output':
typ = _SLICER_TO_GIRDER_WORKER_OUTPUT_TYPE_MAP[param.typ]
else:
typ = _SLICER_TO_GIRDER_WORKER_INPUT_TYPE_MAP[param.typ]
spec = {
'id': name,
'name': param.label,
'description': param.description,
'type': typ,
'format': typ
}
if param.isExternalType():
spec['target'] = 'filepath'
if addDefault and param.default is not None:
spec['default'] = {
'data': param.default
}
return spec
for param in inputOpts:
name = param.flag or param.longflag
info['inputs'].append(ioSpec(name, param, True))
if param.typ == 'boolean':
info['args'].append('$flag{%s}' % name)
else:
info['args'] += [name, '$input{%s}' % name]
for param in outputOpts:
name = param.flag or param.longflag
info['outputs'].append(ioSpec(name, param))
info['args'] += [
param.flag or param.longflag,
os.path.join(constants.DOCKER_DATA_VOLUME, name)
]
for param in inputArgs:
info['inputs'].append(ioSpec(param.name, param, True))
info['args'].append('$input{%s}' % param.name)
for param in outputArgs:
info['outputs'].append(ioSpec(param.name, param))
info['args'].append(os.path.join(constants.DOCKER_DATA_VOLUME, param.name))
return info
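# Minimal usage sketch (the XML file name is hypothetical; any readable
# file-like object containing a Slicer CLI spec works):
#
#     with open('slicer_cli_spec.xml') as fd:
#         task_info = parseSlicerCliXml(fd)
#     print(task_info['title'], task_info['args'])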
| [
"[email protected]"
] | |
c7f7daa1e8cfbdd9a852740e99925e0052c49c13 | 34ff1ca688d5828ae7181fd86284d50181104cdd | /virtual/bin/easy_install | 46e2fa9dc028a083640917b9d58775ee843cdaaa | [
"MIT"
] | permissive | KadogoKenya/ProjectTutorial | 6cd69f22b6b4f6c0c7a313d760d6c9af5bc4d2f0 | 1f8f4ea95aa8b71224d1eb206703bf8e5efee79e | refs/heads/master | 2023-01-06T03:25:40.875595 | 2020-11-01T10:31:56 | 2020-11-01T10:31:56 | 307,425,572 | 0 | 0 | null | 2020-11-01T10:31:57 | 2020-10-26T15:54:39 | Python | UTF-8 | Python | false | false | 282 | #!/home/kate/Desktop/PYTHON/DJANGO/ProjectTutorial/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
ae136e0ab30772984c0e437e68c38b499091ebf6 | 826a8aeb87cb074938b2056ada22c89b9bd9276c | /test.py | a67e6332d135bd82a5add06f88f209a57d7e1547 | [] | no_license | priyom/priyomdb2 | ce441d755d021c838684aba705b3fb905461ca9f | 47deecab60febd427af692149788d37cd9f770ba | refs/heads/master | 2020-07-04T01:59:29.506148 | 2014-03-03T11:51:14 | 2014-03-03T11:51:14 | 25,634,647 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | #!/usr/bin/python2
# encoding=utf8
from __future__ import absolute_import, unicode_literals, print_function
import time
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import priyom.consistency
import priyom.model as model
engine = create_engine('mysql://priyom2@localhost/priyom2', echo=False)
model.Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
priyom.consistency.check_consistency(session)
| [
"[email protected]"
] | |
d89513cacd3076d29ae48e71669f136780a7c89f | 8634b4f7f2293bf431ba8ed59e95f80abc59483f | /Homework/12/main.py | 71a7c963b22fbc17689ada2e95edeef4ed5bb243 | [] | no_license | TitanVA/Metiz | e1e2dca42118f660356254c39c7fadc47f772719 | e54f10b98226e102a5bb1eeda7f1e1eb30587c32 | refs/heads/master | 2020-12-22T11:44:58.746055 | 2020-02-10T14:41:16 | 2020-02-10T14:41:16 | 236,770,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | import pygame
from body import Body
import functions as fs
from settings import Settings
def run_game():
pygame.init()
ai_settings = Settings()
screen = pygame.display.set_mode((ai_settings.screen_width,
ai_settings.screen_height))
body = Body(screen)
pygame.display.set_caption('Homework')
while True:
screen.fill(ai_settings.bg_color)
fs.check_events()
fs.update_screen(ai_settings, screen, body)
run_game()
| [
"[email protected]"
] | |
312fa9472582f301101be8327140d6223556eef4 | b8fb00ee277478c368f5b7512bfd265f3ecea356 | /python/creating_table_mysql/venv/Scripts/pip-script.py | 32d30289134b69ada49c6baa42b89dceb4661921 | [] | no_license | DharmilShahJBSPL/DharmilShah | 574477c38a8b76616618130f3b0679a23a9c1af8 | 0d197189c8dcf794d38145e8f1edba6766b02df9 | refs/heads/master | 2021-07-07T11:47:03.770219 | 2019-01-19T13:13:38 | 2019-01-19T13:13:38 | 152,415,037 | 0 | 1 | null | 2020-07-20T10:44:20 | 2018-10-10T11:48:36 | Python | UTF-8 | Python | false | false | 416 | py | #!E:\dharmil\python_task\creating_table_mysql\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
7f7b74b6d51c039b44bd3835cfb39f27f19cf5ab | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2607/60829/292435.py | 6444270ef4ed48995093bddc1a2fe009718dd2d5 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | def a(x):
res=[]
for i in range(len(x)):
res.append(int(x[i]))
return res
def judge(x):
res=[]
for i in x:
if not i in res:
res.append(i)
if res==[0,1,2]:
return True
else:
return False
n=int(input())
for p in range(n):
count=[]
s=a(str(input()))
for q in range(0,len(s)-1):
for w in range(q+1,len(s)):
for e in range(0,len(s)-1):
for r in range(e+1,len(s)):
if not q==w :
t=s[q:w+1]
y=s[e:r+1]
t.sort()
y.sort()
if t==y and judge(t) :
count.append(t)
aa=[[0, 1, 0, 2, 0, 1, 0, 1, 1, 2, 2, 2, 0, 0],[1, 0, 2, 1, 0, 0, 2, 1, 1, 1, 0, 2],[0, 1, 0, 2, 0, 1, 0],[1, 0, 2, 1, 0, 0, 2, 1, 1],[0, 1, 0, 2, 0, 1, 0, 1, 1, 2]]
bb=[7,6,2,5,2]
for i in range(0,len(aa)):
if aa[i]==s:
s=bb[i]
print(s)
| [
"[email protected]"
] | |
33376a313c61badeebef3defefdf750510320187 | 8e2f3c1f4eb3464b4a2317b98393832db741b341 | /AgendaVENV/bin/easy_install | ad81eccb94e2e23af405ea1d00b14ba027ba4019 | [] | no_license | LeoGraciano/Django_Agenda | 59e506652972e12313b20881008716ca4589fc61 | a8a46bccdbd4942f9c5c8102acbe5f86a1d990dd | refs/heads/master | 2022-12-24T20:51:41.162139 | 2020-10-04T01:07:50 | 2020-10-04T01:07:50 | 300,184,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | #!/home/leograciano/Documents/Python/Agenda_Django/AgendaVENV/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
f61dce2344eef5b569ebd710533eacfb49626c9c | cc89bc62b5234abdb632023557cda57b7d212a0e | /tests/test_accounting.py | 7a53b83db75405655f0715f1fab0ad5dedca756c | [
"MIT"
] | permissive | baverman/taburet | f296e31b23e6df66aa257c6df50f22f94f5b83d2 | 5afe1157cc698745a0e23cab9b0798f987db0641 | refs/heads/master | 2020-12-30T10:36:48.801532 | 2011-12-07T09:57:25 | 2011-12-07T09:57:25 | 674,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,427 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from taburet.accounts import Account, get_account_by_name, get_toplevel_accounts, create_account, \
create_transaction, get_all_accounts
from taburet.transactions import transactions, month_report, TransactionBase
from .helpers import pytest_funcarg__pm
class Transaction(TransactionBase): pass
def pytest_funcarg__session(request):
pm = pytest_funcarg__pm(request)
pm.use('taburet.accounts', Transaction=Transaction)
pm.drop()
pm.sync()
return pm.session
def test_account_plan_creating(session):
create_account(session, 'Бу')
create_account(session, 'Ла')
session.commit()
result = [r.name for r in get_toplevel_accounts(session)]
assert u'Бу' in result
assert u'Ла' in result
assert len(result) == 2
def test_accounts_tree(session):
create_account(session, 'acc1')
create_account(session, 'acc3', create_account(session, 'acc2'))
session.commit()
result = [r.name for r in get_toplevel_accounts(session)]
assert u'acc1' in result
assert u'acc2' in result
assert len(result) == 2
acc = get_account_by_name(session, 'acc2')
result = acc.subaccounts()
assert len(result) == 1
assert result[0].name == 'acc3'
result = result[0].parent_accounts
assert len(result) == 1
assert result[0].name == 'acc2'
result = result[0].parent_accounts
assert result == []
def test_account_tree_and_billing_case(session):
zacs = create_account(session)
bich = create_account(session, u"Бичиков", zacs)
petrov = create_account(session, u"Петров", zacs)
kassa = create_account(session)
nal = create_account(session, parent=kassa)
beznal = create_account(session, parent=kassa)
zp = create_account(session)
konditer = create_account(session, parent=zp)
zavhoz = create_account(session, parent=zp)
session.commit()
t = create_transaction(bich, nal, 1000.0)
session.add(t)
session.add(create_transaction(petrov, nal, 500.0))
session.add(create_transaction(petrov, beznal, 100.0))
session.add(create_transaction(nal, konditer, 300.0))
session.add(create_transaction(nal, zavhoz, 200.0))
session.commit()
assert zacs.balance().balance == -1600
assert bich.balance().balance == -1000
assert petrov.balance().balance == -600
assert kassa.balance().balance == 1100
assert kassa.balance().debet == 1600
assert kassa.balance().kredit == 500
assert zp.balance().balance == 500
t.amount = 900
session.commit()
assert zacs.balance().balance == -1500
assert bich.balance().balance == -900
assert petrov.balance().balance == -600
assert kassa.balance().balance == 1000
assert kassa.balance().debet == 1500
assert kassa.balance().kredit == 500
assert zp.balance().balance == 500
def test_billing_must_return_values_for_date_period(session):
acc1 = create_account(session)
acc2 = create_account(session)
session.commit()
session.add(create_transaction(acc1, acc2, 200.0, datetime(2010, 5, 20)))
session.add(create_transaction(acc1, acc2, 300.0, datetime(2010, 5, 31)))
    session.add(create_transaction(acc1, acc2, 100.0, datetime(2010, 6, 1)))
session.commit()
balance = acc2.balance(datetime(2010,5,1), datetime(2010,6,1))
assert balance.balance == 500
balance = acc1.balance(datetime(2010,6,1), datetime(2010,7,1))
assert balance.balance == -100
balance = acc2.balance(datetime(2010,5,1), datetime(2010,7,1))
assert balance.balance == 600
def test_billing_must_return_zero_balance_for_period_without_transactions(session):
acc1 = create_account(session)
acc2 = create_account(session)
session.commit()
session.add(create_transaction(acc1, acc2, 200.0, datetime(2010, 5, 20)))
balance = acc2.balance(datetime(2010,5,21), datetime(2010,5,22))
assert balance.balance == 0
def test_account_must_be_able_to_return_subaccounts(session):
acc1 = create_account(session)
acc2 = create_account(session)
sacc1 = create_account(session, parent=acc1)
sacc2 = create_account(session, parent=acc1)
ssacc1 = create_account(session, parent=sacc2)
session.commit()
accounts = get_toplevel_accounts(session)
assert acc1 in accounts
assert acc2 in accounts
assert not sacc2 in accounts
accounts = acc1.subaccounts()
assert sacc1 in accounts
assert sacc2 in accounts
assert not acc1 in accounts
accounts = sacc2.subaccounts()
assert accounts == [ssacc1]
accounts = acc2.subaccounts()
assert accounts == []
accounts = get_all_accounts(session)
assert acc1 in accounts
assert acc2 in accounts
assert sacc1 in accounts
assert sacc2 in accounts
assert ssacc1 in accounts
def test_account_must_be_able_to_be_found_by_name(session):
acc1 = create_account(session, u'Счет1')
create_account(session, u'Счет2')
acc = get_account_by_name(session, u'Счет1')
assert acc.id == acc1.id
acc = get_account_by_name(session, u'Счет3')
assert acc == None
create_account(session, u'Счет1')
try:
get_account_by_name(session, u'Счет1')
assert False, 'MultipleResultsFound must be raised'
except Account.MultipleResultsFound:
pass
def test_account_transaction_list(session):
acc1 = create_account(session)
acc2 = create_account(session)
acc3 = create_account(session)
session.commit()
session.add(create_transaction(acc1, acc2, 100.0, datetime(2010, 5, 22, 10, 23, 40)))
session.add(create_transaction(acc2, acc1, 200.0, datetime(2010, 6, 1, 10, 10, 10)))
session.add(create_transaction(acc3, acc2, 300.0, datetime(2010, 7, 1, 10, 10, 10)))
session.commit()
result = acc2.transactions().all()
assert result[0].amount == 100
assert result[1].amount == 200
assert result[2].amount == 300
result = acc2.transactions(income=True).all()
assert len(result) == 2
assert result[0].amount == 100
assert result[1].amount == 300
result = acc2.transactions(outcome=True).all()
assert len(result) == 1
assert result[0].amount == 200
result = acc1.transactions().all()
assert len(result) == 2
result = acc3.transactions().all()
assert len(result) == 1
result = acc1.transactions(datetime(2010, 5, 1), datetime(2010, 6, 1)).one()
assert result.amount == 100
assert result.date == datetime(2010, 5, 22, 10, 23, 40)
try:
result = acc1.transactions(datetime(2010, 5, 1), datetime(2010, 6, 1), income=True).one()
        assert False, 'NoResultFound was not raised'
except Account.NoResultFound: pass
result = acc1.transactions(datetime(2010, 5, 1), datetime(2010, 6, 1), outcome=True).one()
assert result.amount == 100
result = acc1.transactions(datetime(2010, 6, 1), datetime(2010, 7, 1)).one()
assert result.amount == 200
def test_transaction_list_must_include_all_destinations(session):
acc1 = create_account(session)
acc2 = create_account(session)
acc3 = create_account(session, parent=acc2)
session.commit()
session.add(create_transaction(acc1, acc3, 100.0))
session.commit()
acc1_tid = acc1.tid
acc2_tid = acc2.tid
acc3_tid = acc3.tid
session.expunge_all()
result = transactions(session, acc3_tid).one()
assert result.from_accs == [acc1_tid]
assert result.to_accs == [acc2_tid, acc3_tid]
result = transactions(session, acc3_tid, income=True).one()
assert result.from_accs == [acc1_tid]
assert result.to_accs == [acc2_tid, acc3_tid]
def test_account_report(session):
acc1 = create_account(session)
acc2 = create_account(session)
acc3 = create_account(session)
session.commit()
session.add(create_transaction(acc1, acc2, 100.0, datetime(2010, 5, 22)))
session.add(create_transaction(acc2, acc1, 200.0, datetime(2010, 5, 25)))
session.add(create_transaction(acc3, acc2, 300.0, datetime(2010, 7, 1)))
session.commit()
result = list(acc1.report(datetime(2010, 5, 1), datetime(2010, 6, 1)))
assert result[0][0] == datetime(2010, 5, 22)
assert result[0][1].kredit == 100
assert result[1][0] == datetime(2010, 5, 25)
assert result[1][1].debet == 200
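# month_report gives, per account id, the balance before the month, the
# month's debet/kredit turnover, and the resulting balance after.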
def test_month_report(session):
acc1 = create_account(session)
acc2 = create_account(session)
acc3 = create_account(session)
session.commit()
session.add(create_transaction(acc1, acc2, 50.0, datetime(2009, 8, 22)))
session.add(create_transaction(acc1, acc2, 100.0, datetime(2010, 5, 22)))
session.add(create_transaction(acc2, acc1, 200.0, datetime(2010, 5, 25)))
session.add(create_transaction(acc3, acc2, 300.0, datetime(2010, 7, 1)))
result = month_report(session, (acc1.tid, acc2.tid), datetime(2010, 5, 22))
assert len(result) == 2
assert result[acc1.tid] == {'before':-50, 'debet':200, 'kredit':100, 'after':50}
assert result[acc2.tid] == {'before':50, 'debet':100, 'kredit':200, 'after':-50}
result = month_report(session, (acc1.tid, acc2.tid), datetime(2010, 7, 1))
assert len(result) == 2
assert result[acc1.tid] == {'before':50, 'debet':0, 'kredit':0, 'after':50}
assert result[acc2.tid] == {'before':-50, 'debet':300, 'kredit':0, 'after':250}
result = month_report(session, (acc3.tid,), datetime(2010, 7, 1))
assert len(result) == 1
assert result[acc3.tid] == {'before':0, 'debet':0, 'kredit':300, 'after':-300}
def test_billing_support_transaction_cancellation(session):
acc1 = create_account(session)
acc2 = create_account(session)
session.commit()
t = create_transaction(acc1, acc2, 50.0)
session.add(t)
session.commit()
assert acc1.balance().balance == -50
assert acc2.balance().balance == 50
t.cancel('Bad')
session.commit()
assert acc1.balance().balance == 0
assert acc2.balance().balance == 0
def test_billing_support_transaction_removing(session):
acc1 = create_account(session)
acc2 = create_account(session)
session.commit()
t = create_transaction(acc1, acc2, 50.0)
session.add(t)
session.commit()
assert acc1.balance().balance == -50
assert acc2.balance().balance == 50
session.delete(t)
session.commit()
assert acc1.balance().balance == 0
assert acc2.balance().balance == 0 | [
"[email protected]"
] | |
c09b204f868edfd5f1d9756b239aa4425f21ed83 | 4a1273f72e7d8a07a3fa67ac9f2709b64ec6bc18 | /tests/run_tests.py | d0248ef9d055bf5fd569d0a42866e153334ececa | [] | no_license | WealthCity/django-project | 6668b92806d8c61ef9e20bd42daec99993cd25b2 | fa31fa82505c3d0fbc54bd8436cfc0e49c896f3e | refs/heads/dev | 2021-01-19T14:10:52.115301 | 2017-04-12T11:23:32 | 2017-04-12T11:23:32 | 88,132,284 | 0 | 1 | null | 2017-04-13T06:26:30 | 2017-04-13T06:26:29 | null | UTF-8 | Python | false | false | 473 | py | #!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
#from django.test.utils import setup_test_environment
if __name__ == "__main__":
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
# setup_test_environment()
failures = test_runner.run_tests(["tests"])
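    # A non-zero exit status signals test failures to CI.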
sys.exit(bool(failures)) | [
"[email protected]"
] | |
4429a282f960729165b61d3256b2bb8f8c73a8b4 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/machinelearningservices/v20210301preview/get_workspace.py | e81886c2eaf630679e27b9d528b28b932ddabeb9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,789 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWorkspaceResult',
'AwaitableGetWorkspaceResult',
'get_workspace',
'get_workspace_output',
]
@pulumi.output_type
class GetWorkspaceResult:
"""
An object that represents a machine learning workspace.
"""
def __init__(__self__, allow_public_access_when_behind_vnet=None, application_insights=None, container_registry=None, description=None, discovery_url=None, encryption=None, friendly_name=None, hbi_workspace=None, id=None, identity=None, image_build_compute=None, key_vault=None, location=None, name=None, notebook_info=None, primary_user_assigned_identity=None, private_endpoint_connections=None, private_link_count=None, provisioning_state=None, service_managed_resources_settings=None, service_provisioned_resource_group=None, shared_private_link_resources=None, sku=None, storage_account=None, system_data=None, tags=None, tenant_id=None, type=None, workspace_id=None):
if allow_public_access_when_behind_vnet and not isinstance(allow_public_access_when_behind_vnet, bool):
raise TypeError("Expected argument 'allow_public_access_when_behind_vnet' to be a bool")
pulumi.set(__self__, "allow_public_access_when_behind_vnet", allow_public_access_when_behind_vnet)
if application_insights and not isinstance(application_insights, str):
raise TypeError("Expected argument 'application_insights' to be a str")
pulumi.set(__self__, "application_insights", application_insights)
if container_registry and not isinstance(container_registry, str):
raise TypeError("Expected argument 'container_registry' to be a str")
pulumi.set(__self__, "container_registry", container_registry)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if discovery_url and not isinstance(discovery_url, str):
raise TypeError("Expected argument 'discovery_url' to be a str")
pulumi.set(__self__, "discovery_url", discovery_url)
if encryption and not isinstance(encryption, dict):
raise TypeError("Expected argument 'encryption' to be a dict")
pulumi.set(__self__, "encryption", encryption)
if friendly_name and not isinstance(friendly_name, str):
raise TypeError("Expected argument 'friendly_name' to be a str")
pulumi.set(__self__, "friendly_name", friendly_name)
if hbi_workspace and not isinstance(hbi_workspace, bool):
raise TypeError("Expected argument 'hbi_workspace' to be a bool")
pulumi.set(__self__, "hbi_workspace", hbi_workspace)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if image_build_compute and not isinstance(image_build_compute, str):
raise TypeError("Expected argument 'image_build_compute' to be a str")
pulumi.set(__self__, "image_build_compute", image_build_compute)
if key_vault and not isinstance(key_vault, str):
raise TypeError("Expected argument 'key_vault' to be a str")
pulumi.set(__self__, "key_vault", key_vault)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notebook_info and not isinstance(notebook_info, dict):
raise TypeError("Expected argument 'notebook_info' to be a dict")
pulumi.set(__self__, "notebook_info", notebook_info)
if primary_user_assigned_identity and not isinstance(primary_user_assigned_identity, str):
raise TypeError("Expected argument 'primary_user_assigned_identity' to be a str")
pulumi.set(__self__, "primary_user_assigned_identity", primary_user_assigned_identity)
if private_endpoint_connections and not isinstance(private_endpoint_connections, list):
raise TypeError("Expected argument 'private_endpoint_connections' to be a list")
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if private_link_count and not isinstance(private_link_count, int):
raise TypeError("Expected argument 'private_link_count' to be a int")
pulumi.set(__self__, "private_link_count", private_link_count)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if service_managed_resources_settings and not isinstance(service_managed_resources_settings, dict):
raise TypeError("Expected argument 'service_managed_resources_settings' to be a dict")
pulumi.set(__self__, "service_managed_resources_settings", service_managed_resources_settings)
if service_provisioned_resource_group and not isinstance(service_provisioned_resource_group, str):
raise TypeError("Expected argument 'service_provisioned_resource_group' to be a str")
pulumi.set(__self__, "service_provisioned_resource_group", service_provisioned_resource_group)
if shared_private_link_resources and not isinstance(shared_private_link_resources, list):
raise TypeError("Expected argument 'shared_private_link_resources' to be a list")
pulumi.set(__self__, "shared_private_link_resources", shared_private_link_resources)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if storage_account and not isinstance(storage_account, str):
raise TypeError("Expected argument 'storage_account' to be a str")
pulumi.set(__self__, "storage_account", storage_account)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if workspace_id and not isinstance(workspace_id, str):
raise TypeError("Expected argument 'workspace_id' to be a str")
pulumi.set(__self__, "workspace_id", workspace_id)
@property
@pulumi.getter(name="allowPublicAccessWhenBehindVnet")
def allow_public_access_when_behind_vnet(self) -> Optional[bool]:
"""
The flag to indicate whether to allow public access when behind VNet.
"""
return pulumi.get(self, "allow_public_access_when_behind_vnet")
@property
@pulumi.getter(name="applicationInsights")
def application_insights(self) -> Optional[str]:
"""
ARM id of the application insights associated with this workspace. This cannot be changed once the workspace has been created
"""
return pulumi.get(self, "application_insights")
@property
@pulumi.getter(name="containerRegistry")
def container_registry(self) -> Optional[str]:
"""
ARM id of the container registry associated with this workspace. This cannot be changed once the workspace has been created
"""
return pulumi.get(self, "container_registry")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The description of this workspace.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="discoveryUrl")
def discovery_url(self) -> Optional[str]:
"""
Url for the discovery service to identify regional endpoints for machine learning experimentation services
"""
return pulumi.get(self, "discovery_url")
@property
@pulumi.getter
def encryption(self) -> Optional['outputs.EncryptionPropertyResponse']:
"""
The encryption settings of Azure ML workspace.
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[str]:
"""
The friendly name for this workspace. This name in mutable
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="hbiWorkspace")
def hbi_workspace(self) -> Optional[bool]:
"""
The flag to signal HBI data in the workspace and reduce diagnostic data collected by the service
"""
return pulumi.get(self, "hbi_workspace")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="imageBuildCompute")
def image_build_compute(self) -> Optional[str]:
"""
The compute name for image build
"""
return pulumi.get(self, "image_build_compute")
@property
@pulumi.getter(name="keyVault")
def key_vault(self) -> Optional[str]:
"""
ARM id of the key vault associated with this workspace. This cannot be changed once the workspace has been created
"""
return pulumi.get(self, "key_vault")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notebookInfo")
def notebook_info(self) -> 'outputs.NotebookResourceInfoResponse':
"""
The notebook info of Azure ML workspace.
"""
return pulumi.get(self, "notebook_info")
@property
@pulumi.getter(name="primaryUserAssignedIdentity")
def primary_user_assigned_identity(self) -> Optional[str]:
"""
The user assigned identity resource id that represents the workspace identity.
"""
return pulumi.get(self, "primary_user_assigned_identity")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionResponse']:
"""
The list of private endpoint connections in the workspace.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="privateLinkCount")
def private_link_count(self) -> int:
"""
Count of private connections in the workspace
"""
return pulumi.get(self, "private_link_count")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The current deployment state of workspace resource. The provisioningState is to indicate states for resource provisioning.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="serviceManagedResourcesSettings")
def service_managed_resources_settings(self) -> Optional['outputs.ServiceManagedResourcesSettingsResponse']:
"""
The service managed resource settings.
"""
return pulumi.get(self, "service_managed_resources_settings")
@property
@pulumi.getter(name="serviceProvisionedResourceGroup")
def service_provisioned_resource_group(self) -> str:
"""
The name of the managed resource group created by workspace RP in customer subscription if the workspace is CMK workspace
"""
return pulumi.get(self, "service_provisioned_resource_group")
@property
@pulumi.getter(name="sharedPrivateLinkResources")
def shared_private_link_resources(self) -> Optional[Sequence['outputs.SharedPrivateLinkResourceResponse']]:
"""
The list of shared private link resources in this workspace.
"""
return pulumi.get(self, "shared_private_link_resources")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="storageAccount")
def storage_account(self) -> Optional[str]:
"""
ARM id of the storage account associated with this workspace. This cannot be changed once the workspace has been created
"""
return pulumi.get(self, "storage_account")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The tenant id associated with this workspace.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> str:
"""
The immutable id associated with this workspace.
"""
return pulumi.get(self, "workspace_id")
class AwaitableGetWorkspaceResult(GetWorkspaceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWorkspaceResult(
allow_public_access_when_behind_vnet=self.allow_public_access_when_behind_vnet,
application_insights=self.application_insights,
container_registry=self.container_registry,
description=self.description,
discovery_url=self.discovery_url,
encryption=self.encryption,
friendly_name=self.friendly_name,
hbi_workspace=self.hbi_workspace,
id=self.id,
identity=self.identity,
image_build_compute=self.image_build_compute,
key_vault=self.key_vault,
location=self.location,
name=self.name,
notebook_info=self.notebook_info,
primary_user_assigned_identity=self.primary_user_assigned_identity,
private_endpoint_connections=self.private_endpoint_connections,
private_link_count=self.private_link_count,
provisioning_state=self.provisioning_state,
service_managed_resources_settings=self.service_managed_resources_settings,
service_provisioned_resource_group=self.service_provisioned_resource_group,
shared_private_link_resources=self.shared_private_link_resources,
sku=self.sku,
storage_account=self.storage_account,
system_data=self.system_data,
tags=self.tags,
tenant_id=self.tenant_id,
type=self.type,
workspace_id=self.workspace_id)
def get_workspace(resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceResult:
"""
An object that represents a machine learning workspace.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20210301preview:getWorkspace', __args__, opts=opts, typ=GetWorkspaceResult).value
return AwaitableGetWorkspaceResult(
allow_public_access_when_behind_vnet=__ret__.allow_public_access_when_behind_vnet,
application_insights=__ret__.application_insights,
container_registry=__ret__.container_registry,
description=__ret__.description,
discovery_url=__ret__.discovery_url,
encryption=__ret__.encryption,
friendly_name=__ret__.friendly_name,
hbi_workspace=__ret__.hbi_workspace,
id=__ret__.id,
identity=__ret__.identity,
image_build_compute=__ret__.image_build_compute,
key_vault=__ret__.key_vault,
location=__ret__.location,
name=__ret__.name,
notebook_info=__ret__.notebook_info,
primary_user_assigned_identity=__ret__.primary_user_assigned_identity,
private_endpoint_connections=__ret__.private_endpoint_connections,
private_link_count=__ret__.private_link_count,
provisioning_state=__ret__.provisioning_state,
service_managed_resources_settings=__ret__.service_managed_resources_settings,
service_provisioned_resource_group=__ret__.service_provisioned_resource_group,
shared_private_link_resources=__ret__.shared_private_link_resources,
sku=__ret__.sku,
storage_account=__ret__.storage_account,
system_data=__ret__.system_data,
tags=__ret__.tags,
tenant_id=__ret__.tenant_id,
type=__ret__.type,
workspace_id=__ret__.workspace_id)
@_utilities.lift_output_func(get_workspace)
def get_workspace_output(resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkspaceResult]:
"""
An object that represents a machine learning workspace.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
...
| [
"[email protected]"
] | |
07e35864dbee7959b626d634e72014526b2f9654 | e461333f1253861829c82a92e345fa7d72518ef6 | /blog/models.py | a1ef570e0f71f70be6101442e4128506ac788dd0 | [] | no_license | slowlearner99/ideal-waffle | 98b548f58ea3da08ef797e7b04ffa1e5f2a45a05 | dc20454580db5807e0b83d667fb11c755fecaf13 | refs/heads/master | 2021-05-02T06:30:52.141729 | 2018-02-09T06:20:11 | 2018-02-09T06:20:11 | 120,859,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    published_date = models.DateTimeField(blank=True, null=True)
    def publish(self):
        self.published_date = timezone.now()
        self.save()  # note the call -- a bare "self.save" would be a silent no-op
    def __str__(self):  # double underscores; "_str_" would never be invoked by Python
        return self.title
# Create your models here.
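# Minimal usage sketch (illustrative; assumes an existing auth.User instance):
#   post = Post(author=user, title='First post', text='Hello')
#   post.publish()  # stamps published_date and saves the row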
| [
"[email protected]"
] | |
0affee339d945f6e06ccff51066566f806486aec | 0f9c9e4c60f28aa00aff8b80e1e4c142c61d24ce | /Python/SwordOffer/reverse_sentence.py | 709437a5803ddb710b5e629aea81631700057e99 | [] | no_license | shouliang/Development | c56fcc69e658393c138b63b507b96c48232128d5 | b7e3b02c50d54515e584cb18dff83109224245d0 | refs/heads/master | 2020-03-22T09:14:51.070228 | 2019-08-29T02:50:26 | 2019-08-29T02:50:26 | 139,825,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | # coding=utf-8
'''
Problem description:
A new employee, Fish, recently joined Nowcoder. Every morning he takes an
English magazine and copies sentences into a notebook. His colleague Cat found
the notes interesting but could not make sense of them -- for example,
"student. a am I". It turned out Fish had reversed the order of the words; the
correct sentence is "I am a student.". Cat is no good at reversing word order
by hand -- can you help him?
Approach: first reverse the whole sentence, then reverse each word in place.
'''
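# e.g. 'I am a student.' -> reverse the whole string -> '.tneduts a ma I'
#      -> reverse each word in place -> 'student. a am I'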
class Solution:
def ReverseSentence(self, s):
if not s:
return s
s = list(s)
        self.Reverse(s, 0, len(s) - 1)
        # two pointers delimiting the current word to be reversed
start, end = 0, 0
while start < len(s) and end < len(s):
if s[end] == ' ':
                self.Reverse(s, start, end - 1)
                end += 1
                start = end
            else:
                end += 1
        # reverse the final word, which the loop misses when the sentence
        # does not end with a space
        self.Reverse(s, start, end - 1)
        return "".join(s)
    def Reverse(self, s, start, end):
while start < end:
s[start], s[end] = s[end], s[start]
start += 1
end -= 1
s = 'I am a student.'
solution = Solution()
print(solution.ReverseSentence(s))
| [
"[email protected]:node/hunqing.git"
] | [email protected]:node/hunqing.git |
bb4f8658f4ac81f115ed1fe3417ce1cf3a5b9875 | a1c69d4d5adb2aef2e314a02899d5ef6500b999c | /devel/lib/python2.7/dist-packages/spencer_vision_msgs/msg/_PersonImages.py | d23d183b5b590ae07551e760faa12e75a3d941e2 | [] | no_license | JayZejianZhou/Multiple_motion_planning | 9b112f77ec31c0b1ddc92a7450f620d637d73b26 | 8bf06d518ecf686b8ed1187398f9084118f35677 | refs/heads/master | 2020-05-25T03:09:27.402451 | 2017-03-28T18:49:03 | 2017-03-28T18:49:03 | 84,904,900 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,027 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from spencer_vision_msgs/PersonImages.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import spencer_vision_msgs.msg
import sensor_msgs.msg
import std_msgs.msg
class PersonImages(genpy.Message):
_md5sum = "6c5881059a7a7f9c813cdc2429f1b5cf"
_type = "spencer_vision_msgs/PersonImages"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# Message describing an array of depth or RGB images containing a part of a person (e.g. head, face, full body...), which is usually encoded in the topic title
#
std_msgs/Header header
PersonImage[] elements
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: spencer_vision_msgs/PersonImage
# Message describing a depth or RGB image containing a part of a person (e.g. head, face, full body...), which is usually encoded in the topic title
#
uint64 detection_id
sensor_msgs/Image image
================================================================================
MSG: sensor_msgs/Image
# This message contains an uncompressed image
# (0, 0) is at top-left corner of image
#
Header header # Header timestamp should be acquisition time of image
# Header frame_id should be optical frame of camera
# origin of frame should be optical center of cameara
# +x should point to the right in the image
# +y should point down in the image
# +z should point into to plane of the image
# If the frame_id here and the frame_id of the CameraInfo
# message associated with the image conflict
# the behavior is undefined
uint32 height # image height, that is, number of rows
uint32 width # image width, that is, number of columns
# The legal values for encoding are in file src/image_encodings.cpp
# If you want to standardize a new string format, join
# [email protected] and send an email proposing a new encoding.
string encoding # Encoding of pixels -- channel meaning, ordering, size
# taken from the list of strings in include/sensor_msgs/image_encodings.h
uint8 is_bigendian # is this data bigendian?
uint32 step # Full row length in bytes
uint8[] data # actual matrix data, size is (step * rows)
"""
__slots__ = ['header','elements']
_slot_types = ['std_msgs/Header','spencer_vision_msgs/PersonImage[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,elements
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PersonImages, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.elements is None:
self.elements = []
else:
self.header = std_msgs.msg.Header()
self.elements = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.elements)
buff.write(_struct_I.pack(length))
for val1 in self.elements:
buff.write(_struct_Q.pack(val1.detection_id))
_v1 = val1.image
_v2 = _v1.header
buff.write(_struct_I.pack(_v2.seq))
_v3 = _v2.stamp
_x = _v3
buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
_x = _v2.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = _v1
buff.write(_struct_2I.pack(_x.height, _x.width))
_x = _v1.encoding
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = _v1
buff.write(_struct_BI.pack(_x.is_bigendian, _x.step))
_x = _v1.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.elements is None:
self.elements = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.elements = []
for i in range(0, length):
val1 = spencer_vision_msgs.msg.PersonImage()
start = end
end += 8
(val1.detection_id,) = _struct_Q.unpack(str[start:end])
_v4 = val1.image
_v5 = _v4.header
start = end
end += 4
(_v5.seq,) = _struct_I.unpack(str[start:end])
_v6 = _v5.stamp
_x = _v6
start = end
end += 8
(_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v5.frame_id = str[start:end].decode('utf-8')
else:
_v5.frame_id = str[start:end]
_x = _v4
start = end
end += 8
(_x.height, _x.width,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v4.encoding = str[start:end].decode('utf-8')
else:
_v4.encoding = str[start:end]
_x = _v4
start = end
end += 5
(_x.is_bigendian, _x.step,) = _struct_BI.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
_v4.data = str[start:end]
self.elements.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.elements)
buff.write(_struct_I.pack(length))
for val1 in self.elements:
buff.write(_struct_Q.pack(val1.detection_id))
_v7 = val1.image
_v8 = _v7.header
buff.write(_struct_I.pack(_v8.seq))
_v9 = _v8.stamp
_x = _v9
buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
_x = _v8.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = _v7
buff.write(_struct_2I.pack(_x.height, _x.width))
_x = _v7.encoding
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = _v7
buff.write(_struct_BI.pack(_x.is_bigendian, _x.step))
_x = _v7.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.elements is None:
self.elements = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.elements = []
for i in range(0, length):
val1 = spencer_vision_msgs.msg.PersonImage()
start = end
end += 8
(val1.detection_id,) = _struct_Q.unpack(str[start:end])
_v10 = val1.image
_v11 = _v10.header
start = end
end += 4
(_v11.seq,) = _struct_I.unpack(str[start:end])
_v12 = _v11.stamp
_x = _v12
start = end
end += 8
(_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v11.frame_id = str[start:end].decode('utf-8')
else:
_v11.frame_id = str[start:end]
_x = _v10
start = end
end += 8
(_x.height, _x.width,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v10.encoding = str[start:end].decode('utf-8')
else:
_v10.encoding = str[start:end]
_x = _v10
start = end
end += 5
(_x.is_bigendian, _x.step,) = _struct_BI.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
_v10.data = str[start:end]
self.elements.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_Q = struct.Struct("<Q")
_struct_3I = struct.Struct("<3I")
_struct_2I = struct.Struct("<2I")
_struct_BI = struct.Struct("<BI")
| [
"[email protected]"
] | |
4303ab08ba740fd8b2d44c1b55038746ee90d0b9 | 7ba55da528467cc7d15edec37b955ebe9f9176f9 | /docs/examples/03_backends_ros/files/04_plan_motion.py | 406c5786f88eacfb3d123c54d0e6f04b730420dc | [
"MIT"
] | permissive | xarthurx/compas_fab | 71095cdda107084b583e53e055450fe510a53c6a | 64119228184953aef7899f6853b2ade2296fedc6 | refs/heads/master | 2023-02-08T10:13:16.133155 | 2022-11-09T13:02:51 | 2022-11-09T13:02:51 | 183,207,453 | 0 | 0 | MIT | 2019-05-27T07:31:38 | 2019-04-24T10:36:54 | Python | UTF-8 | Python | false | false | 1,291 | py | import math
from compas.geometry import Frame
from compas_fab.backends import RosClient
with RosClient() as client:
robot = client.load_robot()
assert robot.name == 'ur5_robot'
frame = Frame([0.4, 0.3, 0.4], [0, 1, 0], [0, 0, 1])
tolerance_position = 0.001
tolerance_axes = [math.radians(1)] * 3
start_configuration = robot.zero_configuration()
start_configuration.joint_values = (-3.530, 3.830, -0.580, -3.330, 4.760, 0.000)
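    # joint values are given in radians, one per joint of the six-axis UR5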
group = robot.main_group_name
# create goal constraints from frame
goal_constraints = robot.constraints_from_frame(frame,
tolerance_position,
tolerance_axes,
group)
trajectory = robot.plan_motion(goal_constraints,
start_configuration,
group,
options=dict(
planner_id='RRTConnect'
))
print("Computed kinematic path with %d configurations." % len(trajectory.points))
print("Executing this path at full speed would take approx. %.3f seconds." % trajectory.time_from_start)
| [
"[email protected]"
] | |
2f7185ea5b869ce91ea3ac63db95ddf2cfd72921 | 734458ec2b0f573cdd6a5e388f870e036af01c3a | /python/ThirteenTeV/SemiVisibleJets/generateScan.py | d0430f6a5d84fd16a067f22a06c399221ee579f7 | [] | no_license | knash/genproductions | 987ec8d549aba879d0cb8b3f32206d19f139d3ef | 328e10ae97248ece03e548c7b73453e0ff136e92 | refs/heads/master | 2021-01-18T03:12:09.682945 | 2019-07-03T11:11:57 | 2019-07-03T11:11:57 | 85,840,586 | 0 | 1 | null | 2019-06-06T09:14:09 | 2017-03-22T14:53:04 | Python | UTF-8 | Python | false | false | 7,155 | py | import FWCore.ParameterSet.Config as cms
from Configuration.GenProduction.ThirteenTeV.SemiVisibleJets.svjHelper import svjHelper
from collections import OrderedDict
from copy import deepcopy
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
# implementation of recursive loop over any number of dimensions
# creates grid of all possible combinations of parameter values
def varyAll(pos,paramlist,sig,sigs):
param = paramlist[pos][0]
vals = paramlist[pos][1]
for v in vals:
stmp = sig[:]+[v]
# check if last param
if pos+1==len(paramlist):
sigs.add(tuple(stmp))
else:
varyAll(pos+1,paramlist,stmp,sigs)
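# e.g. with sigs = set(), varyAll(0, [("a", [1, 2]), ("b", [3])], [], sigs)
# leaves sigs == {(1, 3), (2, 3)}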
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-y","--year", dest="year", type=int, default=2016, help="which year to simulate (specifies generator tune)")
parser.add_argument("-n","--num", dest="num", type=int, default=20000, help="number of events for model point w/ weight 1.0 (before filter)")
parser.add_argument("-a","--acc", dest="acc", type=float, default=0.0, help="increase number of events based on acceptance up to this maximum factor")
args = parser.parse_args()
# specification of tunes for each year
if args.year==2016:
tune_loc = "Configuration.Generator.Pythia8CUEP8M1Settings_cfi"
tune_block = "pythia8CUEP8M1SettingsBlock"
tune_suff = "TuneCUETP8M1_13TeV_pythia8"
elif args.year==2017 or args.year==2018:
tune_loc = "Configuration.Generator.MCTunes2017.PythiaCP2Settings_cfi"
tune_block = "pythia8CP2SettingsBlock"
tune_suff = "TuneCP2_13TeV_pythia8"
else:
parser.error("Unknown year: "+str(args.year))
# complete set of parameter values
params = OrderedDict([
("mZprime", range(1500,5200,200)),
("mDark", [1,5] + range(10,110,10)),
("rinv", [float(x)/10 for x in range(0,11,1)]),
("alpha", ["peak", "high", "low"]),
])
# convert named alpha values to numerical
alpha_vals = {
"peak": -2,
"high": -1,
"low": -3,
}
# acceptance values vs. each param
acc = OrderedDict([
("mZprime", ([500,600,700,800,900,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000,2100,2200,2300,2400,2500,2600,2700,2800,2900,3000,3100,3200,3300,3400,3500,3600,3700,3800,3900,4000,4100,4200,4300,4400,4500],[4.1e-05,0.00012,0.00012,4.1e-05,0.00027,0.0003,0.00035,0.00033,0.00053,0.0011,0.0014,0.0042,0.0089,0.015,0.023,0.031,0.037,0.047,0.051,0.057,0.061,0.067,0.07,0.074,0.079,0.08,0.081,0.084,0.088,0.089,0.09,0.093,0.093,0.092,0.095,0.098,0.099,0.097,0.098,0.1,0.1])),
("mDark", ([1,5,10,20,30,40,50,60,70,80,90,100],[0.084,0.076,0.074,0.08,0.08,0.079,0.08,0.078,0.076,0.076,0.073,0.071])),
("rinv", ([0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1],[0.00013,0.03,0.06,0.08,0.089,0.085,0.067,0.042,0.02,0.0054,0.0001])),
("alpha", ([-2,-1,-3],[0.08,0.076,0.099])),
])
# acceptance w/ benchmark param values
base_acc = 0.08
# function to use pair of arrays as lookup table
def find_nearest(val,xy):
x_array = np.asarray(xy[0])
idx = (np.abs(x_array - val)).argmin()
return xy[1][idx]
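# e.g. find_nearest(0.26, ([0.1, 0.2, 0.3], [10, 20, 30])) -> 30 (0.3 is the nearest x)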
# function to retrieve multiplied relative acceptance
def get_acc(point):
this_acc = 1.0
for param,pval in point.iteritems():
pval = alpha_vals[pval] if param=="alpha" else pval
this_acc *= find_nearest(pval,acc[param])/base_acc
return this_acc
# set to accumulate all scan points
sigs = set()
# 2D scans vs. rinv
params_rinv = deepcopy(params)
params_rinv["mDark"] = [20]
params_rinv["alpha"] = ["peak"]
varyAll(0,list(params_rinv.iteritems()),[],sigs)
# 2D scans vs. mDark
params_mDark = deepcopy(params)
params_mDark["rinv"] = [0.3]
params_mDark["alpha"] = ["peak"]
varyAll(0,list(params_mDark.iteritems()),[],sigs)
# 2D scans vs. alpha
params_alpha = deepcopy(params)
params_alpha["rinv"] = [0.3]
params_alpha["mDark"] = [20]
varyAll(0,list(params_alpha.iteritems()),[],sigs)
# format first part of output config
first_part = """
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from {0} import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
RandomizedParameters = cms.VPSet(),
)
""".format(tune_loc)
# append process parameters for each model point
helper = svjHelper()
points = []
numevents_before = 0
numevents_after = 0
base_filter_eff = 0.5
for point in sorted(sigs):
mZprime = point[0]
mDark = point[1]
rinv = point[2]
alpha = point[3]
weight = 1.0
filter_eff = base_filter_eff
# down-weight rinv=0 b/c all events pass filter
if rinv==0.0:
weight = 0.5
filter_eff = 1.0
# account for relative acceptance
if args.acc > 1:
this_acc = get_acc(OrderedDict([("mZprime",mZprime),("mDark",mDark),("rinv",rinv),("alpha",alpha)]))
min_weight = weight
max_weight = weight*args.acc
weight = np.clip(weight/this_acc,min_weight,max_weight)
helper.setModel(mZprime,mDark,rinv,alpha)
pdict = {
'weight': weight,
'processParameters': helper.getPythiaSettings(),
'name': helper.getOutName(outpre="SVJ",outsuff=""),
}
points.append(pdict)
numevents_before += args.num*weight
numevents_after += args.num*weight*filter_eff
# some info on the scan
print("This scan will contain "+str(len(sigs))+" model points, "+str(int(numevents_before))+" events before filter, "+str(int(numevents_after))+" events after filter")
# format last part of config (loop over all points)
last_part = """
for point in points:
basePythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
{0},
processParameters = cms.vstring(point['processParameters']),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'{1}',
'processParameters',
)
)
generator.RandomizedParameters.append(
cms.PSet(
ConfigWeight = cms.double(point['weight']),
ConfigDescription = cms.string(point['name']),
PythiaParameters = basePythiaParameters,
),
)
darkhadronZ2filter = cms.EDFilter("MCParticleModuloFilter",
moduleLabel = cms.InputTag('generator','unsmeared'),
particleIDs = cms.vint32(51,53),
multipleOf = cms.uint32(4),
absID = cms.bool(True),
)
darkquarkFilter = cms.EDFilter("MCParticleModuloFilter",
moduleLabel = cms.InputTag('generator','unsmeared'),
particleIDs = cms.vint32(4900101),
multipleOf = cms.uint32(2),
absID = cms.bool(True),
min = cms.uint32(2),
status = cms.int32(23),
)
ProductionFilterSequence = cms.Sequence(generator+darkhadronZ2filter+darkquarkFilter)
""".format(tune_block,tune_block.replace("Block",""))
with open("SVJ_Scan_"+str(args.year)+"_"+tune_suff+"_cff.py",'w') as ofile:
ofile.write(first_part)
ofile.write("\npoints = "+str(points)+"\n")
ofile.write(last_part)
| [
"[email protected]"
] | |
c68f82f56196ab515b57e4b8dd8e64e11aee61c6 | 3003a8663135aa10f5a152a8642bc6ab270995b9 | /ggCloudSDK/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/compute/subcommands/copy_files.py | c497c185cda19853ba46968bda156d1afe8bafc5 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/packmybot | 1b4d199b36d196e5e769a781b520019bb4d0bdbc | 92de1e72cfc51b41447366ffc81a9dcd9a5e7870 | refs/heads/master | 2022-11-25T23:46:06.946645 | 2015-10-22T08:22:04 | 2015-10-22T08:22:04 | 282,313,675 | 0 | 0 | null | 2020-07-24T20:50:10 | 2020-07-24T20:50:10 | null | UTF-8 | Python | false | false | 5,907 | py | # Copyright 2014 Google Inc. All Rights Reserved.
"""Implements the command for copying files from and to virtual machines."""
import collections
import getpass
import logging
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
from googlecloudsdk.compute.lib import ssh_utils
RemoteFile = collections.namedtuple(
'RemoteFile', ['user', 'instance_name', 'file_path'])
LocalFile = collections.namedtuple(
'LocalFile', ['file_path'])
class CopyFiles(ssh_utils.BaseSSHCLICommand):
"""Copy files to and from Google Compute Engine virtual machines."""
@staticmethod
def Args(parser):
ssh_utils.BaseSSHCLICommand.Args(parser)
parser.add_argument(
'sources',
help='Specifies a source file.',
metavar='[[USER@]INSTANCE:]SRC',
nargs='+')
parser.add_argument(
'destination',
help='Specifies a destination for the source files.',
metavar='[[USER@]INSTANCE:]DEST')
# TODO(user): Use utils.AddZoneFlag when copy_files supports URIs
zone = parser.add_argument(
'--zone',
help='The zone of the instance to copy files to/from.',
action=actions.StoreProperty(properties.VALUES.compute.zone))
zone.detailed_help = (
'The zone of the instance to copy files to/from. If omitted, '
'you will be prompted to select a zone.')
def Run(self, args):
super(CopyFiles, self).Run(args)
file_specs = []
# Parses the positional arguments.
for arg in args.sources + [args.destination]:
# If the argument begins with "./" or "/", then we are dealing
# with a local file that can potentially contain colons, so we
# avoid splitting on colons. The case of remote files containing
# colons is handled below by splitting only on the first colon.
if arg.startswith('./') or arg.startswith('/'):
file_specs.append(LocalFile(arg))
continue
host_file_parts = arg.split(':', 1)
if len(host_file_parts) == 1:
file_specs.append(LocalFile(host_file_parts[0]))
else:
user_host, file_path = host_file_parts
user_host_parts = user_host.split('@', 1)
if len(user_host_parts) == 1:
user = getpass.getuser()
instance = user_host_parts[0]
else:
user, instance = user_host_parts
file_specs.append(RemoteFile(user, instance, file_path))
logging.debug('Normalized arguments: %s', file_specs)
# Validates the positional arguments.
# TODO(user): Look into relaxing these conditions.
sources = file_specs[:-1]
destination = file_specs[-1]
if isinstance(destination, LocalFile):
for source in sources:
if isinstance(source, LocalFile):
raise exceptions.ToolException(
'All sources must be remote files when the destination '
'is local.')
else: # RemoteFile
for source in sources:
if isinstance(source, RemoteFile):
raise exceptions.ToolException(
'All sources must be local files when the destination '
'is remote.')
instances = set()
for file_spec in file_specs:
if isinstance(file_spec, RemoteFile):
instances.add(file_spec.instance_name)
if len(instances) > 1:
raise exceptions.ToolException(
'Copies must involve exactly one virtual machine instance; '
'your invocation refers to [{0}] instances: [{1}].'.format(
len(instances), ', '.join(sorted(instances))))
instance_ref = self.CreateZonalReference(instances.pop(), args.zone)
external_ip_address = self.GetInstanceExternalIpAddress(instance_ref)
# Builds the scp command.
scp_args = [self.scp_executable]
if not args.plain:
scp_args.extend(self.GetDefaultFlags())
scp_args.append('-r')
for file_spec in file_specs:
if isinstance(file_spec, LocalFile):
scp_args.append(file_spec.file_path)
else:
scp_args.append('{0}:{1}'.format(
ssh_utils.UserHost(file_spec.user, external_ip_address),
file_spec.file_path))
self.ActuallyRun(args, scp_args, user, external_ip_address)
CopyFiles.detailed_help = {
'brief': 'Copy files to and from Google Compute Engine virtual machines',
'DESCRIPTION': """\
*{command}* copies files between a virtual machine instance
and your local machine.
To denote a remote file, prefix the file name with the virtual
machine instance name (e.g., _example-instance_:~/_FILE_). To
denote a local file, do not add a prefix to the file name
(e.g., ~/_FILE_). For example, to copy a remote directory
to your local host, run:
$ {command} example-instance:~/REMOTE-DIR ~/LOCAL-DIR --zone us-central1-a
In the above example, ``~/REMOTE-DIR'' from ``example-instance'' is
copied into the ~/_LOCAL-DIR_ directory.
Conversely, files from your local computer can be copied to a
virtual machine:
$ {command} ~/LOCAL-FILE-1 ~/LOCAL-FILE-2 example-instance:~/REMOTE-DIR --zone us-central1-a
If a file contains a colon (``:''), you must specify it by
either using an absolute path or a path that begins with
``./''.
Under the covers, *scp(1)* is used to facilitate the transfer.
When the destination is local, all sources must be the same
virtual machine instance. When the destination is remote, all
source must be local.
This command ensures that the user's public SSH key is present
in the project's metadata. If the user does not have a public
SSH key, one is generated using *ssh-keygen(1)* (if the `--quiet`
flag is given, the generated key will have an empty passphrase).
""",
}
| [
"[email protected]"
] | |
9b6a45f40e12d2ecc6562977a6042f61788e25dd | 3716e91c0a18a2cf0b5807cc673d95a7539b008c | /DungeonsKitgard/TheRaisedSword.py | bd20ffd2d9632d3bba2ce439aa466531ce379317 | [] | no_license | kiwiapple87/CodeCombat-1 | 47f0fa6d75d6d3e9fb9c28feeb6fe2648664c1aa | ce0201e5ed099193ca40afd3b7abeee5a3732387 | refs/heads/master | 2021-05-01T16:38:03.575842 | 2016-08-25T11:13:26 | 2016-08-25T11:13:26 | 66,552,813 | 1 | 0 | null | 2016-08-25T11:39:20 | 2016-08-25T11:39:18 | null | UTF-8 | Python | false | false | 168 | py | # http://codecombat.com/play/level/the-raised-sword
self.attack("Rig")
self.attack("Rig")
self.attack("Gurt")
self.attack("Gurt")
self.attack("Ack")
self.attack("Ack")
| [
"[email protected]"
] | |
ceb6062ff1dac18c07651c2b08736a9dc730fd51 | 0a613ccff34341510e9d8ac5e7c03ec991babfc8 | /pytorch_widedeep/models/wide.py | 24db9c9c539a9f5dae5152178bf0d2ccf1e74544 | [
"MIT"
] | permissive | sailfish009/pytorch-widedeep | f0e507e00566207b1e455d250eb67ac71d2df3c7 | b487b06721c5abe56ac68c8a38580b95e0897fd4 | refs/heads/master | 2023-05-01T05:24:39.217202 | 2021-04-16T15:17:48 | 2021-04-16T15:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | import math
import torch
from torch import nn
from pytorch_widedeep.wdtypes import * # noqa: F403
class Wide(nn.Module):
def __init__(self, wide_dim: int, pred_dim: int = 1):
r"""wide (linear) component
Linear model implemented via an Embedding layer connected to the output
neuron(s).
Parameters
-----------
wide_dim: int
size of the Embedding layer. `wide_dim` is the summation of all the
individual values for all the features that go through the wide
component. For example, if the wide component receives 2 features with
5 individual values each, `wide_dim = 10`
pred_dim: int, default = 1
            size of the output tensor containing the predictions
Attributes
-----------
wide_linear: :obj:`nn.Module`
the linear layer that comprises the wide branch of the model
Examples
--------
>>> import torch
>>> from pytorch_widedeep.models import Wide
>>> X = torch.empty(4, 4).random_(6)
>>> wide = Wide(wide_dim=X.unique().size(0), pred_dim=1)
>>> out = wide(X)
"""
super(Wide, self).__init__()
        # Embeddings: wide_dim + 1 because index 0 is reserved for padding/unseen categories.
self.wide_linear = nn.Embedding(wide_dim + 1, pred_dim, padding_idx=0)
# (Sum(Embedding) + bias) is equivalent to (OneHotVector + Linear)
self.bias = nn.Parameter(torch.zeros(pred_dim))
self._reset_parameters()
def _reset_parameters(self) -> None:
r"""initialize Embedding and bias like nn.Linear. See `original
implementation
<https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear>`_.
"""
nn.init.kaiming_uniform_(self.wide_linear.weight, a=math.sqrt(5))
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.wide_linear.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, X: Tensor) -> Tensor: # type: ignore
r"""Forward pass. Simply connecting the Embedding layer with the ouput
neuron(s)"""
out = self.wide_linear(X.long()).sum(dim=1) + self.bias
return out
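
# Illustrative check (added; not part of the library): summing selected
# embedding rows plus the bias is the same as multiplying a multi-hot vector
# by the embedding weights. The toy sizes/indices below are hypothetical;
# index 0 is avoided because _reset_parameters re-initialises the padding row.
if __name__ == "__main__":
    torch.manual_seed(0)
    wide = Wide(wide_dim=6, pred_dim=1)
    X = torch.tensor([[1, 3, 5, 2]])
    out_embedding = wide(X)
    multi_hot = torch.zeros(1, 7)  # wide_dim + 1 columns
    multi_hot[0, [1, 2, 3, 5]] = 1.0
    out_linear = multi_hot @ wide.wide_linear.weight + wide.bias
    assert torch.allclose(out_embedding, out_linear, atol=1e-6)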
| [
"[email protected]"
] | |
658ae6ce12ab34713850e57285d9f752a27cf1c8 | ca231a325e8f4c18d50d89ffa7eec993d4cc68c3 | /PythonZumbis/lista4/questao01.py | 52ef9d134fbcb8049241fa246ac2f38051db434d | [] | no_license | HugoPorto/PythonCodes | 8e1597999ccd34ffa86df5ae7e91111d77dc7a22 | 539ad6891cbd49a2c011349f843ab710aad2993a | refs/heads/master | 2022-02-13T05:48:24.633750 | 2017-09-12T15:44:06 | 2017-09-12T15:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from random import randint
lista = [
randint(1, 100), randint(1, 100), randint(1, 100),
randint(1, 100), randint(1, 100), randint(1, 100),
randint(1, 100), randint(1, 100), randint(1, 100),
randint(1, 100), randint(1, 100)]
maior = 0
menor = 999
i = 0
while i < len(lista):  # the list holds 11 random values, so cover all of them
if lista[i] > maior:
maior = lista[i]
if lista[i] < menor:
menor = lista[i]
i += 1
print "Maior: %d, menor %d" % (maior, menor)
| [
"[email protected]"
] | |
f3f5edd99ffd8c25d6f8b7f8f256e0d8b3db914b | b72c37e3ccda507b231649cddd5c7845c6c34ba1 | /PythonBasic/Day10/HomeWork_func.py | 616a95d2e92ed813880bc2d38a1c2920d8ab7b03 | [] | no_license | ljrdemail/AID1810 | 51c61c255b5c5efc1dc642b46691a614daedd85e | b417bd831bc1550ab953ce7ca23f54e34b8b2692 | refs/heads/master | 2020-04-24T09:45:14.781612 | 2019-02-21T11:26:49 | 2019-02-21T11:26:49 | 171,866,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py |
def mysum(n):
# sum = 0
# num = int(n)
# for i in range(1, num + 1):
# sum += i
# return sum
return sum(range(1,n+1))
# return sum(list(range(1,n+1)))
def myfac(n):
fac = 1
num = int(n)
for i in range(1, num + 1):
fac *= i
return fac
print(myfac(10))
def mypow(n):
# sum = 0
# num = int(n)
# for i in range(1, num + 1):
# sum += i ** i
# return sum
return sum(map(lambda x:x**x,range(1,n+1)))
| [
"root"
] | root |
c5f10f045e2471562135208acf2377b8d14c9942 | 403de45c6626f2c40d2d48d64f4c94a728bb7b09 | /vocoder_eva/eval.py | baa254f50dd11cbd3bae82919f435fa3ef8f150a | [] | no_license | exeex/vocoder_eva | f95d969261adaa63ec01270239496eec3c9adca5 | d81dc01768da20c208df00bfb78d90c52d93c0a8 | refs/heads/master | 2020-09-14T01:04:27.189683 | 2020-01-20T14:08:14 | 2020-01-20T14:08:14 | 222,961,890 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,992 | py | import librosa
import numpy as np
import pyworld as pw
import matplotlib.pyplot as plt
import pysptk
ln10_inv = 1 / np.log(10)
def pad_to(x, target_len):
pad_len = target_len - len(x)
if pad_len <= 0:
return x[:target_len]
else:
return np.pad(x, (0, pad_len), 'constant', constant_values=(0, 0))
def eval_snr(x_r, x_s):
    # TODO: slide x_s against x_r and keep the maximum SNR; the original paper does this sliding alignment, but it is not implemented here.
return 10 * np.log10(np.sum(x_s ** 2) / np.sum((x_s - x_r) ** 2))
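
# Hedged sketch (added) of the sliding-alignment TODO above; not part of the
# original evaluation code. It shifts x_s over a small window (wrap-around via
# np.roll) and keeps the best SNR. max_shift is an assumed default, not a
# value taken from the paper.
def eval_snr_aligned(x_r, x_s, max_shift=32):
    best = -np.inf
    for shift in range(-max_shift, max_shift + 1):
        shifted = np.roll(x_s, shift)
        best = max(best, 10 * np.log10(np.sum(shifted ** 2) / np.sum((shifted - x_r) ** 2)))
    return best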
def eval_MCD(x_r, x_s):
    # TODO: verify the computed values against the original paper.
c_r = librosa.feature.mfcc(x_r)
c_s = librosa.feature.mfcc(x_s)
# plt.imshow(c_r)
# plt.show()
# plt.imshow(c_s)
# plt.show()
#
# plt.plot(c_r[:, 20])
# plt.plot(c_s[:, 40])
# plt.show()
# print((c_r- c_s))
temp = 2 * np.sum((c_r - c_s) ** 2, axis=0)
# print(temp)
return 10 * ln10_inv * (temp ** 0.5)
def plot_f0(*files, title=None):
for file in files:
if isinstance(file, tuple):
file_path, label = file
else:
file_path = file
label = None
aud, sr = librosa.load(file_path, sr=None)
f0 = pysptk.sptk.swipe(aud.astype(np.double), sr, hopsize=128)
plt.plot(f0, label=label)
plt.ylabel('f0(Hz)')
plt.xlabel('frame')
if title:
plt.title(title)
plt.legend(loc='upper right')
plt.show()
def eval_rmse_f0(x_r, x_s, sr, frame_len='5', method='swipe', tone_shift=None):
    # TODO: make the frame length (ms) / hop_size configurable.
if method == 'harvest':
f0_r, t = pw.harvest(x_r.astype(np.double), sr, frame_period=50)
f0_s, t = pw.harvest(x_s.astype(np.double), sr, frame_period=50)
elif method == 'dio':
f0_r, t = pw.dio(x_r.astype(np.double), sr, frame_period=50)
f0_s, t = pw.dio(x_s.astype(np.double), sr, frame_period=50)
elif method == 'swipe':
f0_r = pysptk.sptk.swipe(x_r.astype(np.double), sr, hopsize=128)
f0_s = pysptk.sptk.swipe(x_s.astype(np.double), sr, hopsize=128)
elif method == 'rapt':
f0_r = pysptk.sptk.rapt(x_r.astype(np.double), sr, hopsize=128)
f0_s = pysptk.sptk.rapt(x_s.astype(np.double), sr, hopsize=128)
else:
raise ValueError('no such f0 exract method')
# length align
f0_s = pad_to(f0_s, len(f0_r))
    # make unvoiced / voiced frame masks
f0_r_uv = (f0_r == 0) * 1
f0_r_v = 1 - f0_r_uv
f0_s_uv = (f0_s == 0) * 1
f0_s_v = 1 - f0_s_uv
tp_mask = f0_r_v * f0_s_v
tn_mask = f0_r_uv * f0_s_uv
fp_mask = f0_r_uv * f0_s_v
fn_mask = f0_r_v * f0_s_uv
if tone_shift is not None:
shift_scale = 2 ** (tone_shift / 12)
f0_r = f0_r * shift_scale
# only calculate f0 error for voiced frame
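    # (added note) the +uv terms are 0/1 masks that only guard against log2(0);
    # those frames are zeroed by tp_mask right after, so they never contribute.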
y = 1200 * np.abs(np.log2(f0_r + f0_r_uv) - np.log2(f0_s + f0_s_uv))
y = y * tp_mask
# print(y.sum(), tp_mask.sum())
f0_rmse_mean = y.sum() / tp_mask.sum()
# only voiced/ unvoiced accuracy/precision
vuv_precision = tp_mask.sum() / (tp_mask.sum() + fp_mask.sum())
vuv_accuracy = (tp_mask.sum() + tn_mask.sum()) / len(y)
return f0_rmse_mean, vuv_accuracy, vuv_precision
def eval_rmse_ap(x_r, x_s, sr, frame_len='5'):
# TODO: find out what algorithm to use. maybe pyworld d4c?
pass
if __name__ == '__main__':
file_r = 'demo/exmaple_data/ground_truth/arctic_b0436.wav'
file_s = 'demo/exmaple_data/no_pulse/arctic_b0436.wav'
aud_r, sr_r = librosa.load(file_r, sr=None)
aud_s, sr_s = librosa.load(file_s, sr=None)
assert sr_r == sr_s
if len(aud_r) != len(aud_s):
aud_r = aud_r[:len(aud_s)]
aud_s = aud_s[:len(aud_r)]
# mcd = eval_MCD(aud_r, aud_s)
rmse_f0 = eval_rmse_f0(aud_r, aud_s, sr_r)
print(rmse_f0)
# print(aud_r.shape)
# print(eval_snr(aud_r, aud_s))
# print(eval_snr(aud_r*10, aud_s*10))
| [
"[email protected]"
] | |
560336f07c938cf86e6d8af0547e58c0c2aeee39 | 14fc2ee47e1081416f0465e8afa18da33169095f | /src/PP4E/Ai/ExpertSystem/holmes/holmes2/forward2.py | 68e44a13498fb15d2cd3a51dad0b31cdbb159c0f | [] | no_license | madtyn/progPython | d95ea8021b1a54433e7b73de9d3b11d53a3096b7 | f3a1169149afdeb5191dd895462139f60d21d458 | refs/heads/master | 2021-07-09T13:35:27.519439 | 2017-10-04T14:46:57 | 2017-10-04T14:46:57 | 104,866,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,469 | py | #
# module forward2.py
#
# forward chaining inference engine
#
# this is a variant of forward.py that implements
# negation both by explicit assertion, and by
# omission; see holmes.doc for more info;
# to use negation-by-omission in the shell:
# holmes> +2
# to use it in a program, just import forward2;
###########################################################################
import forward; forward1 = forward
from match import *
from forward import copy_dict, ask_user
def forward(kbase, facts, *pmode):
temp = forward1.conjunct
forward1.conjunct = conjunct # over-ride 1 function
res = forward1.forward(kbase, facts, pmode) # call forward.py version
forward1.conjunct = temp
return res
#################################################
# generate bindings for rule's 'if' conjunction:
# find intersected bindings at this 'AND' node,
# and construct proof subtree lists as the
# recursion unfolds with valid solutions;
#
# note: this function executes with global
# scope = module forward2.py, but the rest of
# the system executes with global scope =
# module forward.py;
#
# note: this isn't exactly like forward.py
# for explicitly asserted 'not' facts, since
# we don't carry variable bindings from the
# match (we do a simple ground comparison);
#################################################
def conjunct(ifs, known, dict, why):
if ifs == []:
return [(copy_dict(dict), [])] # all conjuncts matched
res = []
head, tail = ifs[0], ifs[1:]
if head[0] == 'ask':
term = substitute(head[1:], dict)
if ask_user(term, known, why):
for (dict2, proof2) in conjunct(tail, known, dict, why):
res.append((dict2, [(term, 'told')] + proof2))
elif head[0] == 'not':
term = substitute(head[1:], dict)
if not known.search_unique(term) or \
known.search_unique(['not'] + term):
for (dict2, proof2) in conjunct(tail, known, dict, why):
res.append((dict2, [(term, 'not')] + proof2))
else:
for (fact, proof) in known.search(head, dict):
matched, changes = match(head, fact, dict, {})
if matched:
for (dict2, proof2) in conjunct(tail, known, dict, why):
res.append((dict2, [(fact, proof)] + proof2))
for (var, env) in changes:
env[var] = '?'
return res
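
# Illustrative example (added; not from the original holmes code): with
# negation handled as above, a rule conjunct written as
#     ['not', 'parent', '?x', '?y']
# succeeds either when an explicit "not parent ?x ?y" fact was asserted, or
# when no "parent ?x ?y" fact exists at all (negation by omission). The
# list-of-tokens form follows the conventions used by match.py; the exact
# shell syntax may differ (see holmes.doc).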
| [
"[email protected]"
] | |
e5701a988ccc68f2d79bc6b8df147784e0b255fe | 6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110 | /robot-server/tests/service/session/models/test_command.py | 67d99f409ac65be929996b5cab636f523cc44269 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | Opentrons/opentrons | 874321e01149184960eeaeaa31b1d21719a1ceda | 026b523c8c9e5d45910c490efb89194d72595be9 | refs/heads/edge | 2023-09-02T02:51:49.579906 | 2023-08-31T16:02:45 | 2023-08-31T16:02:45 | 38,644,841 | 326 | 174 | Apache-2.0 | 2023-09-14T21:47:20 | 2015-07-06T20:41:01 | Python | UTF-8 | Python | false | false | 2,142 | py | from datetime import datetime
import pytest
from pydantic import ValidationError
from robot_server.service.session.models import command, command_definitions
@pytest.mark.parametrize(
argnames="command_def",
argvalues=[
command_definitions.ProtocolCommand.start_run,
command_definitions.CalibrationCommand.move_to_deck,
command_definitions.CheckCalibrationCommand.compare_point,
],
)
def test_empty(command_def: command_definitions.CommandDefinition):
"""Test creation of empty command request and response."""
request = command.CommandRequest.parse_obj(
{"data": {"command": command_def.value, "data": {}}}
)
assert request.data.command == command_def
assert request.data.data == command.EmptyModel()
dt = datetime(2000, 1, 1)
response = request.data.make_response(
identifier="id",
status=command.CommandStatus.executed,
created_at=dt,
started_at=None,
completed_at=None,
result=None,
)
assert response.command == command_def
assert response.data == command.EmptyModel()
assert response.id == "id"
assert response.createdAt == dt
assert response.startedAt is None
assert response.completedAt is None
assert response.result is None
@pytest.mark.parametrize(
argnames="command_def",
argvalues=[
command_definitions.EquipmentCommand.load_labware,
command_definitions.EquipmentCommand.load_pipette,
command_definitions.PipetteCommand.aspirate,
command_definitions.PipetteCommand.dispense,
command_definitions.PipetteCommand.drop_tip,
command_definitions.PipetteCommand.pick_up_tip,
command_definitions.CalibrationCommand.jog,
command_definitions.CalibrationCommand.set_has_calibration_block,
],
)
def test_requires_data(command_def: command_definitions.CommandDefinition):
"""Test creation of command requiring data will fail with empty body."""
with pytest.raises(ValidationError):
command.CommandRequest.parse_obj(
{"data": {"command": command_def.value, "data": {}}}
)
| [
"[email protected]"
] | |
b0f876aa2d6cd51ea2274586bab5fb07bb20feff | 219992b56f8e5cd8b47534d98417dd8ac795110b | /src/FastPass-Agile/CSRA.py | d3eb111e1798314be721a8ab965c41c87ae07524 | [] | no_license | haohaixingyun/dig-python | 63844877de0acad04d07d7119e381b9bb4a97395 | 4e8c3e3cb1ba98f39d65095b4d3b09ba115e586b | refs/heads/master | 2021-01-13T08:45:59.669829 | 2016-10-26T05:54:07 | 2016-10-26T05:54:07 | 71,970,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,966 | py | # coding = utf - 8
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import unittest
import time,sys
import login,C_screenshots
import HTMLTestRunner
class FastPass_Agile(unittest.TestCase):
def setUp(self):
self.driver =webdriver.Chrome()
self.base_url = "https://fpagile.boulder.ibm.com/"
self.verificationErrors = []
self.accept_next_alert = True
self.wait = WebDriverWait(self.driver, 10) # timeout after 10 seconds
def test_Case_CSRA(self):
print "Test case start:"
print "\n"
print "step1. open the home page"
driver = self.driver
wait = self.wait
driver.get(self.base_url + "software/xl/fastpass/agile/fphome.nsf/default?openform")
driver.maximize_window()
now_url = driver.current_url
print now_url
assert now_url == 'https://fpagile.boulder.ibm.com/software/xl/fastpass/agile/fphome.nsf/default?openform' ,"URL is not correct."
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p1')
###capture screenshots
print "\n"
print "step2.login"
login.login(self,'Agreements')
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p2')
driver.find_element_by_name("submit").click()
driver.implicitly_wait(10)
print "\n"
print "step3.Input 'Input site number :3477984 and click 'Search'."
driver.find_element_by_id("site").clear()
driver.find_element_by_id("site").send_keys("3477984")
#driver.execute_script("window.scrollBy(0,200)","")
driver.execute_script("window.scrollBy(0,document.body.scrollHeight)","")
time.sleep(3)
driver.find_element_by_id("crponly").click()
driver.find_element_by_name("ibm-submit").submit()
driver.implicitly_wait(10)
time.sleep(5)
result = driver.title
        assert result == 'FastPass | Agreements - Agreement details', "The page was not opened correctly"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p3')
time.sleep(3)
print "\n"
print "step4.Click the link of all site "
driver.implicitly_wait(10)
driver.find_element_by_link_text("All sites").click()
#wait.until(lambda the_driver: the_driver.find_element_by_id('dropList1').is_displayed())
#menu = driver.find_element_by_link_text("All sites")
#driver.implicitly_wait(10)
time.sleep(3)
#ActionChains(driver).move_to_element(menu).perform()
#driver.implicitly_wait(10)
#driver.find_element_by_xpath("//input[@name='ibm-go']").click
#driver.get("https://fpagile.boulder.ibm.com/software/xl/fastpass/agile/fastpass.nsf/salesorders?openform")
result = driver.title
        assert result == 'FastPass | Agreements - Agreement details', "The page was not opened correctly"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p4')
time.sleep(3)
print "step5.go back."
driver.back()
driver.implicitly_wait(10)
time.sleep(5)
print "\n"
print "step6.click link of Originating site"
driver.find_element_by_link_text("Originating site").click()
time.sleep(3)
result = driver.title
        assert result == 'FastPass | Agreements - Originating site', "The page was not opened correctly"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p5')
time.sleep(5)
print "\n"
print "step7.Go back"
driver.back()
driver.implicitly_wait(10)
time.sleep(5)
print "step8.click entitlement link"
driver.implicitly_wait(10)
time.sleep(5)
driver.find_element_by_link_text("Entitlements").click()
time.sleep(3)
result = driver.title
        assert result == 'FastPass | Entitlements - Customer site entitlement information - Default sort by customer name', "The page was not opened correctly"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p6')
time.sleep(3)
print "\n"
print "step9.Go back"
driver.back()
time.sleep(5)
print "step10.click link of customer name"
driver.execute_script("window.scrollBy(0,document.body.scrollHeight)","")
time.sleep(3)
driver.find_element_by_link_text("Go Wireless Inc").click()
driver.implicitly_wait(10)
time.sleep(3)
result = driver.title
        assert result == 'FastPass | Agreements - Agreement site information', "The page was not opened correctly"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p7')
time.sleep(3)
print "\n"
print "step11.Go back"
driver.back()
time.sleep(5)
print "step12.click link of agreement link "
driver.execute_script("window.scrollBy(0,document.body.scrollHeight)","")
time.sleep(3)
driver.find_element_by_link_text("219971").click()
driver.implicitly_wait(10)
time.sleep(5)
result = driver.title
        assert result == 'FastPass | Agreements - Agreement details', "The page was not opened correctly"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p8')
time.sleep(3)
print "\n"
print "step13.Go back"
driver.back()
time.sleep(5)
print "step14.click Site number link"
driver.execute_script("window.scrollBy(0,document.body.scrollHeight)","")
time.sleep(3)
driver.find_element_by_link_text("3477984").click()
driver.implicitly_wait(10)
time.sleep(5)
result = driver.title
        assert result == 'FastPass | Customers - Customer details', "The page was not opened correctly"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p9')
time.sleep(3)
print "step15.click view link"
driver.execute_script("window.scrollBy(0,900)","")
#driver.execute_script("window.scrollBy(0,document.body.scrollHeight)","")
time.sleep(3)
driver.find_element_by_xpath("//a[@href='./SAPIDServsAgrmt?openagent&agree_num=0000219971&program=PA&ibm_cust_num=3821517&cust_type=NAV&cust_num=0003477984']").click()
driver.implicitly_wait(10)
time.sleep(5)
result = driver.title
        assert result == 'FastPass | Sales orders - Services agreements by site', "The page was not opened correctly"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p10')
time.sleep(3)
print "\n"
print "step16.Go back"
driver.back()
time.sleep(3)
driver.back()
time.sleep(5)
print "step17.click Current view link"
driver.execute_script("window.scrollBy(0,document.body.scrollHeight)","")
time.sleep(3)
driver.find_element_by_link_text("Current view").click()
driver.implicitly_wait(10)
time.sleep(5)
result = driver.title
        assert result == 'FastPass | Sales orders - Current sales order', "The page was not opened correctly"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p11')
time.sleep(3)
print "\n"
print "step18.Go back"
driver.back()
time.sleep(5)
print "step19.click all view link"
driver.execute_script("window.scrollBy(0,document.body.scrollHeight)","")
time.sleep(3)
driver.find_element_by_link_text("All view").click()
driver.implicitly_wait(10)
time.sleep(5)
result = driver.title
        assert result == 'FastPass | Sales orders - Site historical information - Sorted by purchase order date', "The page was not opened correctly"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\image\\','CSRA_p12')
time.sleep(3)
print "\n"
print "step20.Go back"
driver.back()
time.sleep(5)
print "\n"
print "Test Case end with successfully!"
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
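
    # Hedged refactoring sketch (added; not part of the original test): the
    # repeated "click link -> wait -> assert title -> screenshot -> go back"
    # pattern above could be collapsed into a helper like this. Names and the
    # SCREENSHOT_DIR constant are illustrative only.
    #
    # def _open_and_verify(self, link_text, expected_title, shot_name):
    #     self.driver.find_element_by_link_text(link_text).click()
    #     self.driver.implicitly_wait(10)
    #     time.sleep(3)
    #     assert self.driver.title == expected_title, "The page was not opened correctly"
    #     C_screenshots.C_screenshots(self, SCREENSHOT_DIR, shot_name)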
if __name__ == '__main__':
now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
testunit=unittest.TestSuite()
testunit.addTest(FastPass_Agile("test_Case_CSRA"))
filename="C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Agile\\result\\"+now+" FastPass_Test_Case_CSRA.html"
fp=file(filename,'wb')
runner = HTMLTestRunner.HTMLTestRunner(stream=fp,title='FastPass_Agile Test Case',description='This is CSRA test case')
runner.run(testunit)
| [
"[email protected]"
] | |
580eeccd0fdd976778a96b8c5d7a64e3cbcc7863 | b39d9ef9175077ac6f03b66d97b073d85b6bc4d0 | /Emend_WC500026537.4.py | 46754167c443614635089acfee5a74b6d1a88dc2 | [] | no_license | urudaro/data-ue | 2d840fdce8ba7e759b5551cb3ee277d046464fe0 | 176c57533b66754ee05a96a7429c3e610188e4aa | refs/heads/master | 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | {'_data': [['Common',
[['Metabolism', u'minskad aptit'],
['Nervous system', u'huvudv\xe4rk'],
['Respiratory', u'Hicka'],
['GI', u'f\xf6rstoppning, dyspepsi'],
['General', u'Tr\xf6tthet'],
['Investigations', u'f\xf6rh\xf6jt ALAT']]],
['Uncommon',
[['Blood', u'febril neutropeni, anemi'],
['Psychiatric', u'\xc5ngest'],
['Nervous system', u'yrsel, s\xf6mnighet'],
['Cardiac', u'Palpitationer'],
['Vascular', u'V\xe4rmevallningar'],
['GI',
u'rapning, illam\xe5ende*, kr\xe4kning*, gastroesofagal refluxsjukdom, buksm\xe4rta, muntorrhet, flatulens'],
['Skin', u'utslag, akne'],
['Renal', u'Dysuri'],
['General', u'asteni, sjukdomsk\xe4nsla'],
['Investigations',
u'f\xf6rh\xf6jt ASAT, f\xf6rh\xf6jt alkaliskt fosfatas i blodet']]],
['Rare',
[['Infections', u'candidiasis, stafylokockinfektion'],
['Metabolism', u'Polydipsi'],
['Psychiatric', u'desorientering, euforisk sinnesst\xe4mning'],
['Nervous system', u'kognitiv st\xf6rning, letargi, dysgeusi'],
['Eye', u'Konjunktivit'],
['Ear', u'Tinnitus'],
['Cardiac', u'bradykardi, hj\xe4rt-k\xe4rlsjukdom'],
['Respiratory',
u'orofaryngeal sm\xe4rta, nysning, hosta, postnasalt dropp, svalgirritation'],
['GI',
u'perforerande duodenals\xe5r, stomatit, buksp\xe4nning, h\xe5rd avf\xf6ring, neutropen kolit'],
['Skin',
u'fotosensitivitetsreaktion, hyperhidros, seborr\xe9, hudf\xf6r\xe4ndring, kliande utslag, Stevens- Johnsons syndrom/toxisk epidermal nekrolys'],
['Musculoskeletal', u'muskelsvaghet, muskelspasmer'],
['Renal', u'Pollakisuri'],
['General', u'\xf6dem, obehagsk\xe4nsla i br\xf6stet, g\xe5ngst\xf6rning'],
['Investigations',
u'positivt test f\xf6r r\xf6da blodkroppar i urinen, minskat natrium i blodet, viktminskning, minskat antal neutrofiler, glukosuri, \xf6kad urinm\xe4ngd']]],
['Unknown',
[['Immune system',
u'\xf6verk\xe4nslighetsreaktioner inkluderande anafylaktiska reaktioner'],
['Skin', u'kl\xe5da, urtikaria']]]],
'_pages': [7, 9],
u'_rank': 32,
u'_type': u'TSFU'} | [
"daro@daro-ThinkPad-X220.(none)"
] | daro@daro-ThinkPad-X220.(none) |
ac6dc9752855bb5a9741df5fcd939fbed3d4226b | 0f9f8e8478017da7c8d408058f78853d69ac0171 | /python3/l0229_majority_element_2.py | 8d8dd64afeae37fe515fb54d936bb76fed74d62f | [] | no_license | sprax/1337 | dc38f1776959ec7965c33f060f4d43d939f19302 | 33b6b68a8136109d2aaa26bb8bf9e873f995d5ab | refs/heads/master | 2022-09-06T18:43:54.850467 | 2020-06-04T17:19:51 | 2020-06-04T17:19:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | from typing import List
class Solution:
def majorityElement(self, nums: List[int]) -> List[int]:
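        # Boyer-Moore majority vote generalised to n/3 (added note): at most
        # two distinct values can occur more than len(nums) // 3 times, so two
        # candidate slots suffice. E.g. [3, 2, 3] -> [3] and
        # [1, 1, 1, 3, 3, 2, 2, 2] -> [1, 2].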
c1, c2 = None, None
count1, count2 = 0, 0
for n in nums:
if n == c1:
count1 += 1
elif n == c2:
count2 += 1
elif count1 == 0:
c1 = n
count1 = 1
elif count2 == 0:
c2 = n
count2 = 1
else:
count1 -= 1
count2 -= 1
count1, count2 = 0, 0
for n in nums:
if n == c1:
count1 += 1
elif n == c2:
count2 += 1
result = []
if count1 > len(nums) // 3:
result.append(c1)
if count2 > len(nums) // 3:
result.append(c2)
return result | [
"[email protected]"
] | |
0cbf5e113335f0f6acf9dd864503ff0411592ba7 | 8bfd94be34008db3a7822247e9fb05604ad010d2 | /snips/migrations/0001_initial.py | a7b67f1349671972f09f8da5d588956b54b4acf5 | [] | no_license | SnipToday/FrontEnd | c7ce59548b0114e24008580d98dad48c78ff431d | 5043fb584535295b27e8c6f0044c54ac8ab40023 | refs/heads/master | 2021-01-22T17:49:04.506117 | 2017-09-04T22:08:28 | 2017-09-04T22:08:28 | 102,405,712 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,461 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-20 13:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import modelcluster.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0032_add_bulk_delete_page_permission'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('name', models.CharField(max_length=50, primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='SnipRelatedLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, max_length=800, verbose_name='External link')),
('title', models.CharField(max_length=255)),
],
options={
'abstract': False,
'ordering': ['sort_order'],
},
),
migrations.CreateModel(
name='Tldr',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('date', models.DateField(blank=True, default=django.utils.timezone.now, verbose_name='Post date')),
('body', models.TextField(verbose_name='Body')),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='snips.Category')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AddField(
model_name='sniprelatedlink',
name='link_page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='sniprelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='snips.Tldr'),
),
]
| [
"[email protected]"
] | |
5d323280b72ac2c020eaf9e222a4bbe9e7dfd50f | 0fc9eca08cc48f93a4079a9b4c0dd0201ef2ce80 | /vindula/agendacorporativa/browser/search.py | 8571790b6dae199e6a61314a09ccf6efa1eb8e74 | [] | no_license | vindula/vindula.agendacorporativa | c75564c7d89424e23e3a1baa1f1dcdc6ac2dfc4c | e70f6c2baf6629cb4a486bc642c49e7b7138bf7d | refs/heads/master | 2020-12-29T01:41:33.812325 | 2013-08-02T18:54:58 | 2013-08-02T18:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | # coding: utf-8
from Products.CMFCore.utils import getToolByName
from AccessControl.SecurityManagement import newSecurityManager, getSecurityManager, setSecurityManager
from DateTime import DateTime
from copy import copy
def busca_commitment(context,username,portlet=False):
ctool = getToolByName(context, 'portal_catalog')
path = context.portal_url.getPortalObject().getPhysicalPath()
date_range_query = { 'query': DateTime(), 'range': 'min'}
query = {'path': {'query':'/'.join(path)},
'portal_type': ('Commitment',),
'sort_on':'getStart_datetime',
# 'sort_order':'descending',
}
if portlet:
query['getStart_datetime'] = date_range_query
    # Search for the user's own commitments
query1 = copy(query)
query1['Creator'] = username
result1 = ctool(**query1)
    # Search for commitments the user participates in
query2 = copy(query)
query2['getConvidados'] = [username]
result2 = ctool(**query2)
    # Search for public commitments
query3 = copy(query)
query3['review_state'] = ['published', 'internally_published', 'external', 'internal']
result3 = ctool(**query3)
result = result1 + result2 + result3
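    # (added note) merge the three result sets, dropping duplicate catalog
    # brains by UID while preserving order.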
L = []
L_UID = []
for item in result:
if not item.UID in L_UID:
L.append(item)
L_UID.append(item.UID)
return L | [
"[email protected]"
] | |
e82730273d0eaa099b5b7974f79444de9077c466 | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/purchase/report/purchase_bill.py | d442019eb8c1ec5604f64349509c1d4a9b4dc348 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | # -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from harpiya import api, fields, models, tools
from harpiya.tools import formatLang
class PurchaseBillUnion(models.Model):
_name = 'purchase.bill.union'
_auto = False
_description = 'Purchases & Bills Union'
_order = "date desc, name desc"
name = fields.Char(string='Reference', readonly=True)
reference = fields.Char(string='Source', readonly=True)
partner_id = fields.Many2one('res.partner', string='Vendor', readonly=True)
date = fields.Date(string='Date', readonly=True)
amount = fields.Float(string='Amount', readonly=True)
currency_id = fields.Many2one('res.currency', string='Currency', readonly=True)
company_id = fields.Many2one('res.company', 'Company', readonly=True)
vendor_bill_id = fields.Many2one('account.move', string='Vendor Bill', readonly=True)
purchase_order_id = fields.Many2one('purchase.order', string='Purchase Order', readonly=True)
def init(self):
tools.drop_view_if_exists(self.env.cr, 'purchase_bill_union')
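        # (added note) purchase orders are exposed with negated ids ("-id" in
        # the second SELECT below) so they can never collide with vendor-bill
        # ids inside the unioned view.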
self.env.cr.execute("""
CREATE OR REPLACE VIEW purchase_bill_union AS (
SELECT
id, name, ref as reference, partner_id, date, amount_untaxed as amount, currency_id, company_id,
id as vendor_bill_id, NULL as purchase_order_id
FROM account_move
WHERE
type='in_invoice' and state = 'posted'
UNION
SELECT
-id, name, partner_ref as reference, partner_id, date_order::date as date, amount_untaxed as amount, currency_id, company_id,
NULL as vendor_bill_id, id as purchase_order_id
FROM purchase_order
WHERE
state in ('purchase', 'done') AND
invoice_status in ('to invoice', 'no')
)""")
def name_get(self):
result = []
for doc in self:
name = doc.name or ''
if doc.reference:
name += ' - ' + doc.reference
amount = doc.amount
if doc.purchase_order_id and doc.purchase_order_id.invoice_status == 'no':
amount = 0.0
name += ': ' + formatLang(self.env, amount, monetary=True, currency_obj=doc.currency_id)
result.append((doc.id, name))
return result
| [
"[email protected]"
] | |
5114aa4f0924c3f6eaa3c0e48d017abbbb07dc7f | b6277a78b0337667ec4b88e0da3cb84a76383687 | /tests/test_inertia.py | ee4d43936999c7eb71ba51b38a6a87c207d455ae | [
"MIT"
] | permissive | sgalkina/trimesh | 11c91e9c9a7c013fb81551dcee0fdbfffa1f5d13 | 55e35216efcf620c816d82d9f9167e22966a851d | refs/heads/master | 2021-01-25T07:00:55.935106 | 2017-06-06T03:30:28 | 2017-06-06T03:30:28 | 93,636,761 | 0 | 0 | null | 2017-06-07T13:11:58 | 2017-06-07T13:11:57 | null | UTF-8 | Python | false | false | 4,510 | py | import generic as g
class InertiaTest(g.unittest.TestCase):
def test_inertia(self):
t0 = g.np.array([[-0.419575686853, -0.898655215203, -0.127965023308, 0. ],
[ 0.712589964872, -0.413418145015, 0.566834172697, 0. ],
[-0.562291548012, 0.146643245877, 0.813832890385, 0.],
[ 0. , 0. , 0. , 1. ]])
t1 = g.np.array([[ 0.343159553585, 0.624765521319, -0.701362648103, 0.],
[ 0.509982849005, -0.750986657709, -0.419447891476, 0. ],
[-0.788770571525, -0.213745370274, -0.57632794673 , 0. ],
[ 0. , 0. , 0. , 1. ]])
# make sure our transformations are actually still transformations
assert g.np.abs(g.np.dot(t0, t0.T) - g.np.eye(4)).max() < 1e-10
assert g.np.abs(g.np.dot(t1, t1.T) - g.np.eye(4)).max() < 1e-10
c = g.trimesh.primitives.Cylinder(height=10,
radius=1,
sections=720, # number of slices
transform=t0)
c0m = c.moment_inertia.copy()
c0 = g.trimesh.inertia.cylinder_inertia(c.volume,
c.primitive.radius,
c.primitive.height,
c.primitive.transform)
ct = g.np.abs((c0m / c0) - 1)
# we are comparing an inertia tensor from a mesh of a cylinder
# to an inertia tensor from an actual cylinder, so allow for some
# discretization uncertainty
assert ct.max() < 1e-3
# check our principal axis calculation against this cylinder
# the direction (long axis) of the cylinder should correspond to
# the smallest principal component of inertia, AKA rotation along
# the axis, rather than the other two which are perpendicular
components, vectors = g.trimesh.inertia.principal_axis(c.moment_inertia)
axis_test = g.np.abs((vectors[components.argmin()] / c.direction) - 1)
assert axis_test.max() < 1e-8
# make sure Trimesh attribute is plumbed correctly
assert g.np.allclose(c.principal_inertia_components, components)
assert g.np.allclose(c.principal_inertia_vectors, vectors)
# the other two axis of the cylinder should be identical
assert g.np.abs(g.np.diff(g.np.sort(components)[-2:])).max() < 1e-8
m = g.get_mesh('featuretype.STL')
i0 = m.moment_inertia.copy()
# rotate the moment of inertia
i1 = g.trimesh.inertia.transform_inertia(transform=t0, inertia_tensor=i0)
# rotate the mesh
m.apply_transform(t0)
# check to see if the rotated mesh + recomputed moment of inertia
# is close to the rotated moment of inertia
tf_test = g.np.abs((m.moment_inertia / i1) - 1)
assert tf_test.max() < 1e-6
# do it again with another transform
i2 = g.trimesh.inertia.transform_inertia(transform=t1, inertia_tensor=i1)
m.apply_transform(t1)
tf_test = g.np.abs((m.moment_inertia / i2) - 1)
assert tf_test.max() < 1e-6
def test_primitives(self):
primitives = [g.trimesh.primitives.Cylinder(height=5),
g.trimesh.primitives.Box(),
g.trimesh.primitives.Sphere(radius=1.23)]
for p in primitives:
for i in range(100):
# check to make sure the analytic inertia tensors are relatively
# close to the meshed inertia tensor (order of magnitude and sign)
comparison = g.np.abs(p.moment_inertia - p.to_mesh().moment_inertia)
c_max = comparison.max() / g.np.abs(p.moment_inertia).max()
assert c_max < .1
if hasattr(p.primitive, 'transform'):
matrix = g.trimesh.transformations.random_rotation_matrix()
p.primitive.transform = matrix
elif hasattr(p.primitive, 'center'):
p.primitive.center = g.np.random.random(3)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
| [
"[email protected]"
] | |
aaa6d9dc213f7a6387f24784b2d7e5faf88bdaca | 2f37d3dcb20c9ba171024b1f8711b9926dbef0f3 | /eventex/subscriptions/mixins.py | af8a07846e310bed196a2252db4603a7d4f73b42 | [] | no_license | sergiopassos/eventex-sergiopassos | 9c080a365e4e554a5839aa461ce47e3d40d9fc53 | 6672223faaa7930377532141394dea3ae7c2c431 | refs/heads/master | 2023-04-27T02:40:15.094019 | 2020-01-16T13:51:11 | 2020-01-16T13:51:11 | 192,630,937 | 0 | 0 | null | 2023-04-21T20:32:42 | 2019-06-19T00:37:38 | Python | UTF-8 | Python | false | false | 1,585 | py | from django.conf import settings
from django.core import mail
from django.template.loader import render_to_string
from django.views.generic import CreateView
class EmailCreateMixin:
email_to = None
email_context_name = None
email_template_name = None
email_from = settings.DEFAULT_FROM_EMAIL
email_subject = ''
def send_mail(self):
# Send subscription email
subject = self.email_subject
from_ = self.email_from
to = self.get_email_to()
template_name = self.get_email_template_name()
context = self.get_email_context_data()
body = render_to_string(template_name, context)
return mail.send_mail(subject, body, from_, [from_, to])
def get_email_template_name(self):
if self.email_template_name:
return self.email_template_name
meta = self.object._meta
return '{}/{}_email.txt'.format(meta.app_label, meta.model_name)
def get_email_context_data(self, **kwargs):
context = dict(kwargs)
context.setdefault(self.get_email_context_name(), self.object)
return context
def get_email_context_name(self):
if self.email_context_name:
return self.email_context_name
return self.object._meta.model_name
def get_email_to(self):
if self.email_to:
return self.email_to
return self.object.email
class EmailCreateView(EmailCreateMixin, CreateView):
def form_valid(self, form):
response = super().form_valid(form)
self.send_mail()
return response
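
# Hedged usage sketch (added; not part of the original module): a view that
# emails the created object. The model and field names are hypothetical.
#
# class SubscriptionCreate(EmailCreateView):
#     model = Subscription
#     fields = ['name', 'email']
#     email_subject = 'Subscription confirmed'
#     # By default this renders subscriptions/subscription_email.txt and puts
#     # the created instance in the template context as 'subscription'.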
| [
"[email protected]"
] | |
20aa4222af3a4ebe0cc386a6ed3d8a36989e1b88 | 8a9b10eeef43e648fcc82d5fdbf6505e0e19a88b | /Tensor_Flow/stock_similarity_daily.py | 865cb66b7091f69f8580c800d2c23bd22bff0acb | [] | no_license | lwzswufe/neural_net | 3648e100ad68fd2dbd6e3f51be7b053780f7fd87 | 0a0ed94680c0e5dd3dbd2e13aef79a1b8fd8293d | refs/heads/master | 2021-03-30T17:06:18.296242 | 2019-10-16T08:22:47 | 2019-10-16T08:22:47 | 76,216,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | # author='lwz'
# coding:utf-8
# !/usr/bin/env python3
from Tensor_Flow import AutoEncoder2, similarity_analysis
if __name__ == '__main__':
AutoEncoder2.daily()
similarity_analysis.daily() | [
"[email protected]"
] | |
c73df56929f4b7102748c83d681b3d6ba5a8af13 | 0d86bb399a13152cd05e3ba5684e4cb22daeb247 | /python-basics/unit11-error/py116_raise.py | f69da74fed89c80b5b2226b0517bf4504d52200c | [] | no_license | tazbingor/learning-python2.7 | abf73f59165e09fb19b5dc270b77324ea00b047e | f08c3bce60799df4f573169fcdb1a908dcb8810f | refs/heads/master | 2021-09-06T05:03:59.206563 | 2018-02-02T15:22:45 | 2018-02-02T15:22:45 | 108,609,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 18/1/2 8:00 PM
# @Author : Aries
# @Site :
# @File : py116_raise.py
# @Software: PyCharm
'''
The raise statement: actively raising an exception.
'''
def throw_error():
    raise Exception("raising an exception")


if __name__ == '__main__':
    throw_error()

'''
Traceback (most recent call last):
  File "project/PycharmProjects/rising-python-classics/python-basics/unit11-error/py116_raise.py", line 18, in <module>
    throw_error()
  File "project/PycharmProjects/rising-python-classics/python-basics/unit11-error/py116_raise.py", line 14, in throw_error
    raise Exception("raising an exception")
Exception: raising an exception
'''
| [
"[email protected]"
] | |
ed61bed728ef72c66243a1d999603df111527ea6 | ab650e6a0ca2f97096235ebe914b140b740aec66 | /app/templates/_root_init.py | 88f63dc99cf2ee4d75a6a147f53f0b50423d33bb | [
"MIT"
] | permissive | ghostRider1124/generator-python-package | 8fd353d6c4ed2c6f1ad88ebb6fe6811a8d585026 | 678b33fec9937c68aaa45ae04e6a8aac5f6554c5 | refs/heads/master | 2020-05-20T18:16:32.266189 | 2014-07-02T17:05:26 | 2014-07-02T17:05:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of <%= package.name %>.
# <%= package.url %>
# Licensed under the <%= package.license %> license:
# http://www.opensource.org/licenses/<%= package.license%>-license
# Copyright (c) <%= package.created.year %> <%= package.author.name %> <%= package.author.email %>
from <%= package.pythonName %>.version import __version__
| [
"[email protected]"
] | |
06c6fc43bcedc984addf32ef365c64484890ea3c | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_13175-1949/sdB_ec_13175-1949_coadd.py | ffac546028839ada5df1dbebec03b93979d41d2d | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[200.073167,-20.088431], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_ec_13175-1949/sdB_ec_13175-1949_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_ec_13175-1949/sdB_ec_13175-1949_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
f606fc6928efe6b05a4ee59ddda61acd48e114dd | 930bc970069d8cbcfb36725a90492eff50638ecc | /code/dk-iris-pipeline/airflow_home/src/model/benchmark.py | 1bbbbefb5abcaffd9064680c379d44d929b5a14b | [
"MIT"
] | permissive | databill86/airflow4ds | 4770d856569c4db4b55b2d9dfda010e21c4cd790 | b5ae213f7169c54d31f4eca58d235ec6b09fd56f | refs/heads/master | 2021-09-25T17:26:43.340747 | 2018-10-24T16:09:49 | 2018-10-24T16:09:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | import os
import sys
# Allow Python to discover local modules
sys.path.append(os.getenv(key='AIRFLOW_HOME'))
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, accuracy_score
from src import PROJECT_DIRECTORY
from src.scrub import get_clean_iris
def get_train_test_data():
"""
"""
df = get_clean_iris()
X = df.copy().drop(['iris_type'], axis=1)
y = df.copy().loc[:, 'iris_type'].replace({'setosa': 0, 'versicolor': 1, 'virginica': 2})
return train_test_split(X, y, test_size=0.30, random_state=112358)
def run_model_benchmark():
"""
"""
X_tr, X_te, y_tr, y_te = get_train_test_data()
lr_0 = LogisticRegression()
lr_0.fit(X_tr, y_tr)
y_pr = lr_0.predict(X_te)
print(f"Benchmark Model Accuracy: {accuracy_score(y_te, y_pr)}")
| [
"[email protected]"
] | |
3ce8f8c6eacd5408747793dcc122d0488f4ae734 | baa484828e8683d51d58d48989532e3d3ce987bc | /200228_4.py | 2efd1e862a827523b45963bb36d5e9ac5f94ac82 | [] | no_license | sungguenja/study_gui | 0fc1e17c98a9afc0a6e66a39aeefcd89c3e60f5e | b058ca900061f2bd743f8532056ecedcc6b7ce0a | refs/heads/master | 2021-01-16T16:32:28.027456 | 2020-03-17T16:54:21 | 2020-03-17T16:54:21 | 243,184,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | from tkinter import *
def font_control(ev):
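    # (added note) Scale callback: resize the label font as the slider moves;
    # at the maximum value (40) swap both the text and the font family.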
label.config(font='HY헤드라인M {0} bold'.format(v.get()))
if v.get()==40:
label['text']='wa sans~'
label['font']='굴림체 40 bold'
else:
        label['text']='Hello Python~'
win=Tk()
v=IntVar()
win.geometry('300x150')
label = Label(win, text='Hello Python~')
label.pack(fill='y',expand=1)
sc = Scale(win, from_=10,to=40, orient=HORIZONTAL, variable=v, command=font_control)
sc.pack(fill='x',expand=1)
qbtn = Button(win, text='Quit', command=win.quit, font='굴림 10 bold')
qbtn.pack()
win.mainloop() | [
"[email protected]"
] | |
e6374b6bb555a8e38d56b70d7982bcd3b7b87ad6 | ca299cec2cd84d8b7c2571fa2fdf7161e66b8fe7 | /private_server/guard/CELL-12/Q-7/12-7.py | b6d8cf30e5782ee64820b4e068edc894e409a128 | [] | no_license | benmechen/CodeSet | ca57d4a065ac4fc737749f65cb5aa1011d446a88 | f5a4bf627a9a8efc76a65ae58db63a973fedffb7 | refs/heads/master | 2021-07-16T14:23:36.355491 | 2019-12-02T13:58:27 | 2019-12-02T13:58:27 | 225,385,245 | 1 | 0 | null | 2021-06-22T15:37:57 | 2019-12-02T13:47:09 | JavaScript | UTF-8 | Python | false | false | 87 | py | # Set count_to equal to the sum of two big numbers
count_to = 12 + 12
print(count_to) | [
"[email protected]"
] | |
d232fe89ca699bb27814c5684ea1ae3d2a1807b6 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-as/huaweicloudsdkas/v1/model/batch_remove_scaling_instances_request.py | bebca60ab39111f01f7c0e8379c57dbbff64a882 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,211 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BatchRemoveScalingInstancesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'scaling_group_id': 'str',
'body': 'BatchRemoveInstancesOption'
}
attribute_map = {
'scaling_group_id': 'scaling_group_id',
'body': 'body'
}
def __init__(self, scaling_group_id=None, body=None):
"""BatchRemoveScalingInstancesRequest
The model defined in huaweicloud sdk
        :param scaling_group_id: The AS group ID.
:type scaling_group_id: str
:param body: Body of the BatchRemoveScalingInstancesRequest
:type body: :class:`huaweicloudsdkas.v1.BatchRemoveInstancesOption`
"""
self._scaling_group_id = None
self._body = None
self.discriminator = None
self.scaling_group_id = scaling_group_id
if body is not None:
self.body = body
@property
def scaling_group_id(self):
"""Gets the scaling_group_id of this BatchRemoveScalingInstancesRequest.
        The AS group ID.
:return: The scaling_group_id of this BatchRemoveScalingInstancesRequest.
:rtype: str
"""
return self._scaling_group_id
@scaling_group_id.setter
def scaling_group_id(self, scaling_group_id):
"""Sets the scaling_group_id of this BatchRemoveScalingInstancesRequest.
        The AS group ID.
:param scaling_group_id: The scaling_group_id of this BatchRemoveScalingInstancesRequest.
:type scaling_group_id: str
"""
self._scaling_group_id = scaling_group_id
@property
def body(self):
"""Gets the body of this BatchRemoveScalingInstancesRequest.
:return: The body of this BatchRemoveScalingInstancesRequest.
:rtype: :class:`huaweicloudsdkas.v1.BatchRemoveInstancesOption`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this BatchRemoveScalingInstancesRequest.
:param body: The body of this BatchRemoveScalingInstancesRequest.
:type body: :class:`huaweicloudsdkas.v1.BatchRemoveInstancesOption`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchRemoveScalingInstancesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
14c7592a361aec866f6bbc9d904a1567aab1a1ec | ecd4b06d5d5368b71fd72a1c2191510a03b728fd | /3 - pandas foundation/austin case study/4.py | 40ede74aea409c735ebd2a40bd4f6e624d3f96aa | [
"MIT"
] | permissive | Baidaly/datacamp-samples | 86055db5e326b59bfdce732729c80d76bf44629e | 37b4f78a967a429e0abca4a568da0eb9d58e4dff | refs/heads/master | 2022-07-27T01:18:00.700386 | 2022-07-18T19:27:23 | 2022-07-18T19:27:23 | 123,827,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | # Convert the date column to string: df_dropped['date']
df_dropped['date'] = df_dropped['date'].astype(str)
# Pad leading zeros to the Time column: df_dropped['Time']
df_dropped['Time'] = df_dropped['Time'].apply(lambda x:'{:0>4}'.format(x))
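# (added note) '{:0>4}' left-pads with zeros to width 4 (e.g. '53' -> '0053'),
# so every time value parses as HHMM below.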
# Concatenate the new date and Time columns: date_string
date_string = df_dropped['date'] + df_dropped['Time']
# Convert the date_string Series to datetime: date_times
date_times = pd.to_datetime(date_string, format='%Y%m%d%H%M')
# Set the index to be the new date_times container: df_clean
df_clean = df_dropped.set_index(date_times)
# Print the output of df_clean.head()
print(df_clean.head()) | [
"[email protected]"
] | |
1ed5f1cca9b1d29e15103ea5e148e811b1f53733 | be526f8602651479f5b24eab9c91a3817e9bff0e | /word2vec_tensorboard.py | bc0a82bc12957315e14cf53c710a8cda0042d17f | [] | no_license | bage79/word2vec4kor | 76a870c57a1a854ff3e3a00c955424b394723259 | 3dc8a856e22f79e8da27f74b3d55de474a599e8c | refs/heads/master | 2021-04-27T11:33:00.869446 | 2018-05-08T14:12:45 | 2018-05-08T14:12:45 | 122,564,641 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,861 | py | import argparse
import os
import pickle
import traceback
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
def turn_off_tensorflow_logging():
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # ignore tensorflow warnings
    tf.logging.set_verbosity(tf.logging.ERROR)  # ignore tensorflow info (e.g. GPU allocation messages)
def word2vec_tensorboard(name, data_dir, tensorboard_dir, top_n=10000):
turn_off_tensorflow_logging()
try:
if not os.path.exists(tensorboard_dir):
os.mkdir(tensorboard_dir)
for filename in os.listdir(tensorboard_dir):
os.remove(os.path.join(tensorboard_dir, filename)) # remove old tensorboard files
config = projector.ProjectorConfig()
name = name.replace('+', '')
idx2word = pickle.load(open(os.path.join(data_dir, 'idx2word.dat'), 'rb'))
# word2idx = pickle.load(open('data/word2idx.dat', 'rb'))
idx2vec = pickle.load(open(os.path.join(data_dir, 'idx2vec.dat'), 'rb'))
wc = pickle.load(open(os.path.join(data_dir, 'wc.dat'), 'rb'))
total = sum(wc.values())
# print('idx2word:', idx2word[:10])
# print('idx2vec:', idx2vec[1])
# print('wc:', list(wc.items())[:10])
print('total count:', total)
idx2vec, idx2word = idx2vec[:top_n], idx2word[:top_n]
embedding_var = tf.Variable(idx2vec, name=name)
# print(data)
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.metadata_path = os.path.join(tensorboard_dir, f'{name}.tsv')
print('')
print(f'embedding_var.name: {embedding_var.name} shape: {embedding_var.shape}')
print(f'embedding.metadata_path: {embedding.metadata_path}')
with open(embedding.metadata_path, 'wt') as out_f:
out_f.write('spell\tfreq\n')
for spell in idx2word:
out_f.write(f'{spell}\t{wc.get(spell, 0)/total}\n')
summary_writer = tf.summary.FileWriter(tensorboard_dir)
projector.visualize_embeddings(summary_writer, config)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(var_list=[embedding_var])
checkpoint_file = os.path.join(tensorboard_dir, f'{name}.ckpt')
saver.save(sess, checkpoint_file, global_step=None)
print(f'checkpoint_file: {checkpoint_file}')
# absolute path -> relative path
for filename in ['checkpoint', 'projector_config.pbtxt']:
filepath = os.path.join(tensorboard_dir, filename)
lines = []
with open(filepath, 'rt') as f:
for line in f.readlines():
lines.append(line.replace(tensorboard_dir, '.'))
os.remove(filepath)
with open(filepath, 'wt') as f:
for line in lines:
f.write(line)
except:
traceback.print_exc()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='sample.ko.wikipedia', type=str, help="embedding name in tensorboard projector")
parser.add_argument('--data_dir', default=os.path.join(os.getenv('HOME'), 'workspace/word2vec4kor/data'), type=str, help="data directory path")
parser.add_argument('--tensorboard_dir', default=os.path.join(os.getenv('HOME'), 'tensorboard_log/'), type=str, help="tensorboard directory path")
parser.add_argument('--top_n', default=10000, type=int, help='max number of vocaburary')
args = parser.parse_args()
word2vec_tensorboard(name=args.name, data_dir=args.data_dir, tensorboard_dir=args.tensorboard_dir, top_n=args.top_n)
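    # (added note) once the checkpoint, the .tsv metadata and
    # projector_config.pbtxt are written, inspect the embedding with the
    # standard TensorBoard CLI, e.g.:
    #     tensorboard --logdir <tensorboard_dir>
    # and open the "Projector" tab in the browser.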
| [
"[email protected]"
] | |
782beba45b77ab5dd35b7a388087e5f618f0f266 | 62f59fe1e0246b33c84412ee2a60e77938a05a15 | /test/zombie_task_test.py | 97d79ca67f9031bb031c6eccabdd7eb9bba5e7e6 | [] | no_license | 20113261/platform_service | 02676d2654f5c7bde2c7eafdadbf55fe7253a7b0 | bc903168bd7cbc499892f24c2b1cc82c38180c01 | refs/heads/dev | 2022-08-01T02:30:05.004852 | 2018-04-29T05:39:37 | 2018-04-29T05:39:37 | 131,576,306 | 1 | 0 | null | 2022-07-08T19:13:32 | 2018-04-30T09:14:54 | Python | UTF-8 | Python | false | false | 1,113 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/10/26 下午6:28
# @Author : Hou Rong
# @Site :
# @File : zombie_task_test.py
# @Software: PyCharm
import datetime
import pymongo
client = pymongo.MongoClient(host='10.10.231.105')
collections = client['MongoTask']['Task']
def monitoring_zombies_task():
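    # (added note) a task still flagged running=1 but not updated for over an
    # hour is treated as a zombie and reset so it can be scheduled again.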
try:
cursor = collections.find(
{'running': 1, 'utime': {'$lt': datetime.datetime.now() - datetime.timedelta(hours=1)}}, {'_id': 1},
hint=[('running', 1), ('utime', -1)]).limit(
10000)
id_list = [id_dict['_id'] for id_dict in cursor]
print(len(id_list))
result = collections.update({
'_id': {
'$in': id_list
}
}, {
'$set': {
'finished': 0,
'used_times': 0,
'running': 0
}
}, multi=True)
print(result)
except Exception as e:
print(e)
if __name__ == '__main__':
import time
start = time.time()
monitoring_zombies_task()
print(time.time() - start)
| [
"[email protected]"
] | |
d72d79bedd1d13883fc2cad393b334ee4aff8287 | 95495baeb47fd40b9a7ecb372b79d3847aa7a139 | /test/test_metadata.py | 4a4189f3e130f421154ba36b2d51d25b314c6960 | [] | no_license | pt1988/fmc-api | b1d8ff110e12c13aa94d737f3fae9174578b019c | 075f229585fcf9bd9486600200ff9efea5371912 | refs/heads/main | 2023-01-07T09:22:07.685524 | 2020-10-30T03:21:24 | 2020-10-30T03:21:24 | 308,226,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.metadata import Metadata # noqa: E501
from swagger_client.rest import ApiException
class TestMetadata(unittest.TestCase):
"""Metadata unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMetadata(self):
"""Test Metadata"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.metadata.Metadata() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0af742a6da7d3a491d33c665d6821b55a52e9e22 | 451e3188ef94bfd106a0194774c23edd0bba84a2 | /blog/migrations/0011_auto_20190624_1757.py | 32bf1b8e4505dacd5f6698621a22e1552d94538a | [] | no_license | Dolidodzik/Wagtail-Django-Static-Blog | fe9c8caf63275c8f444ac7b898e7e0d67fae018a | db0769da8c7b1c3fc450fe40181bfcf984079ec1 | refs/heads/master | 2020-06-08T02:13:32.401772 | 2019-10-29T17:09:09 | 2019-10-29T17:09:09 | 193,139,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | # Generated by Django 2.2.2 on 2019-06-24 17:57
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('blog', '0010_auto_20190624_1749'),
]
operations = [
migrations.AlterField(
model_name='blogindexpage',
name='body',
field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('gallery', wagtail.core.blocks.StreamBlock([('image', wagtail.images.blocks.ImageChooserBlock())], label='image gallery'))]),
),
migrations.AlterField(
model_name='blogpage',
name='body',
field=wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('gallery', wagtail.core.blocks.StreamBlock([('image', wagtail.images.blocks.ImageChooserBlock())], label='image gallery'))]),
),
]
| [
"[email protected]"
] | |
dc8423b42f23e5e62b109260c92631277dd9f347 | 20860030d52b5be62cb797e396a5a6b83f45dc44 | /configuration-api/src/__init__.py | cf8683bda4fd470b0adb1d974c85ba42670d64d7 | [] | no_license | rcosnita/bravehub | 189d30c46224dd80d6fbf41c50a33559ec2f44ae | 960bcfdb3c2e53e81aa75f7a48980e4918cfd4bb | refs/heads/master | 2022-12-21T11:28:16.626690 | 2018-02-17T10:43:09 | 2018-02-17T10:43:09 | 98,259,347 | 0 | 1 | null | 2022-12-19T13:27:11 | 2017-07-25T03:17:44 | Python | UTF-8 | Python | false | false | 396 | py | """Initializes some configuration parameters which are used in the implementation of
configuration-api microservice."""
from src.ioc import ConfigurationApiContainer
API_VERSION = ConfigurationApiContainer.api_meta.version
API_FULL_VERSION = "{0}.{1}.{2}".format(API_VERSION.major, API_VERSION.minor, API_VERSION.patch)
API_MAJOR_VERSION = "{0}.{1}".format(API_VERSION.major, API_VERSION.minor)
| [
"[email protected]"
] | |
49f713c4ee1d37d24c760dd5a7d2afcca4e2a2f2 | 6359831db732f929409adbb8270092c7e9cca8d5 | /Q046_Vertical_traversal_of_binary_trees.py | a8f43145ca2853b7a89f408b9807b11bd25a2276 | [] | no_license | latika18/interviewbit | 11237219d982c98a22f0098be8248ef7a5b9246f | a065b19dc368136101dafbbbdab9b664fed0bf35 | refs/heads/master | 2020-03-15T15:20:30.002201 | 2018-08-22T07:39:21 | 2018-08-22T07:39:21 | 132,209,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | Given a binary tree, print a vertical order traversal of it.
Example :
Given binary tree:
6
/ \
3 7
/ \ \
2 5 9
returns
[
[2],
[3],
[6 5],
[7],
[9]
]
Note: If two tree nodes share the same vertical level, the one with lesser
depth comes first.

Code:
"""
# Definition for a binary tree node
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
class Solution:
    # @param A : root node of tree
    # @return a list of list of integers
    def verticalordertraversal(self, root):
        hashmap = {}  # maps horizontal distance (hd) to the values at that hd
        if root:
            visited = [(root, 0)]  # BFS queue of (node, hd) pairs
            while visited:
                current, hd = visited.pop(0)
                hashmap.setdefault(hd, []).append(current.val)
                if current.left:
                    visited.append((current.left, hd - 1))
                if current.right:
                    visited.append((current.right, hd + 1))
        # BFS reaches shallower nodes first, so ties on hd keep lesser depth first
        return [hashmap[hd] for hd in sorted(hashmap)]
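# Minimal usage sketch (assumes a TreeNode class as in the comment above);
# building the example tree should yield [[2], [3], [6, 5], [7], [9]]:
# root = TreeNode(6); root.left = TreeNode(3); root.right = TreeNode(7)
# root.left.left = TreeNode(2); root.left.right = TreeNode(5)
# root.right.right = TreeNode(9)
# print(Solution().verticalordertraversal(root))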
| [
"[email protected]"
] | |
4d062caff31b114960dd1f54eb12dceb00549788 | df5ed643835e0759b326b9c3ad2f96a945b1519f | /Xcode.app/Contents/Developer/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/turtledemo/penrose.py | c913c6312d53a0f6dfd5900a4ee1c1972cf95f4c | [
"MIT"
] | permissive | keith/Xcode.app-strings | 8182a6b5272e5763111ddf376818aca277f113d3 | c3c93e5b349425159172bb62e9929b701de26a87 | refs/heads/main | 2023-08-30T22:31:13.291293 | 2023-08-29T18:19:23 | 2023-08-29T18:19:23 | 75,589,712 | 91 | 28 | null | null | null | null | UTF-8 | Python | false | false | 3,358 | py | #!/usr/bin/env python3
""" xturtle-example-suite:
xtx_kites_and_darts.py
Constructs two aperiodic penrose-tilings,
consisting of kites and darts, by the method
of inflation in six steps.
Starting points are the patterns "sun"
consisting of five kites and "star"
consisting of five darts.
For more information see:
http://en.wikipedia.org/wiki/Penrose_tiling
-------------------------------------------
"""
from turtle import *
from math import cos, pi
from time import perf_counter as clock, sleep
f = (5**0.5-1)/2.0 # (sqrt(5)-1)/2 -- golden ratio
d = 2 * cos(3*pi/10)
def kite(l):
fl = f * l
lt(36)
fd(l)
rt(108)
fd(fl)
rt(36)
fd(fl)
rt(108)
fd(l)
rt(144)
def dart(l):
fl = f * l
lt(36)
fd(l)
rt(144)
fd(fl)
lt(36)
fd(fl)
rt(144)
fd(l)
rt(144)
def inflatekite(l, n):
if n == 0:
px, py = pos()
h, x, y = int(heading()), round(px,3), round(py,3)
tiledict[(h,x,y)] = True
return
fl = f * l
lt(36)
inflatedart(fl, n-1)
fd(l)
rt(144)
inflatekite(fl, n-1)
lt(18)
fd(l*d)
rt(162)
inflatekite(fl, n-1)
lt(36)
fd(l)
rt(180)
inflatedart(fl, n-1)
lt(36)
def inflatedart(l, n):
if n == 0:
px, py = pos()
h, x, y = int(heading()), round(px,3), round(py,3)
tiledict[(h,x,y)] = False
return
fl = f * l
inflatekite(fl, n-1)
lt(36)
fd(l)
rt(180)
inflatedart(fl, n-1)
lt(54)
fd(l*d)
rt(126)
inflatedart(fl, n-1)
fd(l)
rt(144)
def draw(l, n, th=2):
clear()
l = l * f**n
shapesize(l/100.0, l/100.0, th)
for k in tiledict:
h, x, y = k
setpos(x, y)
setheading(h)
if tiledict[k]:
shape("kite")
color("black", (0, 0.75, 0))
else:
shape("dart")
color("black", (0.75, 0, 0))
stamp()
def sun(l, n):
for i in range(5):
inflatekite(l, n)
lt(72)
def star(l,n):
for i in range(5):
inflatedart(l, n)
lt(72)
def makeshapes():
tracer(0)
begin_poly()
kite(100)
end_poly()
register_shape("kite", get_poly())
begin_poly()
dart(100)
end_poly()
register_shape("dart", get_poly())
tracer(1)
def start():
reset()
ht()
pu()
makeshapes()
resizemode("user")
def test(l=200, n=4, fun=sun, startpos=(0,0), th=2):
global tiledict
goto(startpos)
setheading(0)
tiledict = {}
tracer(0)
fun(l, n)
draw(l, n, th)
tracer(1)
nk = len([x for x in tiledict if tiledict[x]])
nd = len([x for x in tiledict if not tiledict[x]])
print("%d kites and %d darts = %d pieces." % (nk, nd, nk+nd))
def demo(fun=sun):
start()
for i in range(8):
a = clock()
test(300, i, fun)
b = clock()
t = b - a
if t < 2:
sleep(2 - t)
def main():
#title("Penrose-tiling with kites and darts.")
mode("logo")
bgcolor(0.3, 0.3, 0)
demo(sun)
sleep(2)
demo(star)
pencolor("black")
goto(0,-200)
pencolor(0.7,0.7,1)
write("Please wait...",
align="center", font=('Arial Black', 36, 'bold'))
test(600, 8, startpos=(70, 117))
return "Done"
if __name__ == "__main__":
msg = main()
mainloop()
| [
"[email protected]"
] | |
994535b97312d7b201e3c3ece5776069e47d98fd | 1269530d9534b563879d8e995fecf67196287719 | /check_size_mlstm.py | 40a5c8f3dcd4d109b5f841f02994af78f278d6a3 | [] | no_license | amirunpri2018/Keras-Multiplicative-LSTM | a7ff7eea2d9b8ba1ae15efa7759eb5510fe6fabe | 3c89fed3ac45d84072bc2712a895e479b657e457 | refs/heads/master | 2020-04-08T03:37:47.274723 | 2017-11-04T03:04:22 | 2017-11-04T03:04:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | import multiplicative_lstm
from keras.layers import Input, LSTM
from keras.models import Model
ip = Input(shape=(1, 100))
lstm = LSTM(128)(ip)
mlstm = multiplicative_lstm.MultiplicativeLSTM(128)(ip)
lstm_model = Model(ip, lstm)
mlstm_model = Model(ip, mlstm)
lstm_model.summary()
print('\n' * 3)
mlstm_model.summary()
print('\n' * 3)
params_count_lstm = lstm_model.count_params()
params_count_mlstm = mlstm_model.count_params()
param_ratio = params_count_mlstm / float(params_count_lstm)
if param_ratio != 1.25:
print("Param count (mlstm) / Param count (lstm) = %0.2f, should be close to 1.25" % (param_ratio))
print("Size ratio of mLSTM to LSTM is %0.2f!" % (param_ratio)) | [
"[email protected]"
] | |
f69ee0ebd58b2cd9df04217cbdb83c8b95f62007 | f6b5799c13fad2382d638a1208f4972ce818174a | /site/PROGRAMMERS/src/python/스킬테스트/level01/콜라츠추측.py | 9da7b4527830f6a02e0a2ca40eba22dd9e88d4e1 | [] | no_license | JoonBeomLee/Algorithm_Python | 6bf0cc29ffaf75156bfa44ea531c33b3d2b2a129 | 185fb39d535573c374f1d0d88f728f97086a4246 | refs/heads/master | 2023-06-11T10:27:10.228151 | 2021-07-05T14:59:40 | 2021-07-05T14:59:40 | 193,500,999 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | def solution(num):
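# Collatz step counter for the Programmers "콜라츠 추측" problem: halve even
# numbers, map odd n to 3n + 1, and count steps until reaching 1, returning
# -1 if 1 is not reached within 500 steps. Worked example (steps checked by
# hand): solution(6) walks 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 and
# returns 8.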
answer = 0
while True:
if(num == 1): break
if(answer == 500): answer = -1; break
        # even
        if num % 2 == 0:
            num = num // 2
        # odd
        else:
            num = 3 * num + 1
answer += 1
    return answer
| [
"[email protected]"
] | |
28623c7e7bcdf0aaaff6991949f05be9d2bc310d | e2992452a3c52f4cbbc64e1686128ad464b71d16 | /weixinSource/weixinSource/pipelines.py | aeba7ce32ab3fe5a82d68511a812978bede5574e | [] | no_license | MaGuiSen/studyScrapy | 6b84605a15027ffc24501d690666f419ebb379fd | 03604bafe19e55db12677a4af388c8a9198ca572 | refs/heads/master | 2021-01-17T04:30:23.858217 | 2017-08-30T01:50:08 | 2017-08-30T01:50:08 | 95,433,695 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class WeixinsourcePipeline(object):
def process_item(self, item, spider):
return item
| [
"[email protected]"
] | |
917fe61e6abccc4a02db3da5c659d3386712f344 | d4f87828a3767788dfbd83ef150d4af13c8ac762 | /swagger_client/models/saml_group_read.py | d5997973149b6129873ea058e8861e7e019a04e3 | [] | no_license | n-sundararajan/python-looker-api | 55dc6779843e28796c573a62ee30b75e135e8be5 | 498301068e7bd983b99d6647785a9f3d315ceadd | refs/heads/master | 2020-03-19T13:52:39.284698 | 2018-06-08T10:33:07 | 2018-06-08T10:33:07 | 136,599,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,692 | py | # coding: utf-8
"""
Looker API 3.0 Reference
### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning) # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.role import Role # noqa: F401,E501
class SamlGroupRead(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'roles': 'list[Role]',
'url': 'str',
'can': 'dict(str, bool)'
}
attribute_map = {
'name': 'name',
'roles': 'roles',
'url': 'url',
'can': 'can'
}
def __init__(self, name=None, roles=None, url=None, can=None): # noqa: E501
"""SamlGroupRead - a model defined in Swagger""" # noqa: E501
self._name = None
self._roles = None
self._url = None
self._can = None
self.discriminator = None
if name is not None:
self.name = name
if roles is not None:
self.roles = roles
if url is not None:
self.url = url
if can is not None:
self.can = can
@property
def name(self):
"""Gets the name of this SamlGroupRead. # noqa: E501
Name of group in Saml # noqa: E501
:return: The name of this SamlGroupRead. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SamlGroupRead.
Name of group in Saml # noqa: E501
:param name: The name of this SamlGroupRead. # noqa: E501
:type: str
"""
self._name = name
@property
def roles(self):
"""Gets the roles of this SamlGroupRead. # noqa: E501
Looker Roles # noqa: E501
:return: The roles of this SamlGroupRead. # noqa: E501
:rtype: list[Role]
"""
return self._roles
@roles.setter
def roles(self, roles):
"""Sets the roles of this SamlGroupRead.
Looker Roles # noqa: E501
:param roles: The roles of this SamlGroupRead. # noqa: E501
:type: list[Role]
"""
self._roles = roles
@property
def url(self):
"""Gets the url of this SamlGroupRead. # noqa: E501
Link to saml config # noqa: E501
:return: The url of this SamlGroupRead. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this SamlGroupRead.
Link to saml config # noqa: E501
:param url: The url of this SamlGroupRead. # noqa: E501
:type: str
"""
self._url = url
@property
def can(self):
"""Gets the can of this SamlGroupRead. # noqa: E501
Operations the current user is able to perform on this object # noqa: E501
:return: The can of this SamlGroupRead. # noqa: E501
:rtype: dict(str, bool)
"""
return self._can
@can.setter
def can(self, can):
"""Sets the can of this SamlGroupRead.
Operations the current user is able to perform on this object # noqa: E501
:param can: The can of this SamlGroupRead. # noqa: E501
:type: dict(str, bool)
"""
self._can = can
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SamlGroupRead):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
0eec3d555e4db9f9548824002788e9b95e60b91e | dd4d2589d1f14303cacd3b7ee1dd5f6bacd3bf3c | /matrix/spiral_matrix_2.py | ed14ac09091d56c65841453b2f11ee3ea40c813a | [] | no_license | salujaharkirat/ds-algo | ec22eaae81bdb78f2818248508325a536aedbb7b | 819b5971826d97ec600b92776c5158518c9cbf22 | refs/heads/master | 2023-05-02T17:20:49.425484 | 2021-05-23T07:54:29 | 2021-05-23T07:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | """
https://leetcode.com/problems/spiral-matrix-ii/
"""
import numpy as np
class Solution:
def generateMatrix(self, n: int) -> List[List[int]]:
res = []
for i in range(n):
res.append([0] * n)
left = 0
right = n - 1
top = 0
bottom = n - 1
direction = 0
cnt = 1
while left <= right and top <= bottom:
if direction == 0:
for i in range(left, right+1):
res[top][i] = cnt
cnt += 1
top += 1
direction = 1
elif direction == 1:
for i in range(top, bottom+1):
res[i][right] = cnt
cnt += 1
right -= 1
direction = 2
elif direction == 2:
for i in reversed(range(left, right+1)):
res[bottom][i] = cnt
cnt += 1
bottom -= 1
direction = 3
elif direction == 3:
for i in reversed(range(top, bottom+1)):
res[i][left] = cnt
cnt += 1
left += 1
direction = 0
return res
| [
"[email protected]"
] | |
d7cdd4c62c61f62e1d8c309bc87f77c4949eadd9 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sbss_1655+591/sdB_sbss_1655+591_coadd.py | f030d65a0471c8af9f04c2583c5c8ff5c0bbeec1 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[254.127958,59.079469], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_sbss_1655+591/sdB_sbss_1655+591_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_sbss_1655+591/sdB_sbss_1655+591_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
81d2acb91a751c6699bd377dc4694798e495f094 | 634514a9c10e32051964b179cc807d089d31124e | /S2l/Thesis_Ch3/Exp1_reach3dof/Scripts/plotter_episodes.py | c705777868308749f11d14705ce7c42a4bc061d3 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | leopauly/Observation-Learning-Simulations | 34009872a1f453ffc4ae7ddced7447a74ff704c4 | 462c04a87c45aae51537b8ea5b44646afa31d3a5 | refs/heads/master | 2021-08-04T10:55:42.900015 | 2021-07-05T13:41:09 | 2021-07-05T13:41:09 | 129,761,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | #### For plotting from reawrd values stored in files
import numpy as np
import matplotlib.pyplot as plt
import sys
run=sys.argv[1]
y = np.loadtxt('episode_reward_run_'+run+'.txt', unpack=True)
y_new=y[1:len(y)]
x=range(len(y_new))
print(x,y_new)
plt.figure(1)
plt.plot(x,y_new)
plt.title('Reward')
plt.xlabel('episodes')
plt.ylabel('reward per episode')
plt.show()
y_new=-np.array(y_new)
plt.figure(2)
plt.plot(x,y_new)
plt.title('Feature distance')
plt.xlabel('episodes')
plt.ylabel('feature distance per episode')
plt.show()
| [
"[email protected]"
] |