| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 616 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | sequence | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 to 10.2M |
| authors | sequence | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |
da8775e18d3b0e6f3cfa5b7ce00126f7f11d9688 | b819632a899cc4919c4efb097b87009a9d07d209 | /testbed_nodel11_vm_container.py | a54514a0093d7fb87304a63cdeb2ee24793ed008 | [] | no_license | NuthanChandra/ctools | bb2570786d9b1a584c5b08800f48b02ed8664480 | bcb967c53375104e32b32c8f0d2b3ca25ed69e49 | refs/heads/master | 2022-11-28T04:25:30.092129 | 2020-04-14T12:38:27 | 2020-04-14T12:38:27 | 255,604,269 | 1 | 1 | null | 2020-07-23T16:29:45 | 2020-04-14T12:34:11 | Python | UTF-8 | Python | false | false | 4,050 | py | from fabric.api import env
import os
host1 = '[email protected]'
host2 = '[email protected]'
host3 = '[email protected]'
host4 = '[email protected]'
host5 = '[email protected]'
host6 = '[email protected]'
kvm_nodel12 = '10.204.216.114'
ext_routers = [('hooper','10.204.217.240')]
router_asn = 64512
public_vn_rtgt = 2225
public_vn_subnet = '10.204.221.160/28'
host_build = '[email protected]'
{env_roledefs}
#env.roledefs = {
# 'all': [host1,host2,host3,host4,host5,host6],
# 'cfgm': [host1, host2],
# 'webui': [host1],
# 'openstack': [host1],
# 'control': [host2, host3],
# 'collector': [host1],
# 'database': [host1, host2, host3],
# 'compute': [host4, host5, host6],
# 'build': [host_build]
#}
env.physical_routers={
'hooper' : { 'vendor': 'juniper',
'model' : 'mx',
'asn' : '64512',
'name' : 'hooper',
'ssh_username' : 'root',
'ssh_password' : 'c0ntrail123',
'mgmt_ip' : '10.204.217.240',
}
}
env.hostnames = {
'all': ['nodel12-vm1', 'nodel12-vm2', 'nodel12-vm3', 'nodel12-vm4', 'nodel12-vm5', 'nodel12-vm6']
}
env.openstack_admin_password = 'contrail123'
env.password = 'c0ntrail123'
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host6: 'c0ntrail123',
host_build: 'stack@123',
}
reimage_param = 'ubuntu-14.04.5'
vm_node_details = {
'default': {
'image_dest' : '/mnt/disk1/images/',
'ram' : '32768',
'server': kvm_nodel12,
'vcpus' : '4',
'disk_format' : 'qcow2',
'image_source' : 'http://10.204.217.158/images/node_vm_images/%s-256G.img.gz' % (reimage_param),
},
host1 : {
'name' : 'nodel12-vm1',
'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:01'}
],
},
host2 : { 'name' : 'nodel12-vm2',
'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:02'}
]
},
host3 : { 'name' : 'nodel12-vm3',
'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:03'}
]
},
host4 : { 'name' : 'nodel12-vm4',
'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:04'}
]
},
host5 : { 'name' : 'nodel12-vm5',
'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:05'}
]
},
host6 : { 'name' : 'nodel12-vm6',
'network' : [{'bridge' : 'br1', 'mac':'52:53:59:01:00:06'}
]
}
}
env.keystone = {'admin_password': 'c0ntrail123'}
env.openstack = {'manage_amqp': "true"}
minimum_diskGB=32
env.kernel_upgrade=False
env.rsyslog_params = {'port':19876, 'proto':'tcp', 'collector':'dynamic', 'status':'enable'}
env.test_repo_dir='/home/stack/multi_interface_parallel/centos65/icehouse/contrail-test'
env.mail_from='[email protected]'
env.mail_to='[email protected]'
multi_tenancy=True
env.interface_rename = True
env.enable_lbaas = True
enable_ceilometer = True
ceilometer_polling_interval = 60
env.encap_priority = "'VXLAN','MPLSoUDP','MPLSoGRE'"
env.log_scenario='Multi-Node Nodel12 Container Sanity[mgmt, ctrl=data]'
env.ntp_server = '10.204.217.158'
| [
"[email protected]"
] | |
8f377dbae4bdfac6f266dec47f88176f4f0e1eca | b50f07920a48df36c5303e6bbd35ff1eafbece16 | /jms/expression.py | 0668bf0f294aef112c6ee929ab72cafc5af0faa2 | [] | no_license | principia12/project_re | ed21cd369412d440ae794fd7ff422400988be5e3 | d165026e08cd1efd27ed9a0147aaf790f9374916 | refs/heads/master | 2020-08-27T19:39:08.872522 | 2019-11-07T09:31:04 | 2019-11-07T09:31:04 | 217,472,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,657 | py |
from abc import ABC, abstractmethod
from .common import ConsumeFailException, is_valid_char, is_whitespace, is_word_char, is_numeric
from .tokenizer import TokenType
class Expr(ABC):
@abstractmethod
def consume(self, text, idx):
pass
@classmethod
def from_token(cls, token):
if token.token_type == TokenType.CHAR:
return Term(token.value)
elif token.token_type == TokenType.ANCHOR_CHAR:
return AnchorTerm(token.value)
elif token.token_type in [TokenType.CLASS_CHAR, TokenType.WILDCARD_CHAR]:
return ClassTerm(token.value)
else:
raise ValueError()
@classmethod
def with_and(cls, exprs):
return AndExpr(exprs)
@classmethod
def with_or(cls, exprs):
return OrExpr(exprs)
@staticmethod
def get_char(text, idx):
if idx >= len(text):
raise ConsumeFailException()
return text[idx]
class EmptyTerm(Expr):
def consume(self, text, idx):
return idx
class Term(Expr):
def __init__(self, c):
self.c = c
def consume(self, text, idx):
c = self.get_char(text, idx)
if c == self.c:
return idx + 1
else:
raise ConsumeFailException()
class AnchorTerm(Expr):
check_funcs = {
'^': lambda text, idx: idx == 0,
'$': lambda text, idx: idx == len(text)
}
def __init__(self, c):
self.check_func = self.check_funcs[c]
def consume(self, text, idx):
if self.check_func(text, idx):
return idx
else:
raise ConsumeFailException()
class ClassTerm(Expr):
check_funcs = {
'.': is_valid_char,
'd': is_numeric,
'w': is_word_char,
's': is_whitespace,
}
def __init__(self, c: str):
self.positive = c == '.' or c.islower()
self.check_func = self.check_funcs[c.lower()]
def consume(self, text, idx):
c = self.get_char(text, idx)
if self.check_func(c) == self.positive:
return idx + 1
else:
raise ConsumeFailException()
class AndExpr(Expr):
def __init__(self, exprs):
self.exprs = exprs
def consume(self, text, idx):
for expr in self.exprs:
idx = expr.consume(text, idx)
return idx
class OrExpr(Expr):
def __init__(self, exprs):
self.exprs = exprs
def consume(self, text, idx):
for expr in self.exprs:
try:
return expr.consume(text, idx)
except ConsumeFailException:
pass
raise ConsumeFailException()
| [
"[email protected]"
] | |
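A minimal usage sketch for the matcher classes above, assuming `Term`, `ClassTerm`, `AndExpr`, `OrExpr` and `ConsumeFailException` are in scope; the pattern built here is a hypothetical example, not part of the module:

```python
# Hand-build the pattern "ab" OR "a<any valid char>".
pattern = OrExpr([
    AndExpr([Term('a'), Term('b')]),       # literal "ab"
    AndExpr([Term('a'), ClassTerm('.')]),  # "a" followed by any valid char
])

try:
    end = pattern.consume("abc", 0)  # consume() returns the index just past the match
    print("matched up to index", end)  # -> 2
except ConsumeFailException:
    print("no match at position 0")
```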
75d3392dc40e06676c640968578a29a6e4230e6b | 1e139784a36ce2a26dafaac0bb795b168ca91776 | /electron_project/abstract/migrations/0003_delete_workeraccount.py | bda3728b90ddb267ad2ad6addfa863d7ca628b2e | [] | no_license | TestAccount2077/Mas-Electronics | a9f4431be7ea740b99616cb4ce4acf9bba46096f | 6bb887805900affdcd905deb33b341892bebd41f | refs/heads/master | 2020-03-28T15:11:57.044686 | 2019-01-26T16:01:55 | 2019-01-26T16:01:55 | 148,566,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-10-20 04:57
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('abstract', '0002_workeraccount'),
]
operations = [
migrations.DeleteModel(
name='WorkerAccount',
),
]
| [
"[email protected]"
] | |
c82c8b8c6b31aa7a2fbea0a59b5f32fd34dcd6e1 | d9f4400d47e0ce914be636698365e26f836b766c | /apps/screen/urls.py | 71d2c6af754bfffc99bcf77e2a85e73445a03918 | [] | no_license | otonelunico/prevex | deffc3cfd82354b20e61ac636b2b7fb4dd48d360 | e32efb317a05031a5e0c454d3343748ea7ff534e | refs/heads/master | 2021-01-22T08:39:26.389747 | 2017-06-24T20:52:27 | 2017-06-24T20:52:27 | 92,628,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | from django.conf.urls import url, include
from apps.screen.views import index, Settings, Prevent_, Video_
from django.contrib.auth.decorators import login_required
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^settings/$', login_required(Settings), name="settings"),
url(r'^prevent/(?P<funct>\w+)/(?P<type>\d+)/(?P<id>\d+)$', login_required(Prevent_), name="prevent"),
url(r'^video/(?P<funct>\w+)/(?P<type>\d+)$', login_required(Video_), name="video"),
] | [
"[email protected]"
] | |
dd5e2a1ea852ce0926d23d2517ce7a6499aa5d2c | 75bd816c06203f9ae8b988b1f51778b199fbe629 | /backend/app/db/__init__.py | 47daae9c5a072f088859a5a05e80b73162277462 | [] | no_license | mcptr/bbone-js-py | ce209e377976707d1e0661fda5d5ceb6452dd8a1 | ee07dce6907c645fbdd843daa80604d7228778b1 | refs/heads/master | 2020-03-27T00:11:34.310231 | 2018-08-21T18:17:30 | 2018-08-21T18:17:30 | 145,602,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19 | py | from . db import *
| [
"[email protected]"
] | |
734a353a9b4a5f50f3a72adeae60c79376b0e30d | e82245a9e623ef3e2b4b9c02f0fd932c608c4484 | /firecode.io/08-find_the_transpose_of_a_square_matrix.py | 3d2ba5c22dcb19e7aba1339638498c7d1921455a | [] | no_license | Zylophone/Programming-for-Sport | 33e8161028cfddce3b7a1243eb092070107342e3 | 193d6184f939303d8661f68d6fd06bdec95df351 | refs/heads/master | 2020-06-16T23:11:44.719286 | 2017-05-21T17:10:46 | 2017-05-21T17:10:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | '''
Args:
- matrix (list of lists)
- a square matrix
Modifies:
- arg into its transpose in-place
Returns:
- nothing (None)
Complexity:
- O(n^2) time
- O(1) extra space, in-place
'''
def transpose_matrix(matrix):
if matrix is None:
return None
n= len(matrix)
for i in range(n):
for j in range(i):
matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j] | [
"[email protected]"
] | |
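A quick sanity check for `transpose_matrix`, as a hypothetical snippet assuming the function above is in scope; the matrix values are arbitrary:

```python
m = [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]]
transpose_matrix(m)  # transposes m in place, returns None
assert m == [[1, 4, 7],
             [2, 5, 8],
             [3, 6, 9]]
```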
5db1fbf5131e9fcb3b1160d38c497df02b701c2d | 12a5b72982291ac7c074210afc2c9dfe2c389709 | /online_judges/Codeforces/113/A/code.py | 6a269bbf442e5a4f164b88db14eb1cdb942cc845 | [] | no_license | krantirk/Algorithms-and-code-for-competitive-programming. | 9b8c214758024daa246a1203e8f863fc76cfe847 | dcf29bf976024a9d1873eadc192ed59d25db968d | refs/heads/master | 2020-09-22T08:35:19.352751 | 2019-05-21T11:56:39 | 2019-05-21T11:56:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | s = ["lios","liala","etr","etra","initis","inites"]
input_string = raw_input().split()
answer = True
for e in input_string:
flag = False
for k in s:
if e.endswith(k):
flag = True
if not flag:
answer = False
break
if (answer): print "YES"
else: print "NO"
| [
"[email protected]"
] | |
653c6ee77e55fe39bf26522d6e3e04161daa0ce3 | 022104aa2456429356bdd26c701a2949381a83cf | /install/lib/python2.7/dist-packages/robotnik_msgs/msg/_SetElevatorFeedback.py | fe9731c70b42e53a2afd11197435c3aea3f8e08d | [] | no_license | nachocz/campero_ws | 204f313d5fbdb81d1f7cc568341a1170ddd2b4cf | f2b09f96165166c0e867e3f5f3dcd092dbac1c1b | refs/heads/master | 2023-02-02T03:25:56.603172 | 2020-12-11T11:28:42 | 2020-12-11T11:28:42 | 320,539,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,506 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from robotnik_msgs/SetElevatorFeedback.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import robotnik_msgs.msg
class SetElevatorFeedback(genpy.Message):
_md5sum = "47e3f709643220443260a9d8c1f901ea"
_type = "robotnik_msgs/SetElevatorFeedback"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
robotnik_msgs/ElevatorStatus status
================================================================================
MSG: robotnik_msgs/ElevatorStatus
# state
string RAISING=raising
string LOWERING=lowering
string IDLE=idle
string ERROR_G_IO=error_getting_io
string ERROR_S_IO=error_setting_io
string ERROR_TIMEOUT=error_timeout_in_action
# position
string UP=up
string DOWN=down
string UNKNOWN=unknown
# IDLE, RAISING, LOWERING
string state
# UP, DOWN, UNKNOWN
string position
float32 height
"""
__slots__ = ['status']
_slot_types = ['robotnik_msgs/ElevatorStatus']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
status
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetElevatorFeedback, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.status is None:
self.status = robotnik_msgs.msg.ElevatorStatus()
else:
self.status = robotnik_msgs.msg.ElevatorStatus()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.status.state
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.status.position
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.status.height
buff.write(_get_struct_f().pack(_x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.status is None:
self.status = robotnik_msgs.msg.ElevatorStatus()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.state = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.state = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.position = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.position = str[start:end]
start = end
end += 4
(self.status.height,) = _get_struct_f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.status.state
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.status.position
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.status.height
buff.write(_get_struct_f().pack(_x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.status is None:
self.status = robotnik_msgs.msg.ElevatorStatus()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.state = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.state = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.position = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status.position = str[start:end]
start = end
end += 4
(self.status.height,) = _get_struct_f().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_f = None
def _get_struct_f():
global _struct_f
if _struct_f is None:
_struct_f = struct.Struct("<f")
return _struct_f
| [
"[email protected]"
] | |
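A round-trip sketch for the generated message class above. This is a hypothetical check, not part of the generated file; it assumes `SetElevatorFeedback` is importable and that importing `genpy` has registered the 'rosmsg' codec error handler the deserializer looks up:

```python
from io import BytesIO

msg = SetElevatorFeedback()
msg.status.state = 'idle'      # one of the ElevatorStatus state strings
msg.status.position = 'down'
msg.status.height = 0.0

buff = BytesIO()
msg.serialize(buff)            # packs state, position, then the float height

decoded = SetElevatorFeedback()
decoded.deserialize(buff.getvalue())
assert decoded.status.position == 'down'
```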
2ffa0b789fd7bedb02f8cc8683ee87eb0cdbb113 | 1bf9f6b0ef85b6ccad8cb029703f89039f74cedc | /src/spring/azext_spring/vendored_sdks/appplatform/v2022_01_01_preview/aio/operations/_build_service_agent_pool_operations.py | 3f303bb5a4713c0abb643d1787ec91d545c4e585 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | VSChina/azure-cli-extensions | a1f4bf2ea4dc1b507618617e299263ad45213add | 10b7bfef62cb080c74b1d59aadc4286bd9406841 | refs/heads/master | 2022-11-14T03:40:26.009692 | 2022-11-09T01:09:53 | 2022-11-09T01:09:53 | 199,810,654 | 4 | 2 | MIT | 2020-07-13T05:51:27 | 2019-07-31T08:10:50 | Python | UTF-8 | Python | false | false | 22,671 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._build_service_agent_pool_operations import (
build_get_request,
build_list_request,
build_update_put_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BuildServiceAgentPoolOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.appplatform.v2022_01_01_preview.aio.AppPlatformManagementClient`'s
:attr:`build_service_agent_pool` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, resource_group_name: str, service_name: str, build_service_name: str, **kwargs: Any
) -> AsyncIterable["_models.BuildServiceAgentPoolResource"]:
"""List build service agent pool.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param build_service_name: The name of the build service resource. Required.
:type build_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BuildServiceAgentPoolResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appplatform.v2022_01_01_preview.models.BuildServiceAgentPoolResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-01-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildServiceAgentPoolResourceCollection]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
service_name=service_name,
build_service_name=build_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("BuildServiceAgentPoolResourceCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools"} # type: ignore
@distributed_trace_async
async def get(
self, resource_group_name: str, service_name: str, build_service_name: str, agent_pool_name: str, **kwargs: Any
) -> _models.BuildServiceAgentPoolResource:
"""Get build service agent pool.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param build_service_name: The name of the build service resource. Required.
:type build_service_name: str
:param agent_pool_name: The name of the build service agent pool resource. Required.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BuildServiceAgentPoolResource or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2022_01_01_preview.models.BuildServiceAgentPoolResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-01-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildServiceAgentPoolResource]
request = build_get_request(
resource_group_name=resource_group_name,
service_name=service_name,
build_service_name=build_service_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("BuildServiceAgentPoolResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}"} # type: ignore
async def _update_put_initial(
self,
resource_group_name: str,
service_name: str,
build_service_name: str,
agent_pool_name: str,
agent_pool_resource: Union[_models.BuildServiceAgentPoolResource, IO],
**kwargs: Any
) -> _models.BuildServiceAgentPoolResource:
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-01-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildServiceAgentPoolResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(agent_pool_resource, (IO, bytes)):
_content = agent_pool_resource
else:
_json = self._serialize.body(agent_pool_resource, "BuildServiceAgentPoolResource")
request = build_update_put_request(
resource_group_name=resource_group_name,
service_name=service_name,
build_service_name=build_service_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_put_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("BuildServiceAgentPoolResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("BuildServiceAgentPoolResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_put_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}"} # type: ignore
@overload
async def begin_update_put(
self,
resource_group_name: str,
service_name: str,
build_service_name: str,
agent_pool_name: str,
agent_pool_resource: _models.BuildServiceAgentPoolResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.BuildServiceAgentPoolResource]:
"""Create or update build service agent pool.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param build_service_name: The name of the build service resource. Required.
:type build_service_name: str
:param agent_pool_name: The name of the build service agent pool resource. Required.
:type agent_pool_name: str
:param agent_pool_resource: Parameters for the update operation. Required.
:type agent_pool_resource:
~azure.mgmt.appplatform.v2022_01_01_preview.models.BuildServiceAgentPoolResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BuildServiceAgentPoolResource or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2022_01_01_preview.models.BuildServiceAgentPoolResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update_put(
self,
resource_group_name: str,
service_name: str,
build_service_name: str,
agent_pool_name: str,
agent_pool_resource: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.BuildServiceAgentPoolResource]:
"""Create or update build service agent pool.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param build_service_name: The name of the build service resource. Required.
:type build_service_name: str
:param agent_pool_name: The name of the build service agent pool resource. Required.
:type agent_pool_name: str
:param agent_pool_resource: Parameters for the update operation. Required.
:type agent_pool_resource: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BuildServiceAgentPoolResource or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2022_01_01_preview.models.BuildServiceAgentPoolResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update_put(
self,
resource_group_name: str,
service_name: str,
build_service_name: str,
agent_pool_name: str,
agent_pool_resource: Union[_models.BuildServiceAgentPoolResource, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.BuildServiceAgentPoolResource]:
"""Create or update build service agent pool.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param build_service_name: The name of the build service resource. Required.
:type build_service_name: str
:param agent_pool_name: The name of the build service agent pool resource. Required.
:type agent_pool_name: str
:param agent_pool_resource: Parameters for the update operation. Is either a model type or a IO
type. Required.
:type agent_pool_resource:
~azure.mgmt.appplatform.v2022_01_01_preview.models.BuildServiceAgentPoolResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BuildServiceAgentPoolResource or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2022_01_01_preview.models.BuildServiceAgentPoolResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-01-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildServiceAgentPoolResource]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_put_initial( # type: ignore
resource_group_name=resource_group_name,
service_name=service_name,
build_service_name=build_service_name,
agent_pool_name=agent_pool_name,
agent_pool_resource=agent_pool_resource,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("BuildServiceAgentPoolResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod,
AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_put.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/agentPools/{agentPoolName}"} # type: ignore
| [
"[email protected]"
] | |
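A hypothetical call sketch for the operations class above. Its own docstring says it is reached through the client's `build_service_agent_pool` attribute; the credential, subscription id and resource names below are placeholders, and naming the build service "default" is an assumption:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.appplatform.v2022_01_01_preview.aio import AppPlatformManagementClient

async def list_agent_pools():
    async with AppPlatformManagementClient(
            DefaultAzureCredential(), "<subscription-id>") as client:
        # list() returns an AsyncItemPaged of BuildServiceAgentPoolResource
        async for pool in client.build_service_agent_pool.list(
                "<resource-group>", "<spring-service>", "default"):
            print(pool.name)  # ARM resources expose a name attribute

asyncio.run(list_agent_pools())
```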
0271411a63fff75c6ccceb030d69175bd3075563 | cbf9f600374d7510988632d7dba145c8ff0cd1f0 | /abc/207/a.py | ab70569c2994dfed4061f58d6ae22f8216cea250 | [] | no_license | sakakazu2468/AtCoder_py | d0945d03ad562474e40e413abcec39ded61e6855 | 34bdf39ee9647e7aee17e48c928ce5288a1bfaa5 | refs/heads/master | 2022-04-27T18:32:28.825004 | 2022-04-21T07:27:00 | 2022-04-21T07:27:00 | 225,844,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | a, b, c = map(int, input().split())
print(max(a+b, b+c, c+a))
| [
"[email protected]"
] | |
a67d6079a5eec64bc07497534737ee8ef949dd51 | 3ab1f37b4372d0796c85ef24343dd8c03accb6ef | /CoinBase/ConnectFour.py | aa65bc2e3204e93c5a8b26877efbaf25d28eb2c3 | [] | no_license | Blossomyyh/leetcode | 2be6a99534801fc59fe9551317ca49c3704b1c3d | 38615779eb43d147587467e11dc22761ac0726cb | refs/heads/master | 2023-01-22T16:56:26.624677 | 2020-11-20T13:47:43 | 2020-11-20T13:47:43 | 266,845,278 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,135 | py | """
Connect 4
scan rows, columns and both diagonals for four in a row -> win
https://codereview.stackexchange.com/questions/225840/a-simple-connect-4-game-in-python
https://github.com/KeithGalli/Connect4-Python/blob/master/connect4.py
Better solution:
only check the row, column and diagonals through the current move
"""
TEAM1 = 1
TEAM2 = 2
class connect4:
def __init__(self, row=6, col=7):
self.row = row
self.col = col
        # generate an empty row x col board (6 x 7 by default)
self.board = [[0]*self.col for _ in range(self.row)]
self.rows =[]
self.count = self.row * self.col
    # alternative scoring idea: for each window of 4 cells, add +1 for team1 and -1 for team2; a window total of +4 or -4 is a win
def returnboard(self):
for i in range(self.row):
print(self.board[i])
return
def checkwins(self, team):
# n*m --> Time O(4*N*M)
# horizontally
for r in range(self.row):
for c in range(self.col - 3):
if self.board[r][c] == team and self.board[r][c+1]== team and self.board[r][c+2]== team and self.board[r][c+3]== team:
return True
# vertically
for r in range(self.row - 3):
for c in range(self.col):
if self.board[r][c] == team and self.board[r+1][c] == team and self.board[r+2][c] == team and self.board[r+3][c] == team:
return True
# diagonally
for r in range(self.row -3):
for c in range(self.col - 3):
if self.board[r][c] == team and self.board[r+1][c+1]== team and self.board[r+2][c+2]== team and self.board[r+3][c+3] == team:
return True
# anti-diagonally
for r in range(3, self.row):
for c in range(self.col - 3):
if self.board[r][c] == team and self.board[r-1][c+1] == team and self.board[r-2][c+2] == team and self.board[r-3][c+3] == team:
return True
return False
def checkcolumn(self, col):
# check whether the current column can make move
return 0 in [i[col] for i in self.board]
    def checkend(self, rounds):
        # the board is full once more moves than cells have been attempted
        if rounds > self.count:
            print("The end of the game!")
            return True
        return False
def makemove(self, team, col):
# col is valid here
i = self.row -1
# check from bottom until find the empty position
while self.board[i][col] != 0:
i -= 1
self.board[i][col] = team
print(str(team)+" move at col: " +str(col))
self.returnboard()
if self.checkwins(team):
print("Team "+str(team)+ " WIN !")
return True
return False
import random
if __name__ == "__main__":
game = connect4()
game.returnboard()
rounds = 1
win = False
while not win and not game.checkend(rounds):
team = rounds % 2 + 1
# generate a random number 0-6
colidx = random.randrange(7)
while not game.checkcolumn(colidx):
colidx = random.randrange(7)
win = game.makemove(team, colidx)
rounds += 1
game.returnboard()
| [
"[email protected]"
] | |
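A sketch of the "better solution" noted in the module docstring: after a piece lands at (row, col), scan only the four lines through that cell instead of rescanning the whole board. This helper is not part of the class above and assumes the same 0/1/2 board encoding:

```python
def wins_after_move(board, team, row, col):
    n_rows, n_cols = len(board), len(board[0])
    # horizontal, vertical, and the two diagonals through (row, col)
    for dr, dc in ((0, 1), (1, 0), (1, 1), (-1, 1)):
        count = 1  # the piece just placed
        for sign in (1, -1):  # walk away from it in both directions
            r, c = row + sign * dr, col + sign * dc
            while 0 <= r < n_rows and 0 <= c < n_cols and board[r][c] == team:
                count += 1
                r, c = r + sign * dr, c + sign * dc
        if count >= 4:
            return True
    return False
```

This makes each win check O(1) in the board size instead of the O(4*N*M) full scan done by `checkwins`.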
339700e4ddf899c0336dd7012c4c6385c8eb3cbb | 9716a77ef1d0ba5ef9a61be04f6229494744d5d5 | /chapter06 정렬/위에서 아래로.py | 158d19ce05bdd45aaedf7a1c03c28402fb6a8ac5 | [] | no_license | korea-space-codingmonster/Algorithm_Study | 98b00c81839cf8ac8365d3982c25650a21226ce9 | 8c92857e458994a2d1d77dc3ea0d4b645b8b6a4b | refs/heads/main | 2023-06-03T20:00:52.915447 | 2021-06-20T05:51:47 | 2021-06-20T05:51:47 | 329,354,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # 하나의 주열에는 다양한 수가 존재한다. 이러한 수의 크기에 상관없이 나열되어 있다.
# 이 수를 큰 수부터 작은 수의 순서로 정렬해야한다. 수열을 내림차순으로 정렬하는 프로그램을 만드시오.
# 입력조건
# 첫째 줄에 수열에 속해 있는 수의 개수 N이 주어진다.(1 < N <= 500)
# 둘째 줄부터 N + 1번째 줄까지 N개의 수가 입려된다. 수의 범위는 1이상 100000이하의 자연수이다.
# 입력예시
# 3
# 15
# 27
# 12
# 출력예시
# 27 15 12
n = int(input())
array = []
for i in range(n):
array.append(int(input()))
array = sorted(array, reverse = True)
for i in array:
print(i, end = ' ')
| [
"[email protected]"
] | |
56337b1337e5899c09e974e19288f2cdc899dc73 | eb94bccbcc8d2244843dde59d201850870ef29ca | /datahub_lib/swagger_client/models/role_assignment_request.py | d57b16e3d72f647de2aff3f2b4762c479931ea5c | [] | no_license | sunasing/noaa_docker | d7e2000d1cfc91123d74f0c95f8efe2c7eda8c12 | 7d9e05686463a6cd8d39313af9496a06bdf00367 | refs/heads/master | 2020-12-27T14:44:18.967963 | 2020-02-03T06:50:52 | 2020-02-03T06:50:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,051 | py | # coding: utf-8
"""
Azure FarmBeats API
<p> <p>Azure FarmBeats helps you build digital agricultural solutions in Azure. By providing a standardized schema to query agricultural data from various sources, Azure FarmBeats provides you: <ul > <li style=\"margin: 7px;\">Ability to acquire, aggregate, process and store agricultural data.</li> <li style=\"margin: 7px;\">Capability to fuse data between data sources and generate insights.</li> <li style=\"margin: 7px;\">Schematized access and query capabilities on ingested data.</li> </ul> </p> <h><b>REST Operation Groups</b></h> <p><b>Farm:</b></p> <p>Farm corresponds to a physical location of interest within the system. Each Farm has a Farm name and a unique farm id.</p> <p><b>Device:</b></p> <p>Device corresponds to a physical device present in the farm. Each device has a unique device id. Device is typically provisioned to a farm with a farm id.</p> <p><b>DeviceModel:</b></p> <p>DeviceModel corresponds to the meta-data of the device such as the Manufacturer, Type of the device either Gateway or Node.</p> <p><b>Sensor:</b></p> <p>Sensor corresponds to a physical sensor that records values. A sensor is typically connected to a device with a device id.</p> </p> <p><b>SensorModel:</b></p> <p>SensorModel corresponds to the meta-data of the sensor such as the Manufacturer, Type of the sensor either Analog or Digital, Sensor Measure such as Ambient Temperature, Pressure etc.</p> <p><b>Telemetry:</b></p> <p>Telemetry provides the ability to read telemetry messages for a particular sensor & time range.</p> <p><b>Job:</b></p> <p>Job corresponds to any workflow of activities which are executed in the system to get a desired output. Each job is associated with a job id and job type.</p> <p><b>JobType:</b></p> <p>JobType corresponds to different job types supported by the system. This includes system defined & user-defined job types.</p> <p><b>ExtendedType:</b></p> <p>ExtendedType corresponds to the list of system & user-defined types in the system. This helps setup a new Sensor or Scene or Scenefile type in the system.</p> <p><b>Partner:</b></p> <p>Partner corresponds to the sensor/weather/imagery integration partner.</p> <p><b>Scene:</b></p> <p>Scene corresponds to any generated output in the context of a Farm. Each Scene has a scene id, scene source, scene type and farm id associated with it. Each scene id can have multiple scene files associated with it.</p> <p><b>SceneFile:</b></p> <p>SceneFile corresponds to all files which are generated for single scene. A single scene id can have multiple SceneFile ids associated with it.</p> <p><b>Rule:</b></p> <p>Rule corresponds to a condition for farm-related data to trigger an alert. Each rule will be in the context of a farm's data.</p> <p><b>Alert:</b></p> <p>Alert corresponds to a notification which gets generated when a rule condition is met. Each alert will be in the context of a rule.</p> <p><b>RoleDefinition:</b></p> <p>RoleDefinition defines allowed and disallowed actions for a role.</p> <p><b>RoleAssignment:</b></p> <p>RoleAssignment corresponds to the assignment of a role to a user or a service principal.</p> </p> # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RoleAssignmentRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'role_definition_id': 'str',
'object_id': 'str',
'object_id_type': 'str',
'tenant_id': 'str'
}
attribute_map = {
'role_definition_id': 'roleDefinitionId',
'object_id': 'objectId',
'object_id_type': 'objectIdType',
'tenant_id': 'tenantId'
}
def __init__(self, role_definition_id=None, object_id=None, object_id_type=None, tenant_id=None): # noqa: E501
"""RoleAssignmentRequest - a model defined in Swagger""" # noqa: E501
self._role_definition_id = None
self._object_id = None
self._object_id_type = None
self._tenant_id = None
self.discriminator = None
self.role_definition_id = role_definition_id
self.object_id = object_id
self.object_id_type = object_id_type
self.tenant_id = tenant_id
@property
def role_definition_id(self):
"""Gets the role_definition_id of this RoleAssignmentRequest. # noqa: E501
Gets or sets roleDefinitionId of the role assignment. # noqa: E501
:return: The role_definition_id of this RoleAssignmentRequest. # noqa: E501
:rtype: str
"""
return self._role_definition_id
@role_definition_id.setter
def role_definition_id(self, role_definition_id):
"""Sets the role_definition_id of this RoleAssignmentRequest.
Gets or sets roleDefinitionId of the role assignment. # noqa: E501
:param role_definition_id: The role_definition_id of this RoleAssignmentRequest. # noqa: E501
:type: str
"""
if role_definition_id is None:
raise ValueError("Invalid value for `role_definition_id`, must not be `None`") # noqa: E501
if role_definition_id is not None and len(role_definition_id) > 200:
raise ValueError("Invalid value for `role_definition_id`, length must be less than or equal to `200`") # noqa: E501
if role_definition_id is not None and len(role_definition_id) < 3:
raise ValueError("Invalid value for `role_definition_id`, length must be greater than or equal to `3`") # noqa: E501
self._role_definition_id = role_definition_id
@property
def object_id(self):
"""Gets the object_id of this RoleAssignmentRequest. # noqa: E501
Gets or sets objectId of the role assignment. # noqa: E501
:return: The object_id of this RoleAssignmentRequest. # noqa: E501
:rtype: str
"""
return self._object_id
@object_id.setter
def object_id(self, object_id):
"""Sets the object_id of this RoleAssignmentRequest.
Gets or sets objectId of the role assignment. # noqa: E501
:param object_id: The object_id of this RoleAssignmentRequest. # noqa: E501
:type: str
"""
if object_id is None:
raise ValueError("Invalid value for `object_id`, must not be `None`") # noqa: E501
if object_id is not None and len(object_id) > 200:
raise ValueError("Invalid value for `object_id`, length must be less than or equal to `200`") # noqa: E501
if object_id is not None and len(object_id) < 3:
raise ValueError("Invalid value for `object_id`, length must be greater than or equal to `3`") # noqa: E501
self._object_id = object_id
@property
def object_id_type(self):
"""Gets the object_id_type of this RoleAssignmentRequest. # noqa: E501
Gets or sets objectIdType of the role assignment. # noqa: E501
:return: The object_id_type of this RoleAssignmentRequest. # noqa: E501
:rtype: str
"""
return self._object_id_type
@object_id_type.setter
def object_id_type(self, object_id_type):
"""Sets the object_id_type of this RoleAssignmentRequest.
Gets or sets objectIdType of the role assignment. # noqa: E501
:param object_id_type: The object_id_type of this RoleAssignmentRequest. # noqa: E501
:type: str
"""
if object_id_type is None:
raise ValueError("Invalid value for `object_id_type`, must not be `None`") # noqa: E501
allowed_values = ["UserId", "ServicePrincipalId"] # noqa: E501
if object_id_type not in allowed_values:
raise ValueError(
"Invalid value for `object_id_type` ({0}), must be one of {1}" # noqa: E501
.format(object_id_type, allowed_values)
)
self._object_id_type = object_id_type
@property
def tenant_id(self):
"""Gets the tenant_id of this RoleAssignmentRequest. # noqa: E501
Gets or sets tenantId of the role assignment. # noqa: E501
:return: The tenant_id of this RoleAssignmentRequest. # noqa: E501
:rtype: str
"""
return self._tenant_id
@tenant_id.setter
def tenant_id(self, tenant_id):
"""Sets the tenant_id of this RoleAssignmentRequest.
Gets or sets tenantId of the role assignment. # noqa: E501
:param tenant_id: The tenant_id of this RoleAssignmentRequest. # noqa: E501
:type: str
"""
if tenant_id is None:
raise ValueError("Invalid value for `tenant_id`, must not be `None`") # noqa: E501
if tenant_id is not None and len(tenant_id) > 200:
raise ValueError("Invalid value for `tenant_id`, length must be less than or equal to `200`") # noqa: E501
if tenant_id is not None and len(tenant_id) < 3:
raise ValueError("Invalid value for `tenant_id`, length must be greater than or equal to `3`") # noqa: E501
self._tenant_id = tenant_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RoleAssignmentRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RoleAssignmentRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
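A hypothetical construction of the generated model above; every id below is a placeholder, and `object_id_type` must be one of the two values the setter allows:

```python
req = RoleAssignmentRequest(
    role_definition_id="role-definition-id",
    object_id="user-object-id",
    object_id_type="UserId",   # or "ServicePrincipalId"
    tenant_id="tenant-id",
)
print(req.to_dict())
```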
0eb8c05f44ce6192a839496e20dd39bbaf464182 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_RelativeDifference/trend_Lag1Trend/cycle_5/ar_/test_artificial_1024_RelativeDifference_Lag1Trend_5__100.py | f07285216a3cc7ff2114694287b967c65119eace | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 275 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "RelativeDifference", sigma = 0.0, exog_count = 100, ar_order = 0); | [
"[email protected]"
] | |
24b6342cfd9d6f470842e7b811d8251cdbf6b932 | c85a6d674679780ee510b5c8c3dbcbdecc859f64 | /test/test_alert_config.py | b935b925e2b7e72d3f4f6959ec9d5a61a7aa6c8d | [] | no_license | cbrowet-axway/APIM_sdk | d4f4a124e86a7b2e65d0ef07b54c68e95de68337 | 4f82df67ebe3dd6eae645bab8f86e72c0347ee24 | refs/heads/master | 2020-05-25T13:22:35.802350 | 2020-04-16T09:25:21 | 2020-04-16T09:25:21 | 187,820,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | # coding: utf-8
"""
API Manager API v1.3
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.3.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.alert_config import AlertConfig # noqa: E501
from swagger_client.rest import ApiException
class TestAlertConfig(unittest.TestCase):
"""AlertConfig unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAlertConfig(self):
"""Test AlertConfig"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.alert_config.AlertConfig() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
d96ed2ce95e6d3184151b1539a6f3a0eb664c89b | 75d8667735782cd1d0eb4877e52c89da5cd92dde | /nova/api/openstack/compute/floating_ips_bulk.py | 3107887da5317d24b2fbdb3186c0eec49b39a49a | [
"Apache-2.0"
] | permissive | bopopescu/nova-token | ffecfd3ec561936b7d9d7e691bc57383cde05436 | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | refs/heads/master | 2022-11-22T09:53:31.073483 | 2016-05-14T02:47:01 | 2016-05-15T22:02:55 | 282,105,621 | 0 | 0 | Apache-2.0 | 2020-07-24T02:42:19 | 2020-07-24T02:42:18 | null | UTF-8 | Python | false | false | 13,677 | py | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
import six
import webob.exc

from nova.api.openstack.compute.schemas import floating_ips_bulk
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects

CONF = nova.conf.CONF
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')


ALIAS = 'os-floating-ips-bulk'
authorize = extensions.os_compute_authorizer(ALIAS)


class FloatingIPBulkController(wsgi.Controller):

    @extensions.expected_errors(404)
    def index(self, req):
        """Return a list of all floating IPs."""
        context = req.environ['nova.context']
        authorize(context)

        return self._get_floating_ip_info(context)

    @extensions.expected_errors(404)
    def show(self, req, id):
        """Return a list of all floating IPs for a given host."""
        context = req.environ['nova.context']
        authorize(context)

        return self._get_floating_ip_info(context, id)

    def _get_floating_ip_info(self, context, host=None):
        floating_ip_info = {"floating_ip_info": []}

        if host is None:
            try:
                floating_ips = objects.FloatingIPList.get_all(context)
            except exception.NoFloatingIpsDefined:
                return floating_ip_info
        else:
            try:
                floating_ips = objects.FloatingIPList.get_by_host(context,
                                                                  host)
            except exception.FloatingIpNotFoundForHost as e:
                raise webob.exc.HTTPNotFound(explanation=e.format_message())

        for floating_ip in floating_ips:
            instance_uuid = None
            fixed_ip = None
            if floating_ip.fixed_ip:
                instance_uuid = floating_ip.fixed_ip.instance_uuid
                fixed_ip = str(floating_ip.fixed_ip.address)

            result = {'address': str(floating_ip.address),
                      'pool': floating_ip.pool,
                      'interface': floating_ip.interface,
nl|'\n'
string|"'project_id'"
op|':'
name|'floating_ip'
op|'.'
name|'project_id'
op|','
nl|'\n'
string|"'instance_uuid'"
op|':'
name|'instance_uuid'
op|','
nl|'\n'
string|"'fixed_ip'"
op|':'
name|'fixed_ip'
op|'}'
newline|'\n'
name|'floating_ip_info'
op|'['
string|"'floating_ip_info'"
op|']'
op|'.'
name|'append'
op|'('
name|'result'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'floating_ip_info'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
op|'('
number|'400'
op|','
number|'409'
op|')'
op|')'
newline|'\n'
op|'@'
name|'validation'
op|'.'
name|'schema'
op|'('
name|'floating_ips_bulk'
op|'.'
name|'create'
op|')'
newline|'\n'
DECL|member|create
name|'def'
name|'create'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'body'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Bulk create floating IPs."""'
newline|'\n'
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|"'nova.context'"
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|')'
newline|'\n'
nl|'\n'
name|'params'
op|'='
name|'body'
op|'['
string|"'floating_ips_bulk_create'"
op|']'
newline|'\n'
name|'ip_range'
op|'='
name|'params'
op|'['
string|"'ip_range'"
op|']'
newline|'\n'
nl|'\n'
name|'pool'
op|'='
name|'params'
op|'.'
name|'get'
op|'('
string|"'pool'"
op|','
name|'CONF'
op|'.'
name|'default_floating_pool'
op|')'
newline|'\n'
name|'interface'
op|'='
name|'params'
op|'.'
name|'get'
op|'('
string|"'interface'"
op|','
name|'CONF'
op|'.'
name|'public_interface'
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'ips'
op|'='
op|'['
name|'objects'
op|'.'
name|'FloatingIPList'
op|'.'
name|'make_ip_info'
op|'('
name|'addr'
op|','
name|'pool'
op|','
name|'interface'
op|')'
nl|'\n'
name|'for'
name|'addr'
name|'in'
name|'self'
op|'.'
name|'_address_to_hosts'
op|'('
name|'ip_range'
op|')'
op|']'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InvalidInput'
name|'as'
name|'exc'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
name|'explanation'
op|'='
name|'exc'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'try'
op|':'
newline|'\n'
indent|' '
name|'objects'
op|'.'
name|'FloatingIPList'
op|'.'
name|'create'
op|'('
name|'context'
op|','
name|'ips'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'FloatingIpExists'
name|'as'
name|'exc'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPConflict'
op|'('
name|'explanation'
op|'='
name|'exc'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
op|'{'
string|'"floating_ips_bulk_create"'
op|':'
op|'{'
string|'"ip_range"'
op|':'
name|'ip_range'
op|','
nl|'\n'
string|'"pool"'
op|':'
name|'pool'
op|','
nl|'\n'
string|'"interface"'
op|':'
name|'interface'
op|'}'
op|'}'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
op|'('
number|'400'
op|','
number|'404'
op|')'
op|')'
newline|'\n'
op|'@'
name|'validation'
op|'.'
name|'schema'
op|'('
name|'floating_ips_bulk'
op|'.'
name|'delete'
op|')'
newline|'\n'
DECL|member|update
name|'def'
name|'update'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'id'
op|','
name|'body'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Bulk delete floating IPs."""'
newline|'\n'
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|"'nova.context'"
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|')'
newline|'\n'
nl|'\n'
name|'if'
name|'id'
op|'!='
string|'"delete"'
op|':'
newline|'\n'
indent|' '
name|'msg'
op|'='
name|'_'
op|'('
string|'"Unknown action"'
op|')'
newline|'\n'
name|'raise'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'msg'
op|')'
newline|'\n'
dedent|''
name|'ip_range'
op|'='
name|'body'
op|'['
string|"'ip_range'"
op|']'
newline|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'ips'
op|'='
op|'('
name|'objects'
op|'.'
name|'FloatingIPList'
op|'.'
name|'make_ip_info'
op|'('
name|'address'
op|','
name|'None'
op|','
name|'None'
op|')'
nl|'\n'
name|'for'
name|'address'
name|'in'
name|'self'
op|'.'
name|'_address_to_hosts'
op|'('
name|'ip_range'
op|')'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InvalidInput'
name|'as'
name|'exc'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
name|'explanation'
op|'='
name|'exc'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
dedent|''
name|'objects'
op|'.'
name|'FloatingIPList'
op|'.'
name|'destroy'
op|'('
name|'context'
op|','
name|'ips'
op|')'
newline|'\n'
nl|'\n'
name|'return'
op|'{'
string|'"floating_ips_bulk_delete"'
op|':'
name|'ip_range'
op|'}'
newline|'\n'
nl|'\n'
DECL|member|_address_to_hosts
dedent|''
name|'def'
name|'_address_to_hosts'
op|'('
name|'self'
op|','
name|'addresses'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Iterate over hosts within an address range.\n\n If an explicit range specifier is missing, the parameter is\n interpreted as a specific individual address.\n """'
newline|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'netaddr'
op|'.'
name|'IPAddress'
op|'('
name|'addresses'
op|')'
op|']'
newline|'\n'
dedent|''
name|'except'
name|'ValueError'
op|':'
newline|'\n'
indent|' '
name|'net'
op|'='
name|'netaddr'
op|'.'
name|'IPNetwork'
op|'('
name|'addresses'
op|')'
newline|'\n'
name|'if'
name|'net'
op|'.'
name|'size'
op|'<'
number|'4'
op|':'
newline|'\n'
indent|' '
name|'reason'
op|'='
name|'_'
op|'('
string|'"/%s should be specified as single address(es) "'
nl|'\n'
string|'"not in cidr format"'
op|')'
op|'%'
name|'net'
op|'.'
name|'prefixlen'
newline|'\n'
name|'raise'
name|'exception'
op|'.'
name|'InvalidInput'
op|'('
name|'reason'
op|'='
name|'reason'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'net'
op|'.'
name|'iter_hosts'
op|'('
op|')'
newline|'\n'
dedent|''
dedent|''
name|'except'
name|'netaddr'
op|'.'
name|'AddrFormatError'
name|'as'
name|'exc'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'InvalidInput'
op|'('
name|'reason'
op|'='
name|'six'
op|'.'
name|'text_type'
op|'('
name|'exc'
op|')'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|FloatingIpsBulk
dedent|''
dedent|''
dedent|''
name|'class'
name|'FloatingIpsBulk'
op|'('
name|'extensions'
op|'.'
name|'V21APIExtensionBase'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Bulk handling of Floating IPs."""'
newline|'\n'
nl|'\n'
DECL|variable|name
name|'name'
op|'='
string|'"FloatingIpsBulk"'
newline|'\n'
DECL|variable|alias
name|'alias'
op|'='
name|'ALIAS'
newline|'\n'
DECL|variable|version
name|'version'
op|'='
number|'1'
newline|'\n'
nl|'\n'
DECL|member|get_resources
name|'def'
name|'get_resources'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'resource'
op|'='
op|'['
name|'extensions'
op|'.'
name|'ResourceExtension'
op|'('
name|'ALIAS'
op|','
nl|'\n'
name|'FloatingIPBulkController'
op|'('
op|')'
op|')'
op|']'
newline|'\n'
name|'return'
name|'resource'
newline|'\n'
nl|'\n'
DECL|member|get_controller_extensions
dedent|''
name|'def'
name|'get_controller_extensions'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""It\'s an abstract function V21APIExtensionBase and the extension\n will not be loaded without it.\n """'
newline|'\n'
name|'return'
op|'['
op|']'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| [
"[email protected]"
] | |
f12c7e78f6cc76322a20f97f04b8731c60d73ac0 | 5474905a26e356fe2742e62567718173b81b616d | /templates/python.flask/{{cookiecutter.project_safe_name}}/test/test_demo.py | 52e109dcd4a6b06c2716df9480549aeac5797cf5 | [
"MIT"
] | permissive | by46/recipe | 16dd24a8a83f2a00beab84c5b6522c0bff073233 | 203abd2141a536b66b4e57d073169a49395be1f0 | refs/heads/master | 2020-04-13T22:41:27.865516 | 2016-09-09T10:09:20 | 2016-09-09T10:09:20 | 65,368,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | import unittest
import app
from app import create_app
class HelloWorldTestCase(unittest.TestCase):
def setUp(self):
self.client = create_app('test').test_client()
def test_hello_world(self):
response = self.client.get('/{{cookiecutter.project_slug}}', follow_redirects=True)
self.assertTrue('The Art of Computer Programming' in response.data)
def test_version(self):
response = self.client.get('/{{cookiecutter.project_slug}}/version', follow_redirects=True)
self.assertTrue(app.__version__ in response.data)
def test_faq(self):
response = self.client.get('/{{cookiecutter.project_slug}}/faq.htm')
self.assertEqual('<!--Newegg-->', response.data)
| [
"[email protected]"
] | |
9106c5d1a7b95165084dd263e4a7421c8030d12e | e95eeb123f3772da8d2dc7677e7afdc1287f1276 | /bot.py | a1f32743e20491dec9bd1bd4b5ce00cf5bfb409e | [
"MIT"
] | permissive | jayrav13/presidency | 3cf880bf51d211f8fb21d5c4bc564f22c2a8ae4f | f18721d5df9af161cc01f503b6657d9b06fea0e9 | refs/heads/master | 2022-09-29T22:53:40.867506 | 2017-03-03T18:57:04 | 2017-03-03T18:57:04 | 72,818,604 | 15 | 2 | MIT | 2022-09-16T17:46:42 | 2016-11-04T05:56:54 | Python | UTF-8 | Python | false | false | 5,501 | py | """
Imports
"""
from presidency.models import *
from lxml import html
import requests
import json
import datetime
from twython import Twython
import os
import time
import sys
"""
Set UTF-8 for everything.
"""
reload(sys)
sys.setdefaultencoding("utf-8")
# Establish Base URL.
base_url = os.environ.get('WHITE_HOUSE_URL') + ""
# Establish all pages to scrape.
pages = {
"/briefing-room/speeches-and-remarks": "Speeches and Remarks",
"/briefing-room/press-briefings": "Press Briefings",
"/briefing-room/statements-and-releases": "Statements and Releases",
"/briefing-room/presidential-actions/executive-orders": "Executive Orders",
"/briefing-room/presidential-actions/presidential-memoranda": "Presidential Memoranda",
"/briefing-room/presidential-actions/proclamations": "Proclamations",
"/briefing-room/presidential-actions/related-omb-material": "Related OMB Material",
# "/briefing-room/pending-legislation": "Pending Legislation",
# "/briefing-room/signed-legislation": "Signed Legislation",
# "/briefing-room/vetoed-legislation": "Vetoed Legislation",
"/briefing-room/statements-administration-policy": "Statements of Administration Policy"
}
# Scrape each page.
for key, value in pages.iteritems():
print("Scanning " + value)
# Make request and transform into tree.
page_url = base_url + key
response = requests.get(page_url)
tree = html.document_fromstring(response.text)
# Deterimine number of total pages.
pagecount = int(tree.xpath('//li[@class="pager-current"]')[0].text_content().split(' of ')[1]) if len(tree.xpath('//li[@class="pager-current"]')) > 0 else 1
# Keep iterating through pages until you reach a page that has been fully scraped. Then stop.
for i in range(0, pagecount):
# Use ?page= parameter to scrape, starting with page 0.
response = requests.get(page_url)
print("PAGE URL: " + page_url)
tree = html.document_fromstring(response.text)
# Build the resulting dictionary objects for each document on that page.
objects = [{
"document_date": x.xpath('div[contains(@class, "views-field-created")]')[0].text_content().strip() if len(x.xpath('div[contains(@class, "views-field-created")]')) > 0 else x.xpath('div')[0].text_content().split(' on ')[1],
"title": x.xpath('div[contains(@class, "views-field-title")]')[0].text_content().strip(),
"uri": x.xpath('div[contains(@class, "views-field-title")]')[0].xpath('h3')[0].xpath('a')[0].attrib['href'].strip(),
"category_slug": key,
"category_name": value,
"full_url": os.environ.get('WHITE_HOUSE_URL') + x.xpath('div[contains(@class, "views-field-title")]')[0].xpath('h3')[0].xpath('a')[0].attrib['href'].strip()
} for x in tree.xpath('//div[contains(@class, "views-row")]')]
        # Add a shortened URL to each object.
for i in range(0, len(objects)):
url = requests.post('https://www.googleapis.com/urlshortener/v1/url?key=' + os.environ.get('GOOGLE_URL_SHORTENER_API_KEY'), json={"longUrl": os.environ.get('WHITE_HOUSE_URL') + objects[i]['uri']})
if url.status_code == 200:
objects[i]['short_url'] = url.json()['id']
else:
                objects[i]['short_url'] = objects[i]['full_url']  # fall back to the unshortened URL
# Create database objects for all of these.
records = [WhiteHouse(x['title'], x['uri'], x['category_slug'], x['category_name'], x['document_date'], x['full_url'], x['short_url']) for x in objects]
# Track number of records successfully added. Those not added will be duplicates.
record_counter = 0
# Iterate through records.
for x in records:
# Attempt to persist.
try:
db.session.add(x)
db.session.commit()
record_counter = record_counter + 1
print("Added " + x.title + " successfully.")
# Fallback,
except Exception as e:
# Flush old commit that did not persist.
db.session.rollback()
# Try to save an error message.
"""
try:
db.session.add(Error(str(e)))
db.session.commit()
except:
db.session.rollback()
"""
print("Failed to add " + x.title + " successfully: " + str(e))
# If 0 records were added to the database, everything henceforth is old in this topic.
# Break, go to next slug.
pager = tree.xpath('//li[contains(@class, "pager-next")]')
try:
print(pager[0].xpath('a')[0].attrib['href'])
page_url = base_url + pager[0].xpath('a')[0].attrib['href']
except:
pass
# Retrieve all documents in descending order.
documents = WhiteHouse.query.filter_by(is_tweeted=False).order_by(WhiteHouse.document_date.asc())
print("New documents detected: %d" % (documents.count()))
# Set up Twitter bot.
twitter = Twython(
os.environ.get('TWITTER_CONSUMER_KEY'),
os.environ.get('TWITTER_CONSUMER_SECRET'),
os.environ.get('TWITTER_ACCESS_TOKEN'),
os.environ.get('TWITTER_ACCESS_TOKEN_SECRET')
)
# Go through all relevant documents and tweet them out.
for document in documents:
try:
tweet = document.title[0 : 113] + ("..." if len(document.title) > 113 else "") + " " + document.short_url
if os.environ.get('TWEET_ENV') == "TRUE":
try:
twitter.update_status( status=(tweet) )
document.is_tweeted = True
except Exception as e:
"""
db.session.add(Error(str(e)))
db.session.commit()
"""
continue
document.tweet = tweet
print("Tweeted: " + document.tweet)
db.session.add(document)
db.session.commit()
except Exception as e:
"""
try:
db.session.add(Error(str(e)))
db.session.commit()
except:
db.session.rollback()
"""
pass
# Time Delay
if os.environ.get('TWEET_ENV') == "TRUE":
time.sleep(10)
| [
"[email protected]"
] | |
b1897521c7b612921d88df7a303a832036796e83 | 4bd555bc662b8182a2e7644976bfdb00ed5e1ebe | /PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/scene.py | 14aa3e0938459fe4581caf119b859a026e19008b | [] | no_license | fhelmli/homeNOWG2 | a103df1ef97194dec9501dbda87ec1f7c111fb4a | e794fd87b296544542fd9dc7ac94c981c6312419 | refs/heads/master | 2020-04-04T13:40:20.417769 | 2019-01-30T21:41:04 | 2019-01-30T21:41:04 | 155,970,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,230 | py | #import pythonista
#coding: utf-8
from _scene2 import *
import _scene2
from scene_drawing import *
import math
from numbers import Number
from io import BytesIO
import ui
DEFAULT_ORIENTATION = 0
PORTRAIT = 1
LANDSCAPE = 2
BLEND_NORMAL = 0
BLEND_ADD = 1
BLEND_MULTIPLY = 2
from ui import get_screen_size
def run(scene_to_run, orientation=0, frame_interval=1, anti_alias=False, show_fps=False, multi_touch=True):
sv = SceneView()
if orientation == PORTRAIT:
ui_orientations = ['portrait']
elif orientation == LANDSCAPE:
ui_orientations = ['landscape']
else:
ui_orientations = None
sv.anti_alias = anti_alias
sv.frame_interval = frame_interval
sv.multi_touch_enabled = multi_touch
sv.shows_fps = show_fps
sv.scene = scene_to_run
sv.present(orientations=ui_orientations)
def gravity():
g = _scene2.gravity()
return Vector3(g[0], g[1], g[2])
class Touch (object):
def __init__(self, x, y, prev_x, prev_y, touch_id):
self.touch_id = touch_id
self.location = Point(x, y)
self.prev_location = Point(prev_x, prev_y)
self.layer = None
def __eq__(self, other_touch):
if not isinstance(other_touch, Touch):
return False
elif other_touch.touch_id == self.touch_id:
return True
return False
def __hash__(self):
return self.touch_id.__hash__()
class Scene (SceneNode):
def __init__(self, *args, **kwargs):
SceneNode.__init__(self, *args, **kwargs)
self.t = 0.0
self.dt = 0.0
self.root_layer = None
self.touches = {}
self.delayed_invocations = []
w, h = ui.get_screen_size()
self.size = Size(w, h)
self.bounds = Rect(0, 0, w, h)
self.presented_scene = None
self.presenting_scene = None
self.setup_finished = False
def setup(self):
pass
def update(self):
pass
def did_evaluate_actions(self):
pass
def draw(self):
pass
def did_change_size(self):
pass
def stop(self):
pass
def pause(self):
pass
def resume(self):
pass
def touch_began(self, touch):
pass
def touch_moved(self, touch):
pass
def touch_ended(self, touch):
pass
def present_modal_scene(self, other_scene):
if self.presented_scene:
self.dismiss_modal_scene()
other_scene._setup_scene(*self.size)
other_scene._set_size(*self.size)
self.presented_scene = other_scene
other_scene.presenting_scene = self
other_scene.z_position = max(n.z_position for n in self.children) + 1
self.add_child(other_scene)
def dismiss_modal_scene(self):
if self.presented_scene:
self.presented_scene.presenting_scene = None
self.presented_scene.remove_from_parent()
self.presented_scene = None
elif self.presenting_scene:
self.presenting_scene.dismiss_modal_scene()
def add_layer(self, layer):
if self.root_layer is None:
s = self.size
self.root_layer = Layer(Rect(0, 0, s[0], s[1]))
self.root_layer.add_layer(layer)
def delay(self, dt, func):
invocation = { 't': self.t + dt, 'f': func }
self.delayed_invocations.append(invocation)
def _setup_scene(self, width, height):
if hasattr(self, 'setup_finished') and self.setup_finished:
return
self.size = Size(width, height)
self.bounds = Rect(0, 0, width, height)
# Note: Some legacy code relies on not having to call super in __init__, so these are initialized again here...
self.t = 0.0
self.dt = 0.0
self.root_layer = None
self.touches = {}
self.delayed_invocations = []
self.presented_scene = None
self.presenting_scene = None
self.setup()
self.setup_finished = True
def _set_size(self, width, height):
if self.size.w != width or self.size.h != height:
self.size = Size(width, height)
self.bounds = Rect(0, 0, width, height)
self.crop_rect = self.bounds
self.did_change_size()
if self.presented_scene:
self.presented_scene._set_size(width, height)
def should_rotate(self, orientation):
return False
def _process_delayed_invocations(self):
fired_invocations = None
for invocation in self.delayed_invocations:
if invocation['t'] <= self.t:
invocation['f']()
if fired_invocations is None:
fired_invocations = []
fired_invocations.append(invocation)
if fired_invocations is not None:
for invocation in fired_invocations:
self.delayed_invocations.remove(invocation)
def _draw(self, dt):
paused = self.paused
if not paused:
self.dt = dt
self.t += dt
self._process_delayed_invocations()
self.draw()
if not paused:
self.update()
self._update(dt)
if not paused:
self.did_evaluate_actions()
self._render()
if self.presented_scene:
self.presented_scene._draw(dt)
def _stop(self):
self.stop()
def _touch_began(self, x, y, touch_id):
if self.presented_scene:
self.presented_scene._touch_began(x, y, touch_id)
return
touch = Touch(x, y, x, y, touch_id)
if self.root_layer is not None:
hit_layer = self.root_layer._hit_test(Point(x, y))
touch.layer = hit_layer
if hit_layer is not None:
if hasattr(hit_layer, 'touch_began') and callable(hit_layer.touch_began):
hit_layer.touch_began(touch)
self.touches[touch_id] = touch
self.touch_began(touch)
def _touch_moved(self, x, y, prev_x, prev_y, touch_id):
if self.presented_scene:
self.presented_scene._touch_moved(x, y, prev_x, prev_y, touch_id)
return
touch = Touch(x, y, prev_x, prev_y, touch_id)
old_touch = self.touches.get(touch_id, None)
if old_touch is not None:
touch.layer = old_touch.layer
if touch.layer is not None:
if hasattr(touch.layer, 'touch_moved') and callable(touch.layer.touch_moved):
touch.layer.touch_moved(touch)
self.touches[touch_id] = touch
self.touch_moved(touch)
def _touch_ended(self, x, y, touch_id):
if self.presented_scene:
self.presented_scene._touch_ended(x, y, touch_id)
return
touch = Touch(x, y, x, y, touch_id)
old_touch = self.touches.get(touch_id, None)
if old_touch is not None:
del self.touches[touch_id]
touch.layer = old_touch.layer
if touch.layer is not None:
if hasattr(touch.layer, 'touch_ended') and callable(touch.layer.touch_ended):
touch.layer.touch_ended(touch)
self.touch_ended(touch)
class LabelNode (SpriteNode):
def __init__(self, text='', font=('Helvetica', 20), *args, **kwargs):
SpriteNode.__init__(self, *args, **kwargs)
self._suspend_updates = True
self._rendered_text = None
self.text = text
self.font = font
self._suspend_updates = False
self.update_texture()
def __setattr__(self, name, value):
SpriteNode.__setattr__(self, name, value)
if name == 'font':
try:
if len(value) != 2:
raise TypeError('Expected a sequence of font name and size')
if not isinstance(value[0], basestring):
raise TypeError('Font name must be a string')
if not isinstance(value[1], Number):
raise TypeError('Font size must be a number')
except TypeError:
raise TypeError('Expected a sequence of font name and size')
if name == 'font' or (name == 'text' and value != self._rendered_text):
self.update_texture()
def update_texture(self):
if self._suspend_updates:
return
w, h = ui.measure_string(self.text, font=self.font)
with ui.ImageContext(max(w, 1), max(h, 1)) as ctx:
ui.draw_string(self.text, (0, 0, w, h), self.font, color='white')
img = ctx.get_image()
self.texture = Texture(img)
self._rendered_text = self.text
class ShapeNode (SpriteNode):
def __init__(self, path=None, fill_color='white', stroke_color='clear', shadow=None, *args, **kwargs):
SpriteNode.__init__(self, *args, **kwargs)
self._suspend_updates = True
self.path = path
self.line_width = path.line_width
self.fill_color = fill_color
self.stroke_color = stroke_color
self.shadow = shadow
self._suspend_updates = False
self.update_texture()
def __setattr__(self, name, value):
SpriteNode.__setattr__(self, name, value)
if name == 'line_width':
self.path.line_width = value
self.update_texture()
if name in ('path', 'fill_color', 'stroke_color', 'shadow'):
self.update_texture()
def update_texture(self):
if self._suspend_updates or not self.path:
return
if self.shadow:
shadow_color = self.shadow[0]
shadow_offset_x = self.shadow[1]
shadow_offset_y = self.shadow[2]
shadow_radius = self.shadow[3]
else:
shadow_offset_x = 0
shadow_offset_y = 0
shadow_radius = 0
shadow_left = shadow_radius - shadow_offset_x
shadow_right = shadow_radius + shadow_offset_x
shadow_top = shadow_radius - shadow_offset_y
shadow_bottom = shadow_radius + shadow_offset_y
lw = self.path.line_width
path_bounds = self.path.bounds
w = max(1, math.ceil(path_bounds.w + abs(shadow_left) + abs(shadow_right)) + lw)
h = max(1, math.ceil(path_bounds.h + abs(shadow_top) + abs(shadow_bottom)) + lw)
with ui.ImageContext(w, h) as ctx:
ui.concat_ctm(ui.Transform.translation(lw/2 + max(0, shadow_left) - path_bounds.x, lw/2 + max(0, shadow_top) - path_bounds.y))
ui.set_color(self.fill_color)
with ui.GState():
if self.shadow:
ui.set_shadow(shadow_color, shadow_offset_x, shadow_offset_y, shadow_radius)
self.path.fill()
if self.path.line_width > 0:
ui.set_color(self.stroke_color)
self.path.stroke()
img = ctx.get_image()
self.texture = Texture(img)
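# --- Usage sketch (added illustration, not part of the module) ---
# A minimal Scene subclass presented with run() above; the label text,
# font, and orientation are arbitrary example values.
if __name__ == '__main__':
    class DemoScene(Scene):
        def setup(self):
            label = LabelNode('Hello', font=('Helvetica', 32), parent=self)
            label.position = (self.size.w / 2, self.size.h / 2)
    run(DemoScene(), orientation=PORTRAIT, show_fps=True)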
| [
"[email protected]"
] | |
a2300d6a94ca2cefd91d8d13d10b57d752bcefa4 | 1ade02a8e0c6d7e442c9d9041f15518d22da3923 | /w2/d5/sqlite_db/schema.py | 99c0917574b62199a3263ba8d784e3cfc122ffc9 | [] | no_license | fodisi/ByteAcademy-Bootcamp | 7980b80636a36db6da3e0fc0e529fbc6b8e097e0 | d53e3f4864f6cba1b85e806c29b01c48e3c2e81d | refs/heads/master | 2020-03-19T12:55:31.489638 | 2018-07-25T16:19:19 | 2018-07-25T16:19:19 | 136,550,128 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | #!/usr/bin/env python3
import sqlite3
# create a connection to the database
connection = sqlite3.connect("securities_master.db", check_same_thread=False)
# create a cursor object to represent the "gaze" of the database management system
cursor = connection.cursor()
cursor.execute(
"""CREATE TABLE rippleUSD(
pk INTEGER PRIMARY KEY AUTOINCREMENT,
unix_time FLOAT,
last_price FLOAT,
trade_volume FLOAT
);"""
)
cursor.close()
connection.close()
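# --- Usage sketch (added illustration, not part of the original script) ---
# Assuming the table created above, ticks could be appended later with a
# parameterized INSERT; the function name and arguments are hypothetical.
def record_tick(unix_time, last_price, trade_volume):
    conn = sqlite3.connect("securities_master.db")
    try:
        conn.execute(
            "INSERT INTO rippleUSD (unix_time, last_price, trade_volume) VALUES (?, ?, ?);",
            (unix_time, last_price, trade_volume),
        )
        conn.commit()
    finally:
        conn.close()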
| [
"[email protected]"
] | |
606c6daa39403e1f7813670974620cd5c5c62c6f | 9c8b45b2b2be2e4c7063675965fa25538114e660 | /namseoul/urls.py | 85b13d914746388b20e042876187df50d8b64b07 | [] | no_license | gloweean/namseoul | 1a8f8b85b7ff4213c078b8e3cca409dfadfac5f4 | 9acc0c3c0e12f61d5ad399c32364bff2d11cbcfb | refs/heads/master | 2020-04-07T01:49:23.669077 | 2018-12-22T05:44:37 | 2018-12-22T05:44:37 | 157,953,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | """namseoul URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from message import views
from rest_framework import routers
from rest_framework.authtoken import views as AuthView
from member.views import UserSignUpView, UserRetrieveUpdateDestroyView, UserLogoutView
# When using a ViewSet, a router must be registered for it.
router = routers.DefaultRouter()
router.register(r'message', views.MessageViewSet)
urlpatterns = [
path('admin/', admin.site.urls),
path('', include(router.urls)),
path('signup', UserSignUpView.as_view()),
path('user_info', UserRetrieveUpdateDestroyView.as_view()),
    path('login', AuthView.obtain_auth_token),  # From here on, requests must send a header of the form: Authorization: Token 9944b09199c62bcf9418ad846dd0e4bbdfc6ee4b
path('logout', UserLogoutView.as_view()),
]
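# Added note: with the DefaultRouter registration above, include(router.urls)
# exposes /message/ (list/create) and /message/<pk>/ (retrieve/update/destroy),
# plus a browsable API root at /.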
| [
"[email protected]"
] | |
28a57877699840d447b57131eafedbf97d4ffd13 | 9e15ada895e90d033bc3b65c2666065bddd62605 | /08/8.1.repr_test.py | 06248a91f25680a03388cc1f6d0487d858914dcf | [] | no_license | zhyErick/fengkuang_python | b0f0c78273420fd862691799bfd7e4f1b6eadf80 | 6d50ad3b7d4ae05d06379c2dc87d91081964ec6d | refs/heads/master | 2021-02-14T08:23:26.616211 | 2020-05-06T13:08:07 | 2020-05-06T13:08:07 | 244,788,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | class Apple:
    # Implement the constructor
def __init__(self, color, weight):
self.color = color
self.weight = weight
    # Override the __repr__() method so that Apple objects describe themselves
def __repr__(self):
return "Apple[color=" + self.color + ", weight=" + str(self.weight) + "]"
a = Apple("红色", 5.68)
print(a) | [
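# Added illustration: container displays also use __repr__, so printing a
# list of apples reuses the description above.
print([a])  # [Apple[color=红色, weight=5.68]]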
"[email protected]"
] | |
387f78efedf54707074b3d54e433ca863301716b | 9c9701f79c8eeaa05f684442d2d03f7de4bba1f1 | /Korpora/korpus_namuwiki.py | 2e75dd8d2a48c3a78b50a187af42870c394678d6 | [
"CC-BY-4.0"
] | permissive | hank110/Korpora | e54708fe2d7910df4e6ec5cff1cf1ca0696636bf | b0e014f5c8c4ba71aba335285d0be48cbb802a0d | refs/heads/master | 2023-01-10T04:24:14.386097 | 2020-09-21T03:42:25 | 2020-09-21T03:42:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,007 | py | import os
from .korpora import Korpus, SentencePairKorpusData
from .utils import fetch, default_korpora_path, load_wikitext
NAMUWIKI_FETCH_INFORMATION = [
{
'url': 'https://github.com/lovit/namuwikitext/releases/download/v0.1/namuwikitext_20200302.v0.1.train.zip',
'destination': 'namuwikitext/namuwikitext_20200302.train.zip',
'method': 'download & unzip'
},
{
'url': 'https://github.com/lovit/namuwikitext/releases/download/v0.1/namuwikitext_20200302.v0.1.test.zip',
'destination': 'namuwikitext/namuwikitext_20200302.test.zip',
'method': 'download & unzip'
},
{
'url': 'https://github.com/lovit/namuwikitext/releases/download/v0.1/namuwikitext_20200302.v0.1.dev.zip',
'destination': 'namuwikitext/namuwikitext_20200302.dev.zip',
'method': 'download & unzip'
}
]
description = """ Author : Hyunjoong Kim lovit@github
Repository : https://github.com/lovit/namuwikitext
References :
    Text files in wikitext format, built from the Namuwiki dump data.
    For training and evaluation they are split per wiki page into train (99%), dev (0.5%), and test (0.5%).
"""
license = " CC BY-NC-SA 2.0 KR which Namuwiki dump dataset is licensed"
class NamuwikiTextKorpusData(SentencePairKorpusData):
"""
Args:
description (str) : data description
texts (list of str) : namuwiki contents including '\n'
pairs (list of str) : title
"""
def __init__(self, description, texts, pairs):
super().__init__(description, texts, pairs)
class NamuwikiTextKorpus(Korpus):
def __init__(self, root_dir=None, force_download=False):
super().__init__(description, license)
if root_dir is None:
root_dir = default_korpora_path
fetch_namuwikitext(root_dir, force_download)
for information in NAMUWIKI_FETCH_INFORMATION:
destination = information['destination']
local_path = os.path.join(os.path.abspath(root_dir), destination[:-4])
if 'train' in destination:
response = input(
                'NamuwikiText.train text file is large (5.3G). '
'If you want to load text in your memory, please insert `yes`').lower()
if (len(response) == 1 and response == 'y') or (response == 'yes'):
texts, titles = self.load(local_path)
self.train = NamuwikiTextKorpusData(description, texts, titles)
else:
dirname = os.path.abspath(f'{root_dir}/namuwikitext')
self.train = f'Namuwikitext corpus is downloaded. Open local directory {dirname}'
print('Continue to load `dev` and `test`')
continue
texts, titles = self.load(local_path)
if 'dev' in destination:
self.dev = NamuwikiTextKorpusData(description, texts, titles)
elif 'test' in destination:
self.test = NamuwikiTextKorpusData(description, texts, titles)
else:
raise ValueError(f'Check local files')
def load(self, path):
def split_title_text(wikitext):
lines = wikitext.split('\n')
title = lines[0]
text = '\n'.join([line.strip() for line in lines[2:] if line.strip()])
return title, text
wikitexts = load_wikitext(path)
wikitexts = [split_title_text(wikitext) for wikitext in wikitexts]
titles, texts = zip(*wikitexts)
# swap position
return texts, titles
def fetch_namuwikitext(root_dir, force_download):
for information in NAMUWIKI_FETCH_INFORMATION:
url = information['url']
destination = information['destination']
local_path = os.path.join(os.path.abspath(root_dir), destination)
fetch(url, local_path, 'namuwikitext', force_download, information['method'])
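# --- Usage sketch (added illustration, not part of the original module) ---
# Instantiating the korpus triggers fetch_namuwikitext() and parses the
# dev/test splits; the __main__ guard keeps the demo from running on import.
if __name__ == '__main__':
    corpus = NamuwikiTextKorpus()
    print(corpus.dev.texts[0][:100])  # first 100 characters of a dev article
    print(corpus.dev.pairs[0])        # the matching page title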
| [
"[email protected]"
] | |
d60e972614e566bef7cbc20eb726db3227df9346 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/pointcloud/_y.py | dfc46fe42092562d7111c7b05c2ec21d2a386694 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 400 | py | import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name='y', parent_name='pointcloud', **kwargs):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc+clearAxisTypes',
role='data',
**kwargs
)
| [
"[email protected]"
] | |
7a218a01ecbfc594cc00ff334d30ebe2489e5c13 | c324a6d923bae3a00bd1dc69a43d0e5c707a104a | /addons-vauxoo/hr_expense_replenishment/__openerp__.py | f75805678aedf47bc322db43b9213897c5e35bdc | [] | no_license | meswapnilwagh/odoo-adr | 5c593c2240d23b79811ccd7b5297b634e5ffe19d | 442c8d5fa52cab30028a26dd93bd8eae88d58fed | refs/heads/master | 2020-01-27T10:03:27.142715 | 2015-09-04T14:36:59 | 2015-09-04T14:36:59 | 50,238,226 | 0 | 4 | null | 2016-01-23T12:53:28 | 2016-01-23T12:53:25 | null | UTF-8 | Python | false | false | 2,537 | py | # -*- encoding: utf-8 -*-
###############################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
############# Credits #########################################################
# Coded by: Katherine Zaoral <[email protected]>
# Planified by: Humberto Arocha <[email protected]>
# Audited by: Humberto Arocha <[email protected]>
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
{
"name": "Expenses Replenishment",
"version": "0.1",
"author": "Vauxoo",
"category": "HR Module",
"description": """
Expenses Replenishment
======================
This module adds functionality to the HR Expense module for managing deductible
expenses by using invoices associated with an expense document. It also
automates the reconciliation process for the expense and the employee
payment.
Dependencies information
------------------------
- You can download the *account_invoice_line_currency* module from::
bzr branch lp:addons-vauxoo/7.0
""",
"website": "http://openerp.com.ve",
"license": "",
"depends": [
"hr_expense",
"account_invoice_line_currency",
"hr_expense_analytic",
"account_move_report"
],
"demo": [],
"data": [
"security/hr_security.xml",
"wizard/hr_expense_wizard_view.xml",
"view/account_invoice_view.xml",
"view/hr_expense_view.xml",
"workflow/workflow.xml"
],
"test": [],
"js": [],
"css": [],
"qweb": [],
"installable": True,
"auto_install": False,
"active": False
} | [
"[email protected]"
] | |
b51ac12b70717c54b15760648a95a50bb8013523 | b36c065d9fe10a6a9bf42415f3a716565ba26756 | /old_code/basicdatas/dicts.py | e4c860a52e2b343ae12c7d32c9bedfb1cc78cc21 | [] | no_license | fanghongbin/nmc_met_class | a447255ce43b2b8f33ee2db584e55483ce68d82c | b59e5ab68c47d83c70c0d7081ca23dce72bf8c75 | refs/heads/master | 2022-02-13T05:25:40.201333 | 2019-05-09T06:54:58 | 2019-05-09T06:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,942 | py | #!/usr/bin/python3.6
# -*- coding:UTF-8 -*-
import mymethods.str_finder as finder
gds_station_data_element_name_id = {
"经度":1,
"纬度":2,
"测站高度":3,
"测站级别(short)":4,
"测站类型(short)":5,
"气压传感器海拔高度":6,
"温湿传感器离地面高度":7,
"温湿传感器距水面高度":8,
"风速传感器距地面高度":9,
"风传感器距甲板平台高度":10,
"风速传感器距水面高度":11,
"移动平台移动方向":12,
"移动平台移动速度":13,
"海盐传感器距海面深度":14,
"浪高传感器距海面高度":15,
"浮标方位":16,
"总水深":17,
"海面/水面以下深度":18,
"船面距海面高度":19,
"方位或方位角":20,
"字符型站名":21,
"风向":201,
"风速":203,
"1分钟平均风向":205,
"1分钟平均风速":207,
"2分钟平均风向":209,
"2分钟平均风速":211,
"10分钟平均风向":213,
"10分钟平均风速":215,
"最大风速的风向":217,
"最大风速":219,
"瞬时风向":221,
"瞬时风速":223,
"极大风速的风向":225,
"极大风速":227,
"过去6小时极大瞬时风速的风向":229,
"过去6小时极大瞬时风速":231,
"过去12小时极大瞬时风速的风向":233,
"过去12小时极大瞬时风速":235,
"风力(short)":237,
"海平面气压":401,
"3小时变压":403,
"24小时变压":405,
"本站气压":407,
"最高气压":409,
"最低气压":411,
"气压":413,
"日平均气压":415,
"日平均海平面气压":417,
"高度(探空)":419,
"位势高度(探空)":421,
"温度":601,
"最高气温":603,
"最低气温":605,
"24小时变温":607,
"过去24小时最高气温":609,
"过去24小时最低气温":611,
"日平均气温":613,
"露点温度":801,
"温度露点差":803,
"相对湿度":805,
"最小相对湿度":807,
"日平均相对湿度":809,
"水汽压":811,
"日平均水汽压":813,
"降水量":1001,
"1小时降水":1003,
"3小时降水":1005,
"6小时降水":1007,
"12小时降水":1009,
"24小时降水":1011,
"日总降水":1013,
"20-08时降水量":1015,
"08-20时降水量":1017,
"20-20时降水量":1019,
"08-08时降水量":1021,
"蒸发":1023,
"蒸发(大型)":1025,
"可降水分(预报降水量)":1027,
"1分钟平均水平能见度":1201,
"10分钟平均水平能见度":1203,
"最小水平能见度":1205,
"水平能见度(人工)":1207,
"总云量":1401,
"低云量":1403,
"云底高度":1405,
"低云状(short)":1407,
"中云状(short)":1409,
"高云状(short)":1411,
"日平均总云量":1413,
"日平均低云量":1415,
"云量(低云或中云)":1417,
"云类型(short)":1419,
"现在天气(short)":1601,
"过去天气1(short)":1603,
"过去天气2(short)":1605,
"龙卷类型(short)":1801,
"龙卷所在方位(short)":1803,
"最大冰雹直径":1805,
"雷暴(short)":1807,
"电流强度(闪电定位)":1809,
"地面温度":2001,
"最高地面温度":2003,
"最低地面温度":2005,
"过去12小时最低地面温度":2007,
"5cm地温":2009,
"10cm地温":2011,
"15cm地温":2013,
"20cm地温":2015,
"40cm地温":2017,
"80cm地温":2019,
"160cm地温":2021,
"320cm地温":2023,
"草面(雪面)温度":2025,
"草面(雪面)最高温度":2027,
"草面(雪面)最低温度":2029,
"日平均地面温度":2031,
"日平均5cm地温":2033,
"日平均10cm地温":2035,
"日平均15cm地温":2037,
"日平均20cm地温":2039,
"日平均40cm地温":2041,
"日平均80cm地温":2043,
"日平均160cm地温":2045,
"日平均320cm地温":2047,
"日平均草面(雪面)温度":2049,
"地面状态(short)":2201,
"积雪深度":2203,
"雪压":2205,
"电线积冰直径":2207,
"电线积冰-现象(short)":2209,
"电线积冰-南北方向直径":2211,
"电线积冰-南北方向厚度":2213,
"电线积冰-南北方向重量":2215,
"电线积冰-东西方向直径":2217,
"电线积冰-东西方向厚度":2219,
"电线积冰-东西方向重量":2221,
"船上结冰原因(short)":2223,
"船上结冰厚度":2225,
"船上结冰速度(short)":2227,
"海冰密集度(short)":2229,
"冰情发展(short)":2231,
"冰总量和类型(short)":2233,
"冰缘方位":2235,
"冰情(short)":2237,
"最高气压出现时间":10001,
"最低气压出现时间":10003,
"最高气温出现时间":10005,
"最低气温出现时间":10007,
"最小相对湿度出现时间":10009,
"最大风速出现时间":10011,
"极大风速出现时间":10013,
"最高地面温度出现时间":10015,
"最低地面温度出现时间":10017,
"草面(雪面)最低温度出现时间":10019,
"草面(雪面)最高温度出现时间":10021,
"最小水平能见度出现时间":10023,
"天气出现时间":10025,
"海表最高温度出现时间":10027,
"海表最低温度出现时间":10029,
"最大波高出现时间":10031,
"风速表类型":2401,
"湿球温度测量方法":2403,
"海面温度测量方法":2405,
"洋流测量方法":2407,
"气压倾向特征":2409,
"海面温度":2601,
"湿球温度":2603,
"海面盐度":2605,
"海表最高温度":2607,
"海表最低温度":2609,
"海水温度":2611,
"海水盐度":2613,
"海面海流方向":2801,
"海面海流速度":2803,
"洋流方向和速度的平均周期(short)":2805,
"表层海洋面流速":2807,
"表层海洋面波向":2809,
"海流方向":2811,
"海流速度":2813,
"波浪方向":3001,
"波浪周期":3003,
"波浪高度":3005,
"风浪方向":3007,
"风浪周期":3009,
"风浪高度":3011,
"第一涌浪方向":3013,
"第一涌浪周期":3015,
"第一涌浪高度":3017,
"第二涌浪方向":3019,
"第二涌浪周期":3021,
"第二涌浪高度":3023,
"有效波高":3025,
"有效波高的周期":3027,
"平均波高":3029,
"平均波周期":3031,
"最大波高":3033,
"最大波高的周期":3035,
"人工测量浪高":3037,
"仪器测量浪高":3039,
"浪级代码(short)":3041
}
gds_station_data_element_id_name = dict(zip(gds_station_data_element_name_id.values(),gds_station_data_element_name_id.keys()))
def gds_station_data_element_id_finder(input_strs):
ele_names = finder.muti_strs_finder(input_strs,gds_station_data_element_name_id)
names_ids = {}
for names in ele_names:
names_ids[names] = gds_station_data_element_name_id[names]
print(names + " : " + str(names_ids[names]))
return names_ids
class m1_value_column:
站号 = 0
经度 = 1
纬度 = 2
拔海高度 = 3
站点级别 = 4
总云量 =5
风向 = 6
风速 = 7
气压 = 8
小时变压 = 9
过去天气1 = 10
过去天气2 =11
降水6小时 =12
低云状 =13
低云量 =14
低云高 =15
露点 =16
能见度 =17
现在天气 =18
温度 =19
中云状 =20
高云状 =21
标志1 =22
标志2 =23
日变温 = 24
日变压 =25
class m2_value_column:
站号 = 0
经度 = 1
纬度 = 2
拔海高度 = 3
位势高度 = 4
温度 = 5
温度露点差 = 6
风向 = 7
风速 = 8
class m8_value_column:
站号 = 0
经度 = 1
纬度 = 2
拔海高度 = 3
天气现象1 = 4
风向1 = 5
风速1 = 6
最低温度 = 7
最高温度 = 8
天气现象2 = 9
风向2 = 10
风速2 = 11
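# --- Usage sketch (added illustration) ---
# The *_value_column classes act as column-index enums for rows of MICAPS
# station data; `sample_row` below is fabricated example data.
if __name__ == '__main__':
    sample_row = list(range(26))
    print(sample_row[m1_value_column.温度])  # -> 19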
| [
"[email protected]"
] | |
1e14a12fb0353af32a9218ab79645ee9b390dfb1 | 51554f9c49231e4a0c7a0356456050e927ce2884 | /accounts/views.py | 901b9709e55d1d06d857e863436a139628cc653d | [
"Apache-2.0"
] | permissive | geoffreynyaga/ANGA-UTM | 10a2958e172faad66e414b561ec035a2162571e7 | 68d3033529490d3fb57ac727c8c2a2f77fcffae6 | refs/heads/master | 2022-12-09T18:30:25.622423 | 2022-01-10T18:07:29 | 2022-01-10T18:07:29 | 232,053,896 | 8 | 3 | Apache-2.0 | 2022-11-22T03:59:59 | 2020-01-06T08:10:06 | JavaScript | UTF-8 | Python | false | false | 4,576 | py | from django.shortcuts import render
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.urls import reverse_lazy
from django.forms.models import inlineformset_factory
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from flight_plans.models import FlightLog
from rpas.models import Rpas
from . import (
forms,
) # TODO: where is this needed? see line below and resolve to use just one in this doc
from .forms import UserForm
from .models import UserProfile
# Create your views here.
class LoginView(generic.FormView):
form_class = AuthenticationForm
success_url = reverse_lazy("view_airspace")
template_name = "accounts/login.html"
def get_form(self, form_class=None):
if form_class is None:
form_class = self.get_form_class()
return form_class(self.request, **self.get_form_kwargs())
def form_valid(self, form):
login(self.request, form.get_user())
return super().form_valid(form)
def logout_view(request):
logout(request)
return HttpResponseRedirect("/account/login")
class SignUp(generic.CreateView):
form_class = forms.UserCreateForm
success_url = reverse_lazy("login")
template_name = "accounts/signup.html"
@login_required() # only logged in users should access this
def edit_user(request, pk):
# querying the User object with pk from url
user = User.objects.get(pk=pk)
# prepopulate UserProfileForm with retrieved user values from above.
user_form = UserForm(instance=user)
# The sorcery begins from here, see explanation below
ProfileInlineFormset = inlineformset_factory(
User,
UserProfile,
fields=(
"phone_number",
"organization",
"bio",
"profile_pic",
"location",
"birth_date",
),
)
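    # Added explanation (for the comment above): inlineformset_factory builds
    # a formset of UserProfile forms tied to one User through the profile's
    # ForeignKey, so the user form and profile fields can be validated and
    # saved together below.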
formset = ProfileInlineFormset(instance=user)
if request.user.is_authenticated and request.user.id == user.id:
if request.method == "POST":
user_form = UserForm(request.POST, request.FILES, instance=user)
formset = ProfileInlineFormset(request.POST, request.FILES, instance=user)
if user_form.is_valid():
created_user = user_form.save(commit=False)
formset = ProfileInlineFormset(
request.POST, request.FILES, instance=created_user
)
if formset.is_valid():
created_user.save()
formset.save()
# return HttpResponseRedirect('/account/profile/')
return HttpResponseRedirect(
reverse("accounts:view_profile", args=(user.id,))
)
return render(
request,
"accounts/edit_profile.html",
{"noodle": pk, "noodle_form": user_form, "formset": formset},
)
else:
raise PermissionDenied
# class view_profile(generic.TemplateView):
# template_name = "accounts/profile.html"
# # model = UserProfile
#
# def get(self, request):
# myrpas = Rpas.objects.filter(organization = request.user.userprofile.organization)
# myflightlogs = FlightLog.objects.filter(user = request.user)
# args = {'myrpas': myrpas, 'myflightlogs':myflightlogs}
# return render(request, self.template_name ,args)
class ViewProfile(LoginRequiredMixin, generic.DetailView):
template_name = "accounts/profile.html"
model = UserProfile
def get_context_data(self, *args, **kwargs):
context = super(ViewProfile, self).get_context_data(**kwargs)
pk = self.kwargs["pk"]
thisuser = User.objects.get(pk=pk)
org = thisuser.userprofile.organization
context["myrpas"] = Rpas.objects.filter(organization=org)
context["myflightlogs"] = FlightLog.objects.filter(user=thisuser)
return context
def error_404(request, exception):
data = {}
return render(request, "errors/404.html", data)
def error_500(request):
data = {}
return render(request, "errors/500.html", data)
| [
"[email protected]"
] | |
23dc496b373f870ec52009d414579d71d99fa082 | 8807958eab34f289cc8b1b07e180af757bde7124 | /design2/test_LineClassifier.py | 75cb3a10fcec6095c64c62655aa304d8f43531da | [
"BSD-2-Clause"
] | permissive | davidjamesbeck/IJAL-interlinear | 4f34cbb8626403f7bc52db96f0349d10ca2ce674 | cb5dbb1d6aea98cce76668aa868a9189f31baf3f | refs/heads/master | 2020-03-30T11:00:46.001171 | 2018-10-01T13:50:02 | 2018-10-01T13:50:02 | 151,148,840 | 0 | 0 | BSD-2-Clause | 2018-10-01T19:45:38 | 2018-10-01T19:45:37 | null | UTF-8 | Python | false | false | 3,028 | py | import re
import sys
import unittest
from Line import *
from LineClassifier import *
import importlib
pd.set_option('display.width', 1000)
import pdb
def runTests():
test_recognizeDegenerateLine()
test_recognizeCanonicalLine()
test_recognizeWordsAsElementsLine()
test_MonkeyAndThunder_allLinesRecognized()
test_LOKONO_allLinesRecognized()
def test_recognizeDegenerateLine():
"""
MonkeyAndThunder starts off with a few introductory lines in Spanish, with English translation.
No words, no glosses, just a line with time slots, and one child
"""
print("--- test_recognizeDegenerateLine")
filename = "../testData/monkeyAndThunder/AYA1_MonkeyandThunder.eaf"
xmlDoc = etree.parse(filename)
x0 = Line(xmlDoc, 0)
assert(x0.getTierCount() == 2)
classifier = LineClassifier(x0.getTable())
assert(classifier.run() == "DegenerateLine")
def test_recognizeCanonicalLine():
"""
MonkeyAndThunder line 6 fits the canonical form:
1) a time line
"""
print("--- test_recognizeCanonicalLine")
filename = "../testData/monkeyAndThunder/AYA1_MonkeyandThunder.eaf"
xmlDoc = etree.parse(filename)
x = Line(xmlDoc, 6)
assert(x.getTierCount() == 4)
classifier = LineClassifier(x.getTable())
assert(classifier.run() == "CanonicalLine")
def test_recognizeWordsAsElementsLine():
"""
LOKONO has the canonical spokenText tier, its translation, but each word in the
spokenText is its own element, each with two children: morpheme and gloss
"""
print("--- test_recognizeWordsAsElementsLine")
filename = "../testData/LOKONO_IJAL_2.eaf"
xmlDoc = etree.parse(filename)
x = Line(xmlDoc, 1)
# print(x.getTable())
assert(x.getTierCount() == 20)
classifier = LineClassifier(x.getTable())
assert(classifier.run() == "WordsAsElementsLine")
def test_MonkeyAndThunder_allLinesRecognized():
print("--- test_MonkeyAndThunder_allLinesRecognized")
filename = "../testData/monkeyAndThunder/AYA1_MonkeyandThunder.eaf"
xmlDoc = etree.parse(filename)
lineCount = len(xmlDoc.findall("TIER/ANNOTATION/ALIGNABLE_ANNOTATION"))
assert(lineCount == 41)
for i in range(lineCount):
x = Line(xmlDoc, i)
classifier = LineClassifier(x.getTable())
classification = classifier.run()
#print("%d: %s" % (i, classification))
assert(classification in ["DegenerateLine", "CanonicalLine"])
def test_LOKONO_allLinesRecognized():
print("--- test_LOKONO_allLinesRecognized")
filename = "../testData/LOKONO_IJAL_2.eaf"
xmlDoc = etree.parse(filename)
lineCount = len(xmlDoc.findall("TIER/ANNOTATION/ALIGNABLE_ANNOTATION"))
assert(lineCount == 344)
for i in range(lineCount):
x = Line(xmlDoc, i)
classifier = LineClassifier(x.getTable())
classification = classifier.run()
#print("%d: %s" % (i, classification))
assert(classification in ["WordsAsElementsLine"])
#x = Line(xmlDoc, 28)
#x.getTable()
| [
"[email protected]"
] | |
f3400af8343d019f8d5a1257bf176ef20d2d7882 | 97d85e2958de5b413202f89154f564f7c8994b83 | /springmesh/render/__init__.py | 8b04ed234af4d9c161276e2d0f67a9b01ee98f84 | [] | no_license | afcarl/pyspringmesh | ea25a943bf1e7384e888c5dc51386a03c5c9435f | 08da6bf9ca3a989829e07a190b9a34c487b0a0d3 | refs/heads/master | 2020-03-16T23:25:49.064570 | 2016-08-23T19:45:58 | 2016-08-23T19:45:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | #!/usr/bin/env python
from . import mpl
__all__ = ['mpl']
| [
"[email protected]"
] | |
2674d077dcd3e48cf5445537f600e6171777c48d | 3f7c4de996894d83f0e999ab9e60302be5ab195f | /tests/test_fleet_telematics_api.py | 08355ed115f5e5d2e7b808128cec81a2981e98ee | [
"MIT"
] | permissive | tungson-pm/HerePy | 3f18ffddd181434c63f94abe67844c0fcb02747d | a9e2797f251ff157cf89cfae7c1605833bfee75f | refs/heads/master | 2022-12-25T06:08:21.880054 | 2020-10-05T19:54:51 | 2020-10-05T19:54:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,376 | py | #!/usr/bin/env python
import os
import time
import unittest
import json
import responses
import herepy
class FleetTelematicsApiTest(unittest.TestCase):
def setUp(self):
api = herepy.FleetTelematicsApi('api_key')
self._api = api
def test_initiation(self):
self.assertIsInstance(self._api, herepy.FleetTelematicsApi)
self.assertEqual(self._api._api_key, 'api_key')
self.assertEqual(self._api._base_url, 'https://wse.ls.hereapi.com/2/')
@responses.activate
def test_find_sequence_whensucceed(self):
with open('testdata/models/fleet_telematics_find_sequence.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findsequence.json',
expected_response, status=200)
start = str.format('{0};{1},{2}', 'WiesbadenCentralStation', 50.0715, 8.2434)
intermediate_destinations = [str.format('{0};{1},{2}', 'FranfurtCentralStation', 50.1073, 8.6647),
str.format('{0};{1},{2}', 'DarmstadtCentralStation', 49.8728, 8.6326),
str.format('{0};{1},{2}', 'FrankfurtAirport', 50.0505, 8.5698)]
end = str.format('{0};{1},{2}', 'MainzCentralStation', 50.0021, 8.259)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
response = self._api.find_sequence(start=start,
departure='2014-12-09T09:30:00%2b01:00',
intermediate_destinations=intermediate_destinations,
end=end,
modes=modes)
self.assertTrue(response)
self.assertIsInstance(response, herepy.WaypointSequenceResponse)
@responses.activate
def test_find_sequence_whenerroroccured(self):
with open('testdata/models/fleet_telematics_unauthorized_error.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findsequence.json',
expected_response, status=200)
start = str.format('{0};{1},{2}', 'WiesbadenCentralStation', 50.0715, 8.2434)
intermediate_destinations = [str.format('{0};{1},{2}', 'FranfurtCentralStation', 50.1073, 8.6647),
str.format('{0};{1},{2}', 'DarmstadtCentralStation', 49.8728, 8.6326),
str.format('{0};{1},{2}', 'FrankfurtAirport', 50.0505, 8.5698)]
end = str.format('{0};{1},{2}', 'MainzCentralStation', 50.0021, 8.259)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
with self.assertRaises(herepy.HEREError):
self._api.find_sequence(start=start,
departure='2014-12-09T09:30:00%2b01:00',
intermediate_destinations=intermediate_destinations,
end=end,
modes=modes)
@responses.activate
def test_find_pickups_whensucceed(self):
with open('testdata/models/fleet_telematics_find_pickups.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findpickups.json',
expected_response, status=200)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
start = str.format('{0},{1};{2}:{3},value:{4}', 50.115620,
8.631210, herepy.MultiplePickupOfferType.pickup.__str__(),
'GRAPEFRUITS', 1000)
departure = '2016-10-14T07:30:00+02:00'
capacity = 10000
vehicle_cost = 0.29
driver_cost = 20
max_detour = 60
rest_times = 'disabled'
intermediate_destinations = [str.format('{0},{1};{2}:{3},value:{4}', 50.118578,
8.636551, herepy.MultiplePickupOfferType.drop.__str__(),
'APPLES', 30),
str.format('{0},{1};{2}:{3}', 50.122540, 8.631070,
herepy.MultiplePickupOfferType.pickup.__str__(), 'BANANAS')]
end = str.format('{1},{2}', 'MainzCentralStation', 50.132540, 8.649280)
response = self._api.find_pickups(modes=modes,
start=start,
departure=departure,
capacity=capacity,
vehicle_cost=vehicle_cost,
driver_cost=driver_cost,
max_detour=max_detour,
rest_times=rest_times,
intermediate_destinations=intermediate_destinations,
end=end)
self.assertTrue(response)
self.assertIsInstance(response, herepy.WaypointSequenceResponse)
@responses.activate
def test_find_pickups_whenerroroccured(self):
with open('testdata/models/fleet_telematics_unauthorized_error.json', 'r') as f:
expected_response = f.read()
responses.add(responses.GET, 'https://wse.ls.hereapi.com/2/findpickups.json',
expected_response, status=200)
modes = [herepy.RouteMode.fastest, herepy.RouteMode.car, herepy.RouteMode.traffic_enabled]
start = str.format('{0},{1};{2}:{3},value:{4}', 50.115620,
8.631210, herepy.MultiplePickupOfferType.pickup.__str__(),
'GRAPEFRUITS', 1000)
departure = '2016-10-14T07:30:00+02:00'
capacity = 10000
vehicle_cost = 0.29
driver_cost = 20
max_detour = 60
rest_times = 'disabled'
intermediate_destinations = [str.format('{0},{1};{2}:{3},value:{4}', 50.118578,
8.636551, herepy.MultiplePickupOfferType.drop.__str__(),
'APPLES', 30),
str.format('{0},{1};{2}:{3}', 50.122540, 8.631070,
herepy.MultiplePickupOfferType.pickup.__str__(), 'BANANAS')]
end = str.format('{1},{2}', 'MainzCentralStation', 50.132540, 8.649280)
with self.assertRaises(herepy.HEREError):
self._api.find_pickups(modes=modes,
start=start,
departure=departure,
capacity=capacity,
vehicle_cost=vehicle_cost,
driver_cost=driver_cost,
max_detour=max_detour,
rest_times=rest_times,
intermediate_destinations=intermediate_destinations,
end=end)
| [
"[email protected]"
] | |
a85cc8cf2b49e89ca79b5d93c0af0d7e1dcec4ee | c55083d8a23a9d093b677066a5a827634c09357b | /chstrings/__init__.py | 39796432eff779705b6f260f03ae6661e1d07d2b | [
"MIT"
] | permissive | earwig/citationhunt | 211a44c7bdb67e675872ca44aeae982d33fcf359 | b6084d2958989c9082db7a8d4556a4e51b78bdb3 | refs/heads/master | 2021-01-15T16:11:11.563650 | 2016-07-21T11:08:43 | 2016-07-21T11:08:43 | 62,332,946 | 1 | 0 | null | 2016-06-30T18:16:46 | 2016-06-30T18:16:46 | null | UTF-8 | Python | false | false | 1,902 | py | import flask
import os
import json
def _preprocess_variables(config, strings):
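    # Builds safe HTML links from the per-language config and interpolates
    # them into the localized strings via flask.Markup's %-substitution.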
in_page_link = flask.Markup(
'<a target="_blank" href=%s>%s</a>')
strings['in_page'] = \
flask.Markup(strings['in_page']) % in_page_link
if config.lead_section_policy_link:
lead_section_policy_link = flask.Markup(
'<a target="_blank" href=%s>%s</a>') % (
config.lead_section_policy_link,
config.lead_section_policy_link_title)
strings['lead_section_hint'] = \
flask.Markup(strings['lead_section_hint']) % \
lead_section_policy_link
else:
strings['lead_section_hint'] = ''
beginners_hint_link = flask.Markup(
'<a target="_blank" href=%s>%s</a>') % (
config.beginners_link,
config.beginners_link_title)
strings['beginners_hint'] = \
flask.Markup(strings['beginners_hint']) % beginners_hint_link
if '404' not in config.flagged_off:
page_not_found_link = flask.Markup('<a href=%s>Citation Hunt</a>') % (
config.lang_code)
strings['page_not_found_text'] = \
flask.Markup(strings['page_not_found_text']) % page_not_found_link
strings.setdefault('instructions_goal', '')
strings.setdefault('instructions_details', '')
if strings['instructions_details']:
strings['instructions_details'] = flask.Markup(
strings['instructions_details']) % (
flask.Markup('<b>' + strings['button_wikilink'] + '</b>'),
flask.Markup('<b>' + strings['button_next'] + '</b>'),
beginners_hint_link)
return strings
def get_localized_strings(config, lang_code):
strings_dir = os.path.dirname(__file__)
    # use open() rather than the Python 2-only file() builtin
    with open(os.path.join(strings_dir, lang_code + '.json')) as f:
        strings = json.load(f)
return _preprocess_variables(config, strings)
| [
"[email protected]"
] | |
7f2bc13f3b49ac4bb99cd8a03c9d886de3c9552c | a59d55ecf9054d0750168d3ca9cc62a0f2b28b95 | /.install/.backup/platform/gsutil/gslib/help_provider.py | adf4c90d50cad5e50dfe990e242fb236c5bc9fdd | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bopopescu/google-cloud-sdk | bb2746ff020c87271398196f21a646d9d8689348 | b34e6a18f1e89673508166acce816111c3421e4b | refs/heads/master | 2022-11-26T07:33:32.877033 | 2014-06-29T20:43:23 | 2014-06-29T20:43:23 | 282,306,367 | 0 | 0 | NOASSERTION | 2020-07-24T20:04:47 | 2020-07-24T20:04:46 | null | UTF-8 | Python | false | false | 3,604 | py | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gslib.exception import CommandException
class HelpType(object):
COMMAND_HELP = 'command_help'
ADDITIONAL_HELP = 'additional_help'
ALL_HELP_TYPES = [HelpType.COMMAND_HELP, HelpType.ADDITIONAL_HELP]
# help_spec key constants.
HELP_NAME = 'help_name'
HELP_NAME_ALIASES = 'help_name_aliases'
HELP_TYPE = 'help_type'
HELP_ONE_LINE_SUMMARY = 'help_one_line_summary'
HELP_TEXT = 'help_text'
SUBCOMMAND_HELP_TEXT = 'subcommand_help_text'
# Constants enforced by SanityCheck
MAX_HELP_NAME_LEN = 15
MIN_ONE_LINE_SUMMARY_LEN = 10
MAX_ONE_LINE_SUMMARY_LEN = 80 - MAX_HELP_NAME_LEN
REQUIRED_SPEC_KEYS = [HELP_NAME, HELP_NAME_ALIASES, HELP_TYPE,
HELP_ONE_LINE_SUMMARY, HELP_TEXT]
DESCRIPTION_PREFIX = """
<B>DESCRIPTION</B>"""
SYNOPSIS_PREFIX = """
<B>SYNOPSIS</B>"""
class HelpProvider(object):
"""Interface for providing help."""
# Each subclass must define the following map.
help_spec = {
# Name of command or auxiliary help info for which this help applies.
HELP_NAME : None,
# List of help name aliases.
HELP_NAME_ALIASES : None,
# HelpType.
HELP_TYPE : None,
# One line summary of this help.
HELP_ONE_LINE_SUMMARY : None,
# The full help text.
HELP_TEXT : None,
}
# This is a static helper instead of a class method because the help loader
# (gslib.commands.help._LoadHelpMaps()) operates on classes not instances.
def SanityCheck(help_provider, help_name_map):
"""Helper for checking that a HelpProvider has minimally adequate content."""
for k in REQUIRED_SPEC_KEYS:
if k not in help_provider.help_spec or help_provider.help_spec[k] is None:
raise CommandException('"%s" help implementation is missing %s '
'specification' % (help_provider.help_name, k))
# Sanity check the content.
assert (len(help_provider.help_spec[HELP_NAME]) > 1
and len(help_provider.help_spec[HELP_NAME]) < MAX_HELP_NAME_LEN)
for hna in help_provider.help_spec[HELP_NAME_ALIASES]:
assert len(hna) > 0
one_line_summary_len = len(help_provider.help_spec[HELP_ONE_LINE_SUMMARY])
assert (one_line_summary_len > MIN_ONE_LINE_SUMMARY_LEN
and one_line_summary_len < MAX_ONE_LINE_SUMMARY_LEN)
assert len(help_provider.help_spec[HELP_TEXT]) > 10
# Ensure there are no dupe help names or aliases across commands.
name_check_list = [help_provider.help_spec[HELP_NAME]]
name_check_list.extend(help_provider.help_spec[HELP_NAME_ALIASES])
for name_or_alias in name_check_list:
    if name_or_alias in help_name_map:
raise CommandException(
'Duplicate help name/alias "%s" found while loading help from %s. '
'That name/alias was already taken by %s' % (name_or_alias,
help_provider.__module__, help_name_map[name_or_alias].__module__))
def CreateHelpText(synopsis, description):
"""Helper for adding help text headers given synopsis and description."""
return SYNOPSIS_PREFIX + synopsis + DESCRIPTION_PREFIX + description
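# Illustrative only (hypothetical, not part of gsutil): a minimal subclass
# whose spec would satisfy SanityCheck -- note the name- and summary-length
# limits enforced above.
#
#   class DemoHelp(HelpProvider):
#     help_spec = {
#       HELP_NAME: 'demo',
#       HELP_NAME_ALIASES: ['dm'],
#       HELP_TYPE: HelpType.ADDITIONAL_HELP,
#       HELP_ONE_LINE_SUMMARY: 'Demonstrates the help spec',
#       HELP_TEXT: CreateHelpText('  gsutil help demo', '  Demo help text.'),
#     }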
| [
"[email protected]"
] | |
48e2d7509c3ff795db0dc5f1698c5858c5e81c7b | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-cs/aliyunsdkcs/request/v20151215/DescribeAgilityTunnelAgentInfoRequest.py | 37758b4a66546675df1e6431acbd485dd825bb85 | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class DescribeAgilityTunnelAgentInfoRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'CS', '2015-12-15', 'DescribeAgilityTunnelAgentInfo')
self.set_uri_pattern('/agility/[Token]/agent_info')
self.set_method('GET')
def get_Token(self):
return self.get_path_params().get('Token')
def set_Token(self,Token):
self.add_path_param('Token',Token) | [
"[email protected]"
] | |
9e86b3518912ee7ce4ce5497fb45ab9c6eb765ab | 295ecf4f254c42e9201657ef0a13ec2c68c40c9b | /info/views.py | 6a2850c2b723ff267061ff6b95988447a8586342 | [] | no_license | zwolf21/StockAdmin-pre2 | 0236061284a6fe8801591608591d21129d4ea7c0 | b21d069ff215c17ce3bca040ecf9b8f48b452ed4 | refs/heads/master | 2021-05-01T09:28:59.818469 | 2016-11-30T17:33:30 | 2016-11-30T17:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,923 | py | from django.shortcuts import render, render_to_response
from django.core.urlresolvers import reverse_lazy
from django.views.generic.edit import FormView
from django.views.generic import ListView, DetailView, CreateView, TemplateView
from django.conf import settings
from django.db.models import Q
import os, sys
from .models import Info
from .forms import XlFileForm
from .modules.utils import xlDB2DicIter, is_xlfile
from django.http import HttpResponse
# Create your views here.
class DrugInfoFromXlFile(FormView):
form_class = XlFileForm
template_name = 'info/get_xlfile_form.html'
def form_valid(self, form):
recreate = form.cleaned_data['recreate']
xlfile = self.request.FILES['xlfile']
if not is_xlfile(xlfile.name):
context = {
'error_message': '파일 형식이 일치하지 않습니다',
'file_name' : xlfile.name
}
return render_to_response('info/update_failure.html', context)
temp_file = os.path.join(settings.MEDIA_ROOT,'temp.xls')
with open(temp_file, 'wb') as fp:
fp.write(xlfile.read())
di_table = xlDB2DicIter(temp_file)
os.remove(temp_file)
src_field_set = set(di_table[0])
essential_field_set = {'약품코드','EDI코드','약품명(한글)','제약회사명','일반단가','수가명','규격단위'}
if not essential_field_set < src_field_set:
context = {
'error_message' : '엑셀파일에 지정된 필수 컬럼(열) 항목이 없습니다',
'essential_fields' : essential_field_set,
'missing_fields' : essential_field_set - src_field_set,
'input_file_fields': src_field_set,
'file_name' : xlfile.name
}
return render_to_response('info/update_failure.html', context)
if recreate:
Info.objects.all().delete()
context = {
'success_count' : 0,
'failure_count' : 0,
'failures' : [],
'why' : ''
}
success_count = 0
for row in di_table:
try:
Info.objects.create(
edi = int(row['EDI코드']),
code = row['약품코드'],
name = row['약품명(한글)'],
name_as = row['수가명'],
firm = row['제약회사명'],
price = row['일반단가'],
pkg_amount = row.get('포장단위') or 1,
standard_unit = row['규격단위'],
narcotic_class = int(row.get('약품법적구분') or 0)
)
except:
type_err, val_err, trcbk = sys.exc_info()
context['failures'].append({
'error_type': type_err.__name__,
'error_value': val_err,
'error_drug_name': row.get('약품명(한글)','약품명 미지정'),
'error_drug_code': row.get('약품코드','약품코드 미지정')
})
context['failure_count']+=1
else:
context['success_count']+=1
context['total_count'] = context['failure_count']+context['success_count']
return render_to_response('info/update_result.html', context)
class IndexTV(TemplateView):
template_name = "info/drug_info.html"
| [
"[email protected]"
] | |
50f14085ebf1fa050502627f08de7bacfbbf9444 | 74c04ef3ed2bc71e728b3bb840c927a86352c6e1 | /djangotesting/jango/resturant/forms.py | 226cb77012fce96d306543ca927164a3764be1ac | [] | no_license | zamanehsani/restaurant | 06b658b277dda8fa8d4f5b598d389767ab61f876 | 0f21ce268fdc21402c32dee1ecc64850a24fcc2a | refs/heads/main | 2023-01-12T04:52:09.541112 | 2020-11-16T05:44:04 | 2020-11-16T05:44:04 | 313,192,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from resturant.models import Profile
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
first_name = forms.CharField(max_length=150)
last_name = forms.CharField(max_length=150)
class Meta:
model = User
fields =['first_name','last_name','username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
# email = forms.EmailField()
# first_name = forms.CharField(max_length=150)
# last_name = forms.CharField(max_length=150)
class Meta:
model = User
fields =['first_name','last_name','username', 'email']
class UserProfileUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields =['image', 'gender'] | [
"[email protected]"
] | |
d27d3af2bdcba02d17e1eab4c19c711a2074e5b4 | e45d2faad9389886a82ff5176853b1ff6e37caae | /argparse/055_argparse_add_argument_nargs_questionMark.py | c28aa7baa511d22b3e9de4adc7c5adf3ead24488 | [] | no_license | allenmo/python_study | 6320aa4cd80fe46ccf73076015c67bdcb6338d30 | 7aff5d810ca6e791d62235d57c072a8dc14457ca | refs/heads/master | 2021-03-24T12:00:33.079530 | 2016-11-22T23:35:58 | 2016-11-22T23:35:58 | 55,770,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--foo', nargs='?', const='c', default='d')
parser.add_argument('bar', nargs='?', default='d')
print parser.parse_args('XX --foo YY'.split())
print parser.parse_args('XX --foo'.split())
print parser.parse_args(''.split())
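# Expected output (this mirrors the nargs='?' example in the argparse docs):
#   Namespace(bar='XX', foo='YY')   - both values supplied
#   Namespace(bar='XX', foo='c')    - --foo given without a value, so const is used
#   Namespace(bar='d', foo='d')     - option absent, so both defaults apply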
| [
"[email protected]"
] | |
1a3e9cae56843f2b9167840ccb12e915ec8f7161 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /SqbyWYwqChQroXfhu_10.py | ada02ff817a159ee3621064c542db76a15142950 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py |
def lower_triang(arr):
for i in range(len(arr)):
for j in range(len(arr[0])):
if j > i:
arr[i][j] = 0
return arr
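# Example: lower_triang([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
#          -> [[1, 0, 0], [4, 5, 0], [7, 8, 9]]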
| [
"[email protected]"
] | |
78ad653bda4fe0bb743b72f0f9938f240fc28738 | 1929ce01411908ebe5f04f9db4ae1c7afef085e1 | /home/migrations/0002_load_initial_data.py | ab7698763aa5dbf140368dc4b936b332fafab746 | [] | no_license | crowdbotics-apps/wislister-15050 | 81ad2bc3b804de790adb0606c0902915269c4990 | 2e7d08e9359d011448187a428a90ef21638ade5f | refs/heads/master | 2022-12-10T08:08:17.564321 | 2020-03-25T06:57:42 | 2020-03-25T06:57:42 | 249,908,377 | 0 | 0 | null | 2022-12-08T06:06:15 | 2020-03-25T06:57:27 | Python | UTF-8 | Python | false | false | 1,290 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "wislister"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">wislister</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "wislister-15050.botics.co"
site_params = {
"name": "wislister",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
2d5e608579841a44031e64c373d497d78288e98e | 80f2fa4f1f4d56eef9471174f80b62838db9fc3b | /xdl/xdl/python/backend/mxnet/convert_utils.py | 1182f929bb080337b4f5368b36c8a279477309a7 | [
"Apache-2.0"
] | permissive | laozhuang727/x-deeplearning | a54f2fef1794274cbcd6fc55680ea19760d38f8a | 781545783a4e2bbbda48fc64318fb2c6d8bbb3cc | refs/heads/master | 2020-05-09T17:06:00.495080 | 2019-08-15T01:45:40 | 2019-08-15T01:45:40 | 181,295,053 | 1 | 0 | Apache-2.0 | 2019-08-15T01:45:41 | 2019-04-14T10:51:53 | PureBasic | UTF-8 | Python | false | false | 3,191 | py | # Copyright 2018 Alibaba Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import mxnet as mx
import numpy as np
from mxnet.initializer import One
from mxnet.initializer import Zero
from mxnet.initializer import Constant
from xdl.python.lib.datatype import DataType as xt
from xdl.python.lib.tensorshape import TensorShape as xts
class MX2XDL(object):
@staticmethod
def convert_shape(shape):
return xts(list(shape))
@staticmethod
def convert_type(dtype):
if dtype == np.int16:
return xt.int16
if dtype == np.int32:
return xt.int32
elif dtype == np.int64:
return xt.int64
elif dtype == np.float32 or dtype is None:
return xt.float
elif dtype == np.float64:
return xt.double
else:
raise Exception("unsupported datatype:", dtype)
@staticmethod
def convert_initializer(initializer, args):
import xdl.python.ops.init_ops as xi
if initializer is None or initializer == '':
return xi.Zeros()
elif initializer == 'one':
return xi.Ones()
elif initializer == 'zero':
return xi.Zeros()
elif initializer == 'constant' or initializer == 'Constant':
return xi.Constant(value=args['value'])
elif initializer == 'uniform':
scale = 0.07
      if 'scale' in args:
scale = args['scale']
return xi.UniformUnitScaling(factor=scale)
elif initializer == 'normal':
sigma = 0.01
      if 'sigma' in args:
sigma = args['sigma']
return xi.TruncatedNormal(stddev=sigma)
elif initializer == 'identity':
param = []
      if 'init_value' in args:
param = args['init_value']
return xi.Identity(np.array(param, dtype=np.float32))
else:
      raise Exception('unsupported mxnet initializer:' + initializer)
class XDL2MX(object):
@staticmethod
def convert_type(dtype):
if dtype == xt.int16:
return 'int16'
elif dtype == xt.int32:
return 'int32'
elif dtype == xt.int64:
return 'int64'
elif dtype == xt.float:
return 'float32'
elif dtype == xt.double:
return 'float64'
else:
raise Exception("unsupported datatype:", dtype)
| [
"[email protected]"
] | |
8470b45483b504a9ac0a11ddad19a85fd67badf5 | fcde32709c62b8ee86da459bb7c8eee52c848118 | /爬虫1905/day07/07_maoyanspider.py | a58245ebcd950ce02c7237e81b5a8a7f1daa3da5 | [] | no_license | klaus2015/py_base | 6b92d362c3d7dc0e09205a037f4d580381dac94d | ec32c731c1c2f6a0dab87f1d167397e4fa86b8de | refs/heads/master | 2022-07-28T15:49:30.383648 | 2020-05-11T15:31:43 | 2020-05-11T15:31:43 | 261,777,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | from selenium import webdriver
url = 'https://maoyan.com/board/4'
browser = webdriver.Chrome()
browser.get(url)
# base XPath: find_elements returns a list of matching li elements, e.g. [<selenium ... li ...>, <selenium ... li ...>]
li_list = browser.find_elements_by_xpath('//*[@id="app"]/div/div/div[1]/dl/dd')
for li in li_list:
item = {}
    # info_list: ['1', '霸王别姬', '主演:张国荣', '上映时间:1993-01-01', '9.5']  (rank, title, starring, release date, score)
info_list = li.text.split('\n')
item['number'] = info_list[0]
item['name'] = info_list[1]
item['star'] = info_list[2]
item['time'] = info_list[3]
item['score'] = info_list[4]
print(item)
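# release the WebDriver session once scraping finishes
browser.quit()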
| [
"[email protected]"
] | |
852490d729b985e69d687a1f0ed8e5043d18b59a | bc539788b876773e294383863252c1637de9eb7f | /scrapy/PycharmProjects/Reptile/automation_Testing/pageobjects/primeur.py | b5765198c8c00001d1762535bb5b9cbf98cb42fa | [] | no_license | umsung/scrapy | 4eb56bf74f3e617e49dcdec61cf77010eb912f4f | deacd9f289159c5af114b0dd3110448ad7eb43e8 | refs/heads/master | 2020-05-31T14:11:46.530793 | 2019-10-16T01:32:25 | 2019-10-16T01:32:25 | 190,321,772 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,548 | py | from framework.base_page import BasePage
import time
from selenium.webdriver.common.by import By
import random
from framework.logger import *
logger = Logger('Primeur').getlog()
class Primeur(BasePage):
navbox = (By.XPATH, "//*[@class='navbox']/a[4]")
gopage = (By.XPATH, '//*[@id="gopage"]')
qj_sort = (By.XPATH, "//*[@class='qj-sort']/ul/li[9]")
addExpectcart = (By.CLASS_NAME, "addExpectcart")
lazy = (By.CLASS_NAME, "lazy")
pay_btn = (By.XPATH, '//*[@id="Pay"]')
addcart_detail = (By.XPATH, "//*[@id='AddCart']")
priceS = (By.XPATH, '//*[@class="price-v"]')
ReceiveEmail = (By.XPATH, "//*[@id='ReceiveEmail']")
checkexpect = (By.XPATH, "//*[@id='checkexpect']")
num = (By.XPATH, '//*[@class="order-amount"]')
spanPay = (By.XPATH, '//*[@id="spanPay"]')
btnToPay = (By.XPATH, "//*[@id='btnToPay']")
btn_payment = (By.XPATH, "//*[@class='btn-payment']")
pay_zfb = (By.XPATH, "//*[@class='pay-zfb']")
pay_cft = (By.XPATH, "//*[@class='pay-cft']")
pri_input = (By.XPATH, "//*[@class='st-out']/input[1]")
pri_submit = (By.XPATH, "//*[@class='st-out']/input[2]")
def primeur_buy(self, email):
self.find_element(*self.navbox).click()
self.expected_conditions(self.gopage)
self.find_element(*self.qj_sort).click()
time.sleep(1.5)
self.find_element(*self.qj_sort).click()
time.sleep(1.5)
self.find_elements(*self.addExpectcart)[random.randint(0, 49)].click()
time.sleep(1)
self.find_elements(*self.lazy)[random.randint(0, 49)].click()
self.switch_to(1)
self.expected_conditions(self.addcart_detail).click()
price = self.expected_conditions(self.priceS).text
price = price.replace('¥', '')
self.expected_conditions(self.pay_btn).click()
self.expected_conditions(self.ReceiveEmail).clear()
time.sleep(0.5)
self.find_element(*self.ReceiveEmail).send_keys(email)
self.expected_conditions(self.checkexpect).click()
order_amount = self.expected_conditions(self.num).text
t_price = self.expected_conditions(self.spanPay).text
t_price = t_price.replace(',', '')
if float(price) * int(order_amount) == float(t_price):
logger.info('期酒价格正确:{}'.format(price))
else:
logger.info('期酒价格错误:{}'.format(price))
self.get_windows_img()
self.expected_conditions(self.btnToPay).click()
time.sleep(1)
# 默认微信支付方式
self.expected_conditions(self.btn_payment).click()
time.sleep(1)
self.back()
# 支付宝支付
self.expected_conditions(self.pay_zfb).click()
self.expected_conditions(self.btn_payment).click()
time.sleep(1)
self.back()
# 财付通支付
self.expected_conditions(self.pay_cft).click()
self.expected_conditions(self.btn_payment).click()
time.sleep(1)
self.close()
self.switch_to(0)
def primeur_search(self, text):
self.expected_conditions(self.pri_input).send_keys(text)
self.expected_conditions(self.pri_submit).click()
self.move_to(*self.navbox)
self.find_elements(By.XPATH, "//*[@class='extend-qj']/dl//a")[random.randint(0, 7)].click()
self.expected_conditions((By.XPATH, "//*[@class='qj-sort']/ul/li[9]")).click() | [
"[email protected]"
] | |
9caa0a55e144b6e14c9cb2a644b72b93caec68d8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02407/s588681694.py | 198ea6576032a9e20876cd8043d50eef73ac6a9b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | num = int(input())
data = list(map(int, input().split()))
length = len(data)
for tmp in range(length):
    if tmp == length-1:
print(data.pop(-1))
else:
print(data.pop(-1), end=" ")
| [
"[email protected]"
] | |
c9b64c4bfdc00788592a94875e52019ca0453b03 | 2451f5297cdad588f5c1450336bf4de7cd38ebd8 | /hotline/styles/__init__.py | a1af3599f2045b6c43336f947b7e68c0dffaddfa | [
"MIT"
] | permissive | danbradham/hotline | 45aebfa2e3ef53b5782dfcd006351daeed8b45ac | 267037d2b783f2fd5ed9ad16afaad9a51e821a5f | refs/heads/main | 2021-12-15T02:05:22.577499 | 2021-07-03T13:12:11 | 2021-07-03T13:12:11 | 11,076,114 | 16 | 3 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | # -*- coding: utf-8 -*-
import os
import sys
from glob import glob
this_module = sys.modules[__name__]
this_package = os.path.dirname(__file__)
for file in glob(os.path.join(this_package, '*.css')):
with open(file, 'r') as f:
data = f.read()
style_name = os.path.basename(file).split('.')[0]
setattr(this_module, style_name, data)
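# e.g. a sibling file dark.css becomes the module attribute `dark` holding the CSS text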
| [
"[email protected]"
] | |
b1159fafc24bc513627ba31c35d9f0208fb1d6a7 | d2a818967193f8f7f9e980ef5ba7decea6cb1065 | /L1Trigger/L1TMuonEndCap/python/fakeEmtfParams_2017_MC_cff.py | 3343034d1d16d8aaf7170908508f36e31701472f | [
"Apache-2.0"
] | permissive | panoskatsoulis/cmssw | 1f5bfc6664856032db6609fad1b793d63b31afa6 | 5e32e53f9a775ea197e83fdb1462f99d4c9cb1a9 | refs/heads/l1t-integration-CMSSW_9_2_8 | 2022-08-14T15:22:14.881299 | 2017-09-01T06:17:20 | 2017-09-01T06:17:20 | 102,378,833 | 0 | 1 | null | 2021-02-12T12:09:50 | 2017-09-04T16:01:18 | C++ | UTF-8 | Python | false | false | 1,459 | py | import FWCore.ParameterSet.Config as cms
## Fills CondFormats from the database
from CondCore.CondDB.CondDB_cfi import CondDB
CondDB.connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS")
## Fills firmware, pT LUT, and PC LUT versions manually
emtfParamsSource = cms.ESSource(
"EmptyESSource",
recordName = cms.string('L1TMuonEndcapParamsRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
emtfParams = cms.ESProducer(
"L1TMuonEndCapParamsESProducer",
## Version 7 was deployed June 8, 2017
PtAssignVersion = cms.int32(7),
## 123456 is default (most up-to-date) firmware version
FirmwareVersion = cms.int32(123456),
## v1 corresponds to data/emtf_luts/ph_lut_v2, used at the beginning of 2017
PrimConvVersion = cms.int32(1)
)
## Fills pT LUT XMLs ("forests") from the database
emtfForestsSource = cms.ESSource(
"EmptyESSource",
recordName = cms.string('L1TMuonEndCapForestRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
emtfForestsDB = cms.ESSource(
"PoolDBESSource",
CondDB,
toGet = cms.VPSet(
cms.PSet(
## https://cms-conddb.cern.ch/cmsDbBrowser/search/Prod/L1TMuonEndCapForest
record = cms.string("L1TMuonEndCapForestRcd"),
## v7 EMTF pT LUTs from June 8, 2017
tag = cms.string("L1TMuonEndCapForest_static_Sq_20170613_v7_mc")
)
)
)
| [
"[email protected]"
] | |
ce5951450a4cd865151ae115b4118a18ee34d959 | d7d53826ab804a3d0f229b0a189f2626d4ebe99b | /platforms/renren/renren_python/__init__.py | ab12380bfd9707774f6d9c67043474655e297150 | [] | no_license | zbcbcbc/xiaomaifeng | 6e299e7f1d13dbca95af7a1e46d66dd0d1c86b08 | 91b7da9404678227d3c2c4a446777be6dacdedb7 | refs/heads/master | 2020-12-02T16:58:26.661967 | 2016-09-04T17:53:51 | 2016-09-04T17:53:51 | 67,359,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | # -*- coding: utf-8 -*-
from renren import *
from config import * | [
"[email protected]"
] | |
0849a8f3ae4967918963852dc4c3560d04704001 | 6d97e875fb6a3dea9780d918efe33dfd59ac137d | /python/analysisSettings.py | a55ccd8f080ca88a450c60fb86a02defc6c6a6ce | [] | no_license | acarvalh/tth-htt | 0a1350efcf76f425057c809f74d92ae3d719d008 | c6bb3f2bfb6620c858d29c800be1ae1e2246904a | refs/heads/master | 2021-06-20T05:29:35.657498 | 2018-06-02T01:34:37 | 2018-06-02T01:34:37 | 104,874,635 | 0 | 0 | null | 2017-09-26T11:10:10 | 2017-09-26T11:10:10 | null | UTF-8 | Python | false | false | 9,242 | py | # Integrated luminosity
# Reproduced https://github.com/HEP-KBFI/tth-nanoAOD/blob/4564463eed45251a4fd274ed754b1a37bae8e98e/test/datasets_data_2017_v2.txt#L33
# Official figures: https://hypernews.cern.ch/HyperNews/CMS/get/luminosity/761/1.html
lumi_2017 = 41.529e+3 # 1/pb (uncertainty: 2.3%)
# Systematic uncertainties
class systematics(object):
# Basic definitions
central = [ "central" ]
JES = [ "CMS_ttHl_JESUp", "CMS_ttHl_JESDown" ]
JER = [ "CMS_ttHl_JERUp", "CMS_ttHl_JERDown" ]
UnclusteredEn = [ "CMS_ttHl_UnclusteredEnUp", "CMS_ttHl_UnclusteredEnDown" ]
tauES = [ "CMS_ttHl_tauESUp", "CMS_ttHl_tauESDown" ]
class LHE(object):
class TTH(object):
x1 = [ "CMS_ttHl_thu_shape_ttH_x1Up", "CMS_ttHl_thu_shape_ttH_x1Down" ]
y1 = [ "CMS_ttHl_thu_shape_ttH_y1Up", "CMS_ttHl_thu_shape_ttH_y1Down" ]
class TTW(object):
x1 = [ "CMS_ttHl_thu_shape_ttW_x1Up", "CMS_ttHl_thu_shape_ttW_x1Down" ]
y1 = [ "CMS_ttHl_thu_shape_ttW_y1Up", "CMS_ttHl_thu_shape_ttW_y1Down" ]
class TTZ(object):
x1 = [ "CMS_ttHl_thu_shape_ttZ_x1Up", "CMS_ttHl_thu_shape_ttZ_x1Down" ]
y1 = [ "CMS_ttHl_thu_shape_ttZ_y1Up", "CMS_ttHl_thu_shape_ttZ_y1Down" ]
        ttH = TTH.x1 + TTH.y1
        ttW = TTW.x1 + TTW.y1
        ttZ = TTZ.x1 + TTZ.y1
full = ttH + ttW + ttZ
class Btag(object):
HF = [ "CMS_ttHl_btag_HFUp", "CMS_ttHl_btag_HFDown" ]
HFStats1 = [ "CMS_ttHl_btag_HFStats1Up", "CMS_ttHl_btag_HFStats1Down" ]
HFStats2 = [ "CMS_ttHl_btag_HFStats2Up", "CMS_ttHl_btag_HFStats2Down" ]
LF = [ "CMS_ttHl_btag_LFUp", "CMS_ttHl_btag_LFDown" ]
LFStats1 = [ "CMS_ttHl_btag_LFStats1Up", "CMS_ttHl_btag_LFStats1Down" ]
LFStats2 = [ "CMS_ttHl_btag_LFStats2Up", "CMS_ttHl_btag_LFStats2Down" ]
cErr1 = [ "CMS_ttHl_btag_cErr1Up", "CMS_ttHl_btag_cErr1Down" ]
cErr2 = [ "CMS_ttHl_btag_cErr2Up", "CMS_ttHl_btag_cErr2Down" ]
full = HF + HFStats1 + HFStats2 + LF + LFStats1 + LFStats2 + cErr1 + cErr2
class FakeRate_e_shape(object):
pt = [ "CMS_ttHl_FRe_shape_ptUp", "CMS_ttHl_FRe_shape_ptDown" ]
eta = [ "CMS_ttHl_FRe_shape_etaUp", "CMS_ttHl_FRe_shape_etaDown" ]
eta_barrel = [ "CMS_ttHl_FRe_shape_eta_barrelUp", "CMS_ttHl_FRe_shape_eta_barrelDown" ]
full = pt + eta + eta_barrel
class FakeRate_m_shape(object):
pt = [ "CMS_ttHl_FRm_shape_ptUp", "CMS_ttHl_FRm_shape_ptDown" ]
eta = [ "CMS_ttHl_FRm_shape_etaUp", "CMS_ttHl_FRm_shape_etaDown" ]
full = pt + eta
class FakeRate_t(object):
jt_norm = [ "CMS_ttHl_FRjt_normUp", "CMS_ttHl_FRjt_normDown" ]
jt_shape = [ "CMS_ttHl_FRjt_shapeUp", "CMS_ttHl_FRjt_shapeDown" ]
et_shift = [ "CMS_ttHl_FRet_shiftUp", "CMS_ttHl_FRet_shiftDown" ]
mt_shift = [ "CMS_ttHl_FRmt_shiftUp", "CMS_ttHl_FRmt_shiftDown" ]
full = jt_norm + jt_shape + et_shift + mt_shift
class Electron_energy(object):
ER = [ "CMS_ttHl_electronERUp", "CMS_ttHl_electronERDown" ]
ESEndcap = [ "CMS_ttHl_electronESEndcapUp", "CMS_ttHl_electronESEndcapDown" ]
ESBarrel = [ "CMS_ttHl_electronESBarrelUp", "CMS_ttHl_electronESBarrelDown" ]
full = ER + ESEndcap + ESBarrel
class Muon_energy(object):
ER = [ "CMS_ttHl_muonERUp", "CMS_ttHl_muonERDown" ]
ESBarrel1 = [ "CMS_ttHl_muonESBarrel1Up", "CMS_ttHl_muonESBarrel1Down" ]
ESBarrel2 = [ "CMS_ttHl_muonESBarrel2Up", "CMS_ttHl_muonESBarrel2Down" ]
ESEndcap1 = [ "CMS_ttHl_muonESEndcap1Up", "CMS_ttHl_muonESEndcap1Down" ]
ESEndcap2 = [ "CMS_ttHl_muonESEndcap2Up", "CMS_ttHl_muonESEndcap2Down" ]
full = ER + ESBarrel1 + ESBarrel2 + ESEndcap1 + ESEndcap2
lhe = LHE()
btag = Btag()
FRe_shape = FakeRate_e_shape()
FRm_shape = FakeRate_m_shape()
FR_t = FakeRate_t()
electron_E = Electron_energy()
muon_E = Muon_energy()
# Analysis-specific definitions
an_leptonFR = central + JES + JER + UnclusteredEn
an_jetToTauFR = central + JES + tauES
an_addMEM = central + JES + JER + tauES + UnclusteredEn
an_chargeFlip_e = central + electron_E.full
an_chargeFlip_mu = central + muon_E.full
an_ctrl = central + JES + lhe.full
an_common = central + JES + tauES + btag.full + FR_t.full + lhe.full
# CV: enable the CMS_ttHl_FRe_shape and CMS_ttHl_FRm_shape only if you plan to run compShapeSyst 1!
an_extended = an_common + FRe_shape.full + FRm_shape.full
class Triggers(object):
def __init__(self, era):
if era == "2017":
self.triggers_analysis = {
'3mu' : {
'HLT_TripleMu_12_10_5',
},
'1e2mu' : {
# 'HLT_DiMu9_Ele9_CaloIdL_TrackIdL', # prescale of 2
'HLT_DiMu9_Ele9_CaloIdL_TrackIdL_DZ', # unprescaled
},
'2e1mu' : {
'HLT_Mu8_DiEle12_CaloIdL_TrackIdL',
},
'3e' : {
'HLT_Ele16_Ele12_Ele8_CaloIdL_TrackIdL', # has PU dependence
},
'2mu' : {
# 'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL', # heavily prescaled throughout 2017 data-taking period
'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ', # unprescaled in 2017B; heavily prescaled since 2017C
'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass3p8', # introduced in 2017C
},
'1e1mu' : {
'HLT_Mu23_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL', # not present in 2017B
'HLT_Mu23_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL_DZ',
'HLT_Mu12_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ',
},
'2e' : {
'HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL', # higher efficiency than non-DZ; not present in 2017B
'HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ',
},
'1mu' : {
'HLT_IsoMu24', # not enabled at high lumi
'HLT_IsoMu27',
},
'1e' : {
'HLT_Ele32_WPTight_Gsf', # not present in 2017BC (or, equivalently, not enabled at high lumi)
'HLT_Ele35_WPTight_Gsf',
},
# CV: tau trigger paths taken from slide 6 of presentation given by Hale Sert at HTT workshop in December 2017
# (https://indico.cern.ch/event/684622/contributions/2807071/attachments/1575421/2487940/141217_triggerStatusPlans_hsert.pdf),
# except that the 'HLT_IsoMu24_eta2p1_LooseChargedIsoPFTau20_SingleL1' path has been dropped,
# as it was found to increase the trigger acceptance only marginally
# (cf. slide 19 of https://indico.cern.ch/event/683144/contributions/2814995/attachments/1570846/2478034/Ruggles_TauTriggers_TauPOG_20171206_v7.pdf)
'1mu1tau' : {
'HLT_IsoMu20_eta2p1_LooseChargedIsoPFTau27_eta2p1_CrossL1',
},
'1e1tau' : {
'HLT_Ele24_eta2p1_WPTight_Gsf_LooseChargedIsoPFTau30_eta2p1_CrossL1',
},
'2tau' : {
'HLT_DoubleMediumChargedIsoPFTau35_Trk1_eta2p1_Reg',
'HLT_DoubleTightChargedIsoPFTau35_Trk1_TightID_eta2p1_Reg',
'HLT_DoubleMediumChargedIsoPFTau40_Trk1_TightID_eta2p1_Reg',
'HLT_DoubleTightChargedIsoPFTau40_Trk1_eta2p1_Reg',
},
}
self.triggers_leptonFR = {
'1e' : {
'HLT_Ele8_CaloIdM_TrackIdM_PFJet30',
'HLT_Ele17_CaloIdM_TrackIdM_PFJet30',
'HLT_Ele23_CaloIdM_TrackIdM_PFJet30',
},
'1mu' : {
'HLT_Mu27',
'HLT_Mu20',
'HLT_Mu3_PFJet40',
},
'2e' : set(),
'2mu' : {
'HLT_Mu17',
'HLT_Mu8',
}
}
self.blacklist = {
'Run2017B' : {
'1e' : { 'HLT_Ele32_WPTight_Gsf', 'HLT_Ele8_CaloIdM_TrackIdM_PFJet30', 'HLT_Ele17_CaloIdM_TrackIdM_PFJet30' },
'1mu' : { 'HLT_Mu3_PFJet40' },
'1e1mu' : { 'HLT_Mu23_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL' },
'2mu' : { 'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass3p8' },
},
'Run2017C' : {
'1e' : { 'HLT_Ele32_WPTight_Gsf' },
},
}
else:
raise ValueError("Invalid era: %s" % era)
self.triggers_all = {}
for trigger_name in list(set(self.triggers_analysis.keys()) | set(self.triggers_leptonFR.keys())):
self.triggers_all[trigger_name] = set()
if trigger_name in self.triggers_analysis:
self.triggers_all[trigger_name].update(self.triggers_analysis[trigger_name])
if trigger_name in self.triggers_leptonFR:
self.triggers_all[trigger_name].update(self.triggers_leptonFR[trigger_name])
        self.triggers_analysis_flat = { trigger for triggers in self.triggers_analysis.values() for trigger in triggers }
        self.triggers_leptonFR_flat = { trigger for triggers in self.triggers_leptonFR.values() for trigger in triggers }
self.triggers_flat = self.triggers_analysis_flat | self.triggers_leptonFR_flat
        self.blacklist_flat = {}
        for blacklist_process in self.blacklist:
            self.blacklist_flat[blacklist_process] = set()
            for trigger_name in self.blacklist[blacklist_process]:
                self.blacklist_flat[blacklist_process].update(self.blacklist[blacklist_process][trigger_name])
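# Illustrative usage (sketch, not part of the original module):
#   triggers = Triggers("2017")
#   assert 'HLT_IsoMu27' in triggers.triggers_flat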
| [
"[email protected]"
] | |
fa124ae1000dfb25e11780f6a3e0bfed4690739f | c6c61ae056151292b84cb8840bc90120bdea0152 | /payment_bridge/tests/common.py | 1a1c907589d6f1860c4644e7e52c44c1170984d4 | [] | no_license | zbyte64/active_merchant_compat | a61bd0a1dbdbd2e76af71264aff0cefc606f1cfc | e9a95563c8c7afec684b13ff40836a8177c3a0f2 | refs/heads/master | 2021-01-25T08:55:06.816324 | 2012-11-28T19:54:51 | 2012-11-28T19:54:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,759 | py | import base64
import json
import unittest
import yaml
import os
from payment_bridge.wsgi import BaseDirectPostApplication
global_config = {}
inpath = os.path.join(os.getcwd(), 'gateways.yaml')
if os.path.exists(inpath):
infile = open(inpath)
global_config = yaml.load(infile) or {}
else:
print "Please create the following file with gateway credentials:", inpath
class BaseTestDirectPostApplication(BaseDirectPostApplication):
def __init__(self, **kwargs):
self.gateway = kwargs.pop('gateway')
super(BaseTestDirectPostApplication, self).__init__(**kwargs)
def load_gateways_config(self):
return [self.gateway]
def decrypt_data(self, encrypted_data):
"""
Takes an encoded string and returns a dictionary
"""
return json.loads(base64.b64decode(encrypted_data))
def encrypt_data(self, params):
"""
Takes a dictionary and returns a string
"""
return base64.b64encode(json.dumps(params))
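    # NOTE: base64 is reversible encoding, not encryption; these stubs exist
    # only to exercise the bridge's encode/decode hooks in tests.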
class PaymentData(object):
cc_info = {
'cc_number':'4111 1111 1111 1111',
'cc_exp_year': '2015',
'cc_exp_month': '11',
'cc_ccv': '111',
'bill_first_name':'John',
'bill_last_name': 'Smith',
}
bill_address = {
'bill_first_name':'John',
'bill_last_name': 'Smith',
'bill_address1':'5555 Main St',
'bill_address2':'',
'bill_city':'San Diego',
'bill_state':'CA',
'bill_country':'US',
'bill_zip':'92101',
'bill_email':'[email protected]',
}
ship_address = {
'ship_first_name':'John',
'ship_last_name': 'Smith',
'ship_address1':'5555 Main St',
'ship_address2':'',
'ship_city':'San Diego',
'ship_state':'CA',
'ship_country':'US',
'ship_zip':'92101',
'ship_email':'[email protected]',
}
def get_cc_info(self):
return dict(self.cc_info)
def get_bill_address(self):
return dict(self.bill_address)
def get_bill_info(self):
info = self.get_cc_info()
info.update(self.bill_address)
return info
def get_ship_address(self):
return dict(self.ship_address)
def get_all_info(self):
info = self.get_bill_info()
info.update(self.ship_address)
return info
class BaseGatewayTestCase(unittest.TestCase):
gateway = {}
def setUp(self):
self.checkGatewayConfigured()
gateway = dict(self.gateway)
gateway['params'] = self.read_gateway_params()
self.application = BaseTestDirectPostApplication(redirect_to='http://localhost:8080/direct-post/', gateway=gateway)
self.data_source = PaymentData()
def tearDown(self):
self.application.shutdown()
def read_gateway_params(self):
return global_config.get(self.gateway['module'], None)
def get_supported_actions(self):
if not hasattr(self, '_supported_actions'):
#calling a gateway with action = None is a request for the supported actions
response = self.application.call_bridge(data=None, secure_data=None, gateway='test', action=None)
if response['message'] == 'Unrecognized gateway':
self.skipTest(response['message'])
self._supported_actions = response['supported_actions']
return self._supported_actions
def checkGatewayConfigured(self):
if self.read_gateway_params() == None:
self.skipTest("Gateway unconfigured")
def checkGatewaySupport(self, action):
if not action in self.get_supported_actions():
self.skipTest("Unsupported action: %s" % action)
| [
"[email protected]"
] | |
e3f10a7582e6fc5d779950e44c40a5806c9fe248 | b7f45072d056b80ed49e6bcde91877d8576e970d | /ImageJ/py/test_close_non_image_window.py | 2cd2c6e0e90d945001ede2dac8896cf07f92104b | [] | no_license | jrminter/tips | 128a18ee55655a13085c174d532c77bcea412754 | f48f8b202f8bf9e36cb6d487a23208371c79718e | refs/heads/master | 2022-06-14T08:46:28.972743 | 2022-05-30T19:29:28 | 2022-05-30T19:29:28 | 11,463,325 | 5 | 8 | null | 2019-12-18T16:24:02 | 2013-07-17T00:16:43 | Jupyter Notebook | UTF-8 | Python | false | false | 470 | py | from ij import IJ, ImagePlus, WindowManager, Prefs, ImageStack
import jmFijiGen as jmg
IJ.run("Close All")
# load an image and create a Result Window and a ROI Manager
imp = IJ.openImage("http://imagej.nih.gov/ij/images/blobs.gif")
imp.show()
IJ.setAutoThreshold(imp, "Default");
IJ.run("Convert to Mask")
IJ.run(imp, "Analyze Particles...", "display exclude clear add")
jmg.close_open_non_image_window("Results")
jmg.close_open_non_image_window("ROI Manager")
| [
"[email protected]"
] | |
af3a768529efb8bb50385450db2321e290882c18 | f0d583a064cc53510d8b00b42ac869832e70bf41 | /facerecognition/evaluate/FaceCropper.py | 6826195a7621246c2a234747f1b31805e68464ca | [] | no_license | PaulZoni/nn | 918d543b4b2d955ff991da70ce4e88d4d94d13c8 | 25a81579499c893584b040f536ddbef254197f4e | refs/heads/master | 2020-04-27T19:05:10.968050 | 2019-06-27T12:22:16 | 2019-06-27T12:22:16 | 174,564,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | import cv2
import numpy as np
class FaceCropper(object):
CASCADE_PATH = "/home/pavel/PycharmProjects/nn/facerecognition/evaluate/haarcascade_frontalface_default.xml"
frontal_face_extended = "/home/pavel/PycharmProjects/nn/facerecognition/evaluate/haarcascade_frontalcatface_extended.xml"
def __init__(self):
self.face_cascade = cv2.CascadeClassifier(self.CASCADE_PATH)
def generate(self, image_path=None, show_result=None, size=32, inter=cv2.INTER_AREA, frame=None):
img = None
if frame is None:
img = cv2.imread(image_path)
else:
img = frame
if img is None and frame is None:
print("Can't open image file")
return 0
print(len(img))
faces = self.face_cascade.detectMultiScale(img, 1.1, 3, minSize=(100, 100),)
if faces is None:
print('Failed to detect face')
return 0
if show_result:
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
image_resize = cv2.resize(img, (960, 540))
cv2.imshow('img', image_resize)
cv2.waitKey(0)
cv2.destroyAllWindows()
facecnt = len(faces)
print("Detected faces: %d" % facecnt)
        if facecnt == 0:
return 0
i = 0
height, width = img.shape[:2]
last_images = []
for (x, y, w, h) in faces:
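            # expand each detection to a square crop centred on the face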
r = max(w, h) / 2
centerx = x + w / 2
centery = y + h / 2
nx = int(centerx - r)
ny = int(centery - r)
nr = int(r * 2)
faceimg = img[ny:ny + nr, nx:nx + nr]
lastimg = cv2.resize(faceimg, (size, size), interpolation=inter)
i += 1
last_images.append(lastimg)
return np.array(last_images)
| [
"[email protected]"
] | |
e874ce17a476b6813ee430fd51b64ccbb202365f | e174e13114fe96ad2a4eeb596a3d1c564ae212a8 | /Python for Finance Analyze Big Financial Data by Y. Hilpisch/Code of Python For Finance/4375OS_03_Code/4375OS_03_22_dir2_default_input_value.py | 2c4bf3307842ec868e3d734bbc87c545bf5e7179 | [] | no_license | Kevinqian0501/python_books | c1a7632d66dceb46db439f7cbed86d85370aab42 | 0691e4685af03a296aafb02447e3585db55ce461 | refs/heads/master | 2021-08-30T19:27:03.985464 | 2017-12-19T05:56:31 | 2017-12-19T05:56:31 | 104,145,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | """
Name : 4375OS_03_22_dir2_default_input_value.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/25/2013
email : [email protected]
[email protected]
"""
# with a default input value
def dir2(path='c:\python32'):
from os import listdir
print(listdir(path))
| [
"[email protected]"
] | |
0e266919ea78b49136e3fa48756b2e0ad863ee7f | 6390a7f030cc6b2ff61237e41360af2d270e1efb | /tests/numpy/type.py | bf439e924a092a27e2e2c855344ea182de7bcdd3 | [
"MIT"
] | permissive | davidkellis/py2rb | b999ca4c3b9316d19ac42c6d57fbbc158ee35700 | 4518a1549cfacc25a1ea3c736bca3de15a123878 | refs/heads/master | 2023-06-22T05:32:16.209823 | 2021-07-05T01:55:53 | 2021-07-05T01:55:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | # coding: utf-8
import numpy as np
print(np.int8)
print(np.int16)
print(np.int32)
print(np.int64)
print(np.int)
print(np.uint8)
print(np.uint16)
print(np.uint32)
print(np.uint64)
print(np.uint)
print(np.float32)
print(np.float64)
| [
"[email protected]"
] | |
6b94cc6f96b64548b8a6f5d1eb93e9473a97fb84 | 23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6 | /rootfs/usr/lib/pymodules/python2.6/papyon/event/profile.py | dc2baa746dd1841a536a99b43d7b2b21fd559ad9 | [] | no_license | xinligg/trainmonitor | 07ed0fa99e54e2857b49ad3435546d13cc0eb17a | 938a8d8f56dc267fceeb65ef7b867f1cac343923 | refs/heads/master | 2021-09-24T15:52:43.195053 | 2018-10-11T07:12:25 | 2018-10-11T07:12:25 | 116,164,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | /usr/share/pyshared/papyon/event/profile.py | [
"[email protected]"
] | |
1343447d884966d58eae60eff8a5d897df8e129a | 0aec617440075b73e5da64cd1477b6a098ed864c | /data_structures/recursion/binary_search.py | 496f81ebb3311fb3e671c99933525b63e1203629 | [
"MIT"
] | permissive | severian5it/udacity_dsa | 0b1512cc8c5125149d6be6f78fa14446e7ab5c25 | e47f27b0179961d6107fe46a236ac7d887fe6816 | refs/heads/main | 2023-03-07T02:24:37.299599 | 2021-02-14T10:34:50 | 2021-02-14T10:34:50 | 316,949,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | #bear in mind, this is log(n)
def binary_search(arr, target):
return binary_search_func(arr, 0, len(arr) - 1, target)
def binary_search_func(arr, start_index, end_index, target):
if start_index > end_index:
return -1
mid_index = (start_index + end_index) // 2
if arr[mid_index] == target:
return mid_index
elif arr[mid_index] > target:
return binary_search_func(arr, start_index, mid_index - 1, target)
else:
return binary_search_func(arr, mid_index + 1, end_index, target) | [
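# Illustrative checks:
#   binary_search([1, 3, 5, 7, 9], 7) -> 3
#   binary_search([1, 3, 5, 7, 9], 4) -> -1 (not found)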
"[email protected]"
] | |
23397448d1fe6599e575a43d4155512a93975142 | d9f6894acb9bc7f86e218fdec9f55d131889f4c3 | /env/bin/gunicorn_paster | d6d38ef658a6cd5087a558e75a0a1ab97881df4e | [] | no_license | Marckhz/easycredit | 9f2fbc678c14a2fb6b2f972a6041b5aa6bf90a3b | bc67ad796ee7d3b5c1a93e0eaa4a907211ad9644 | refs/heads/master | 2020-03-26T20:39:14.767302 | 2018-08-20T00:35:49 | 2018-08-20T00:35:49 | 145,337,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | #!/home/marco/ConCredito/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
2bc186d49fd3741a5945895a8313e016d372f690 | d10724d15f2888c5d2de8abb340995aa2a2074b9 | /examples/python/src/07fizzbuzz/main.py | a7bacd972b2efa6b474803bcf2f437439b106265 | [
"MIT"
] | permissive | podhmo/prestring | 5849e7f7de3626e8a1f48740190d98cd55bd3721 | 8a3499377d1b1b2b180809b31bd7536de5c3ec4d | refs/heads/master | 2021-07-16T06:35:10.555681 | 2021-03-28T05:35:37 | 2021-03-28T05:35:37 | 31,548,112 | 10 | 1 | MIT | 2021-03-28T05:27:35 | 2015-03-02T15:53:34 | Python | UTF-8 | Python | false | false | 292 | py | def fizzbuzz(n: int) -> str:
if n % 3 == 0 and n % 5 == 0:
return "fizzbuzz"
elif n % 3 == 0:
return "fizz"
elif n % 5 == 0:
return "buzz"
else:
return str(n)
if __name__ == "__main__":
print(", ".join(fizzbuzz(i) for i in range(1, 21)))
| [
"[email protected]"
] | |
694a55ffe10f4262a60d4c2029e30a6b57a22ff9 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/499.py | b15fef3836eebfaaa053844940f5b5fa956d25de | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | for _ in xrange(input()):
print "Case #%d:" % (_+1),
n = raw_input()
l = len(n)
nn = map(int, n)
def dfs(c, less, st):
if c == l:
return int(st)
if less:
v = dfs(c+1, 1, st + '9')
else:
v = 0
if c == l-1 or nn[c] <= nn[c+1]:
v = max(v, dfs(c+1, 0, st + n[c]))
if c == 0 or nn[c-1] <= nn[c]-1:
v = max(v, dfs(c+1, 1, st + str(nn[c]-1)))
return v
print dfs(0, 0, "")
| [
"[email protected]"
] | |
45b7193b9e36e0ceb7d6cdceeb758a380ea8adb4 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=75/sched.py | 580c5a476d4a3ce082bab13eb8366ff4f2034cf6 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | -X FMLP -Q 0 -L 2 84 250
-X FMLP -Q 0 -L 2 79 400
-X FMLP -Q 0 -L 2 66 300
-X FMLP -Q 1 -L 1 54 250
-X FMLP -Q 1 -L 1 50 250
-X FMLP -Q 1 -L 1 49 400
-X FMLP -Q 2 -L 1 41 300
-X FMLP -Q 2 -L 1 37 125
-X FMLP -Q 3 -L 1 35 250
-X FMLP -Q 3 -L 1 31 300
30 125
28 125
26 300
25 100
21 100
19 125
15 175
11 100
10 100
7 100
| [
"[email protected]"
] | |
2adfa7d968a07dd30d191878d89081daf3f7949b | c7e028d71b5dd72eb18b72c6733e7e98a969ade6 | /src/demos/datastructures/fifo.py | 74444fc181f799a0428cb21e7b27d0e754254573 | [
"MIT"
] | permissive | antoniosarosi/algoritmia | da075a7ac29cc09cbb31e46b82ae0b0ea8ee992f | 22b7d61e34f54a3dee03bf9e3de7bb4dd7daa31b | refs/heads/master | 2023-01-24T06:09:37.616107 | 2020-11-19T16:34:09 | 2020-11-19T16:34:09 | 314,302,653 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | #coding: latin1
#< full
from algoritmia.datastructures.queues import Fifo
dfltFifo = Fifo([0, 1])
listBasedFifo = Fifo([0, 1], createList=lambda data: list(data))
for i in range(2, 6):
listBasedFifo.push(i)
dfltFifo.push(i)
while len(listBasedFifo) > 0:
print(dfltFifo.pop(), listBasedFifo.pop(), end=" : ")
#> full | [
"amarzal@localhost"
] | amarzal@localhost |
b64d842a5f0f64d7ae91f197a6e0a98a5a0be31d | f7a474af31989a7492411b9e18ba76d3c1527029 | /Day-18/DjangoForm/views.py | b64cc455fe609dd5123bb340d8d00f2c33eeac6a | [] | no_license | chikkalarameshsaikumar/Django-TOT | 01fa4190ca7d2c23e3e0d74e704037babd5b3217 | fb91bb6b2db306b1379f2c00f8d5d27e9b5821f2 | refs/heads/main | 2023-02-05T00:13:03.310573 | 2020-12-25T11:10:03 | 2020-12-25T11:10:03 | 339,008,757 | 0 | 1 | null | 2021-02-15T08:18:18 | 2021-02-15T08:18:18 | null | UTF-8 | Python | false | false | 1,545 | py | from django.shortcuts import render,redirect
from django.http import HttpResponse
# Create your views here.
from .models import Register
from .forms import Reg, RegisterForm, DynamicHtmlFormGen
def registerForm(request):
if request.method=='POST':
#data = request.POST
#print(data)
# name = data['name']
# print(name)
f = RegisterForm(request.POST)
f.save()
return HttpResponse("record inserted successfully...")
f = RegisterForm()
return render(request,'DjangoForm/registerForm.html',{"f":f})
def fetchAll(request):
data = Register.objects.all()
#print(data)
#return HttpResponse('check in cmd')
return render(request,'DjangoForm/fetchAll.html',{'data':data})
def dynamicHtmlFormGen(request):
# return HttpResponse("hi i am working fine")
t = DynamicHtmlFormGen()
return render(request,'DjangoForm/dynamicHtmlFormGen.html',{'form':t})
def home(request):
return render(request,'DjangoForm/home.html')
def rgform(request):
if request.method == "POST":
y = Reg(request.POST)
if y.is_valid():
# print(y)
y.save()
return redirect("/")
y = Reg()
return render(request,'DjangoForm/register.html',{'tg':y})
def fetchall(request):
t = Register.objects.all()
return render(request,'DjangoForm/fetch.html',{'y':t})
def upd(request,id):
a = Register.objects.get(id=id)
if request.method == "POST":
w = Reg(request.POST,instance=a)
if w.is_valid():
w.save()
return redirect('/ft')
w = Reg(instance=a)
return render(request,'DjangoForm/update.html',{'t':w}) | [
"[email protected]"
] | |
fc34da7d0a63f931eb43704be15efd3f638678f9 | 650b3dd4cc74f32db78f7d99cef9907aec78a222 | /dialogs/tools/fDepreciation_data.py | 832cfd0bbed68f7aae6e702a9f8b189942aee073 | [] | no_license | mech4/PKTrx | 29b871ab587434e7c208175c248f48d9b6c80a17 | cf01bc5be8837d632974786d2419c58b94a0381d | refs/heads/master | 2020-03-29T19:55:07.331831 | 2012-09-18T20:22:52 | 2012-09-18T20:22:52 | 6,289,691 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | import sys
import com.ihsan.foundation.pobjecthelper as phelper
def FormSetDataEx(uideflist,params):
config = uideflist.config
uipData = uideflist.uipData.Dataset.AddRecord()
app = config.AppObject
res = app.rexecscript('accounting','appinterface/AccountingDay.GetLastCloseDate',app.CreateValues())
rec = res.FirstRecord
  if rec.Is_Err : raise Exception(rec.Err_Message)
LastCloseDate = int(rec.LastCloseDate)
uipData.LastCloseDate = LastCloseDate
uipData.ProcessDate = LastCloseDate + 1
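  # the processing day immediately follows the last accounting close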
| [
"[email protected]"
] | |
e7591c29d28eb94dede0687778c05ae5ebba9be1 | b08870f8fe7b3cf1bbab3c52a7bacbb36ee1dcc6 | /verp/hr/doctype/department/department.py | 78df3a770042e793040a9911f00f7b77bfe97d92 | [] | no_license | vsadminpk18/verpfinalversion | 7148a64fe6134e2a6371470aceb1b57cc4b5a559 | 93d164b370ad9ca0dd5cda0053082dc3abbd20da | refs/heads/master | 2023-07-13T04:11:59.211046 | 2021-08-27T06:26:48 | 2021-08-27T06:26:48 | 400,410,611 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.nestedset import NestedSet, get_root_of
from verp.utilities.transaction_base import delete_events
from frappe.model.document import Document
class Department(NestedSet):
nsm_parent_field = 'parent_department'
def autoname(self):
root = get_root_of("Department")
if root and self.department_name != root:
self.name = get_abbreviated_name(self.department_name, self.company)
else:
self.name = self.department_name
def validate(self):
if not self.parent_department:
root = get_root_of("Department")
if root:
self.parent_department = root
def before_rename(self, old, new, merge=False):
# renaming consistency with abbreviation
		if frappe.get_cached_value('Company', self.company, 'abbr') not in new:
new = get_abbreviated_name(new, self.company)
return new
def on_update(self):
if not frappe.local.flags.ignore_update_nsm:
super(Department, self).on_update()
def on_trash(self):
super(Department, self).on_trash()
delete_events(self.doctype, self.name)
def on_doctype_update():
frappe.db.add_index("Department", ["lft", "rgt"])
def get_abbreviated_name(name, company):
abbr = frappe.get_cached_value('Company', company, 'abbr')
new_name = '{0} - {1}'.format(name, abbr)
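	# e.g. name "Sales" with company abbreviation "ACME" -> "Sales - ACME" (illustrative values)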
return new_name
@frappe.whitelist()
def get_children(doctype, parent=None, company=None, is_root=False):
condition = ''
var_dict = {
"name": get_root_of("Department"),
"parent": parent,
"company": company,
}
if company == parent:
condition = "name=%(name)s"
elif company:
condition = "parent_department=%(parent)s and company=%(company)s"
else:
condition = "parent_department = %(parent)s"
return frappe.db.sql("""
select
name as value,
is_group as expandable
from `tab{doctype}`
where
{condition}
order by name""".format(doctype=doctype, condition=condition), var_dict, as_dict=1)
@frappe.whitelist()
def add_node():
from frappe.desk.treeview import make_tree_args
args = frappe.form_dict
args = make_tree_args(**args)
if args.parent_department == args.company:
args.parent_department = None
frappe.get_doc(args).insert()
| [
"[email protected]"
] | |
212ae839fc4995842e57d2a227c3fc5d77dc51fb | 8a58b02b1dfc97bf56a5fd94732316c032e24a70 | /api/tests.py | d76ab163735695925faa78e7a7a3345bf8ab58bb | [] | no_license | momentum-team-2/example--django-recipebook | ab04d4957268ed8251e84d8a09cfc60a138c9d9f | 4a4e17c396fcc9f4c648cea494c4ae6d5dc5e570 | refs/heads/main | 2022-11-28T13:40:13.301591 | 2020-08-05T14:09:55 | 2020-08-05T14:09:55 | 279,464,956 | 0 | 0 | null | 2023-09-04T18:58:14 | 2020-07-14T02:50:58 | Python | UTF-8 | Python | false | false | 762 | py | from django.test import TestCase
from rest_framework.test import APIClient
from users.models import User
from rest_framework.authtoken.models import Token
# Create your tests here.
class RecipesAPITestCase(TestCase):
def test_user_is_added_to_recipe_on_creation(self):
user = User.objects.create(username="test")
token = Token.objects.filter(user=user).first()
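        # assumes a post_save signal auto-creates a Token for each new User;
        # otherwise `token` would be None here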
client = APIClient()
client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
response = client.post(
"/api/recipes/",
{"title": "Test Recipe", "ingredients": [], "steps": []},
format="json",
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data["user"], user.username)
| [
"[email protected]"
] | |
718559c2ac4ab854f51d624b912324dcf7fe2be7 | 20b76d0a9a2d31ec929ffcdb082931201b58361f | /homework/2020-09-20/2020-09-20-杨婷婷.py | 46f6234d029e6c75645919444ff24e57147ec43e | [] | no_license | yangtingting123456/interfaceiframe | 3a6ff3f386cb98dcf7849ea3ab52a8ce93c6d306 | 12fc9ec2366f220a5cb1ce51c3a6a9ad7316316e | refs/heads/master | 2023-01-02T00:13:53.878122 | 2020-10-26T06:08:16 | 2020-10-26T06:08:16 | 306,569,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | # 1. Charles: document its three filtering methods with operation screenshots
# See the separate "Charles four data-filtering methods" document
# 2. Use Charles to capture the forum registration and post-creation traffic; screenshots of the captured requests are enough
# 3. With requests, write scripts that obtain an access_token and exercise the add-tag, query-tag and delete-tag interfaces
# Done against a company project: login (get token, MD5-encrypted password) - fetch user list - update user / user detail - logout, etc.
# 4. Use requests to simulate a request to https://www.qq.com, and use the re module to extract
# the content value from <meta name="description" content="(.+?)" />
# import requests
# import re
#
# response = requests.get(url='https://www.qq.com' )
# body = response.content.decode('gbk')
# # print(body)
# content = re.findall(' <meta name="description" content="(.+?)" /> ',body)
# print(content)
import re
import requests
response = requests.get(url='https://www.qq.com')
body = response.content.decode('gbk')
# print(body)
con = re.findall(' name="description" content="(.+?)"',body)
print( con ) | [
"[email protected]"
] | |
8d6dee6211d3b8e0bd8f42cb2ce3ca58cf345e87 | 54bc239124576563c1f0c72e381fb2a4fcaa6a9e | /Adafruit_AD8495_Guide/AD8495_Temperature.py | 4546df8dcb61aa12248110733193b2823c7e335d | [
"MIT"
] | permissive | jonsampson/Adafruit_Learning_System_Guides | 79359154e26e710b088e0c1cbc9969a26a938a25 | b941d8209cec42e3dce5f5e6b533584e3e99ac73 | refs/heads/master | 2020-07-29T17:43:53.439741 | 2019-10-14T01:53:01 | 2019-10-14T01:53:01 | 209,904,940 | 3 | 1 | MIT | 2019-09-21T01:04:35 | 2019-09-21T01:04:34 | null | UTF-8 | Python | false | false | 283 | py | import time
import analogio
import board
ad8495 = analogio.AnalogIn(board.A1)
def get_voltage(pin):
return (pin.value * 3.3) / 65536
while True:
temperature = (get_voltage(ad8495) - 1.25) / 0.005
print(temperature)
print(get_voltage(ad8495))
time.sleep(0.5)
| [
"[email protected]"
] | |
db985281b42e7256f86e97b45e00e71da8cd0b1d | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/psu/instpol.py | 82e180acf3f6c666ea7d10c6bd65a11d575327f0 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 6,877 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class InstPol(Mo):
"""
The power redundancy policy is for all power supply units on the fabric nodes (leaves and spines) that are consuming the power supply policy through their respective selector profile policy.
"""
meta = ClassMeta("cobra.model.psu.InstPol")
meta.moClassName = "psuInstPol"
meta.rnFormat = "psuInstP-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "Power Supply Redundancy Policy"
meta.writeAccessMask = 0x20000000001
meta.readAccessMask = 0x800ae700000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.psu.RtPsuInstPolCons")
meta.childClasses.add("cobra.model.psu.RtResPsuInstPol")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childClasses.add("cobra.model.psu.RtPsuInstPol")
meta.childNamesAndRnPrefix.append(("cobra.model.psu.RtPsuInstPol", "rtfabricPsuInstPol-"))
meta.childNamesAndRnPrefix.append(("cobra.model.psu.RtPsuInstPolCons", "rtpsuInstPolCons"))
meta.childNamesAndRnPrefix.append(("cobra.model.psu.RtResPsuInstPol", "rtresPsuInstPol"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.fabric.Inst")
meta.superClasses.add("cobra.model.fabric.ProtoPol")
meta.superClasses.add("cobra.model.fabric.ProtoInstPol")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.superClasses.add("cobra.model.fabric.UtilInstPol")
meta.rnPrefixes = [
('psuInstP-', True),
]
prop = PropMeta("str", "adminRdnM", "adminRdnM", 765, PropCategory.REGULAR)
prop.label = "Admin Redundancy Mode"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 3
prop.defaultValueStr = "comb"
prop._addConstant("comb", "combined", 3)
prop._addConstant("insrc-rdn", "input-source-redundancy", 6)
prop._addConstant("n-rdn", "non-redundant", 4)
prop._addConstant("not-supp", "not-supported", 1)
prop._addConstant("ps-rdn", "n+1-redundancy", 5)
prop._addConstant("rdn", "n+n-redundancy", 2)
prop._addConstant("sinin-rdn", "single-input-redundancy", 7)
prop._addConstant("unknown", "unknown", 0)
meta.props.add("adminRdnM", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 7080, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
meta.namingProps.append(getattr(meta.props, "name"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Policy"
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
ea44472cf613f7a505cdbd709dcbf6b69628ed94 | 35d42fa466f6457c83f9e89b6e87e050c0189bf2 | /news/urls.py | 8cd7b45c9fe79d8f08621a003fef854c096236ef | [] | no_license | Burence1/The-Moringa-Tribune | 4c0473f50f84f0f6563369b805d7b00bf8aa43ec | b035a082580eb1e8841e504c87f56392f85ae43e | refs/heads/main | 2023-05-12T11:17:26.898628 | 2021-05-27T13:48:13 | 2021-05-27T13:48:13 | 365,954,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | from django.conf import settings
from django.conf.urls.static import static
from django.urls import path,re_path
from . import views
urlpatterns = [
path('',views.news_today,name='newsToday'),
    re_path(r'archives/(\d{4}-\d{2}-\d{2})/', views.past_days_news, name='pastNews'),
path('search/',views.search_results,name='search_results'),
    re_path(r'article/(\d+)', views.article, name='article'),  # path() treats '(\d+)' literally, so a regex route needs re_path
path('new-article',views.new_article, name='new-article'),
path('ajax/newsletter/', views.newsletter, name='newsletter'),
path('api/merch/merch-id/<int:pk>/',views.MerchDescription.as_view())
# path('api/merch/', views.MerchList.as_view()),
# re_path('api/merch/merch-id/(?P<pk>[0-9]+)/',
# views.MerchDescription.as_view())
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
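# Hedged aside (not part of the original file): relative to wherever this
# URLconf is included, /archives/2021-01-01/ resolves to past_days_news and
# /article/42 to the article view via the regex patterns above.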
| [
"[email protected]"
] | |
b7e4e280e4c4ea18117163135448ed4e9f3b14b8 | 19be48da7eb090f31fd88b1cef9c8ef3a6aaa0eb | /funcion23.py | c746bc134e246f9f9e9ecf9b80faae8d064e47c1 | [] | no_license | smith-sanchez/t09_Carrion_Villavicencio | 376608d60dd175d872f2622b38ff220b6160ff9a | 4cbb0e0694b35fd7135748bc7ef13db7c7374390 | refs/heads/master | 2020-11-27T05:08:14.629793 | 2019-12-20T18:36:00 | 2019-12-20T18:36:00 | 229,316,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | # funcion 23
# salary of a chef
import libreria
import os
#import os
dia=int(os.sys.argv[1])
precio_dia=float(os.sys.argv[2])
# import libreia
salario_total=libreria.salario(dia,precio_dia)
print(" el salario es:",salario_total) | [
"[email protected]"
] | |
3cdf8011b618b07498f42f587746389db19ab840 | e7964338707afba0228866a33f954a974fcc693b | /code/linreg/boston3d_loss.py | 93704fc901be370ade12eb00fcf6b4701c31b2e4 | [
"MIT"
] | permissive | anawatbk/msds621 | f96346ddc4fd47d7b9c3a40e2632da7a39aaf2e0 | 869a309e235359119f30477c7a57763e222197e5 | refs/heads/master | 2023-03-25T10:20:02.072200 | 2021-03-10T09:39:33 | 2021-03-10T09:39:33 | 333,196,889 | 0 | 0 | MIT | 2021-03-10T09:39:34 | 2021-01-26T19:41:04 | Jupyter Notebook | UTF-8 | Python | false | false | 2,523 | py | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from mpl_toolkits.mplot3d import Axes3D # required even though not ref'd!
from matplotlib import rcParams
import matplotlib as mpl
from sklearn.linear_model import LinearRegression, Ridge, Lasso, LogisticRegression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.datasets import load_boston, load_iris, load_wine, load_digits, \
load_breast_cancer, load_diabetes
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, precision_score, recall_score
import glob
import os
from PIL import Image as PIL_Image
# STOPPED WORK IN PROGRESS
def loss(B, X, y):
"Line coefficients: B = [y-intercept, slope]"
return np.mean(y - np.dot(X, np.array(B))) ** 2
def get_surface(X, y, loss, b0_range, b1_range):
n = len(X)
B0 = np.ones(shape=(n, 1))
X = np.hstack([np.ones(shape=(n, 1)), X]) # add ones column
(b0_mesh, b1_mesh) = np.meshgrid(b0_range, b1_range, indexing='ij')
L = np.zeros(b0_mesh.shape)
for i in range(len(b0_range)):
for j in range(len(b1_range)):
L[i][j] = loss([b0_range[i], b1_range[j]], X=X, y=y)
return L
def plot3d(L, b0_range, b1_range, ax, elev=50, azim=145):
rcParams["font.size"] = 10
ax.view_init(elev, azim)
b0_range_mesh, b1_range_mesh = np.meshgrid(b0_range, b1_range, indexing='ij')
surface = ax.plot_surface(b0_range_mesh, b1_range_mesh, L, alpha=0.7, cmap='coolwarm')
# plt.title("""$loss(\\beta) = \sum_{i=1}^{N}(y^{{(i)}} - (\\beta_0 + \\beta_1 x^{{(i)}}))^2$""", fontsize=12)
ax.set_xlabel('$\\beta_0$', fontsize=14)
ax.set_ylabel('$\\beta_1$', fontsize=14)
ax.zaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:.0f}'))
boston = load_boston()
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['MEDV'] = boston.target
print(df.head(3))
X = df.drop('MEDV', axis=1)
y = df['MEDV']
lm = LinearRegression()
lm.fit(X, y)
true_b0 = lm.intercept_
coeff = lm.coef_
print(f"True beta = {true_b0:.2f}, {coeff}")
b0_range = np.arange(-3030, -2900, .1) # y intercept
b1_range = np.arange(105, 120, .05) # slope
L = get_surface(X['LSTAT'], y, loss, b0_range=b0_range, b1_range=b1_range)
fig = plt.figure(figsize=(8, 7))
ax = fig.add_subplot(111, projection='3d')
plot3d(L, b0_range=b0_range, b1_range=b1_range, ax=ax, elev=25, azim=110)
#Theax.plot([true_b0], [true_b1], marker='x', markersize=10, color='black')
plt.show()
| [
"[email protected]"
] | |
bfb4f12275d4630557cbb7716232b552fb2bc121 | ba1e90ae6ea9f8f74d9b542e159825341c717712 | /2014/w33.py | e5aa36b9425bc3b95b355755a29e3a5445ba785d | [] | no_license | sailesh2/CompetitiveCode | b384687a7caa8980ab9b9c9deef2488b0bfe9cd9 | 5671dac08216f4ce75d5992e6af8208fa2324d12 | refs/heads/master | 2021-06-24T22:39:11.396049 | 2020-11-27T05:22:17 | 2020-11-27T05:22:17 | 161,877,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | n=input()
ar=[0]*1000001
i=0
while i<n:
k=input()
x=raw_input().split(' ')
j=0
while j<k:
ar[int(x[j])]=1
j=j+1
i=i+1
i=1
while i<=1000001:
if ar[i]==1:
print i,
i=i+1
| [
"[email protected]"
] | |
7d6ab9147f7e2b8536e088e2f9369d2f7f13d547 | 4a36849188747a1e3cc4b052eb6bc3a21e3e53bb | /POJ/3061.Subsequence/3061.Subsequence.py | e877888939ef6ca21888b36bf9aeb5ccaf105122 | [] | no_license | koking0/Algorithm | 88f69a26f424d1b60a8440c09dd51c8563a86309 | 2828811ae2f905865b4f391672693375c124c185 | refs/heads/master | 2022-07-06T17:10:07.440930 | 2022-06-24T14:59:40 | 2022-06-24T14:59:40 | 216,952,717 | 35 | 48 | null | 2020-07-21T02:46:26 | 2019-10-23T02:41:09 | Java | UTF-8 | Python | false | false | 855 | py | #!/usr/bin/env python
# -*- coding: utf-H -*-
# @Time : 2020/1/28 16:27
# @File : 3061.Subsequence.py
# ----------------------------------------------
# ☆ ☆ ☆ ☆ ☆ ☆ ☆
# >>> Author : Alex
# >>> QQ : 2426671397
# >>> Mail : [email protected]
# >>> Github : https://github.com/koking0
# ☆ ☆ ☆ ☆ ☆ ☆ ☆
import sys
while True:
try:
length, target = map(int, input().split())
sequence = list(map(int, input().split()))
left, sum_num, ans = 0, 0, sys.maxsize
for right in range(length):
sum_num += sequence[right]
while sum_num > target:
ans = min(right - left + 1, ans)
sum_num -= sequence[left]
left += 1
print(ans if ans != sys.maxsize else 0)
except EOFError:
break
| [
"[email protected]"
] | |
a976c9d14e1dee06b2ff83170340b7db50d36e35 | f0cdda3cf2817bcf991a14cf46e38c353e6872a6 | /src/epuck2_gazebo/scripts/epuck2_control_codes/epuck_pid_controller.py | 83a881db1ca46ec151e0f02e6df04aef77f70ca8 | [] | no_license | vinits5/gym-vinit | efc1b5312674840333eea4fb3912aa579c295f5f | 3ebd79ee94a51c12a6b64fe743ebc742f8d5e63d | refs/heads/master | 2020-03-22T00:55:19.272167 | 2018-06-30T19:00:12 | 2018-06-30T19:00:12 | 138,631,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,760 | py | #! /usr/bin/python
import rospy
import math
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import matplotlib.pyplot as plt
import numpy as np
import tf
from tf.transformations import euler_from_quaternion
from std_srvs.srv import Empty
import time
velocity_publisher = rospy.Publisher('epuck2/cmd_vel', Twist, queue_size=10)
class epuck():
def __init__(self):
rospy.init_node('epuck_controller', anonymous=True)
self.velocity_publisher = rospy.Publisher('epuck2/cmd_vel', Twist, queue_size=10)
self.pose_subscriber = rospy.Subscriber('/epuck2/odom_diffdrive', Odometry, self.callback)
self.rate = rospy.Rate(10)
def callback(self,data):
self.x = data.pose.pose.position.x
self.y = data.pose.pose.position.y
q0 = data.pose.pose.orientation.x
q1 = data.pose.pose.orientation.y
q2 = data.pose.pose.orientation.z
q3 = data.pose.pose.orientation.w
quaternion = (q0,q1,q2,q3)
self.euler = euler_from_quaternion(quaternion)
def orientation(self,angle):
angle = angle*(180.0/math.pi)
if angle >= -90:
angle = 90 - angle
else:
angle = - angle - 270
return angle
def motion(self,xg,yg):
loop = True
#PID Parameters
Kp = 1 #Proportional constant
Ki = 0.075 #Integral constant
Kd = 0 #Differential constant
E = 0 #Difference of errors
I = 0 #Sum of all errors
ai = 0 #Previous orientation of robot
ei = 0 #Previous error in orientation of robot
goal = True #True if goal not reached & False if reached
#Path points:
path_x = []
path_y = []
#PID loop
while goal:
yi = self.y #Current y position
xi = self.x #Current x position
path_x.append(xi)
path_y.append(yi)
#Error Calculations
ad = math.atan2(yg-yi,xg-xi) #Angle from curent position to Goal
e = ad - ai #Error in current and previous orientations
e = math.atan2(math.sin(e),math.cos(e)) #Error converted in range -90 to 90
#PID control
E = e - ei #Difference of previous and current error
I = I + e #Sum of all erros
w = Kp*e + Ki*I + Kd*E #Calculation of angular velocity
#Command Velocities to robot
vel = Twist() #Velocity object
if e >= 0: #Check for left or right turn
w = -w #For left: -w & for right: w
vel.angular.z = w
vel.linear.x = 0.05
velocity_publisher.publish(vel)
#Loop running at 10Hz frequency.
self.rate.sleep()
#New positions
yn = self.y #New y position
xn = self.x #New x position
ai = math.atan2(yn-yi,xn-xi) #New orientation from goal
ai = math.atan2(math.sin(ai),math.cos(ai)) #New orientation in range -90 to 90
#Check the goal condition
if ((xn-xg)*(xn-xg)+(yn-yg)*(yn-yg)-0.01*0.05)<0:
print('Goal Reached!')
vel.angular.z = 0
vel.linear.x = 0
velocity_publisher.publish(vel)
goal = False
return(path_x,path_y)
def circular_motion(self):
path_X = []
path_Y = []
y = [0,0.2,0.4,0.6,0.8,1.0]
x2 = []
for i in y:
x3 = 0.25-(i-0.5)*(i-0.5)
x2.append(x3)
x = [math.sqrt(i) for i in x2]
xf = []
yf = []
[xf.append(i) for i in x]
[yf.append(i) for i in y]
y.reverse()
[yf.append(i) for i in y]
x.reverse()
[xf.append(-i) for i in x]
for i in range(len(xf)):
path_x,path_y = self.motion(xf[i],yf[i])
path_X.append(path_x)
path_Y.append(path_y)
return (path_X,path_Y)
if __name__ == '__main__':
try:
X = epuck()
#xg = input('Enter xg: ')
#yg = input('Enter yg: ')
#path_x,path_y = X.motion(xg,yg)
x = input('Enter anything to start: ')
#reset_world = rospy.ServiceProxy('/gazebo/reset_world',Empty)
path_X,path_Y = X.circular_motion()
xx = []
yy = []
for i in path_X:
for j in i:
xx.append(j)
for i in path_Y:
for j in i:
yy.append(j)
plt.plot(xx,yy)
plt.show()
#reset_world()
except rospy.ROSInterruptException:
pass
| [
"[email protected]"
] | |
774d66cb1470234449465f0188cd76c1b7dd3b9f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_gigabits.py | 5b528902501ccb6eb2a2803116afd4524cf7a3d7 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._gigabit import _GIGABIT
#calss header
class _GIGABITS(_GIGABIT, ):
def __init__(self,):
_GIGABIT.__init__(self)
self.name = "GIGABITS"
self.specie = 'nouns'
self.basic = "gigabit"
self.jsondata = {}
| [
"[email protected]"
] | |
9c2aa150b9b7abbab3bc15bcc19cbffd2f73bcfe | 38c10c01007624cd2056884f25e0d6ab85442194 | /third_party/deqp/src/scripts/khr_util/registry.py | 58dab52c64d273336ed333499b874f040502eb67 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 11,636 | py | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import sys, logging, re
from lxml import etree
from collections import OrderedDict
from functools import wraps, partial
log = logging.getLogger(__name__)
debug = log.debug
info = log.info
warning = log.warning
def warnElem(elem, fmt, *args):
warning('%s:%d, %s %s: ' + fmt, elem.base, elem.sourceline, elem.tag, elem.get('name') or '', *args)
class Object(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Located(Object):
location = None
class Group(Located): pass
class Enum(Located): pass
class Enums(Located):
name = None
comment = None
enums = None
class Type(Located):
location = None
name=None
definition=None
api=None
requires=None
def makeObject(cls, elem, **kwargs):
kwargs.setdefault('name', elem.get('name'))
kwargs.setdefault('comment', elem.get('comment'))
kwargs['location'] = (elem.base, elem.sourceline)
return cls(**kwargs)
def parseEnum(eEnum):
return makeObject(
Enum, eEnum,
value=eEnum.get('value'),
type=eEnum.get('type'),
alias=eEnum.get('alias'))
class Param(Located): pass
class Command(Located):
name=None
declaration=None
type=None
ptype=None
group=None
params=None
alias=None
class Interface(Object): pass
class Index:
def __init__(self, items=[], **kwargs):
self.index = {}
self.items = []
self.__dict__.update(kwargs)
self.update(items)
def append(self, item):
keys = self.getkeys(item)
for key in keys:
self[key] = item
self.items.append(item)
def update(self, items):
for item in items:
self.append(item)
def __iter__(self):
return iter(self.items)
def nextkey(self, key):
raise KeyError
def getkeys(self, item):
return []
def __contains__(self, key):
return key in self.index
def __setitem__(self, key, item):
if key in self.index:
self.duplicateKey(key, item)
else:
self.index[key] = item
def duplicateKey(self, key, item):
warning("Duplicate %s: %r", type(item).__name__.lower(), key)
def __getitem__(self, key):
try:
while True:
try:
return self.index[key]
except KeyError:
pass
key = self.nextkey(key)
except KeyError:
item = self.missingKey(key)
self.append(item)
return item
def missingKey(self, key):
raise KeyError(key)
def __len__(self):
return len(self.items)
class ElemNameIndex(Index):
def getkeys(self, item):
return [item.get('name')]
def duplicateKey(self, key, item):
warnElem(item, "Duplicate key: %s", key)
class CommandIndex(Index):
def getkeys(self, item):
return [item.findtext('proto/name'), item.findtext('alias')]
class NameApiIndex(Index):
def getkeys(self, item):
return [(item.get('name'), item.get('api'))]
def nextkey(self, key):
if len(key) == 2 and key[1] is not None:
return key[0], None
raise KeyError
def duplicateKey(self, key, item):
warnElem(item, "Duplicate key: %s", key)
class TypeIndex(NameApiIndex):
def getkeys(self, item):
return [(item.get('name') or item.findtext('name'), item.get('api'))]
class EnumIndex(NameApiIndex):
def getkeys(self, item):
name, api, alias = (item.get(attrib) for attrib in ['name', 'api', 'alias'])
return [(name, api)] + ([(alias, api)] if alias is not None else [])
def duplicateKey(self, (name, api), item):
if name == item.get('alias'):
warnElem(item, "Alias already present: %s", name)
else:
warnElem(item, "Already present")
class Registry:
def __init__(self, eRegistry):
self.types = TypeIndex(eRegistry.findall('types/type'))
self.groups = ElemNameIndex(eRegistry.findall('groups/group'))
self.enums = EnumIndex(eRegistry.findall('enums/enum'))
for eEnum in self.enums:
groupName = eEnum.get('group')
if groupName is not None:
self.groups[groupName] = eEnum
self.commands = CommandIndex(eRegistry.findall('commands/command'))
self.features = ElemNameIndex(eRegistry.findall('feature'))
self.apis = {}
for eFeature in self.features:
self.apis.setdefault(eFeature.get('api'), []).append(eFeature)
for apiFeatures in self.apis.itervalues():
apiFeatures.sort(key=lambda eFeature: eFeature.get('number'))
self.extensions = ElemNameIndex(eRegistry.findall('extensions/extension'))
self.element = eRegistry
def getFeatures(self, api, checkVersion=None):
return [eFeature for eFeature in self.apis[api]
if checkVersion is None or checkVersion(eFeature.get('number'))]
class NameIndex(Index):
createMissing = None
kind = "item"
def getkeys(self, item):
return [item.name]
def missingKey(self, key):
if self.createMissing:
warning("Reference to implicit %s: %r", self.kind, key)
return self.createMissing(name=key)
else:
raise KeyError
def matchApi(api1, api2):
return api1 is None or api2 is None or api1 == api2
class Interface(Object):
pass
def extractAlias(eCommand):
aliases = eCommand.xpath('alias/@name')
return aliases[0] if aliases else None
def getExtensionName(eExtension):
return eExtension.get('name')
def extensionSupports(eExtension, api, profile=None):
if api == 'gl' and profile == 'core':
needSupport = 'glcore'
else:
needSupport = api
supporteds = eExtension.get('supported').split('|')
return needSupport in supporteds
class InterfaceSpec(Object):
def __init__(self):
self.enums = set()
self.types = set()
self.commands = set()
def addComponent(self, eComponent):
if eComponent.tag == 'require':
def modify(items, item): items.add(item)
else:
assert eComponent.tag == 'remove'
def modify(items, item):
try:
items.remove(item)
except KeyError:
warning("Tried to remove absent item: %s", item)
for typeName in eComponent.xpath('type/@name'):
modify(self.types, typeName)
for enumName in eComponent.xpath('enum/@name'):
modify(self.enums, enumName)
for commandName in eComponent.xpath('command/@name'):
modify(self.commands, commandName)
def addComponents(self, elem, api, profile=None):
for eComponent in elem.xpath('require|remove'):
cApi = eComponent.get('api')
cProfile = eComponent.get('profile')
if (matchApi(api, eComponent.get('api')) and
matchApi(profile, eComponent.get('profile'))):
self.addComponent(eComponent)
def addFeature(self, eFeature, api=None, profile=None, force=False):
info('Feature %s', eFeature.get('name'))
if not matchApi(api, eFeature.get('api')):
if not force: return
warnElem(eFeature, 'API %s is not supported', api)
self.addComponents(eFeature, api, profile)
def addExtension(self, eExtension, api=None, profile=None, force=False):
if not extensionSupports(eExtension, api, profile):
if not force: return
warnElem(eExtension, '%s is not supported in API %s' % (getExtensionName(eExtension), api))
self.addComponents(eExtension, api, profile)
def createInterface(registry, spec, api=None):
def parseType(eType):
# todo: apientry
#requires = eType.get('requires')
#if requires is not None:
# types[requires]
return makeObject(
Type, eType,
name=eType.get('name') or eType.findtext('name'),
definition=''.join(eType.xpath('.//text()')),
api=eType.get('api'),
requires=eType.get('requires'))
def createType(name):
info('Add type %s', name)
try:
return parseType(registry.types[name, api])
except KeyError:
return Type(name=name)
def createEnum(enumName):
info('Add enum %s', enumName)
return parseEnum(registry.enums[enumName, api])
def extractPtype(elem):
ePtype = elem.find('ptype')
if ePtype is None:
return None
return types[ePtype.text]
def extractGroup(elem):
groupName = elem.get('group')
if groupName is None:
return None
return groups[groupName]
def parseParam(eParam):
return makeObject(
Param, eParam,
name=eParam.get('name') or eParam.findtext('name'),
declaration=''.join(eParam.xpath('.//text()')).strip(),
type=''.join(eParam.xpath('(.|ptype)/text()')).strip(),
ptype=extractPtype(eParam),
group=extractGroup(eParam))
def createCommand(commandName):
info('Add command %s', commandName)
eCmd = registry.commands[commandName]
eProto = eCmd.find('proto')
return makeObject(
Command, eCmd,
name=eCmd.findtext('proto/name'),
declaration=''.join(eProto.xpath('.//text()')).strip(),
type=''.join(eProto.xpath('(.|ptype)/text()')).strip(),
ptype=extractPtype(eProto),
group=extractGroup(eProto),
alias=extractAlias(eCmd),
params=NameIndex(map(parseParam, eCmd.findall('param'))))
def createGroup(name):
info('Add group %s', name)
try:
eGroup = registry.groups[name]
except KeyError:
return Group(name=name)
return makeObject(
Group, eGroup,
# Missing enums are often from exotic extensions. Don't create dummy entries,
# just filter them out.
enums=NameIndex(enums[name] for name in eGroup.xpath('enum/@name')
if name in enums))
def sortedIndex(items):
return NameIndex(sorted(items, key=lambda item: item.location))
groups = NameIndex(createMissing=createGroup, kind="group")
types = NameIndex(map(createType, spec.types),
createMissing=createType, kind="type")
enums = NameIndex(map(createEnum, spec.enums),
createMissing=Enum, kind="enum")
commands = NameIndex(map(createCommand, spec.commands),
createMissing=Command, kind="command")
# This is a mess because the registry contains alias chains whose
# midpoints might not be included in the interface even though
# endpoints are.
for command in commands:
alias = command.alias
aliasCommand = None
while alias is not None:
aliasCommand = registry.commands[alias]
alias = extractAlias(aliasCommand)
command.alias = None
if aliasCommand is not None:
name = aliasCommand.findtext('proto/name')
if name in commands:
command.alias = commands[name]
return Interface(
types=sortedIndex(types),
enums=sortedIndex(enums),
groups=sortedIndex(groups),
commands=sortedIndex(commands))
def spec(registry, api, version=None, profile=None, extensionNames=[], protects=[], force=False):
available = set(protects)
spec = InterfaceSpec()
if version is None or version is False:
def check(v): return False
elif version is True:
def check(v): return True
else:
def check(v): return v <= version
for eFeature in registry.getFeatures(api, check):
spec.addFeature(eFeature, api, profile, force)
for extName in extensionNames:
eExtension = registry.extensions[extName]
protect = eExtension.get('protect')
if protect is not None and protect not in available:
warnElem(eExtension, "Unavailable dependency %s", protect)
if not force:
continue
spec.addExtension(eExtension, api, profile, force)
available.add(extName)
return spec
def interface(registry, api, **kwargs):
s = spec(registry, api, **kwargs)
return createInterface(registry, s, api)
def parse(path):
return Registry(etree.parse(path))
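# Hedged usage sketch (not part of the upstream script); the XML path and API
# name below are assumptions:
#   registry = parse('gl.xml')
#   iface = interface(registry, 'gles2', version='2.0')
#   print(len(iface.commands), 'commands')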
| [
"[email protected]"
] | |
ea2f3fd552459d85a170b03d4f5e904f7c191349 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04000/s750895351.py | 7378de763ab9a50cfa785771268623df0b68e5e7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | import sys
input = sys.stdin.buffer.readline
from collections import defaultdict
def main():
H,W,N = map(int,input().split())
d = defaultdict(int)
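    # d maps each 3x3 window, keyed by its top-left cell flattened as row*W+col,
    # to the number of painted cells inside it; untouched windows stay absent.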
for i in range(N):
a,b = map(int,input().split())
a -= 1
b -= 1
for x in range(3):
for y in range(3):
na,nb = a-x,b-y
if (0 <= na < H-2 and 0 <= nb < W-2):
d[na*W+nb] += 1
d = list(d.values())
ans = (H-2)*(W-2)-len(d)
print(ans)
    for i in range(1, 10):
        print(d.count(i))
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
3357a70b2ecd4f3212b41073723613285ecef369 | fd540b341a290a37c3a3a7e8fbffda93bfed2864 | /cloudops/devops/cijenkins/apps.py | 5fe331e54a5b8fb5ea9eb285f051e1e8d35867ab | [] | no_license | bopopescu/devops | 69d88b8209d744d4e722b482d1a7b1a5e95b0850 | 6a03805fc805f7604273e92f19f7fdea953451c2 | refs/heads/master | 2022-11-28T17:58:42.923890 | 2018-11-16T09:49:07 | 2018-11-16T09:49:07 | 281,549,091 | 0 | 0 | null | 2020-07-22T02:01:07 | 2020-07-22T02:01:06 | null | UTF-8 | Python | false | false | 98 | py | from django.apps import AppConfig
class CijenkinsConfig(AppConfig):
name = 'cijenkins'
| [
"[email protected]"
] | |
6c39c671da8ea030b974588fc017b2bac50a4db6 | feeeab5dc580786a35dbddcb99ddab85bc893668 | /managers/cc_help.py | 208cebfbd552ce1485297a7e2ef7c4c00e44949c | [] | no_license | idelfrides/POC_test_creditCard_type | 54dd3c5de02547802074e2acf50295463e92f17d | 10792ac8f3393a6e3d621d24a43eb794ec241a02 | refs/heads/master | 2020-08-08T01:58:54.997806 | 2019-10-08T14:43:23 | 2019-10-08T14:43:23 | 213,668,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py |
# from .help import get_digits
import re
from .help import get_digits
# there are codes --> code
CC_TYPE_GENERIC = 0
CC_TYPE_VISA = 1
CC_TYPE_AMEX = 2
CC_TYPE_DINERS = 3
CC_TYPE_DISCOVER = 4
CC_TYPE_MASTERCARD = 5
CC_TYPE_ELO = 6
CC_TYPE_JCB = 7
CC_TYPE_MIR = 8
CC_TYPE_UNIONPAY = 9
CC_TYPES = (
(CC_TYPE_ELO, {
'title': 'Elo',
'regex': re.compile(r'^(?:431274|451416|5067|5090|627780|636297)')
}),
(CC_TYPE_VISA, {
'title': 'Visa',
'regex': re.compile(r'^4')
}),
(CC_TYPE_AMEX, {
'title': 'American Express',
'regex': re.compile(r'^3[47]')
}),
(CC_TYPE_DINERS, {
'title': 'Diners Club',
'regex': re.compile(r'^3(?:0[0-5]|095|[689])')
}),
(CC_TYPE_DISCOVER, {
'title': 'Discover Card',
'regex': re.compile(r'^6(?:011|4[4-9]|5)')
}),
(CC_TYPE_JCB, {
'title': 'JCB',
'regex': re.compile(r'^35(?:2[89]|[3-8])')
}),
(CC_TYPE_MIR, {
'title': 'MIR',
'regex': re.compile(r'^220[0-4]')
}),
(CC_TYPE_UNIONPAY, {
'title': 'UnionPay',
'regex': re.compile(r'^62')
}),
(CC_TYPE_MASTERCARD, {
'title': 'MasterCard',
'regex': re.compile(r'^(?:5[1-5]|222[1-9]|22[3-9]|2[3-6]|27[01]|2720)')
}),
)
CC_TYPE_CHOICES = (
(CC_TYPE_GENERIC, 'Generic'),
(CC_TYPE_VISA, 'Visa'),
(CC_TYPE_AMEX, 'American Express'),
(CC_TYPE_DINERS, 'Diners Club'),
(CC_TYPE_DISCOVER, 'Discover Card'),
(CC_TYPE_MASTERCARD, 'MasterCard'),
(CC_TYPE_ELO, 'Elo'),
(CC_TYPE_JCB, 'JCB'),
(CC_TYPE_MIR, 'MIR'),
(CC_TYPE_UNIONPAY, 'UnionPay'),
)
def get_type(number):
"""
Gets credit card type given number.
:type number: str
:rtype: int
"""
number = get_digits(number)
for code, record in CC_TYPES:
if re.match(record['regex'], number):
return code
return CC_TYPE_GENERIC
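# Hedged usage sketch (not part of the original module), assuming get_digits()
# merely strips non-digit characters:
#   get_type('4111 1111 1111 1111')  # -> CC_TYPE_VISA (matches ^4 once Elo fails)
#   get_type('5500-0000-0000-0004')  # -> CC_TYPE_MASTERCARD
#   get_type('340000000000009')      # -> CC_TYPE_AMEX (^3[47])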
| [
"[email protected]"
] | |
b603e746dc5f758e8ad5e6b8160c2676e856d555 | 6fbd56a12f8675c8ee6dd9ad23101a9c02d34387 | /setup.py | 9ee9310affb4d9f8071f556091f427c1ae42963a | [
"MIT"
] | permissive | matthiasdebernardini/topology | aa666940786dfdbc1fe1f732b73365d1eb596893 | 5cb7cb1e9a602874e7a325f95e50dfe110ca8efb | refs/heads/main | 2023-02-14T18:54:40.751005 | 2021-01-05T09:29:01 | 2021-01-05T09:29:01 | 328,508,598 | 0 | 0 | MIT | 2021-01-11T00:26:57 | 2021-01-11T00:26:56 | null | UTF-8 | Python | false | false | 846 | py | from setuptools import setup
import io
with io.open('README.org', encoding='utf-8') as f:
long_description = f.read()
with io.open('requirements.txt', encoding='utf-8') as f:
requirements = [r for r in f.read().split('\n') if len(r)]
setup(name='lntopo',
version='0.1.0',
description='Tools to work with lnresearch/topology datasets',
long_description=long_description,
long_description_content_type='text/x-org',
url='http://github.com/lnresearch/topology',
author='Christian Decker',
author_email='[email protected]',
license='MIT',
packages=[],
package_data={},
scripts=[],
zip_safe=True,
entry_points = {
'console_scripts': [
'lntopo-cli = cli.__main__:cli',
],
},
install_requires=requirements
)
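# Hedged note (not from upstream): after `pip install .`, the console_scripts
# entry above exposes an `lntopo-cli` executable dispatching to cli/__main__.py:cli.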
| [
"[email protected]"
] | |
2158e8067cd9d63acebc081e566af22f4a3499f8 | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/network/v20191101/connection_monitor.py | bfaf57c8f178117cd1274495f9a585d412064692 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,417 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ConnectionMonitorArgs', 'ConnectionMonitor']
@pulumi.input_type
class ConnectionMonitorArgs:
def __init__(__self__, *,
network_watcher_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
auto_start: Optional[pulumi.Input[bool]] = None,
connection_monitor_name: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input['ConnectionMonitorDestinationArgs']] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorEndpointArgs']]]] = None,
location: Optional[pulumi.Input[str]] = None,
monitoring_interval_in_seconds: Optional[pulumi.Input[int]] = None,
notes: Optional[pulumi.Input[str]] = None,
outputs: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorOutputArgs']]]] = None,
source: Optional[pulumi.Input['ConnectionMonitorSourceArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
test_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestConfigurationArgs']]]] = None,
test_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestGroupArgs']]]] = None):
"""
The set of arguments for constructing a ConnectionMonitor resource.
:param pulumi.Input[str] network_watcher_name: The name of the Network Watcher resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group containing Network Watcher.
:param pulumi.Input[bool] auto_start: Determines if the connection monitor will start automatically once created.
:param pulumi.Input[str] connection_monitor_name: The name of the connection monitor.
:param pulumi.Input['ConnectionMonitorDestinationArgs'] destination: Describes the destination of connection monitor.
:param pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorEndpointArgs']]] endpoints: List of connection monitor endpoints.
:param pulumi.Input[str] location: Connection monitor location.
:param pulumi.Input[int] monitoring_interval_in_seconds: Monitoring interval in seconds.
:param pulumi.Input[str] notes: Optional notes to be associated with the connection monitor.
:param pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorOutputArgs']]] outputs: List of connection monitor outputs.
:param pulumi.Input['ConnectionMonitorSourceArgs'] source: Describes the source of connection monitor.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Connection monitor tags.
:param pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestConfigurationArgs']]] test_configurations: List of connection monitor test configurations.
:param pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestGroupArgs']]] test_groups: List of connection monitor test groups.
"""
pulumi.set(__self__, "network_watcher_name", network_watcher_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if auto_start is None:
auto_start = True
if auto_start is not None:
pulumi.set(__self__, "auto_start", auto_start)
if connection_monitor_name is not None:
pulumi.set(__self__, "connection_monitor_name", connection_monitor_name)
if destination is not None:
pulumi.set(__self__, "destination", destination)
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if location is not None:
pulumi.set(__self__, "location", location)
if monitoring_interval_in_seconds is None:
monitoring_interval_in_seconds = 60
if monitoring_interval_in_seconds is not None:
pulumi.set(__self__, "monitoring_interval_in_seconds", monitoring_interval_in_seconds)
if notes is not None:
pulumi.set(__self__, "notes", notes)
if outputs is not None:
pulumi.set(__self__, "outputs", outputs)
if source is not None:
pulumi.set(__self__, "source", source)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if test_configurations is not None:
pulumi.set(__self__, "test_configurations", test_configurations)
if test_groups is not None:
pulumi.set(__self__, "test_groups", test_groups)
@property
@pulumi.getter(name="networkWatcherName")
def network_watcher_name(self) -> pulumi.Input[str]:
"""
The name of the Network Watcher resource.
"""
return pulumi.get(self, "network_watcher_name")
@network_watcher_name.setter
def network_watcher_name(self, value: pulumi.Input[str]):
pulumi.set(self, "network_watcher_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group containing Network Watcher.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="autoStart")
def auto_start(self) -> Optional[pulumi.Input[bool]]:
"""
Determines if the connection monitor will start automatically once created.
"""
return pulumi.get(self, "auto_start")
@auto_start.setter
def auto_start(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_start", value)
@property
@pulumi.getter(name="connectionMonitorName")
def connection_monitor_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the connection monitor.
"""
return pulumi.get(self, "connection_monitor_name")
@connection_monitor_name.setter
def connection_monitor_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "connection_monitor_name", value)
@property
@pulumi.getter
def destination(self) -> Optional[pulumi.Input['ConnectionMonitorDestinationArgs']]:
"""
Describes the destination of connection monitor.
"""
return pulumi.get(self, "destination")
@destination.setter
def destination(self, value: Optional[pulumi.Input['ConnectionMonitorDestinationArgs']]):
pulumi.set(self, "destination", value)
@property
@pulumi.getter
def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorEndpointArgs']]]]:
"""
List of connection monitor endpoints.
"""
return pulumi.get(self, "endpoints")
@endpoints.setter
def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorEndpointArgs']]]]):
pulumi.set(self, "endpoints", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Connection monitor location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="monitoringIntervalInSeconds")
def monitoring_interval_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Monitoring interval in seconds.
"""
return pulumi.get(self, "monitoring_interval_in_seconds")
@monitoring_interval_in_seconds.setter
def monitoring_interval_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "monitoring_interval_in_seconds", value)
@property
@pulumi.getter
def notes(self) -> Optional[pulumi.Input[str]]:
"""
Optional notes to be associated with the connection monitor.
"""
return pulumi.get(self, "notes")
@notes.setter
def notes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notes", value)
@property
@pulumi.getter
def outputs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorOutputArgs']]]]:
"""
List of connection monitor outputs.
"""
return pulumi.get(self, "outputs")
@outputs.setter
def outputs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorOutputArgs']]]]):
pulumi.set(self, "outputs", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input['ConnectionMonitorSourceArgs']]:
"""
Describes the source of connection monitor.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input['ConnectionMonitorSourceArgs']]):
pulumi.set(self, "source", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Connection monitor tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="testConfigurations")
def test_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestConfigurationArgs']]]]:
"""
List of connection monitor test configurations.
"""
return pulumi.get(self, "test_configurations")
@test_configurations.setter
def test_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestConfigurationArgs']]]]):
pulumi.set(self, "test_configurations", value)
@property
@pulumi.getter(name="testGroups")
def test_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestGroupArgs']]]]:
"""
List of connection monitor test groups.
"""
return pulumi.get(self, "test_groups")
@test_groups.setter
def test_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionMonitorTestGroupArgs']]]]):
pulumi.set(self, "test_groups", value)
class ConnectionMonitor(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_start: Optional[pulumi.Input[bool]] = None,
connection_monitor_name: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorDestinationArgs']]] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorEndpointArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
monitoring_interval_in_seconds: Optional[pulumi.Input[int]] = None,
network_watcher_name: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorOutputArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorSourceArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
test_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestConfigurationArgs']]]]] = None,
test_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestGroupArgs']]]]] = None,
__props__=None):
"""
Information about the connection monitor.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_start: Determines if the connection monitor will start automatically once created.
:param pulumi.Input[str] connection_monitor_name: The name of the connection monitor.
:param pulumi.Input[pulumi.InputType['ConnectionMonitorDestinationArgs']] destination: Describes the destination of connection monitor.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorEndpointArgs']]]] endpoints: List of connection monitor endpoints.
:param pulumi.Input[str] location: Connection monitor location.
:param pulumi.Input[int] monitoring_interval_in_seconds: Monitoring interval in seconds.
:param pulumi.Input[str] network_watcher_name: The name of the Network Watcher resource.
:param pulumi.Input[str] notes: Optional notes to be associated with the connection monitor.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorOutputArgs']]]] outputs: List of connection monitor outputs.
:param pulumi.Input[str] resource_group_name: The name of the resource group containing Network Watcher.
:param pulumi.Input[pulumi.InputType['ConnectionMonitorSourceArgs']] source: Describes the source of connection monitor.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Connection monitor tags.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestConfigurationArgs']]]] test_configurations: List of connection monitor test configurations.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestGroupArgs']]]] test_groups: List of connection monitor test groups.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ConnectionMonitorArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Information about the connection monitor.
:param str resource_name: The name of the resource.
:param ConnectionMonitorArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ConnectionMonitorArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_start: Optional[pulumi.Input[bool]] = None,
connection_monitor_name: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorDestinationArgs']]] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorEndpointArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
monitoring_interval_in_seconds: Optional[pulumi.Input[int]] = None,
network_watcher_name: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorOutputArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorSourceArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
test_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestConfigurationArgs']]]]] = None,
test_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ConnectionMonitorTestGroupArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ConnectionMonitorArgs.__new__(ConnectionMonitorArgs)
if auto_start is None:
auto_start = True
__props__.__dict__["auto_start"] = auto_start
__props__.__dict__["connection_monitor_name"] = connection_monitor_name
__props__.__dict__["destination"] = destination
__props__.__dict__["endpoints"] = endpoints
__props__.__dict__["location"] = location
if monitoring_interval_in_seconds is None:
monitoring_interval_in_seconds = 60
__props__.__dict__["monitoring_interval_in_seconds"] = monitoring_interval_in_seconds
if network_watcher_name is None and not opts.urn:
raise TypeError("Missing required property 'network_watcher_name'")
__props__.__dict__["network_watcher_name"] = network_watcher_name
__props__.__dict__["notes"] = notes
__props__.__dict__["outputs"] = outputs
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["source"] = source
__props__.__dict__["tags"] = tags
__props__.__dict__["test_configurations"] = test_configurations
__props__.__dict__["test_groups"] = test_groups
__props__.__dict__["connection_monitor_type"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["monitoring_status"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["start_time"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20191101:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20171001:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20171101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180401:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180601:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180701:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20180801:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20181001:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20181101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20181201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20190201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20190401:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20190601:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20190701:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20190801:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20190901:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20191201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200301:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200401:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200501:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200601:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200701:ConnectionMonitor"), 
pulumi.Alias(type_="azure-nextgen:network/v20200701:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20200801:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20201101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20201101:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20210201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20210201:ConnectionMonitor"), pulumi.Alias(type_="azure-native:network/v20210301:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20210301:ConnectionMonitor")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ConnectionMonitor, __self__).__init__(
'azure-native:network/v20191101:ConnectionMonitor',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ConnectionMonitor':
"""
Get an existing ConnectionMonitor resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ConnectionMonitorArgs.__new__(ConnectionMonitorArgs)
__props__.__dict__["auto_start"] = None
__props__.__dict__["connection_monitor_type"] = None
__props__.__dict__["destination"] = None
__props__.__dict__["endpoints"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["monitoring_interval_in_seconds"] = None
__props__.__dict__["monitoring_status"] = None
__props__.__dict__["name"] = None
__props__.__dict__["notes"] = None
__props__.__dict__["outputs"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["source"] = None
__props__.__dict__["start_time"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["test_configurations"] = None
__props__.__dict__["test_groups"] = None
__props__.__dict__["type"] = None
return ConnectionMonitor(resource_name, opts=opts, __props__=__props__)
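    # Illustrative lookup (the resource ID below is a made-up example, not
    # from this module):
    #   monitor = ConnectionMonitor.get(
    #       "existing-monitor",
    #       id="/subscriptions/<sub>/resourceGroups/<rg>/providers/"
    #          "Microsoft.Network/networkWatchers/<nw>/connectionMonitors/<cm>")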
@property
@pulumi.getter(name="autoStart")
def auto_start(self) -> pulumi.Output[Optional[bool]]:
"""
Determines if the connection monitor will start automatically once created.
"""
return pulumi.get(self, "auto_start")
@property
@pulumi.getter(name="connectionMonitorType")
def connection_monitor_type(self) -> pulumi.Output[str]:
"""
Type of connection monitor.
"""
return pulumi.get(self, "connection_monitor_type")
@property
@pulumi.getter
def destination(self) -> pulumi.Output[Optional['outputs.ConnectionMonitorDestinationResponse']]:
"""
Describes the destination of connection monitor.
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter
def endpoints(self) -> pulumi.Output[Optional[Sequence['outputs.ConnectionMonitorEndpointResponse']]]:
"""
List of connection monitor endpoints.
"""
return pulumi.get(self, "endpoints")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Connection monitor location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="monitoringIntervalInSeconds")
def monitoring_interval_in_seconds(self) -> pulumi.Output[Optional[int]]:
"""
Monitoring interval in seconds.
"""
return pulumi.get(self, "monitoring_interval_in_seconds")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Output[str]:
"""
The monitoring status of the connection monitor.
"""
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the connection monitor.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def notes(self) -> pulumi.Output[Optional[str]]:
"""
Optional notes to be associated with the connection monitor.
"""
return pulumi.get(self, "notes")
@property
@pulumi.getter
def outputs(self) -> pulumi.Output[Optional[Sequence['outputs.ConnectionMonitorOutputResponse']]]:
"""
List of connection monitor outputs.
"""
return pulumi.get(self, "outputs")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the connection monitor.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def source(self) -> pulumi.Output[Optional['outputs.ConnectionMonitorSourceResponse']]:
"""
Describes the source of connection monitor.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> pulumi.Output[str]:
"""
The date and time when the connection monitor was started.
"""
return pulumi.get(self, "start_time")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Connection monitor tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="testConfigurations")
def test_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.ConnectionMonitorTestConfigurationResponse']]]:
"""
List of connection monitor test configurations.
"""
return pulumi.get(self, "test_configurations")
@property
@pulumi.getter(name="testGroups")
def test_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ConnectionMonitorTestGroupResponse']]]:
"""
List of connection monitor test groups.
"""
return pulumi.get(self, "test_groups")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Connection monitor type.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
75beee39f655ccdabb0e887a3fea8cafc7e95c8a | e2897c39ec494856e0f110c57f3f0bb4740ac4de | /task_2/task_2/wsgi.py | d926bbe042842485d5f7b8e6eef5a2e769852adf | [] | no_license | ksuvarna85/app_school | fc3f75eddf18535fff8cbf2b38d1fd39bf313102 | 9804cd0d9c629e37d72cd72738c675536ce1dd24 | refs/heads/master | 2022-12-10T23:24:30.967284 | 2020-09-02T05:13:16 | 2020-09-02T05:13:16 | 292,185,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for task_2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'task_2.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
953dfcb4dd312ccbcb7d455b544179ac4a617b59 | 2d4005c1bce1bad26fa9cba6c8ccab913e27c4ec | /Python高级/7丶http协议丶web服务器/4丶根据用户的需求返回相应的页面.py | 98eb031fe68132eb0345e8427d55a43e7c9ea1ae | [] | no_license | wfwf1990/learn | 4b801f2c082ce180a6d70d680c8cadbc5c6ec3cf | 5ed32454ddf083866fabd730d5b2ffb544a30e08 | refs/heads/master | 2020-03-21T18:16:20.284168 | 2018-07-18T11:35:29 | 2018-07-18T11:35:29 | 138,881,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | # Author: wangfang
# Author: wangfang
import socket
import re
def handle_data(client_socket):
recv_data = client_socket.recv(1024)
    # decode the received bytes into a str
    recv_data = recv_data.decode("utf-8")
    # split the received data into individual lines
    recv_data = recv_data.splitlines()
    # extract the request URI from the request line
    url = re.match("[^/]+(/[^ ]*)", recv_data[0]).group(1)
    # if the path is "/", rewrite it to "/index.html"
    if url == "/":
        url = "/index.html"
    # try to read the file; if it does not exist, run the except branch
try:
f1 = open("./html" +url,"rb")
except:
response_header = "http/1.1 404 not found \r\n"
response_header += "\r\n"
response_body = "file not found".encode("utf-8")
else:
response_header = "http/1.1 200 OK \r\n"
response_header += "\r\n"
response_body = f1.read()
f1.close()
    # send the response header and body back to the client
client_socket.send(response_header.encode("utf-8"))
client_socket.send(response_body)
    # close the connection socket
client_socket.close()
def main():
"""控制整个程序"""
#创建tcp套接字
tcp_server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
#绑定端口
server_ip = ""
server_port = 8080
server_addr = (server_ip,server_port)
tcp_server_socket.bind(server_addr)
    # start listening for incoming connections
tcp_server_socket.listen(128)
while True:
"""接收用户请求和返回用户数据"""
client_socket,client_addr = tcp_server_socket.accept()
handle_data(client_socket)
tcp_server_socket.close()
if __name__ == "__main__":
main() | [
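# Illustrative note (not part of the original file): for a request line like
#   "GET /index.html HTTP/1.1"
# the regex "[^/]+(/[^ ]*)" skips the method token and captures "/index.html"
# as the URI, which handle_data then resolves under the ./html directory.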
"[email protected]"
] | |
67e955dc2e70709a21813dde1e1e3ecf9da1ec54 | 41c26da9c57052a3c9cd17b81d91f41ef074cf8d | /MyLeetCode/FB/Sqrt(x).py | e57f5f518cdc5ab67b63761318a1cca55c7a2c24 | [] | no_license | ihuei801/leetcode | a82f59a16574f4781ce64a5faa099b75943de94e | fe79161211cc08c269cde9e1fdcfed27de11f2cb | refs/heads/master | 2021-06-08T05:12:53.934029 | 2020-05-07T07:22:25 | 2020-05-07T07:22:25 | 93,356,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | ###
# Binary Search
# Time Complexity: O(logn)
# Space Complexity: O(1)
###
class Solution(object):
def mySqrt(self, x):
"""
:type x: int
:rtype: int
"""
if x <= 0:
return 0
if x == 1:
return 1
l, r = 1, x
while l + 1 < r:
            mid = (l + r) // 2
if mid*mid == x:
return mid
elif mid*mid < x:
l = mid
else:
r = mid
if r*r <= x:
return r
else:
return l | [
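# Worked example (illustrative): mySqrt(8) narrows [l, r] as
#   (1, 8) -> mid=4, 16>8 -> (1, 4) -> mid=2, 4<8 -> (2, 4) -> mid=3, 9>8 -> (2, 3)
# the loop ends with l=2, r=3; since r*r = 9 > 8, the floor square root 2 is
# returned.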
"[email protected]"
] | |
e45e92ac2a515b699091a99231db873b58ea6c9e | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_noisy786.py | f2b65f32ed029be9c4178c2a3b37eb138cc9f1e7 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=19
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[0])) # number=14
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=15
c.append(cirq.H.on(input_qubit[0])) # number=16
c.append(cirq.Z.on(input_qubit[1])) # number=13
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=11
c.append(cirq.Z.on(input_qubit[2])) # number=12
c.append(cirq.Y.on(input_qubit[0])) # number=17
c.append(cirq.Y.on(input_qubit[0])) # number=18
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count = 2820
circuit = circuit.with_noise(cirq.depolarize(p=0.01))
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_noisy786.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
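# Illustrative note: the CSV written above contains, in order, the measured
# bitstring histogram, a "results end" marker, the number of circuit moments
# (circuit.__len__()), and the circuit diagram itself.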
"[email protected]"
] | |
63a817e8557c763d366712c27c996a9e667b18c3 | ebe7c57183b0eeba9af1bdc72f0f81b9b8129ca9 | /23. HashTable/387.py | 8b7b7473758a3bccdd09c324c10a8ef2fb84a148 | [] | no_license | proTao/leetcode | f2e46392b56b69606e1dd25cf5738cb0ad275645 | 97533d53c8892b6519e99f344489fa4fd4c9ab93 | refs/heads/master | 2021-11-24T10:23:56.927122 | 2021-11-18T04:28:05 | 2021-11-18T04:28:05 | 110,225,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from collections import Counter
from math import inf
class Solution:
def firstUniqChar(self, s: str) -> int:
count = Counter(s)
for i, c in enumerate(s):
if count[c] == 1:
return i
return -1
    def firstUniqCharBetter(self, s: str) -> int:
alpha = "qwertyuiopasdfghjklzxcvbnm"
res = inf
for c in alpha:
i = s.find(c)
if i == -1:
continue
j = s.find(c, i+1)
if j == -1:
res = min(res, i)
return res if res is not inf else -1
if __name__ == "__main__":
print(Solution().firstUniqCharBetter("loveleetcode"))
| [
"[email protected]"
] | |
a6664ec1cdda715d878aabeded1980ae5457a15c | 6f4f4d2ff85574a42a6e539d43becce5815e4530 | /lyman/tests/test_frontend.py | 639f8dc1d503c8f8798aa0fc2826f066d4bf4007 | [
"BSD-2-Clause"
] | permissive | toddt/lyman | b6aa656b6f8a6a235b9bf2f64d035a1b78dc188f | e3a5519fce41a765ae593d8d161e995c5f9aae8c | refs/heads/master | 2021-01-22T12:53:33.693352 | 2014-01-15T21:33:10 | 2014-01-15T21:33:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | from argparse import Namespace
from nose.tools import assert_equal
from .. import frontend
def test_determine_engine():
plugin_dict = dict(linear="Linear",
multiproc="MultiProc",
ipython="IPython",
torque="PBS")
for arg, plugin_str in plugin_dict.items():
args = Namespace(plugin=arg, queue=None)
if arg == "multiproc":
args.nprocs = 4
plugin, plugin_args = frontend.determine_engine(args)
yield assert_equal, plugin, plugin_str
if arg == "multiproc":
yield assert_equal, plugin_args, dict(n_procs=4, qsub_args="")
| [
"[email protected]"
] | |
8c6a5a3e278d1c8a19d73033246e3453833eb81e | 18f8a1c7122c0b320f17ea31192439779a8c63e8 | /web/apps/admin/groups.py | b0ad4bfe977abf42bf38d551d4f9ce035134e1a5 | [
"MIT"
] | permissive | RyanLainchbury/zoom | d49afa8d3506fca2c6e426707bd60ba640420a45 | 684a16f4fe3cea3d26f2d520c743a871ca84ecc5 | refs/heads/master | 2020-12-25T19:03:12.881247 | 2017-06-09T07:29:27 | 2017-06-09T07:29:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,650 | py | """
system users
"""
from zoom.components import success, error
from zoom.collect import Collection, CollectionController
from zoom.forms import Form
from zoom.helpers import link_to, url_for
from zoom.models import Group, Groups
from zoom.tools import now
import zoom.validators as v
import zoom.fields as f
from model import update_group_members
def group_fields(request):
fields = f.Fields([
f.TextField('Name', v.required, v.valid_name),
f.MemoField('Description'),
f.PulldownField('Administrators', default='administrators', options=request.site.user_groups),
])
personal_fields = f.Section('Includes',[
# f.ChosenMultiselectField('Groups', options=request.site.user_groups),
f.ChosenMultiselectField('Users', options=request.site.user_options),
])
return f.Fields(fields, personal_fields)
class GroupCollectionController(CollectionController):
def before_insert(self, record):
record['type'] = 'U'
update_group_members(record)
def before_update(self, record):
record['type'] = 'U'
update_group_members(record)
def main(route, request):
def user_group(group):
return group.type == 'U' and not group.name.startswith('a_')
db = request.site.db
users = Groups(db)
fields = group_fields(request)
columns = 'link', 'description', 'administrators'
return Collection(
fields,
model=Group,
controller=GroupCollectionController,
store=users,
item_name='group',
url='/admin/groups',
filter=user_group,
columns=columns,
)(route, request)
| [
"[email protected]"
] | |
cc247e80135181a627d1df3c82785a5871e3b13c | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/1485023/snippet.py | 55f1081e778224a3121589a27e60a6f8ebd07476 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 1,053 | py | # You need gevent 1.0 and pyzmq 3.x
#
# pip install --user git://github.com/SiteSupport/gevent.git
# pip install --user pyzmq
#
import gevent
import zmq.green as zmq
import os, sys
ADDR = 'tcp://127.0.0.1:5555'
def run_parent():
ctx = zmq.Context()
sock = ctx.socket(zmq.PUSH)
sock.bind(ADDR)
for i in range(10):
sock.send('message: %d' % i)
gevent.sleep(1)
def run_child(ident):
# create a new context since we are forked in a new process
ctx = zmq.Context()
sock = ctx.socket(zmq.PULL)
sock.connect(ADDR)
while True:
msg = sock.recv()
print '%s: %s' % (ident, msg)
def fork_workers(num):
pids = []
for i in range(num):
pid = gevent.fork()
if pid == 0:
run_child(os.getpid())
sys.exit(0)
else:
pids.append(pid)
return pids
pids = fork_workers(3)
print 'workers:', ', '.join('%d' % p for p in pids)
run_parent()
# not cool, workers should die themselves actually
for pid in pids:
os.kill(pid, 15)
| [
"[email protected]"
] | |
222a24bf377055ea4f4cd7687dc139e8332a4893 | 948a8fe4a46bbdda00f3af5d7a999092fd546808 | /src/QPS_simlearning.py | 01fd2a4dbf239f1fd79955040d1a9cf7af07bda0 | [] | no_license | wencanluo/QuantitativeSummarization | fcaf072566f0a4907f383042af0054ed1c47d82e | 8c34923e3447e517ee99fc00fda2bd81b34e25a0 | refs/heads/master | 2020-12-21T01:07:37.842895 | 2017-05-11T01:06:02 | 2017-05-11T01:06:02 | 56,019,382 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,737 | py | import sys
import re
import fio
import xml.etree.ElementTree as ET
from collections import defaultdict
import random
import NLTKWrapper
import SennaParser
import porter
import annotation
import os
import CourseMirror_Survey
import OracleExperiment
import json
from CourseMirror_Survey import stopwords, punctuations
import codecs
from nltk.tag import SennaPSGTagger
import pickle
import numpy as np
from sklearn import svm
from sklearn.metrics import mean_squared_error, precision_recall_fscore_support, accuracy_score
import pickle
import file_util
from AlignPhraseAnnotation import AlignPhraseAnnotation
from similarity import Similarity
import global_params
sim_exe = '.feature.sim'
def extractPhrasePaireFeature(phrasedir):
for lec in annotation.Lectures:
path = phrasedir + str(lec)+ '/'
fio.NewPath(path)
for prompt in ['q1', 'q2']:
prefix = os.path.join(path, '%s.%s.'%(prompt, method))
filename = path + prompt + sim_exe
print filename
featureset = []
feature_extractor = Similarity(prefix)
phrasefile = os.path.join(path, "%s.%s.key"%(prompt, method))
phrases = fio.LoadList(phrasefile)
for p1 in phrases:
for p2 in phrases:
featureset.append((feature_extractor.get_features(p1, p2), 0.0, {'p1':p1, 'p2':p2}))
fio.SaveDict2Json(featureset, filename)
feature_extractor.save()
def extractPhrasePaireFromAnnotation(phrasedir, annotators, id):
for doc, lec, annotator in annotation.generate_all_files(annotation.datadir + 'json/', '.json', anotators = annotators, lectures=annotation.Lectures):
print doc
#load task
task = annotation.Task()
task.loadjson(doc)
path = phrasedir + str(lec)+ '/'
fio.NewPath(path)
for prompt in ['q1', 'q2']:
prefix = os.path.join(path, '%s.%s.'%(prompt, method))
filename = path + prompt + sim_exe
print filename
featureset = []
feature_extractor = Similarity(prefix)
phrase_annotation = task.get_phrase_annotation(prompt)
#positive examples
for rank1 in sorted(phrase_annotation):
for rank2 in sorted(phrase_annotation):
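                    # pairs drawn from the same annotated cluster are labeled
                    # positive (1.0); cross-cluster pairs are negative (0.0)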
if rank1 == rank2:
score = 1.0
else:
score = 0.0
phrases1 = phrase_annotation[rank1]
phrases2 = phrase_annotation[rank2]
for phrasedict1 in phrases1:
p1 = phrasedict1['phrase'].lower().strip()
for phrasedict2 in phrases2:
p2 = phrasedict2['phrase'].lower().strip()
featureset.append((feature_extractor.get_features(p1, p2), score, {'p1':p1, 'p2':p2}))
fio.SaveDict2Json(featureset, filename)
feature_extractor.save()
def combine_files_test(phrasedir, lectures, features=None, prompts=['q1', 'q2']):
X = []
Y = []
if features == None:
sim_extractor = Similarity()
features = sorted(sim_extractor.features.keys())
for i, lec in enumerate(lectures):
for q in prompts:
for phrasedir in [phrasedir]:
path = phrasedir + str(lec)+ '/'
filename = os.path.join(path, q + sim_exe)
data = fio.LoadDictJson(filename)
for fdict, score, _ in data:
row = []
for name in features:
x = fdict[name]
if str(x) == 'nan':
x = 0.0
row.append(x)
X.append(row)
Y.append(score)
return X, Y
def combine_files_course(course, lectures, features=None, prompts=['q1', 'q2']):
phrasedir1 = '../data/%s/oracle_annotator_1/phrase/'%course
phrasedir2 = '../data/%s/oracle_annotator_2/phrase/'%course
X = []
Y = []
if features == None:
sim_extractor = Similarity()
features = sorted(sim_extractor.features.keys())
for i, lec in enumerate(lectures):
for q in prompts:
for phrasedir in [phrasedir1, phrasedir2]:
path = phrasedir + str(lec)+ '/'
filename = os.path.join(path, q + sim_exe)
data = fio.LoadDictJson(filename)
for fdict, score, _ in data:
row = []
for name in features:
x = fdict[name]
if str(x) == 'nan':
x = 0.0
row.append(x)
X.append(row)
Y.append(score)
return X, Y
def combine_files(lectures, features=None, prompts=['q1', 'q2']):
phrasedir1 = '../data/%s/oracle_annotator_1/phrase/'%course
phrasedir2 = '../data/%s/oracle_annotator_2/phrase/'%course
X = []
Y = []
if features == None:
sim_extractor = Similarity()
features = sorted(sim_extractor.features.keys())
for i, lec in enumerate(lectures):
for q in prompts:
for phrasedir in [phrasedir1, phrasedir2]:
path = phrasedir + str(lec)+ '/'
filename = os.path.join(path, q + sim_exe)
data = fio.LoadDictJson(filename)
for fdict, score, _ in data:
row = []
for name in features:
x = fdict[name]
if str(x) == 'nan':
x = 0.0
row.append(x)
X.append(row)
Y.append(score)
return X, Y
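# Illustrative shape of one row in a "<prompt>.feature.sim" JSON file, as
# consumed by the combine_files* helpers above (feature names are examples):
#   [{"LexicalOverlap": 0.5, "WordEmbedding": 0.8, ...},   # feature dict
#    1.0,                                                  # similarity label
#    {"p1": "phrase one", "p2": "phrase two"}]             # phrase metadata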
def correlation_analysis(course):
phrasedir1 = '../data/%s/oracle_annotator_1/phrase/'%course
phrasedir2 = '../data/%s/oracle_annotator_2/phrase/'%course
outdir = '../data/%s/simlearning/'%course
fio.NewPath(outdir)
sim_extractor = Similarity()
features = sorted(sim_extractor.features.keys())
head = features + ['score', 'predict']
body = []
lectures = annotation.Lectures
name = '_'.join(features)
for i, lec in enumerate(lectures):
model_file = os.path.join(model_dir, '%d_%s.model'%(lec, name))
with open(model_file, 'rb') as handle:
clf = pickle.load(handle)
for q in ['q1', 'q2']:
outfile = os.path.join(outdir, str(lec), '%s%s'%(q, sim_exe))
for phrasedir in [phrasedir1, phrasedir2]:
path = phrasedir + str(lec)+ '/'
filename = os.path.join(path, q + sim_exe)
data = fio.LoadDictJson(filename)
for fdict, score, _ in data:
row = []
for fname in features:
x = fdict[fname]
if str(x) == 'nan':
x = 0.0
row.append(x)
predict_score = clf.predict([row])
row.append(score)
row.append(predict_score[0])
body.append(row)
out_correlation = os.path.join(outdir, 'data.txt')
print out_correlation
fio.WriteMatrix(out_correlation, body, head)
def correlation_analysis_noduplicate():
phrasedir1 = '../data/%s/oracle_annotator_1/phrase/'%course
phrasedir2 = '../data/%s/oracle_annotator_2/phrase/'%course
outdir = '../data/%s/simlearning/'%course
fio.NewPath(outdir)
sim_extractor = Similarity()
features = sorted(sim_extractor.features.keys())
head = features + ['score']
body = []
lectures = annotation.Lectures
for i, lec in enumerate(lectures):
for q in ['q1', 'q2']:
outfile = os.path.join(outdir, str(lec), '%s%s'%(q, sim_exe))
for phrasedir in [phrasedir1, phrasedir2]:
path = phrasedir + str(lec)+ '/'
filename = os.path.join(path, q + sim_exe)
data = fio.LoadDictJson(filename)
for fdict, score, pd in data:
if pd['p1'] == pd['p2']:
print pd['p1']
continue
row = []
for name in features:
x = fdict[name]
if str(x) == 'nan':
x = 0.0
row.append(x)
row.append(score)
body.append(row)
out_correlation = os.path.join(outdir, 'data.txt')
fio.WriteMatrix(out_correlation, body, head)
def train_leave_one_lecture_out(model_dir, name='simlearn_cv'):
# model_dir = '../data/IE256/%s/model/%s/'%(system, name)
# fio.NewPath(model_dir)
#
# outputdir = '../data/IE256/%s/extraction/%s_output/'%(system, name)
# fio.NewPath(outputdir)
sim_extractor = Similarity()
allfeatures = sorted(sim_extractor.features.keys())
if True:
k = len(allfeatures)
#for k in range(len(allfeatures)+1):
#features = allfeatures#['WordEmbedding']
if k == len(allfeatures):#use all features
features = allfeatures
else:
features = [allfeatures[k]]
name = '_'.join(features)
lectures = annotation.Lectures
dict = defaultdict(int)
MSE = []
for i, lec in enumerate(lectures):
train = [x for x in lectures if x != lec]
test = [lec]
print train
print test
model_file = os.path.join(model_dir, '%d_%s.model'%(lec, name))
if fio.IsExist(model_file):
with open(model_file, 'rb') as handle:
clf = pickle.load(handle)
else:
train_X, train_Y = combine_files(train, features)
clf = svm.SVR()
clf.fit(train_X, train_Y)
with open(model_file, 'wb') as handle:
pickle.dump(clf, handle)
for q in ['q1', 'q2']:
test_X, test_Y = combine_files(test, features, prompts=[q])
predict_Y = clf.predict(test_X)
mse = mean_squared_error(test_Y, predict_Y)
MSE.append([lec, q, mse])
output = '../data/%s/simlearning.cv.%s.txt'%(course, name)
fio.WriteMatrix(output, MSE, header=['lec', 'prompt', 'MSE'])
def train_IE256_svm(traincourse, model_dir, name='simlearn_cv'):
sim_extractor = Similarity()
allfeatures = sorted(sim_extractor.features.keys())
features = allfeatures
name = '_'.join(features)
lectures = annotation.Lectures
dict = defaultdict(int)
if traincourse == 'IE256':
train = [x for x in range(14, 26) if x != 22]
else:
train = [x for x in range(3, 27)]
model_file = os.path.join(model_dir, '%s_%s.model'%(traincourse, name))
if fio.IsExist(model_file):
with open(model_file, 'rb') as handle:
clf = pickle.load(handle)
else:
train_X, train_Y = combine_files_course(traincourse, train, features)
clf = svm.SVC()
clf.fit(train_X, train_Y)
with open(model_file, 'wb') as handle:
pickle.dump(clf, handle)
def train_leave_one_lecture_out_svm(model_dir, name='simlearn_cv'):
# model_dir = '../data/IE256/%s/model/%s/'%(system, name)
# fio.NewPath(model_dir)
#
# outputdir = '../data/IE256/%s/extraction/%s_output/'%(system, name)
# fio.NewPath(outputdir)
sim_extractor = Similarity()
allfeatures = sorted(sim_extractor.features.keys())
#for k in range(len(allfeatures)+1):
k = len(allfeatures)
if True:
#for k in range(len(allfeatures)):
#if allfeatures[k] != 'optimumComparerLSATasa': continue
if k == len(allfeatures):#use all features
features = allfeatures
else:
features = [allfeatures[k]]
#features = allfeatures[0:k] + allfeatures[k+1:]
name = '_'.join(features)
lectures = annotation.Lectures
dict = defaultdict(int)
MSE = []
for i, lec in enumerate(lectures):
train = [x for x in lectures if x != lec]
test = [lec]
print train
print test
model_file = os.path.join(model_dir, '%d_%s.model'%(lec, name))
if fio.IsExist(model_file):
with open(model_file, 'rb') as handle:
clf = pickle.load(handle)
else:
train_X, train_Y = combine_files(train, features)
clf = svm.SVC()
clf.fit(train_X, train_Y)
with open(model_file, 'wb') as handle:
pickle.dump(clf, handle)
for q in ['q1', 'q2']:
test_X, test_Y = combine_files(test, features, prompts=[q])
predict_Y = clf.predict(test_X)
prf = precision_recall_fscore_support(test_Y, predict_Y, average='weighted')
accuracy = accuracy_score(test_Y, predict_Y)
MSE.append([lec, q, accuracy] + [prf[0], prf[1], prf[2]])
output = '../data/%s/simlearning.cv.svm.%s.txt'%(course, name)
fio.WriteMatrix(output, MSE, header=['lec', 'prompt', 'accuracy', 'precision', 'recall', 'f-score'])
def predict_IE256(train_course, model_dir, phrasedir, modelname='svm'):
sim_extractor = Similarity()
allfeatures = sorted(sim_extractor.features.keys())
features = allfeatures
name = '_'.join(features)
lectures = annotation.Lectures
for i, lec in enumerate(lectures):
test = [lec]
print test
model_file = os.path.join(model_dir, '%s_%s.model'%(train_course, name))
with open(model_file, 'rb') as handle:
clf = pickle.load(handle)
path = os.path.join(phrasedir, str(lec))
for q in ['q1', 'q2']:
test_X, test_Y = combine_files_test(phrasedir, test, features, prompts=[q])
predict_Y = clf.predict(test_X)
#write the output
phrasefile = os.path.join(path, "%s.%s.key"%(q, method))
phrases = fio.LoadList(phrasefile)
assert(len(predict_Y) == len(phrases)*len(phrases))
k = 0
body = []
for p1 in phrases:
row = []
for p2 in phrases:
row.append(predict_Y[k])
k += 1
body.append(row)
output = os.path.join(path, "%s.%s.%s"%(q, method,modelname))
fio.WriteMatrix(output, body, phrases)
def predict_leave_one_lecture_out(model_dir, phrasedir, modelname='svr'):
sim_extractor = Similarity()
allfeatures = sorted(sim_extractor.features.keys())
features = allfeatures
name = '_'.join(features)
lectures = annotation.Lectures
for i, lec in enumerate(lectures):
test = [lec]
print test
model_file = os.path.join(model_dir, '%d_%s.model'%(lec, name))
with open(model_file, 'rb') as handle:
clf = pickle.load(handle)
path = os.path.join(phrasedir, str(lec))
for q in ['q1', 'q2']:
test_X, test_Y = combine_files_test(phrasedir, test, features, prompts=[q])
predict_Y = clf.predict(test_X)
#write the output
phrasefile = os.path.join(path, "%s.%s.key"%(q, method))
phrases = fio.LoadList(phrasefile)
assert(len(predict_Y) == len(phrases)*len(phrases))
k = 0
body = []
for p1 in phrases:
row = []
for p2 in phrases:
row.append(predict_Y[k])
k += 1
body.append(row)
output = os.path.join(path, "%s.%s.%s"%(q, method,modelname))
fio.WriteMatrix(output, body, phrases)
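# The "<prompt>.<method>.<modelname>" file written above is an N x N matrix:
# its header row is the N phrases from the .key file, and cell (i, j) holds
# the predicted similarity between phrase i and phrase j in that same order.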
def gather_performance(output):
sim_extractor = Similarity()
allfeatures = sorted(sim_extractor.features.keys())
allbody = []
for k in range(len(allfeatures)+1):
#features = allfeatures#['WordEmbedding']
if k == len(allfeatures):#use all features
features = allfeatures
else:
features = [allfeatures[k]]
#features = allfeatures[0:k] + allfeatures[k+1:]
name = '_'.join(features)
resultfile = '../data/%s/simlearning.cv.svm.%s.txt'%(course, name)
head, body = fio.ReadMatrix(resultfile, hasHead=True)
#get the average
allhead = ['name'] + head[2:]
average = [name]
for i in range(2, len(head)):#start from the third one
values = [float(row[i]) for row in body]
average.append(np.mean(values))
allbody.append(average)
fio.WriteMatrix(output, allbody, allhead)
def check_stopword():
from CourseMirror_Survey import stopwords
vocab = fio.LoadDictJson(global_params.vocab)
for word, count in vocab.items():
if count < 5: continue
if word in stopwords:
print word, '\t', count
if __name__ == '__main__':
course = global_params.g_cid
for system, method in [
('QPS_combine', 'crf'),
]:
phrasedir = "../data/"+course+"/"+system+"/phrase/"
# extractPhrasePaireFeature(phrasedir)
model_dir = "../data/"+course+"/simlearning/svm"
fio.NewPath(model_dir)
train_leave_one_lecture_out_svm(model_dir)
predict_leave_one_lecture_out(model_dir, phrasedir, modelname='svm')
| [
"[email protected]"
] | |
2868e0431b4695d3c0c1bf5f09a50754ff439a4e | 983f77449bbea7ae1993a93d7f4431f0f07193f0 | /lab/agent_exercising/model.py | 3593f4708457d6223c507bb9e459248134d29983 | [] | no_license | johnholl/TDnets | 09d45f2bab138639e3be107d2e44df01533c10c3 | 00afc8a5ad412047c658deed2f487a98f062788b | refs/heads/master | 2020-06-19T06:41:42.159903 | 2017-03-13T13:02:11 | 2017-03-13T13:02:11 | 74,916,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,714 | py | import numpy as np
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
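# Note: each column of the weight matrix produced above is rescaled to have
# L2 norm equal to `std`, so the initial layer outputs have a comparable
# scale regardless of fan-in.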
def flatten(x):
return tf.reshape(x, [-1, np.prod(x.get_shape().as_list()[1:])])
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = np.prod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
collections=collections)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
def linear(x, size, name, initializer=None, bias_init=0):
w = tf.get_variable(name + "/w", [x.get_shape()[1], size], initializer=initializer)
b = tf.get_variable(name + "/b", [size], initializer=tf.constant_initializer(bias_init))
return tf.matmul(x, w) + b
def categorical_sample(logits, d):
value = tf.squeeze(tf.multinomial(logits - tf.reduce_max(logits, [1], keep_dims=True), 1), [1])
return tf.one_hot(value, d)
class LSTMPolicy(object):
def __init__(self, ob_space, ac_space):
self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))
for i in range(4):
x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
# introduce a "fake" batch dimension of 1 after flatten so that we can do LSTM over time dim
x = tf.expand_dims(flatten(x), [0])
size = 256
        lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)
self.state_size = lstm.state_size
step_size = tf.shape(self.x)[:1]
c_init = np.zeros((1, lstm.state_size.c), np.float32)
h_init = np.zeros((1, lstm.state_size.h), np.float32)
self.state_init = [c_init, h_init]
c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
self.state_in = [c_in, h_in]
        state_in = rnn.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
lstm, x, initial_state=state_in, sequence_length=step_size,
time_major=False)
lstm_c, lstm_h = lstm_state
x = tf.reshape(lstm_outputs, [-1, size])
self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
self.sample = categorical_sample(self.logits, ac_space)[0, :]
self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
def get_initial_features(self):
return self.state_init
def act(self, ob, c, h):
sess = tf.get_default_session()
return sess.run([self.sample, self.vf] + self.state_out,
{self.x: [ob], self.state_in[0]: c, self.state_in[1]: h})
def value(self, ob, c, h):
sess = tf.get_default_session()
return sess.run(self.vf, {self.x: [ob], self.state_in[0]: c, self.state_in[1]: h})[0]
class AuxLSTMPolicy(object):
def __init__(self, ob_space, ac_space):
self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))
self.action = tf.placeholder(tf.float32, [None, ac_space])
self.reward = tf.placeholder(tf.float32, [None, 1])
x = tf.nn.relu(conv2d(x, 16, "l1", [8, 8], [4, 4]))
x = conv_features = tf.nn.relu(conv2d(x, 32, "l2", [4, 4], [2, 2]))
x = flatten(x)
x = tf.nn.relu(linear(x, 256, "l3", normalized_columns_initializer(0.1)))
        x = tf.concat([x, self.action, self.reward], axis=1)
# introduce a "fake" batch dimension of 1 after flatten so that we can do LSTM over time dim
x = tf.expand_dims(x, [0])
size = 256
        lstm = rnn.BasicLSTMCell(size, state_is_tuple=True)
self.state_size = lstm.state_size
step_size = tf.shape(self.x)[:1]
c_init = np.zeros((1, lstm.state_size.c), np.float32)
h_init = np.zeros((1, lstm.state_size.h), np.float32)
self.state_init = [c_init, h_init]
c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
self.state_in = [c_in, h_in]
        state_in = rnn.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
lstm, x, initial_state=state_in, sequence_length=step_size,
time_major=False)
lstm_c, lstm_h = lstm_state
x = tf.reshape(lstm_outputs, [-1, size])
self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
self.sample = categorical_sample(self.logits, ac_space)[0, :]
self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
def get_initial_features(self):
return self.state_init
def act(self, ob, prev_a, prev_r, c, h):
sess = tf.get_default_session()
return sess.run([self.sample, self.vf] + self.state_out,
{self.x: [ob], self.action: [prev_a], self.reward: [[prev_r]],
self.state_in[0]: c, self.state_in[1]: h})
def value(self, ob, prev_a, prev_r, c, h):
sess = tf.get_default_session()
return sess.run(self.vf, {self.x: [ob], self.action: [prev_a], self.reward: [[prev_r]],
self.state_in[0]: c, self.state_in[1]: h})[0]
| [
"[email protected]"
] | |
2f211ee9858ffddacd1a6b995f06cd8455450b80 | 4d9ce4ab1f0ce0a857f215edc2ffc99ce3b82623 | /tfx/orchestration/experimental/core/mlmd_state_test.py | 6faacc6cc12f8ce1e987bfdbb57b7de35f8efd41 | [
"Apache-2.0"
] | permissive | vpipkt/tfx | 448fd85a177f7e3a3a6dacf262eb0c93f459f534 | 42f4f4095ff3c3e23fe2ac1076c9a0fdfc631d23 | refs/heads/master | 2023-06-20T12:27:56.083959 | 2021-05-25T18:31:23 | 2021-05-25T18:33:12 | 370,820,614 | 0 | 0 | Apache-2.0 | 2021-05-25T20:31:22 | 2021-05-25T20:31:22 | null | UTF-8 | Python | false | false | 2,934 | py | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.mlmd_state."""
import os
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import test_utils
from ml_metadata.proto import metadata_store_pb2
def _write_test_execution(mlmd_handle):
execution_type = metadata_store_pb2.ExecutionType(name='foo', version='bar')
execution_type_id = mlmd_handle.store.put_execution_type(execution_type)
[execution_id] = mlmd_handle.store.put_executions(
[metadata_store_pb2.Execution(type_id=execution_type_id)])
[execution] = mlmd_handle.store.get_executions_by_id([execution_id])
return execution
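# The tests below exercise mlmd_state.mlmd_execution_atomic_op: mutations made
# inside the `with` block are committed back to MLMD on exit, and the cached
# in-memory copy stays in sync with the store.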
class MlmdStateTest(test_utils.TfxTest):
def setUp(self):
super().setUp()
pipeline_root = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self.id())
metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
connection_config = metadata.sqlite_metadata_connection_config(
metadata_path)
connection_config.sqlite.SetInParent()
self._mlmd_connection = metadata.Metadata(
connection_config=connection_config)
def test_mlmd_execution_update(self):
with self._mlmd_connection as m:
expected_execution = _write_test_execution(m)
# Mutate execution.
with mlmd_state.mlmd_execution_atomic_op(
m, expected_execution.id) as execution:
self.assertEqual(expected_execution, execution)
execution.last_known_state = metadata_store_pb2.Execution.CANCELED
# Test that updated execution is committed to MLMD.
[execution] = m.store.get_executions_by_id([execution.id])
self.assertEqual(metadata_store_pb2.Execution.CANCELED,
execution.last_known_state)
# Test that in-memory state is also in sync.
with mlmd_state.mlmd_execution_atomic_op(
m, expected_execution.id) as execution:
self.assertEqual(metadata_store_pb2.Execution.CANCELED,
execution.last_known_state)
def test_mlmd_execution_absent(self):
with self._mlmd_connection as m:
with mlmd_state.mlmd_execution_atomic_op(m, 1) as execution:
self.assertIsNone(execution)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
61568db31e9d7b2d8fa0d2c395d9da0c6d81ca53 | f4b8c90c1349c8740c1805f7b6b0e15eb5db7f41 | /starrez_client/models/transaction_dispute_item.py | d514f910513b38e744435d4c97d3d923c2655c8b | [] | no_license | CalPolyResDev/StarRezAPI | 012fb8351159f96a81352d6c7bfa36cd2d7df13c | b184e1863c37ff4fcf7a05509ad8ea8ba825b367 | refs/heads/master | 2021-01-25T10:29:37.966602 | 2018-03-15T01:01:35 | 2018-03-15T01:01:35 | 123,355,501 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,339 | py | # coding: utf-8
"""
StarRez API
This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TransactionDisputeItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'transaction_dispute_id': 'int',
'entry_id': 'int',
'transaction_dispute_status_enum': 'str',
'date_created': 'datetime',
'security_user_id': 'int',
'created_by_security_user_id': 'int',
'date_modified': 'str'
}
attribute_map = {
'transaction_dispute_id': 'TransactionDisputeID',
'entry_id': 'EntryID',
'transaction_dispute_status_enum': 'TransactionDisputeStatusEnum',
'date_created': 'DateCreated',
'security_user_id': 'SecurityUserID',
'created_by_security_user_id': 'CreatedBy_SecurityUserID',
'date_modified': 'DateModified'
}
def __init__(self, transaction_dispute_id=None, entry_id=None, transaction_dispute_status_enum=None, date_created=None, security_user_id=None, created_by_security_user_id=None, date_modified=None): # noqa: E501
"""TransactionDisputeItem - a model defined in Swagger""" # noqa: E501
self._transaction_dispute_id = None
self._entry_id = None
self._transaction_dispute_status_enum = None
self._date_created = None
self._security_user_id = None
self._created_by_security_user_id = None
self._date_modified = None
self.discriminator = None
if transaction_dispute_id is not None:
self.transaction_dispute_id = transaction_dispute_id
if entry_id is not None:
self.entry_id = entry_id
if transaction_dispute_status_enum is not None:
self.transaction_dispute_status_enum = transaction_dispute_status_enum
if date_created is not None:
self.date_created = date_created
if security_user_id is not None:
self.security_user_id = security_user_id
if created_by_security_user_id is not None:
self.created_by_security_user_id = created_by_security_user_id
if date_modified is not None:
self.date_modified = date_modified
@property
def transaction_dispute_id(self):
"""Gets the transaction_dispute_id of this TransactionDisputeItem. # noqa: E501
Transaction Dispute # noqa: E501
:return: The transaction_dispute_id of this TransactionDisputeItem. # noqa: E501
:rtype: int
"""
return self._transaction_dispute_id
@transaction_dispute_id.setter
def transaction_dispute_id(self, transaction_dispute_id):
"""Sets the transaction_dispute_id of this TransactionDisputeItem.
Transaction Dispute # noqa: E501
:param transaction_dispute_id: The transaction_dispute_id of this TransactionDisputeItem. # noqa: E501
:type: int
"""
self._transaction_dispute_id = transaction_dispute_id
@property
def entry_id(self):
"""Gets the entry_id of this TransactionDisputeItem. # noqa: E501
Entry # noqa: E501
:return: The entry_id of this TransactionDisputeItem. # noqa: E501
:rtype: int
"""
return self._entry_id
@entry_id.setter
def entry_id(self, entry_id):
"""Sets the entry_id of this TransactionDisputeItem.
Entry # noqa: E501
:param entry_id: The entry_id of this TransactionDisputeItem. # noqa: E501
:type: int
"""
self._entry_id = entry_id
@property
def transaction_dispute_status_enum(self):
"""Gets the transaction_dispute_status_enum of this TransactionDisputeItem. # noqa: E501
Transaction Dispute Status # noqa: E501
:return: The transaction_dispute_status_enum of this TransactionDisputeItem. # noqa: E501
:rtype: str
"""
return self._transaction_dispute_status_enum
@transaction_dispute_status_enum.setter
def transaction_dispute_status_enum(self, transaction_dispute_status_enum):
"""Sets the transaction_dispute_status_enum of this TransactionDisputeItem.
Transaction Dispute Status # noqa: E501
:param transaction_dispute_status_enum: The transaction_dispute_status_enum of this TransactionDisputeItem. # noqa: E501
:type: str
"""
self._transaction_dispute_status_enum = transaction_dispute_status_enum
@property
def date_created(self):
"""Gets the date_created of this TransactionDisputeItem. # noqa: E501
Date Created # noqa: E501
:return: The date_created of this TransactionDisputeItem. # noqa: E501
:rtype: datetime
"""
return self._date_created
@date_created.setter
def date_created(self, date_created):
"""Sets the date_created of this TransactionDisputeItem.
Date Created # noqa: E501
:param date_created: The date_created of this TransactionDisputeItem. # noqa: E501
:type: datetime
"""
self._date_created = date_created
@property
def security_user_id(self):
"""Gets the security_user_id of this TransactionDisputeItem. # noqa: E501
Security User # noqa: E501
:return: The security_user_id of this TransactionDisputeItem. # noqa: E501
:rtype: int
"""
return self._security_user_id
@security_user_id.setter
def security_user_id(self, security_user_id):
"""Sets the security_user_id of this TransactionDisputeItem.
Security User # noqa: E501
:param security_user_id: The security_user_id of this TransactionDisputeItem. # noqa: E501
:type: int
"""
self._security_user_id = security_user_id
@property
def created_by_security_user_id(self):
"""Gets the created_by_security_user_id of this TransactionDisputeItem. # noqa: E501
Created By Security User # noqa: E501
:return: The created_by_security_user_id of this TransactionDisputeItem. # noqa: E501
:rtype: int
"""
return self._created_by_security_user_id
@created_by_security_user_id.setter
def created_by_security_user_id(self, created_by_security_user_id):
"""Sets the created_by_security_user_id of this TransactionDisputeItem.
Created By Security User # noqa: E501
:param created_by_security_user_id: The created_by_security_user_id of this TransactionDisputeItem. # noqa: E501
:type: int
"""
self._created_by_security_user_id = created_by_security_user_id
@property
def date_modified(self):
"""Gets the date_modified of this TransactionDisputeItem. # noqa: E501
Date Modified # noqa: E501
:return: The date_modified of this TransactionDisputeItem. # noqa: E501
:rtype: str
"""
return self._date_modified
@date_modified.setter
def date_modified(self, date_modified):
"""Sets the date_modified of this TransactionDisputeItem.
Date Modified # noqa: E501
:param date_modified: The date_modified of this TransactionDisputeItem. # noqa: E501
:type: str
"""
self._date_modified = date_modified
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TransactionDisputeItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
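# Illustrative usage (field values are examples only):
#   item = TransactionDisputeItem(transaction_dispute_id=1, entry_id=42)
#   item.to_dict()   # -> dict keyed by the attribute names in swagger_types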
| [
"[email protected]"
] | |
f7d962573d6c4eeb3ac79b56b3303e17fe27a433 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_VSCODE-extensions/vscode-python/pythonFiles/runJediLanguageServer.py | a473bf76b3a84a8c79ff0f8fd1ea6b94dcf2f432 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 451 | py | import re
import sys
import os
# Add the lib path to our sys path so jedi_language_server can find its references
EXTENSION_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(EXTENSION_ROOT, "pythonFiles", "lib", "python"))
from jedi_language_server.cli import cli
# Trick language server into thinking it started from 'jedi-language-server.exe'
sys.argv[0] = "jedi-language-server.exe"
sys.exit(cli())
| [
"[email protected]"
] | |
0943ae201a1571e0b8c8803d9ed60f43beef1bc7 | 0b358a0d64eb03655c030b36c0ae87880b153951 | /mmdet/models/dense_heads/corner_head.py | 327094bad674975cefd305d5ab08d6505ed45dca | [] | permissive | jshilong/DDQ | db05ff309d63316c62faa59b28c66d65eef973d1 | de9331e4579aaafab4d69e3a9a3c6638efc5392c | refs/heads/main | 2023-06-03T15:02:09.949907 | 2023-05-24T03:32:12 | 2023-05-24T03:32:12 | 498,974,099 | 199 | 6 | Apache-2.0 | 2022-06-02T05:01:53 | 2022-06-02T03:10:25 | null | UTF-8 | Python | false | false | 48,420 | py | # Copyright (c) OpenMMLab. All rights reserved.
from logging import warning
from math import ceil, log
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob
from mmcv.ops import CornerPool, batched_nms
from mmcv.runner import BaseModule
from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from ..utils import gaussian_radius, gen_gaussian_target
from ..utils.gaussian_target import (gather_feat, get_local_maximum,
get_topk_from_heatmap,
transpose_and_gather_feat)
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
class BiCornerPool(BaseModule):
"""Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)
Args:
in_channels (int): Input channels of module.
out_channels (int): Output channels of module.
feat_channels (int): Feature channels of module.
directions (list[str]): Directions of two CornerPools.
norm_cfg (dict): Dictionary to construct and config norm layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
directions,
feat_channels=128,
out_channels=128,
norm_cfg=dict(type='BN', requires_grad=True),
init_cfg=None):
super(BiCornerPool, self).__init__(init_cfg)
self.direction1_conv = ConvModule(
in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
self.direction2_conv = ConvModule(
in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
self.aftpool_conv = ConvModule(
feat_channels,
out_channels,
3,
padding=1,
norm_cfg=norm_cfg,
act_cfg=None)
self.conv1 = ConvModule(
in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
self.conv2 = ConvModule(
in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)
self.direction1_pool = CornerPool(directions[0])
self.direction2_pool = CornerPool(directions[1])
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
"""Forward features from the upstream network.
Args:
x (tensor): Input feature of BiCornerPool.
Returns:
conv2 (tensor): Output feature of BiCornerPool.
"""
direction1_conv = self.direction1_conv(x)
direction2_conv = self.direction2_conv(x)
direction1_feat = self.direction1_pool(direction1_conv)
direction2_feat = self.direction2_pool(direction2_conv)
aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)
conv1 = self.conv1(x)
relu = self.relu(aftpool_conv + conv1)
conv2 = self.conv2(relu)
return conv2
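# Illustrative instantiation (mirrors _init_corner_kpt_layers below):
#   tl_pool = BiCornerPool(256, ['top', 'left'], out_channels=256)
# produces the top-left corner feature map consumed by the tl_heat, tl_emb
# and tl_off branches.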
@HEADS.register_module()
class CornerHead(BaseDenseHead, BBoxTestMixin):
"""Head of CornerNet: Detecting Objects as Paired Keypoints.
Code is modified from the `official github repo
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/
kp.py#L73>`_ .
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_ .
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
num_feat_levels (int): Levels of feature from the previous module. 2
for HourglassNet-104 and 1 for HourglassNet-52. Because
HourglassNet-104 outputs the final feature and intermediate
supervision feature and HourglassNet-52 only outputs the final
feature. Default: 2.
corner_emb_channels (int): Channel of embedding vector. Default: 1.
train_cfg (dict | None): Training config. Useless in CornerHead,
but we keep this variable for SingleStageDetector. Default: None.
test_cfg (dict | None): Testing config of CornerHead. Default: None.
loss_heatmap (dict | None): Config of corner heatmap loss. Default:
GaussianFocalLoss.
loss_embedding (dict | None): Config of corner embedding loss. Default:
AssociativeEmbeddingLoss.
loss_offset (dict | None): Config of corner offset loss. Default:
SmoothL1Loss.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_classes,
in_channels,
num_feat_levels=2,
corner_emb_channels=1,
train_cfg=None,
test_cfg=None,
loss_heatmap=dict(
type='GaussianFocalLoss',
alpha=2.0,
gamma=4.0,
loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.25,
push_weight=0.25),
loss_offset=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(CornerHead, self).__init__(init_cfg)
self.num_classes = num_classes
self.in_channels = in_channels
self.corner_emb_channels = corner_emb_channels
self.with_corner_emb = self.corner_emb_channels > 0
self.corner_offset_channels = 2
self.num_feat_levels = num_feat_levels
self.loss_heatmap = build_loss(
loss_heatmap) if loss_heatmap is not None else None
self.loss_embedding = build_loss(
loss_embedding) if loss_embedding is not None else None
self.loss_offset = build_loss(
loss_offset) if loss_offset is not None else None
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self._init_layers()
def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
"""Initialize conv sequential for CornerHead."""
return nn.Sequential(
ConvModule(in_channels, feat_channels, 3, padding=1),
ConvModule(
feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None))
def _init_corner_kpt_layers(self):
"""Initialize corner keypoint layers.
Including corner heatmap branch and corner offset branch. Each branch
has two parts: prefix `tl_` for top-left and `br_` for bottom-right.
"""
self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()
self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()
self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()
for _ in range(self.num_feat_levels):
self.tl_pool.append(
BiCornerPool(
self.in_channels, ['top', 'left'],
out_channels=self.in_channels))
self.br_pool.append(
BiCornerPool(
self.in_channels, ['bottom', 'right'],
out_channels=self.in_channels))
self.tl_heat.append(
self._make_layers(
out_channels=self.num_classes,
in_channels=self.in_channels))
self.br_heat.append(
self._make_layers(
out_channels=self.num_classes,
in_channels=self.in_channels))
self.tl_off.append(
self._make_layers(
out_channels=self.corner_offset_channels,
in_channels=self.in_channels))
self.br_off.append(
self._make_layers(
out_channels=self.corner_offset_channels,
in_channels=self.in_channels))
def _init_corner_emb_layers(self):
"""Initialize corner embedding layers.
Only include corner embedding branch with two parts: prefix `tl_` for
top-left and `br_` for bottom-right.
"""
self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList()
for _ in range(self.num_feat_levels):
self.tl_emb.append(
self._make_layers(
out_channels=self.corner_emb_channels,
in_channels=self.in_channels))
self.br_emb.append(
self._make_layers(
out_channels=self.corner_emb_channels,
in_channels=self.in_channels))
def _init_layers(self):
"""Initialize layers for CornerHead.
Including two parts: corner keypoint layers and corner embedding layers
"""
self._init_corner_kpt_layers()
if self.with_corner_emb:
self._init_corner_emb_layers()
def init_weights(self):
super(CornerHead, self).init_weights()
bias_init = bias_init_with_prob(0.1)
for i in range(self.num_feat_levels):
# The initialization of parameters are different between
# nn.Conv2d and ConvModule. Our experiments show that
# using the original initialization of nn.Conv2d increases
# the final mAP by about 0.2%
self.tl_heat[i][-1].conv.reset_parameters()
self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)
self.br_heat[i][-1].conv.reset_parameters()
self.br_heat[i][-1].conv.bias.data.fill_(bias_init)
self.tl_off[i][-1].conv.reset_parameters()
self.br_off[i][-1].conv.reset_parameters()
if self.with_corner_emb:
self.tl_emb[i][-1].conv.reset_parameters()
self.br_emb[i][-1].conv.reset_parameters()
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of corner heatmaps, offset heatmaps and
embedding heatmaps.
- tl_heats (list[Tensor]): Top-left corner heatmaps for all
levels, each is a 4D-tensor, the channels number is
num_classes.
- br_heats (list[Tensor]): Bottom-right corner heatmaps for all
levels, each is a 4D-tensor, the channels number is
num_classes.
- tl_embs (list[Tensor] | list[None]): Top-left embedding
heatmaps for all levels, each is a 4D-tensor or None.
If not None, the channels number is corner_emb_channels.
- br_embs (list[Tensor] | list[None]): Bottom-right embedding
heatmaps for all levels, each is a 4D-tensor or None.
If not None, the channels number is corner_emb_channels.
- tl_offs (list[Tensor]): Top-left offset heatmaps for all
levels, each is a 4D-tensor. The channels number is
corner_offset_channels.
- br_offs (list[Tensor]): Bottom-right offset heatmaps for all
levels, each is a 4D-tensor. The channels number is
corner_offset_channels.
"""
lvl_ind = list(range(self.num_feat_levels))
return multi_apply(self.forward_single, feats, lvl_ind)
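    # Illustrative usage sketch (added for clarity; the shapes are
    # hypothetical and assume in_channels=256 with two feature levels):
    #   head = CornerHead(num_classes=80, in_channels=256, num_feat_levels=2)
    #   feats = [torch.rand(1, 256, 128, 128) for _ in range(2)]
    #   tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs = head(feats)
    #   tl_heats[0].shape  # -> torch.Size([1, 80, 128, 128])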
def forward_single(self, x, lvl_ind, return_pool=False):
"""Forward feature of a single level.
Args:
x (Tensor): Feature of a single level.
lvl_ind (int): Level index of current feature.
return_pool (bool): Return corner pool feature or not.
Returns:
tuple[Tensor]: A tuple of CornerHead's output for current feature
level. Containing the following Tensors:
- tl_heat (Tensor): Predicted top-left corner heatmap.
- br_heat (Tensor): Predicted bottom-right corner heatmap.
- tl_emb (Tensor | None): Predicted top-left embedding heatmap.
None for `self.with_corner_emb == False`.
- br_emb (Tensor | None): Predicted bottom-right embedding
heatmap. None for `self.with_corner_emb == False`.
- tl_off (Tensor): Predicted top-left offset heatmap.
- br_off (Tensor): Predicted bottom-right offset heatmap.
            - tl_pool (Tensor): Top-left corner pool feature. Only
              returned when `return_pool` is True.
            - br_pool (Tensor): Bottom-right corner pool feature. Only
              returned when `return_pool` is True.
"""
tl_pool = self.tl_pool[lvl_ind](x)
tl_heat = self.tl_heat[lvl_ind](tl_pool)
br_pool = self.br_pool[lvl_ind](x)
br_heat = self.br_heat[lvl_ind](br_pool)
tl_emb, br_emb = None, None
if self.with_corner_emb:
tl_emb = self.tl_emb[lvl_ind](tl_pool)
br_emb = self.br_emb[lvl_ind](br_pool)
tl_off = self.tl_off[lvl_ind](tl_pool)
br_off = self.br_off[lvl_ind](br_pool)
result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]
if return_pool:
result_list.append(tl_pool)
result_list.append(br_pool)
return result_list
def get_targets(self,
gt_bboxes,
gt_labels,
feat_shape,
img_shape,
with_corner_emb=False,
with_guiding_shift=False,
with_centripetal_shift=False):
"""Generate corner targets.
Including corner heatmap, corner offset.
Optional: corner embedding, corner guiding shift, centripetal shift.
For CornerNet, we generate corner heatmap, corner offset and corner
embedding from this function.
For CentripetalNet, we generate corner heatmap, corner offset, guiding
shift and centripetal shift from this function.
Args:
gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each
has shape (num_gt, 4).
gt_labels (list[Tensor]): Ground truth labels of each box, each has
shape (num_gt,).
feat_shape (list[int]): Shape of output feature,
[batch, channel, height, width].
img_shape (list[int]): Shape of input image,
[height, width, channel].
with_corner_emb (bool): Generate corner embedding target or not.
Default: False.
with_guiding_shift (bool): Generate guiding shift target or not.
Default: False.
with_centripetal_shift (bool): Generate centripetal shift target or
not. Default: False.
Returns:
dict: Ground truth of corner heatmap, corner offset, corner
embedding, guiding shift and centripetal shift. Containing the
following keys:
- topleft_heatmap (Tensor): Ground truth top-left corner
heatmap.
- bottomright_heatmap (Tensor): Ground truth bottom-right
corner heatmap.
- topleft_offset (Tensor): Ground truth top-left corner offset.
- bottomright_offset (Tensor): Ground truth bottom-right corner
offset.
            - corner_embedding (list[list[list[int]]]): Ground truth corner
              embedding. Only present when `with_corner_emb` is True.
            - topleft_guiding_shift (Tensor): Ground truth top-left corner
              guiding shift. Only present when `with_guiding_shift` is True.
            - bottomright_guiding_shift (Tensor): Ground truth bottom-right
              corner guiding shift. Only present when `with_guiding_shift`
              is True.
            - topleft_centripetal_shift (Tensor): Ground truth top-left
              corner centripetal shift. Only present when
              `with_centripetal_shift` is True.
            - bottomright_centripetal_shift (Tensor): Ground truth
              bottom-right corner centripetal shift. Only present when
              `with_centripetal_shift` is True.
"""
batch_size, _, height, width = feat_shape
img_h, img_w = img_shape[:2]
width_ratio = float(width / img_w)
height_ratio = float(height / img_h)
gt_tl_heatmap = gt_bboxes[-1].new_zeros(
[batch_size, self.num_classes, height, width])
gt_br_heatmap = gt_bboxes[-1].new_zeros(
[batch_size, self.num_classes, height, width])
gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
if with_corner_emb:
match = []
# Guiding shift is a kind of offset, from center to corner
if with_guiding_shift:
gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
gt_br_guiding_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
# Centripetal shift is also a kind of offset, from center to corner
# and normalized by log.
if with_centripetal_shift:
gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
for batch_id in range(batch_size):
# Ground truth of corner embedding per image is a list of coord set
corner_match = []
for box_id in range(len(gt_labels[batch_id])):
left, top, right, bottom = gt_bboxes[batch_id][box_id]
center_x = (left + right) / 2.0
center_y = (top + bottom) / 2.0
label = gt_labels[batch_id][box_id]
# Use coords in the feature level to generate ground truth
scale_left = left * width_ratio
scale_right = right * width_ratio
scale_top = top * height_ratio
scale_bottom = bottom * height_ratio
scale_center_x = center_x * width_ratio
scale_center_y = center_y * height_ratio
# Int coords on feature map/ground truth tensor
left_idx = int(min(scale_left, width - 1))
right_idx = int(min(scale_right, width - 1))
top_idx = int(min(scale_top, height - 1))
bottom_idx = int(min(scale_bottom, height - 1))
# Generate gaussian heatmap
scale_box_width = ceil(scale_right - scale_left)
scale_box_height = ceil(scale_bottom - scale_top)
radius = gaussian_radius((scale_box_height, scale_box_width),
min_overlap=0.3)
radius = max(0, int(radius))
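                # `gaussian_radius` picks the largest radius such that a
                # corner placed anywhere within it still forms a box with at
                # least `min_overlap` (0.3 here) IoU against the ground
                # truth; both corners of a box share this radius.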
gt_tl_heatmap[batch_id, label] = gen_gaussian_target(
gt_tl_heatmap[batch_id, label], [left_idx, top_idx],
radius)
gt_br_heatmap[batch_id, label] = gen_gaussian_target(
gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],
radius)
# Generate corner offset
left_offset = scale_left - left_idx
top_offset = scale_top - top_idx
right_offset = scale_right - right_idx
bottom_offset = scale_bottom - bottom_idx
gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset
gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset
gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset
gt_br_offset[batch_id, 1, bottom_idx,
right_idx] = bottom_offset
# Generate corner embedding
if with_corner_emb:
corner_match.append([[top_idx, left_idx],
[bottom_idx, right_idx]])
# Generate guiding shift
if with_guiding_shift:
gt_tl_guiding_shift[batch_id, 0, top_idx,
left_idx] = scale_center_x - left_idx
gt_tl_guiding_shift[batch_id, 1, top_idx,
left_idx] = scale_center_y - top_idx
gt_br_guiding_shift[batch_id, 0, bottom_idx,
right_idx] = right_idx - scale_center_x
gt_br_guiding_shift[
batch_id, 1, bottom_idx,
right_idx] = bottom_idx - scale_center_y
# Generate centripetal shift
if with_centripetal_shift:
gt_tl_centripetal_shift[batch_id, 0, top_idx,
left_idx] = log(scale_center_x -
scale_left)
gt_tl_centripetal_shift[batch_id, 1, top_idx,
left_idx] = log(scale_center_y -
scale_top)
gt_br_centripetal_shift[batch_id, 0, bottom_idx,
right_idx] = log(scale_right -
scale_center_x)
gt_br_centripetal_shift[batch_id, 1, bottom_idx,
right_idx] = log(scale_bottom -
scale_center_y)
if with_corner_emb:
match.append(corner_match)
target_result = dict(
topleft_heatmap=gt_tl_heatmap,
topleft_offset=gt_tl_offset,
bottomright_heatmap=gt_br_heatmap,
bottomright_offset=gt_br_offset)
if with_corner_emb:
target_result.update(corner_embedding=match)
if with_guiding_shift:
target_result.update(
topleft_guiding_shift=gt_tl_guiding_shift,
bottomright_guiding_shift=gt_br_guiding_shift)
if with_centripetal_shift:
target_result.update(
topleft_centripetal_shift=gt_tl_centripetal_shift,
bottomright_centripetal_shift=gt_br_centripetal_shift)
return target_result
def loss(self,
tl_heats,
br_heats,
tl_embs,
br_embs,
tl_offs,
br_offs,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
tl_heats (list[Tensor]): Top-left corner heatmaps for each level
with shape (N, num_classes, H, W).
br_heats (list[Tensor]): Bottom-right corner heatmaps for each
level with shape (N, num_classes, H, W).
tl_embs (list[Tensor]): Top-left corner embeddings for each level
with shape (N, corner_emb_channels, H, W).
br_embs (list[Tensor]): Bottom-right corner embeddings for each
level with shape (N, corner_emb_channels, H, W).
tl_offs (list[Tensor]): Top-left corner offsets for each level
with shape (N, corner_offset_channels, H, W).
br_offs (list[Tensor]): Bottom-right corner offsets for each level
with shape (N, corner_offset_channels, H, W).
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [left, top, right, bottom] format.
gt_labels (list[Tensor]): Class indices corresponding to each box.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components. Containing the
following losses:
- det_loss (list[Tensor]): Corner keypoint losses of all
feature levels.
- pull_loss (list[Tensor]): Part one of AssociativeEmbedding
losses of all feature levels.
- push_loss (list[Tensor]): Part two of AssociativeEmbedding
losses of all feature levels.
- off_loss (list[Tensor]): Corner offset losses of all feature
levels.
"""
targets = self.get_targets(
gt_bboxes,
gt_labels,
tl_heats[-1].shape,
img_metas[0]['pad_shape'],
with_corner_emb=self.with_corner_emb)
mlvl_targets = [targets for _ in range(self.num_feat_levels)]
det_losses, pull_losses, push_losses, off_losses = multi_apply(
self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs,
br_offs, mlvl_targets)
loss_dict = dict(det_loss=det_losses, off_loss=off_losses)
if self.with_corner_emb:
loss_dict.update(pull_loss=pull_losses, push_loss=push_losses)
return loss_dict
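    # Shape of the returned dict (for illustration, assuming two feature
    # levels with corner embedding enabled; each value is a list with one
    # loss tensor per level):
    #   {'det_loss': [l0, l1], 'off_loss': [l0, l1],
    #    'pull_loss': [l0, l1], 'push_loss': [l0, l1]}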
def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
targets):
"""Compute losses for single level.
Args:
tl_hmp (Tensor): Top-left corner heatmap for current level with
shape (N, num_classes, H, W).
br_hmp (Tensor): Bottom-right corner heatmap for current level with
shape (N, num_classes, H, W).
tl_emb (Tensor): Top-left corner embedding for current level with
shape (N, corner_emb_channels, H, W).
br_emb (Tensor): Bottom-right corner embedding for current level
with shape (N, corner_emb_channels, H, W).
tl_off (Tensor): Top-left corner offset for current level with
shape (N, corner_offset_channels, H, W).
br_off (Tensor): Bottom-right corner offset for current level with
shape (N, corner_offset_channels, H, W).
targets (dict): Corner target generated by `get_targets`.
Returns:
tuple[torch.Tensor]: Losses of the head's different branches
containing the following losses:
- det_loss (Tensor): Corner keypoint loss.
- pull_loss (Tensor): Part one of AssociativeEmbedding loss.
- push_loss (Tensor): Part two of AssociativeEmbedding loss.
- off_loss (Tensor): Corner offset loss.
"""
gt_tl_hmp = targets['topleft_heatmap']
gt_br_hmp = targets['bottomright_heatmap']
gt_tl_off = targets['topleft_offset']
gt_br_off = targets['bottomright_offset']
gt_embedding = targets['corner_embedding']
# Detection loss
tl_det_loss = self.loss_heatmap(
tl_hmp.sigmoid(),
gt_tl_hmp,
avg_factor=max(1,
gt_tl_hmp.eq(1).sum()))
br_det_loss = self.loss_heatmap(
br_hmp.sigmoid(),
gt_br_hmp,
avg_factor=max(1,
gt_br_hmp.eq(1).sum()))
det_loss = (tl_det_loss + br_det_loss) / 2.0
# AssociativeEmbedding loss
if self.with_corner_emb and self.loss_embedding is not None:
pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,
gt_embedding)
else:
pull_loss, push_loss = None, None
# Offset loss
# We only compute the offset loss at the real corner position.
# The value of real corner would be 1 in heatmap ground truth.
# The mask is computed in class agnostic mode and its shape is
# batch * 1 * width * height.
tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
gt_tl_hmp)
br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
gt_br_hmp)
tl_off_loss = self.loss_offset(
tl_off,
gt_tl_off,
tl_off_mask,
avg_factor=max(1, tl_off_mask.sum()))
br_off_loss = self.loss_offset(
br_off,
gt_br_off,
br_off_mask,
avg_factor=max(1, br_off_mask.sum()))
off_loss = (tl_off_loss + br_off_loss) / 2.0
return det_loss, pull_loss, push_loss, off_loss
def get_bboxes(self,
tl_heats,
br_heats,
tl_embs,
br_embs,
tl_offs,
br_offs,
img_metas,
rescale=False,
with_nms=True):
"""Transform network output for a batch into bbox predictions.
Args:
tl_heats (list[Tensor]): Top-left corner heatmaps for each level
with shape (N, num_classes, H, W).
br_heats (list[Tensor]): Bottom-right corner heatmaps for each
level with shape (N, num_classes, H, W).
tl_embs (list[Tensor]): Top-left corner embeddings for each level
with shape (N, corner_emb_channels, H, W).
br_embs (list[Tensor]): Bottom-right corner embeddings for each
level with shape (N, corner_emb_channels, H, W).
tl_offs (list[Tensor]): Top-left corner offsets for each level
with shape (N, corner_offset_channels, H, W).
br_offs (list[Tensor]): Bottom-right corner offsets for each level
with shape (N, corner_offset_channels, H, W).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
"""
assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
result_list = []
for img_id in range(len(img_metas)):
result_list.append(
self._get_bboxes_single(
tl_heats[-1][img_id:img_id + 1, :],
br_heats[-1][img_id:img_id + 1, :],
tl_offs[-1][img_id:img_id + 1, :],
br_offs[-1][img_id:img_id + 1, :],
img_metas[img_id],
tl_emb=tl_embs[-1][img_id:img_id + 1, :],
br_emb=br_embs[-1][img_id:img_id + 1, :],
rescale=rescale,
with_nms=with_nms))
return result_list
def _get_bboxes_single(self,
tl_heat,
br_heat,
tl_off,
br_off,
img_meta,
tl_emb=None,
br_emb=None,
tl_centripetal_shift=None,
br_centripetal_shift=None,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into bbox predictions.
Args:
tl_heat (Tensor): Top-left corner heatmap for current level with
shape (N, num_classes, H, W).
br_heat (Tensor): Bottom-right corner heatmap for current level
with shape (N, num_classes, H, W).
tl_off (Tensor): Top-left corner offset for current level with
shape (N, corner_offset_channels, H, W).
br_off (Tensor): Bottom-right corner offset for current level with
shape (N, corner_offset_channels, H, W).
img_meta (dict): Meta information of current image, e.g.,
image size, scaling factor, etc.
tl_emb (Tensor): Top-left corner embedding for current level with
shape (N, corner_emb_channels, H, W).
br_emb (Tensor): Bottom-right corner embedding for current level
with shape (N, corner_emb_channels, H, W).
tl_centripetal_shift: Top-left corner's centripetal shift for
current level with shape (N, 2, H, W).
br_centripetal_shift: Bottom-right corner's centripetal shift for
current level with shape (N, 2, H, W).
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
"""
if isinstance(img_meta, (list, tuple)):
img_meta = img_meta[0]
batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
tl_heat=tl_heat.sigmoid(),
br_heat=br_heat.sigmoid(),
tl_off=tl_off,
br_off=br_off,
tl_emb=tl_emb,
br_emb=br_emb,
tl_centripetal_shift=tl_centripetal_shift,
br_centripetal_shift=br_centripetal_shift,
img_meta=img_meta,
k=self.test_cfg.corner_topk,
kernel=self.test_cfg.local_maximum_kernel,
distance_threshold=self.test_cfg.distance_threshold)
if rescale:
batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor'])
bboxes = batch_bboxes.view([-1, 4])
scores = batch_scores.view(-1)
clses = batch_clses.view(-1)
detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1)
keepinds = (detections[:, -1] > -0.1)
detections = detections[keepinds]
labels = clses[keepinds]
if with_nms:
detections, labels = self._bboxes_nms(detections, labels,
self.test_cfg)
return detections, labels
def _bboxes_nms(self, bboxes, labels, cfg):
if 'nms_cfg' in cfg:
            import warnings
            warnings.warn('nms_cfg in test_cfg will be deprecated. '
                          'Please rename it as nms')
if 'nms' not in cfg:
cfg.nms = cfg.nms_cfg
if labels.numel() > 0:
max_num = cfg.max_per_img
bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:,
-1].contiguous(),
labels, cfg.nms)
if max_num > 0:
bboxes = bboxes[:max_num]
labels = labels[keep][:max_num]
return bboxes, labels
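    # A minimal `test_cfg` sketch for reference (the values below are
    # illustrative, not authoritative defaults):
    #   test_cfg = dict(
    #       corner_topk=100,
    #       local_maximum_kernel=3,
    #       distance_threshold=0.5,
    #       nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'),
    #       max_per_img=100)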
def decode_heatmap(self,
tl_heat,
br_heat,
tl_off,
br_off,
tl_emb=None,
br_emb=None,
tl_centripetal_shift=None,
br_centripetal_shift=None,
img_meta=None,
k=100,
kernel=3,
distance_threshold=0.5,
num_dets=1000):
"""Transform outputs for a single batch item into raw bbox predictions.
Args:
tl_heat (Tensor): Top-left corner heatmap for current level with
shape (N, num_classes, H, W).
br_heat (Tensor): Bottom-right corner heatmap for current level
with shape (N, num_classes, H, W).
tl_off (Tensor): Top-left corner offset for current level with
shape (N, corner_offset_channels, H, W).
br_off (Tensor): Bottom-right corner offset for current level with
shape (N, corner_offset_channels, H, W).
tl_emb (Tensor | None): Top-left corner embedding for current
level with shape (N, corner_emb_channels, H, W).
br_emb (Tensor | None): Bottom-right corner embedding for current
level with shape (N, corner_emb_channels, H, W).
tl_centripetal_shift (Tensor | None): Top-left centripetal shift
for current level with shape (N, 2, H, W).
br_centripetal_shift (Tensor | None): Bottom-right centripetal
shift for current level with shape (N, 2, H, W).
img_meta (dict): Meta information of current image, e.g.,
image size, scaling factor, etc.
k (int): Get top k corner keypoints from heatmap.
kernel (int): Max pooling kernel for extract local maximum pixels.
distance_threshold (float): Distance threshold. Top-left and
bottom-right corner keypoints with feature distance less than
the threshold will be regarded as keypoints from same object.
num_dets (int): Num of raw boxes before doing nms.
Returns:
tuple[torch.Tensor]: Decoded output of CornerHead, containing the
following Tensors:
- bboxes (Tensor): Coords of each box.
- scores (Tensor): Scores of each box.
- clses (Tensor): Categories of each box.
"""
with_embedding = tl_emb is not None and br_emb is not None
with_centripetal_shift = (
tl_centripetal_shift is not None
and br_centripetal_shift is not None)
assert with_embedding + with_centripetal_shift == 1
batch, _, height, width = tl_heat.size()
if torch.onnx.is_in_onnx_export():
inp_h, inp_w = img_meta['pad_shape_for_onnx'][:2]
else:
inp_h, inp_w, _ = img_meta['pad_shape']
# perform nms on heatmaps
tl_heat = get_local_maximum(tl_heat, kernel=kernel)
br_heat = get_local_maximum(br_heat, kernel=kernel)
tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = get_topk_from_heatmap(
tl_heat, k=k)
br_scores, br_inds, br_clses, br_ys, br_xs = get_topk_from_heatmap(
br_heat, k=k)
        # We use repeat instead of expand here because expand is a
        # shallow-copy function and could therefore cause unexpected test
        # results. Using expand decreases mAP by about 10% during testing
        # compared with repeat.
tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)
tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)
br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)
br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)
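        # After the view/repeat above, the tl_* and br_* tensors form a
        # (batch, k, k) grid pairing every top-left candidate with every
        # bottom-right candidate; all later scoring and filtering operates
        # elementwise on this k x k pairing.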
tl_off = transpose_and_gather_feat(tl_off, tl_inds)
tl_off = tl_off.view(batch, k, 1, 2)
br_off = transpose_and_gather_feat(br_off, br_inds)
br_off = br_off.view(batch, 1, k, 2)
tl_xs = tl_xs + tl_off[..., 0]
tl_ys = tl_ys + tl_off[..., 1]
br_xs = br_xs + br_off[..., 0]
br_ys = br_ys + br_off[..., 1]
if with_centripetal_shift:
tl_centripetal_shift = transpose_and_gather_feat(
tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()
br_centripetal_shift = transpose_and_gather_feat(
br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()
tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]
tl_ctys = tl_ys + tl_centripetal_shift[..., 1]
br_ctxs = br_xs - br_centripetal_shift[..., 0]
br_ctys = br_ys - br_centripetal_shift[..., 1]
# all possible boxes based on top k corners (ignoring class)
tl_xs *= (inp_w / width)
tl_ys *= (inp_h / height)
br_xs *= (inp_w / width)
br_ys *= (inp_h / height)
if with_centripetal_shift:
tl_ctxs *= (inp_w / width)
tl_ctys *= (inp_h / height)
br_ctxs *= (inp_w / width)
br_ctys *= (inp_h / height)
x_off, y_off = 0, 0 # no crop
if not torch.onnx.is_in_onnx_export():
            # Since `RandomCenterCropPad` is done on CPU with numpy and is
            # not dynamically traceable when exporting to ONNX, 'border'
            # does not appear as a key in 'img_meta'. As a temporary
            # solution, we move this 'border' handling to the postprocessing
            # step that runs after the export to ONNX has finished, which is
            # handled in `mmdet/core/export/model_wrappers.py`. Although the
            # pytorch and exported onnx models differ slightly, the difference
            # can be ignored since they achieve comparable performance (e.g.
            # 40.4 vs 40.6 on COCO val2017, for CornerNet without test-time
            # flip).
if 'border' in img_meta:
x_off = img_meta['border'][2]
y_off = img_meta['border'][0]
tl_xs -= x_off
tl_ys -= y_off
br_xs -= x_off
br_ys -= y_off
zeros = tl_xs.new_zeros(*tl_xs.size())
tl_xs = torch.where(tl_xs > 0.0, tl_xs, zeros)
tl_ys = torch.where(tl_ys > 0.0, tl_ys, zeros)
br_xs = torch.where(br_xs > 0.0, br_xs, zeros)
br_ys = torch.where(br_ys > 0.0, br_ys, zeros)
bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)
area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()
if with_centripetal_shift:
tl_ctxs -= x_off
tl_ctys -= y_off
br_ctxs -= x_off
br_ctys -= y_off
tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)
tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)
br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)
br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)
ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),
dim=3)
area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()
rcentral = torch.zeros_like(ct_bboxes)
# magic nums from paper section 4.1
mu = torch.ones_like(area_bboxes) / 2.4
mu[area_bboxes > 3500] = 1 / 2.1 # large bbox have smaller mu
bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2
bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2
rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -
bboxes[..., 0]) / 2
rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -
bboxes[..., 1]) / 2
rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -
bboxes[..., 0]) / 2
rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -
bboxes[..., 1]) / 2
area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *
(rcentral[..., 3] - rcentral[..., 1])).abs()
dists = area_ct_bboxes / area_rcentral
tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (
ct_bboxes[..., 0] >= rcentral[..., 2])
tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (
ct_bboxes[..., 1] >= rcentral[..., 3])
br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (
ct_bboxes[..., 2] >= rcentral[..., 2])
br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (
ct_bboxes[..., 3] >= rcentral[..., 3])
if with_embedding:
tl_emb = transpose_and_gather_feat(tl_emb, tl_inds)
tl_emb = tl_emb.view(batch, k, 1)
br_emb = transpose_and_gather_feat(br_emb, br_inds)
br_emb = br_emb.view(batch, 1, k)
dists = torch.abs(tl_emb - br_emb)
tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)
br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)
scores = (tl_scores + br_scores) / 2 # scores for all possible boxes
# tl and br should have same class
tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)
br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)
cls_inds = (tl_clses != br_clses)
# reject boxes based on distances
dist_inds = dists > distance_threshold
# reject boxes based on widths and heights
width_inds = (br_xs <= tl_xs)
height_inds = (br_ys <= tl_ys)
        # We do not use `scores[cls_inds]` here; `torch.where` is used
        # instead. Only 1-D indices of type 'tensor(bool)' are supported
        # when exporting to ONNX, so any other bool index with more
        # dimensions (e.g. a 2-D bool tensor) as a node input is invalid.
negative_scores = -1 * torch.ones_like(scores)
scores = torch.where(cls_inds, negative_scores, scores)
scores = torch.where(width_inds, negative_scores, scores)
scores = torch.where(height_inds, negative_scores, scores)
scores = torch.where(dist_inds, negative_scores, scores)
if with_centripetal_shift:
scores[tl_ctx_inds] = -1
scores[tl_cty_inds] = -1
scores[br_ctx_inds] = -1
scores[br_cty_inds] = -1
scores = scores.view(batch, -1)
scores, inds = torch.topk(scores, num_dets)
scores = scores.unsqueeze(2)
bboxes = bboxes.view(batch, -1, 4)
bboxes = gather_feat(bboxes, inds)
clses = tl_clses.contiguous().view(batch, -1, 1)
clses = gather_feat(clses, inds).float()
return bboxes, scores, clses
def onnx_export(self,
tl_heats,
br_heats,
tl_embs,
br_embs,
tl_offs,
br_offs,
img_metas,
rescale=False,
with_nms=True):
"""Transform network output for a batch into bbox predictions.
Args:
tl_heats (list[Tensor]): Top-left corner heatmaps for each level
with shape (N, num_classes, H, W).
br_heats (list[Tensor]): Bottom-right corner heatmaps for each
level with shape (N, num_classes, H, W).
tl_embs (list[Tensor]): Top-left corner embeddings for each level
with shape (N, corner_emb_channels, H, W).
br_embs (list[Tensor]): Bottom-right corner embeddings for each
level with shape (N, corner_emb_channels, H, W).
tl_offs (list[Tensor]): Top-left corner offsets for each level
with shape (N, corner_offset_channels, H, W).
br_offs (list[Tensor]): Bottom-right corner offsets for each level
with shape (N, corner_offset_channels, H, W).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
tuple[Tensor, Tensor]: First tensor bboxes with shape
[N, num_det, 5], 5 arrange as (x1, y1, x2, y2, score)
and second element is class labels of shape [N, num_det].
"""
assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(
img_metas) == 1
result_list = []
for img_id in range(len(img_metas)):
result_list.append(
self._get_bboxes_single(
tl_heats[-1][img_id:img_id + 1, :],
br_heats[-1][img_id:img_id + 1, :],
tl_offs[-1][img_id:img_id + 1, :],
br_offs[-1][img_id:img_id + 1, :],
img_metas[img_id],
tl_emb=tl_embs[-1][img_id:img_id + 1, :],
br_emb=br_embs[-1][img_id:img_id + 1, :],
rescale=rescale,
with_nms=with_nms))
detections, labels = result_list[0]
# batch_size 1 here, [1, num_det, 5], [1, num_det]
return detections.unsqueeze(0), labels.unsqueeze(0)
# File: /hue/desktop/core/ext-py/kombu-2.5.10/kombu/utils/__init__.py
# (repo: OpenPOWER-BigData/HDP-hue; licenses: Apache-2.0, BSD-3-Clause)
"""
kombu.utils
===========
Internal utilities.
"""
from __future__ import absolute_import
import importlib
import random
import sys
from contextlib import contextmanager
from itertools import count, repeat
from time import sleep
from uuid import UUID, uuid4 as _uuid4, _uuid_generate_random
from .encoding import safe_repr as _safe_repr
try:
import ctypes
except:
ctypes = None # noqa
__all__ = ['EqualityDict', 'say', 'uuid', 'kwdict', 'maybe_list',
'fxrange', 'fxrangemax', 'retry_over_time',
'emergency_dump_state', 'cached_property',
'reprkwargs', 'reprcall', 'nested']
def symbol_by_name(name, aliases={}, imp=None, package=None,
sep='.', default=None, **kwargs):
"""Get symbol by qualified name.
The name should be the full dot-separated path to the class::
modulename.ClassName
Example::
celery.concurrency.processes.TaskPool
^- class name
or using ':' to separate module and symbol::
celery.concurrency.processes:TaskPool
If `aliases` is provided, a dict containing short name/long name
mappings, the name is looked up in the aliases first.
Examples:
>>> symbol_by_name('celery.concurrency.processes.TaskPool')
<class 'celery.concurrency.processes.TaskPool'>
>>> symbol_by_name('default', {
... 'default': 'celery.concurrency.processes.TaskPool'})
<class 'celery.concurrency.processes.TaskPool'>
# Does not try to look up non-string names.
>>> from celery.concurrency.processes import TaskPool
>>> symbol_by_name(TaskPool) is TaskPool
True
"""
if imp is None:
imp = importlib.import_module
if not isinstance(name, basestring):
return name # already a class
name = aliases.get(name) or name
sep = ':' if ':' in name else sep
module_name, _, cls_name = name.rpartition(sep)
if not module_name:
cls_name, module_name = None, package if package else cls_name
try:
try:
module = imp(module_name, package=package, **kwargs)
except ValueError, exc:
raise ValueError, ValueError(
"Couldn't import %r: %s" % (name, exc)), sys.exc_info()[2]
return getattr(module, cls_name) if cls_name else module
except (ImportError, AttributeError):
if default is None:
raise
return default
def eqhash(o):
try:
return o.__eqhash__()
except AttributeError:
return hash(o)
class EqualityDict(dict):
def __getitem__(self, key):
h = eqhash(key)
if h not in self:
return self.__missing__(key)
return dict.__getitem__(self, h)
def __setitem__(self, key, value):
return dict.__setitem__(self, eqhash(key), value)
def __delitem__(self, key):
return dict.__delitem__(self, eqhash(key))
def say(m, *s):
sys.stderr.write(str(m) % s + '\n')
def uuid4():
# Workaround for http://bugs.python.org/issue4607
if ctypes and _uuid_generate_random: # pragma: no cover
buffer = ctypes.create_string_buffer(16)
_uuid_generate_random(buffer)
return UUID(bytes=buffer.raw)
return _uuid4()
def uuid():
"""Generate a unique id, having - hopefully - a very small chance of
collision.
For now this is provided by :func:`uuid.uuid4`.
"""
return str(uuid4())
gen_unique_id = uuid
if sys.version_info >= (2, 6, 5):
def kwdict(kwargs):
return kwargs
else:
def kwdict(kwargs): # pragma: no cover # noqa
"""Make sure keyword arguments are not in Unicode.
This should be fixed in newer Python versions,
see: http://bugs.python.org/issue4978.
"""
return dict((key.encode('utf-8'), value)
for key, value in kwargs.items())
def maybe_list(v):
if v is None:
return []
if hasattr(v, '__iter__'):
return v
return [v]
def fxrange(start=1.0, stop=None, step=1.0, repeatlast=False):
cur = start * 1.0
while 1:
if not stop or cur <= stop:
yield cur
cur += step
else:
if not repeatlast:
break
yield cur - step
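# For example, list(fxrange(1.0, stop=5.0, step=2.0)) yields [1.0, 3.0, 5.0];
# with repeatlast=True the generator keeps yielding the last value (5.0).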
def fxrangemax(start=1.0, stop=None, step=1.0, max=100.0):
sum_, cur = 0, start * 1.0
while 1:
if sum_ >= max:
break
yield cur
if stop:
cur = min(cur + step, stop)
else:
cur += step
sum_ += cur
def retry_over_time(fun, catch, args=[], kwargs={}, errback=None,
max_retries=None, interval_start=2, interval_step=2,
interval_max=30, callback=None):
"""Retry the function over and over until max retries is exceeded.
For each retry we sleep a for a while before we try again, this interval
is increased for every retry until the max seconds is reached.
:param fun: The function to try
:param catch: Exceptions to catch, can be either tuple or a single
exception class.
:keyword args: Positional arguments passed on to the function.
:keyword kwargs: Keyword arguments passed on to the function.
:keyword errback: Callback for when an exception in ``catch`` is raised.
The callback must take two arguments: ``exc`` and ``interval``, where
``exc`` is the exception instance, and ``interval`` is the time in
        seconds to sleep next.
:keyword max_retries: Maximum number of retries before we give up.
If this is not set, we will retry forever.
:keyword interval_start: How long (in seconds) we start sleeping between
retries.
:keyword interval_step: By how much the interval is increased for each
retry.
:keyword interval_max: Maximum number of seconds to sleep between retries.
"""
retries = 0
interval_range = fxrange(interval_start,
interval_max + interval_start,
interval_step, repeatlast=True)
for retries in count():
try:
return fun(*args, **kwargs)
except catch, exc:
if max_retries is not None and retries > max_retries:
raise
if callback:
callback()
tts = (errback(exc, interval_range, retries) if errback
else next(interval_range))
if tts:
for i in range(int(tts / interval_step)):
if callback:
callback()
sleep(interval_step)
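# Usage sketch (`establish_connection` and the error classes below are
# hypothetical):
#   conn = retry_over_time(establish_connection, (IOError, OSError),
#                          max_retries=5, interval_start=2, interval_step=2)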
def emergency_dump_state(state, open_file=open, dump=None):
from pprint import pformat
from tempfile import mktemp
if dump is None:
import pickle
dump = pickle.dump
persist = mktemp()
say('EMERGENCY DUMP STATE TO FILE -> %s <-' % persist)
fh = open_file(persist, 'w')
try:
try:
dump(state, fh, protocol=0)
except Exception, exc:
say('Cannot pickle state: %r. Fallback to pformat.' % (exc, ))
fh.write(pformat(state))
finally:
fh.flush()
fh.close()
return persist
class cached_property(object):
"""Property descriptor that caches the return value
of the get function.
*Examples*
.. code-block:: python
@cached_property
def connection(self):
return Connection()
@connection.setter # Prepares stored value
def connection(self, value):
if value is None:
raise TypeError('Connection must be a connection')
return value
@connection.deleter
def connection(self, value):
# Additional action to do at del(self.attr)
if value is not None:
print('Connection %r deleted' % (value, ))
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.__get = fget
self.__set = fset
self.__del = fdel
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
def __get__(self, obj, type=None):
if obj is None:
return self
try:
return obj.__dict__[self.__name__]
except KeyError:
value = obj.__dict__[self.__name__] = self.__get(obj)
return value
def __set__(self, obj, value):
if obj is None:
return self
if self.__set is not None:
value = self.__set(obj, value)
obj.__dict__[self.__name__] = value
def __delete__(self, obj):
if obj is None:
return self
try:
value = obj.__dict__.pop(self.__name__)
except KeyError:
pass
else:
if self.__del is not None:
self.__del(obj, value)
def setter(self, fset):
return self.__class__(self.__get, fset, self.__del)
def deleter(self, fdel):
return self.__class__(self.__get, self.__set, fdel)
def reprkwargs(kwargs, sep=', ', fmt='%s=%s'):
return sep.join(fmt % (k, _safe_repr(v)) for k, v in kwargs.iteritems())
def reprcall(name, args=(), kwargs={}, sep=', '):
return '%s(%s%s%s)' % (name, sep.join(map(_safe_repr, args or ())),
(args and kwargs) and sep or '',
reprkwargs(kwargs, sep))
@contextmanager
def nested(*managers): # pragma: no cover
# flake8: noqa
"""Combine multiple context managers into a single nested
context manager."""
exits = []
vars = []
exc = (None, None, None)
try:
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
# Don't rely on sys.exc_info() still containing
# the right information. Another exception may
# have been raised and caught by an exit method
raise exc[0], exc[1], exc[2]
finally:
del(exc)
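# Usage sketch:
#   with nested(open('a.txt'), open('b.txt')) as (fa, fb):
#       ...  # both files are closed on exit, even if an exception is raised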
def shufflecycle(it):
it = list(it) # don't modify callers list
shuffle = random.shuffle
for _ in repeat(None):
shuffle(it)
yield it[0]
def entrypoints(namespace):
try:
from pkg_resources import iter_entry_points
except ImportError:
return iter([])
return ((ep, ep.load()) for ep in iter_entry_points(namespace))
class ChannelPromise(object):
def __init__(self, contract):
self.__contract__ = contract
def __call__(self):
try:
return self.__value__
except AttributeError:
value = self.__value__ = self.__contract__()
return value
def __repr__(self):
return '<promise: %r>' % (self(), )
def escape_regex(p, white=''):
    # what's up with re.escape? that code must be neglected or something
return ''.join(c if c.isalnum() or c in white
else ('\\000' if c == '\000' else '\\' + c)
for c in p)
# File: /configs/nowd/gc/res101_d_gc.py (repo: yinmh17/CCNet; license: MIT)
model = dict(
type='basenet',
pretrained='',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
block_num=[3, 4, 23, 3],
),
att=dict(
with_att=False,
type='glore',
att_stage=[False,False,True,False],
att_pos='after_add',
att_location=[[],[],[5,11,17],[]],
),
module=dict(
type='nl_nowd',
downsample=True,
whiten_type=[],
weight_init_scale=1.0,
with_gc=True,
with_nl=False,
nowd=[],
use_out=False,
out_bn=False,
)
)
train_cfg = dict(
batch_size=8,
learning_rate=1e-2,
momentum=0.9,
num_steps=60000,
power=0.9,
random_seed=1234,
restore_from='./dataset/resnet101-imagenet.pth',
save_num_images=2,
start_iters=0,
save_from=59500,
save_pred_every=100,
snapshot_dir='snapshots/',
weight_decay=0.0005
)
data_cfg = dict(
data_dir='cityscapes',
data_list='./dataset/list/cityscapes/train.lst',
ignore_label=255,
input_size='769,769',
num_classes=19,
)
#!/usr/bin/env python3
# File: /test/functional/sapling_changeaddresses.py (repo: ORO-mlm/UNO-Core; license: MIT)
# Copyright (c) 2019 The Zcash developers
# Copyright (c) 2020 The PIVX developers
# Copyright (c) 2021- The UNO developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import UnoTestFramework
from test_framework.util import *
from decimal import Decimal
# Test wallet change address behaviour
class WalletChangeAddressesTest(UnoTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
saplingUpgrade = ['-nuparams=v5_shield:1']
self.extra_args = [saplingUpgrade, saplingUpgrade]
def run_test(self):
self.nodes[0].generate(110)
# Obtain some transparent funds
midAddr = self.nodes[0].getnewshieldaddress()
# Shield almost all the balance
txid = self.nodes[0].shieldsendmany(get_coinstake_address(self.nodes[0]), [{"address": midAddr, "amount": Decimal(2400)}])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
taddrSource = self.nodes[0].getnewaddress()
for _ in range(6):
recipients = [{"address": taddrSource, "amount": Decimal('3')}]
txid = self.nodes[0].shieldsendmany(midAddr, recipients, 1)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
def check_change_taddr_reuse(target, isTargetShielded):
recipients = [{"address": target, "amount": Decimal('1')}]
# Send funds to recipient address twice
txid1 = self.nodes[0].shieldsendmany(taddrSource, recipients, 1)
self.nodes[1].generate(1)
self.sync_all()
txid2 = self.nodes[0].shieldsendmany(taddrSource, recipients, 1)
self.nodes[1].generate(1)
self.sync_all()
# Verify that the two transactions used different change addresses
tx1 = self.nodes[0].getrawtransaction(txid1, 1)
tx2 = self.nodes[0].getrawtransaction(txid2, 1)
assert_true(len(tx1['vout']) >= 1) # at least one output
assert_true(len(tx2['vout']) >= 1)
for i in range(len(tx1['vout'])):
tx1OutAddrs = tx1['vout'][i]['scriptPubKey']['addresses']
tx2OutAddrs = tx2['vout'][i]['scriptPubKey']['addresses']
if tx1OutAddrs != [target]:
print('Source address: %s' % taddrSource)
print('TX1 change address: %s' % tx1OutAddrs[0])
print('TX2 change address: %s' % tx2OutAddrs[0])
assert(tx1OutAddrs != tx2OutAddrs)
taddr = self.nodes[0].getnewaddress()
saplingAddr = self.nodes[0].getnewshieldaddress()
print()
print('Checking shieldsendmany(taddr->Sapling)')
check_change_taddr_reuse(saplingAddr, True)
print()
print('Checking shieldsendmany(taddr->taddr)')
check_change_taddr_reuse(taddr, False)
if __name__ == '__main__':
WalletChangeAddressesTest().main()
# File: /model/TestPar45_60_varydatasize/60/PowerPredEDFA_average.py
# (repo: szhu3210/oopt-gnpy; license: BSD-3-Clause)
# coding: utf-8
# In[171]:
# Ido Michael
import tensorflow as tf
import os, struct
import numpy as np
import matplotlib.pyplot as plt
import ParsePowerEDFA
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import math
import sys
import configparser
import random
print(tf.__version__)
# In case we need to average results of 5 different debug files and plot them on a graph.
# ParsePowerEDFA.getTestFiles()
# Average files by name and then write collected results into a csv file.
[testdb, testmse, testmae, tr2, tr4, tr6, tr8, tr1, mse_tr, mae_tr] = ParsePowerEDFA.averageResults("TestPar45_60_60")
[val2, val4, val6, val8, val1, mse_val, mae_val] = ParsePowerEDFA.averageResults_val("TestPar45_60_60")
ParsePowerEDFA.plot_to_matrix(tr2, tr4, tr6, tr8, tr1, mse_tr, mae_tr)
ParsePowerEDFA.plot_to_matrix_Val(val2, val4, val6, val8, val1, mse_val, mae_val)
ParsePowerEDFA.plot_to_matrix_test(testdb, testmse, testmae)
# 20%
# [testdb, val2, val4, val6, val8, val1] = ParsePowerEDFA.averageResults([
# "./TestPar29.ini140-debug.log",
# "./TestPar29.ini84-debug.log",
# "./TestPar29.ini150-debug.log"
# ])
# [testdb, val2, val4, val6, val8, val1] = ParsePowerEDFA.averageResults(["./test/TestPar25.ini-smaller53-debug.log", "./test/TestPar25.ini-smaller103-debug.log", "./test/TestPar25.ini-smaller25-debug.log", "./test/TestPar25.ini-smaller37-debug.log", "./test/TestPar25.ini-smaller30-debug.log"])
# ParsePowerEDFA.plotGraph(val2, val4, val6, val8, val1)
# File: /auto_client/libs/plugins/__init__.py (repo: huzhou520/cmdb)
from settings import PLUGIN_DICT
def get_server_info(hostname, ssh_func):
"""
    :param hostname: the remote host to operate on
    :param ssh_func: the method used to run commands on that host
:return:
"""
info_dict = {}
for key, path in PLUGIN_DICT.items():
        # 1. Split each plugin path from the settings dict into module and class parts
"""
        e.g. libs.plugins.board.Board is split into:
        module path: libs.plugins.board, class name: Board (the class inside that module)
"""
module_name, class_name = path.rsplit('.', maxsplit=1)
        # 2. Import the module from its dotted-path string
import importlib
module = importlib.import_module(module_name)
# print(module_name,class_name)
        # 3. Look up the class on the module via reflection (getattr)
cls = getattr(module, class_name)
# print(module_name, class_name)
        # 4. Instantiate the plugin object
obj = cls()
        # 5. Call the object's process method
ret = obj.process(hostname, ssh_func)
info_dict[key] = ret
# print(info_dict)
return info_dict
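# Expected shape of PLUGIN_DICT in settings (the plugin names and paths
# below are hypothetical; the docstring above uses libs.plugins.board.Board):
#   PLUGIN_DICT = {
#       'board': 'libs.plugins.board.Board',
#       'disk': 'libs.plugins.disk.Disk',
#   }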
# File: /thaniya_client/src/thaniya_client/tools/ThaniyaMySQL_native.py
# (repo: jkpubsrc/Thaniya; license: Apache-2.0)
import os
import typing
import tarfile
import jk_simpleexec
import jk_utils
from ..ThaniyaBackupContext import ThaniyaBackupContext
from .EnumTarPathMode import EnumTarPathMode
from .ThaniyaService import ThaniyaService
class ThaniyaMySQL_native:
@staticmethod
def mySQLDump(ctx:ThaniyaBackupContext, dbName:str, dbUserName:str, dbPassword:str, outputDumpFilePath:str) -> int:
assert isinstance(ctx, ThaniyaBackupContext)
assert isinstance(dbName, str)
assert dbName
assert isinstance(outputDumpFilePath, str)
assert outputDumpFilePath
ctx = ctx.descend("Creating dump file " + repr(outputDumpFilePath) + " ...")
with ctx.log as nestedLog:
outputDumpFilePath = ctx.absPath(outputDumpFilePath)
authFile = ctx.privateTempDir.writeTextFile("[mysqldump]\nuser=" + dbUserName + "\npassword=" + dbPassword + "\n")
result = jk_simpleexec.invokeCmd("/usr/bin/mysqldump", [
"--defaults-extra-file=" + authFile,
"--r",
outputDumpFilePath,
"--routines", # Include stored routines (procedures and functions) for the dumped databases in the output.
"--triggers", # Include triggers for each dumped table in the output.
dbName,
], workingDirectory=os.path.dirname(authFile))
if result.returnCode == 0:
nestedLog.notice("Succeeded.")
return os.path.getsize(outputDumpFilePath)
else:
result.dump(nestedLog.error)
raise Exception("Failed to backup database '" + dbName + "'!")
#
@staticmethod
def mySQLDumpCalculateSize(ctx:ThaniyaBackupContext, dbName:str, dbUserName:str, dbPassword:str) -> int:
import mysql.connector
assert isinstance(ctx, ThaniyaBackupContext)
ctx = ctx.descend("Calculating size for the MySQL dump ...")
with ctx.log as nestedLog:
con = None
try:
# Segmentation fault
# see: https://bugs.mysql.com/bug.php?id=89889
# (but this does not work)
print("> Connecting ....")
con = mysql.connector.connect(host="localhost", database=dbName, user=dbUserName, passwd=dbPassword)
print("> Connected.")
sqlQuery = "SELECT SUM(data_length) FROM information_schema.tables WHERE table_schema = '" + dbName + "';"
cursor = con.cursor()
cursor.execute(sqlQuery)
records = cursor.fetchall()
assert cursor.rowcount == 1
nEstimatedSize = -1
for row in records:
nEstimatedSize = row[0]
break
return nEstimatedSize
finally:
if con and con.is_connected():
cursor.close()
con.close()
#
#
| [
"[email protected]"
] | |
716ed2177858886621060abad9ac3e5c264f152a | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/battle/shared/timers_common.py | fa0fb189e2e6d1344d6ee8d161432c625338e6e9 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,809 | py | # 2017.08.29 21:46:09 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/shared/timers_common.py
import BigWorld
from gui.shared.utils.TimeInterval import TimeInterval
class TimerComponent(object):
__slots__ = ('_panel', '_typeID', '_viewID', '_totalTime', '_startTime', '_finishTime')
def __init__(self, panel, typeID, viewID, totalTime):
super(TimerComponent, self).__init__()
self._panel = panel
self._typeID = typeID
self._viewID = viewID
self._totalTime = totalTime
self._startTime = BigWorld.serverTime()
self._finishTime = self._startTime + totalTime if totalTime else 0
def __repr__(self):
return 'TimerComponent(typeID = {}, viewID = {}, totalTime = {})'.format(self._typeID, self._viewID, self._totalTime)
def clear(self):
self._panel = None
return
def show(self, isBubble = True):
self._showView(isBubble)
self._startTick()
def hide(self):
self._stopTick()
self._hideView()
@property
def typeID(self):
return self._typeID
@property
def viewID(self):
return self._viewID
@property
def finishTime(self):
return self._finishTime
@property
def totalTime(self):
return self._totalTime
def _startTick(self):
raise NotImplementedError
def _stopTick(self):
raise NotImplementedError
def _hideView(self):
raise NotImplementedError
def _showView(self, isBubble):
raise NotImplementedError
class PythonTimer(TimerComponent):
__slots__ = ('_timeInterval', '__weakref__')
def __init__(self, panel, typeID, viewID, totalTime):
super(PythonTimer, self).__init__(panel, typeID, viewID, totalTime)
self._timeInterval = TimeInterval(1.0, self, '_tick')
def clear(self):
self._timeInterval.stop()
super(PythonTimer, self).clear()
def _startTick(self):
if self._totalTime:
timeLeft = max(0, self._finishTime - BigWorld.serverTime())
if timeLeft:
self._setViewSnapshot(timeLeft)
self._timeInterval.start()
def _stopTick(self):
self._timeInterval.stop()
def _tick(self):
timeLeft = self._finishTime - BigWorld.serverTime()
if timeLeft >= 0:
self._setViewSnapshot(timeLeft)
else:
self.hide()
def _setViewSnapshot(self, timeLeft):
raise NotImplementedError
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\battle\shared\timers_common.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:46:09 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
e6c68eff9e6a0fdc168b30b5b841532a1cf4b03d | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/artificial/transf_inv/trend_constant/cycle_7/ar_/test_artificial_32_inv_constant_7__0.py | a69b8bc01e2baf94726fff51468a7e4e6843851d | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 32 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 7, transform = "inv", sigma = 0.0, exog_count = 0, ar_order = 0);
art.process_dataset(dataset); | [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.