Dataset schema (fields appear in this order, `|`-separated, in the rows below):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable ⌀) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable ⌀) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable ⌀) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
62e324edbf5db72f17c763dc3d686b1df8fa8e76 | e060901933ae4b8608a16b75027eb676bcbf24ee | /Shoes/Shoes/urls.py | d5d37b558ac87257bbd1c5fb2c26a53faf66edaf | [] | no_license | apatten001/Shoepal | 5c48cc45de5c9fc8d321487e019833714f45d8c3 | a007090fe2acb541b393854001cd8a106c8d2450 | refs/heads/master | 2021-04-06T20:09:53.773607 | 2018-03-15T21:04:29 | 2018-03-15T21:04:29 | 125,425,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | """Shoes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('sell.urls', namespace='sell')),
path('', include('accounts.urls', namespace='accounts'))
]
| [
"[email protected]"
] | |
78160df90330823af6c5d5389fbe35435da6181e | 9d8acc20d2ee1d1957849dfb71c22e0dae2d8c5c | /baomoicrawl/venv/Lib/site-packages/twisted/python/test/deprecatedattributes.py | 368f231b4fba03650d3126673b5a98b6ffcf64ff | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | thuy4tbn99/TranTruongThuy_17021178_Nhom4_Crawler | b0fdedee2942a12d9f64dfed93f43802dc5ab340 | 87c8c07433466bbc43a24ea089f75baeb467c356 | refs/heads/master | 2022-11-27T21:36:33.917491 | 2020-08-10T23:24:42 | 2020-08-10T23:24:42 | 286,583,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A module that is deprecated, used by L{twisted.python.test.test_deprecate} for
testing purposes.
"""
from __future__ import division, absolute_import
from incremental import Version
from twisted.python.deprecate import deprecatedModuleAttribute
# Known module-level attributes.
DEPRECATED_ATTRIBUTE = 42
ANOTHER_ATTRIBUTE = 'hello'
version = Version('Twisted', 8, 0, 0)
message = 'Oh noes!'
deprecatedModuleAttribute(
version,
message,
__name__,
'DEPRECATED_ATTRIBUTE')
| [
"[email protected]"
] | |
9aa328809ba68d90740be6791e15c2849822679e | 3cec81bf2c37485482b9ac100c45029baa8044a2 | /pde/tools/spectral.py | 98c12dee4c9b855f17d2228f1d8477346148dbbe | [
"MIT"
] | permissive | jhurreaq/py-pde | 085adfbbcdabc162d66fef28007d729d465fbadf | 42cd3e9cc45793840ecfe244e606c39b13502658 | refs/heads/master | 2023-08-05T18:23:53.612466 | 2021-09-19T13:19:29 | 2021-09-19T13:19:29 | 318,832,446 | 0 | 0 | MIT | 2020-12-05T16:16:46 | 2020-12-05T16:16:45 | null | UTF-8 | Python | false | false | 3,022 | py | """
Functions making use of spectral decompositions
.. autosummary::
:nosignatures:
make_colored_noise
.. codeauthor:: David Zwicker <[email protected]>
"""
from typing import Callable, Tuple
import numpy as np
try:
from pyfftw.interfaces.numpy_fft import irfftn as np_irfftn
from pyfftw.interfaces.numpy_fft import rfftn as np_rfftn
except ImportError:
from numpy.fft import irfftn as np_irfftn
from numpy.fft import rfftn as np_rfftn
def make_colored_noise(
shape: Tuple[int, ...],
dx=1.0,
exponent: float = 0,
scale: float = 1,
rng: np.random.Generator = None,
) -> Callable[[], np.ndarray]:
r"""Return a function creating an array of random values that obey
.. math::
\langle c(\boldsymbol k) c(\boldsymbol k’) \rangle =
\Gamma^2 |\boldsymbol k|^\nu \delta(\boldsymbol k-\boldsymbol k’)
in spectral space on a Cartesian grid. The special case :math:`\nu = 0`
corresponds to white noise.
Args:
shape (tuple of ints):
Number of supports points in each spatial dimension. The number of
the list defines the spatial dimension.
dx (float or list of floats):
Discretization along each dimension. A uniform discretization in
each direction can be indicated by a single number.
exponent:
Exponent :math:`\nu` of the power spectrum
scale:
Scaling factor :math:`\Gamma` determining noise strength
rng (:class:`~numpy.random.Generator`):
Random number generator (default: :func:`~numpy.random.default_rng()`)
Returns:
callable: a function returning a random realization
"""
if rng is None:
rng = np.random.default_rng()
# extract some information about the grid
dim = len(shape)
dx = np.broadcast_to(dx, (dim,))
if exponent == 0:
# fast case of white noise
def noise_normal():
"""return array of colored noise"""
return scale * rng.normal(size=shape)
return noise_normal
# deal with colored noise in the following
# prepare wave vectors
k2s = np.array(0)
for i in range(dim):
if i == dim - 1:
k = np.fft.rfftfreq(shape[i], dx[i])
else:
k = np.fft.fftfreq(shape[i], dx[i])
k2s = np.add.outer(k2s, k ** 2)
# scaling of all modes with k != 0
k2s.flat[0] = 1 # type: ignore
scaling = 2 * np.pi * scale * k2s ** (exponent / 4)
scaling.flat[0] = 0 # type: ignore
# TODO: accelerate the FFT using the pyfftw package
def noise_colored() -> np.ndarray:
"""return array of colored noise"""
# random field
arr: np.ndarray = rng.normal(size=shape) # type: ignore
# forward transform
arr = np_rfftn(arr)
# scale according to frequency
arr *= scaling
# backwards transform
arr = np_irfftn(arr, shape)
return arr
return noise_colored
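# Minimal usage sketch (added for illustration; the grid shape, spacing and
# exponent below are arbitrary choices, not library defaults):
if __name__ == "__main__":
    noise = make_colored_noise((64, 64), dx=0.1, exponent=-2, scale=1.0)
    field = noise()  # one (64, 64) realization drawn from the colored spectrum
    print(field.shape, field.std())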
| [
"[email protected]"
] | |
48b166e97930d067ecf11888f7c6e2d3278d35f2 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_network_interface_ip_configurations_operations.py | 056672474f33b7ea604d12fce6dcb88351808632 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 8,928 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceIPConfigurationsOperations:
"""NetworkInterfaceIPConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceIPConfigurationListResult"]:
"""Get all ip configurations in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
async def get(
self,
resource_group_name: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs
) -> "_models.NetworkInterfaceIPConfiguration":
"""Gets the specified network interface ip configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
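    # Hypothetical usage sketch (not part of the generated SDK file; assumes a
    # client built elsewhere, e.g. azure.mgmt.network.aio.NetworkManagementClient,
    # and placeholder resource names):
    #
    #     ops = client.network_interface_ip_configurations
    #     async for ip_config in ops.list("my-resource-group", "my-nic"):
    #         print(ip_config.name)
    #     cfg = await ops.get("my-resource-group", "my-nic", "ipconfig1")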
| [
"[email protected]"
] | |
d6e8ff8516b3f0322e3955eae9f8edc244f810f8 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-dbs/aliyunsdkdbs/request/v20190306/ModifyBackupObjectsRequest.py | 0e3fa5590c1538ee6643ee531e162ad58600b343 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,056 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdbs.endpoint import endpoint_data
class ModifyBackupObjectsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dbs', '2019-03-06', 'ModifyBackupObjects')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_BackupPlanId(self): # String
return self.get_query_params().get('BackupPlanId')
def set_BackupPlanId(self, BackupPlanId): # String
self.add_query_param('BackupPlanId', BackupPlanId)
def get_BackupObjects(self): # String
return self.get_query_params().get('BackupObjects')
def set_BackupObjects(self, BackupObjects): # String
self.add_query_param('BackupObjects', BackupObjects)
def get_OwnerId(self): # String
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # String
self.add_query_param('OwnerId', OwnerId)
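    # Hypothetical usage sketch (not part of the generated SDK file; assumes the
    # standard aliyunsdkcore AcsClient and placeholder identifiers):
    #
    #     request = ModifyBackupObjectsRequest()
    #     request.set_BackupPlanId("dbs-xxxxxxxx")
    #     request.set_BackupObjects('[{"DBName":"testdb"}]')
    #     response = client.do_action_with_exception(request)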
| [
"[email protected]"
] | |
f45fdaae0dc927be8cad55969099030418d04029 | 633b2299ab0bb2648b94a7f8bf0288aa7a083347 | /Project_Assignment11/app11/views.py | 709df73b08555f9bfcf6f78709277c4db176767d | [] | no_license | Abhirvalandge/Django-Practice-Projects-with-Naveen-Sir | 7deb309b8af3c9e123da8525d7a925661662bde3 | 172191e22fcec3a3f80f64e624e6f9a52617486e | refs/heads/main | 2023-03-23T08:34:25.196039 | 2021-03-06T06:56:17 | 2021-03-06T06:56:17 | 345,023,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | from django.shortcuts import render
def showMain(request):
return render(request,"index.html")
def showIndex(request):
oroom = 1500
droom = 2500
sroom = 5000
comp = 1000
gy = 250
min = 50
gname = request.POST.get("gn")
gcont = request.POST.get("gc")
add = request.POST.get("add")
ordi = request.POST.get("ord")
delux = request.POST.get("del")
suite = request.POST.get("sui")
computer = request.POST.get("com")
gym = request.POST.get("gym")
mineral = request.POST.get("min")
    if ordi == "oroom" and computer == "comp":
        a1 = oroom + comp
        return render(request, "confirmation.html", {"data1": a1})
    # fall back to the booking form when no matching selection was made,
    # so the view always returns an HttpResponse instead of None
    return render(request, "index.html")
| [
"[email protected]"
] | |
a13cc031394394a73888e319442d50773ae1f9cf | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_Samsruti_1.py | fef1069471fc5682a4dbee4a96c5491f48a857dc | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 828 | py | import sys
def readInt():
return int(raw_input())
def retrieveEachNumber(number):
eachNumberList = []
rem = 0
while number > 0:
rem = number % 10
number = number/10
eachNumberList.append(rem)
return eachNumberList
fileout = open('log.txt', 'w')
needList = [0,1,2,3,4,5,6,7,8,9]
trueval = []
T = readInt()
for cases in range(1,T+1):
#print "Enter N: "
N = readInt()
val = 0
i = 1
arr = []
while i<1000000:
# print "i is :",i
val = N*i
# print "val = ",val
arr += retrieveEachNumber(val)
# print arr
i = i+1
    arr = sorted(set(arr))  # dedupe first, then sort, so the comparison with needList is order-safe
if arr == needList:
flag = 1
break
else:
flag = 0
continue
if flag == 1:
print >> fileout,"Case #"+str(cases)+": ",val
else:
print >> fileout,"Case #"+str(cases)+": INSOMNIA"
fileout.close()
| [
"[[email protected]]"
] | |
43d77b0cd16051f64708b60f8fccce68d390dffb | dc99d95671170444cd7bf02e37da6ecda4a5f19e | /apps/operation/migrations/0008_auto_20180905_1609.py | 55dfc0e5ddf62f37178e22dbdf88e45ca8ef24f2 | [] | no_license | bbright3493/python_real_war | 734d49ed9f7e1800d24dc754424a07b69d7d8c1f | 6e43bb7d814920222f3310bd6fd9f04cb3d5bbf1 | refs/heads/master | 2020-03-30T06:08:40.249185 | 2018-10-22T07:33:41 | 2018-10-22T07:33:41 | 150,841,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-09-05 16:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('operation', '0007_auto_20180831_1621'),
]
operations = [
migrations.RemoveField(
model_name='usercourse',
name='status',
),
migrations.AddField(
model_name='usercourse',
name='course_status',
field=models.SmallIntegerField(choices=[(1, '未开通'), (2, '已开通')], default=1, verbose_name='课程状态'),
),
migrations.AddField(
model_name='usercourse',
name='study_status',
field=models.SmallIntegerField(choices=[(1, '未学习'), (2, '学习中'), (3, '已学习')], default=1, verbose_name='课程的学习状态'),
),
migrations.AddField(
model_name='userpass',
name='choice_status',
field=models.SmallIntegerField(default=0, verbose_name='关卡选择题库答题状态'),
),
]
| [
"[email protected]"
] | |
3d8e60b68e721264a7b6abc65cdd796f0b801774 | db9060d9a8550d6ce24fff6bdf04b4fa8974f20b | /utils/install_pyro.py | e6823af6f8006fac074ae01adce8216a8dbc3c8b | [] | no_license | davecap/pydr | 92fbb7ba9ecaa054a36b0409f86b58740592767d | fc6cbef45007b55bf78fd661cc81c6b0ef7e0d80 | refs/heads/master | 2016-09-06T11:11:28.685731 | 2011-08-05T16:50:22 | 2011-08-05T16:50:22 | 864,921 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | #!/usr/bin/python
import subprocess
import os
import shutil
import tempfile
import re
try:
temp_dir = tempfile.mkdtemp(prefix='pyro_install_')
print 'Installing in %s' % temp_dir
os.chdir(temp_dir)
url = 'http://www.xs4all.nl/~irmen/pyro3/download/Pyro-3.10.tar.gz'
filename = 'Pyro-3.10.tar.gz'
dir = re.match(r'(.*)\.tar.gz', filename).group(1)
return_code = subprocess.call(['wget', url])
if return_code != 0:
raise Exception('Could not download file from url: %s' % url)
else:
print 'Downloaded...'
return_code = subprocess.call(['tar', 'xzvf', filename])
if return_code != 0:
raise Exception('Could not untar file: %s' % filename)
else:
print 'Untarred...'
os.chdir(os.path.abspath(dir))
return_code = subprocess.call(['python', 'setup.py', 'install'])
if return_code != 0:
raise Exception('Could not install Pyro')
finally:
shutil.rmtree(temp_dir)
| [
"[email protected]"
] | |
9554a7b27eb0ab83d7221086697a354c5f8a3b85 | 6547c59e1041bd888c31d360f9b75e07f5b2bdd0 | /bin/pythonicus | d17e06e9c343bbdefffcca29145c02e19757ae3d | [] | no_license | diegorubin/pythonicus | b5069f3bc12f20715e70da990fb4476089046c53 | ae0015d301df5cac3896455a6b64917f0b5bc1b8 | refs/heads/master | 2021-03-12T20:36:05.564847 | 2014-12-27T10:52:10 | 2014-12-27T10:52:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | #!/usr/bin/env python
import sys
#import cyclone.web
from twisted.python import log
from twisted.internet import reactor
try:
from pythonicus.server.base import *
except ImportError:
from os.path import join, abspath, dirname
parentpath = abspath(join(dirname(__file__), '..'))
sys.path.append(parentpath)
from pythonicus.server.base import *
log.startLogging(sys.stdout)
reactor.listenTCP(8888, Application())
reactor.run()
| [
"[email protected]"
] | ||
979444d06a18703716c2ffc753dde974cb5502fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03967/s359107548.py | d0ee9a3ac3bd33bf4227ed9624640a7cce491654 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | S = input()
g_cnt, p_cnt = 0, 0
ans = 0
for s in S:
if p_cnt < g_cnt:
if s == "g":
p_cnt += 1
ans += 1
elif s == "p":
p_cnt += 1
else:
if s == "g":
g_cnt += 1
elif s == "p":
g_cnt += 1
ans -= 1
print(ans) | [
"[email protected]"
] | |
f45ba9f87301ca0d99a049ac8d38a6398db17af0 | e5b4ed93d6666e195e96a265d3e7cfe4243a7300 | /python_net/day02/udp_server.py | 8df6e4aa87d01a1cdc5cca61ad4cecade830b7b8 | [] | no_license | Spider251/python | 934f5b8b923c2b61186a6df8445957290e5c4c74 | 8b1931f862e1d5c29fed9af624bcac94c1d25755 | refs/heads/master | 2020-04-05T11:58:04.558098 | 2018-11-09T12:06:06 | 2018-11-09T12:06:06 | 156,852,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | # udp_server.py
from socket import *
# create a UDP (datagram) socket
sockfd = socket(AF_INET,SOCK_DGRAM)
# bind the server address
server_addr = ('0.0.0.0',8888)
sockfd.bind(server_addr)
# receive and reply to messages
while True:
    data,addr = sockfd.recvfrom(1024)
    print("Message from %s: %s"%(addr,data.decode()))
    sockfd.sendto(b"Thanks for your msg",addr)
# close the socket
sockfd.close()
| [
"[email protected]"
] | |
0cc41ccb5f1374c3d4787416da11b9c307894afd | 16c8fdf291430475f40d578b0d64552eb64046e9 | /colour/difference/tests/test_huang2015.py | 9e6bd01ac869d70b6f779b19f66a0c1b05fae61b | [
"BSD-3-Clause"
] | permissive | nodefeet/colour | 4c1bfed87ce173ff878bdf288fd9828bb68022e3 | 319dd5b1c45aef6983eff1830f918c1e593fb530 | refs/heads/develop | 2022-02-19T17:39:36.657993 | 2022-02-15T08:38:26 | 2022-02-15T08:38:26 | 460,456,444 | 0 | 0 | BSD-3-Clause | 2022-02-17T13:53:37 | 2022-02-17T13:53:36 | null | UTF-8 | Python | false | false | 1,117 | py | """Defines the unit tests for the :mod:`colour.difference.huang2015` module."""
import numpy as np
import unittest
from colour.difference import power_function_Huang2015
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"TestPowerFunctionHuang2015",
]
class TestPowerFunctionHuang2015(unittest.TestCase):
"""
Define :func:`colour.difference.huang2015.power_function_Huang2015`
definition unit tests methods.
"""
def test_power_function_Huang2015(self):
"""
Test :func:`colour.difference.huang2015.power_function_Huang2015`
definition.
"""
d_E = np.array([2.0425, 2.8615, 3.4412])
np.testing.assert_almost_equal(
power_function_Huang2015(d_E),
np.array([2.35748796, 2.98505036, 3.39651062]),
decimal=7,
)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
6f91fd482b30ef5e937e14797819b36816094e2f | add74ecbd87c711f1e10898f87ffd31bb39cc5d6 | /xcp2k/classes/_basis3.py | 7221798fa0235553b855d476ccc0d957c7a06580 | [] | no_license | superstar54/xcp2k | 82071e29613ccf58fc14e684154bb9392d00458b | e8afae2ccb4b777ddd3731fe99f451b56d416a83 | refs/heads/master | 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | from xcp2k.inputsection import InputSection
class _basis3(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Default_keyword = []
self._name = "BASIS"
self._repeated_default_keywords = {'Default_keyword': 'DEFAULT_KEYWORD'}
self._attributes = ['Default_keyword']
| [
"[email protected]"
] | |
536c4275d8ead61ff8849a450a9f33405be4a30a | 2594650405c1424bec1ab14c1ce994906d6cc961 | /AByteOfPython/C14_InputOutput1b.py | 9ce11e5999a8462b2ab6eb9fb64979d493ae9a6e | [] | no_license | aa2276016/Learning_Python | 10dd46eeb77d5ec05b4e607c523e9e5597a2e7ee | f0e3b4876ea078a45493eb268992cec62ccd29d1 | refs/heads/master | 2021-10-19T08:34:15.694353 | 2018-03-17T02:38:49 | 2018-03-17T02:38:49 | 125,590,648 | 0 | 0 | null | 2018-03-17T02:41:26 | 2018-03-17T02:41:26 | null | UTF-8 | Python | false | false | 444 | py | # Input Output 1b:
def reverse(text):
return text[::-1]
def is_palindrome(text):
return text == reverse(text)
while True:  # set up the loop first and put input() inside it, so the user is prompted on every pass
something = input("Enter text: ")
if something == 'quit':
break
if is_palindrome(something):
print("Yes, it s a palindrome")
else:
print("No, it is not a palindrome")
| [
"[email protected]"
] | |
699c1ceda3c1dd63c07aae302a294b79564c477f | e39cd8cdcbfe60eafb45fb96f6f5ada62940c6f2 | /setup.py | fc63bf42cbb532d0af0752d0f7a3a6da2041a9db | [
"MIT"
] | permissive | DarkmatterVale/hurricane | 5d4505ed6a994ed669e9795d047602ca773ce20c | 94e9e56fcc6d73f5f76ad1fe9e3e4f248549fe7b | refs/heads/master | 2021-05-01T00:52:22.374738 | 2017-01-13T00:15:34 | 2017-01-13T00:15:34 | 64,959,057 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | # This is the setup file for pip
from setuptools import setup, find_packages
import os, sys
from os import path
setup(
name = 'hurricane',
version = '0.0.1',
description = 'A master-slave computer communication protocol',
url = 'https://github.com/DarkmatterVale/hurricane',
author = 'Vale Tolpegin',
author_email = '[email protected]',
license = 'MIT',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
"Operating System :: OS Independent",
],
packages = find_packages(),
install_requires = ['scapy-python3'],
keywords = [],
)
| [
"[email protected]"
] | |
b6e84a0000cbadb35e33a397764e8a72f3b8d1a7 | a0a41401bafc44f233d5ba82ed8905ba9d213699 | /huntserver/migrations/0023_auto_20150717_1025.py | 0539387ebf04335213a8d2b2b2e750a333718e53 | [] | no_license | christophsjones/puzzlehunt_server | 266c0e6a02e09d1f7290dab6b006c0ce70b2586b | 53390bdb8d97955c280f7a2e660150d5c56b49f2 | refs/heads/master | 2021-01-23T01:35:12.499539 | 2015-10-21T19:59:43 | 2015-10-21T19:59:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('huntserver', '0022_auto_20150717_1024'),
]
operations = [
migrations.AlterField(
model_name='unlockable',
name='content_type',
field=models.CharField(default=b'TXT', max_length=3, choices=[(b'IMG', b'Image'), (b'PDF', b'PDF'), (b'TXT', b'Text'), (b'WEB', b'Link')]),
),
]
| [
"[email protected]"
] | |
526117433c8898f00ad8d1ef33ed32fc9051ce2d | b0c375bda0f25ab2408be94fd0714819af3bc1ab | /model/layers/blocks_module.py | 5927e6a6f428baca4259783ad12f9d0372bcd408 | [] | no_license | nenoc/YOLOv4-pytorch | 8f0ee6415b610d8d566396500abe133b1079ba70 | 781b07d5fbbd4faefcfd1d535f21982c436f27cd | refs/heads/master | 2022-11-20T20:03:45.814946 | 2020-07-20T12:05:34 | 2020-07-20T12:05:34 | 282,566,409 | 2 | 0 | null | 2020-07-26T03:15:34 | 2020-07-26T03:15:34 | null | UTF-8 | Python | false | false | 713 | py | import torch.nn as nn
from ..layers.conv_module import Convolutional
class Residual_block(nn.Module):
def __init__(self, filters_in, filters_out, filters_medium):
super(Residual_block, self).__init__()
self.__conv1 = Convolutional(filters_in=filters_in, filters_out=filters_medium, kernel_size=1, stride=1, pad=0,
norm="bn", activate="leaky")
self.__conv2 = Convolutional(filters_in=filters_medium, filters_out=filters_out, kernel_size=3, stride=1, pad=1,
norm="bn", activate="leaky")
def forward(self, x):
r = self.__conv1(x)
r = self.__conv2(r)
out = x + r
return out
| [
"your email"
] | your email |
348ced0727e42f540f5292dbfa643f5e7e7f46b7 | a438748ac89d53b19e7f4130529906896f059b25 | gen_for_collect.py | 9fc0c0c61660397c13ff38e9779b4a1fbdb2bf23 | [] | no_license | Alexfordrop/Basics | 90ead9294727a823eb044e5f2f69d8f29133d150 | eda400424b2c72bd5e01a6c7cb14ad7ae29477d4 | refs/heads/master | 2023-06-08T16:42:26.704163 | 2021-06-27T20:46:27 | 2021-06-27T20:46:27 | 329,421,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | # List comprehension
a = [i for i in range(5)]
# Generator expression
x = (i for i in range(5))
print(a)
print(x)
for i in x:
print(i, end=' ')
| [
"[email protected]"
] | |
d89c72f08a21bd2f42f071c1c791e17542d88c4f | 060ce17de7b5cdbd5f7064d1fceb4ded17a23649 | /fn_google_maps_directions/tests/test_fn_google_maps_directions.py | ea98b341ce5894834d0f28a893991ead0320e9ba | [
"MIT"
] | permissive | ibmresilient/resilient-community-apps | 74bbd770062a22801cef585d4415c29cbb4d34e2 | 6878c78b94eeca407998a41ce8db2cc00f2b6758 | refs/heads/main | 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 | MIT | 2023-03-29T20:40:31 | 2017-08-25T14:07:33 | Python | UTF-8 | Python | false | false | 3,118 | py | # -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
from __future__ import print_function
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
PACKAGE_NAME = "fn_google_maps_directions"
FUNCTION_NAME = "fn_google_maps_directions"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_fn_google_maps_directions_function(circuits, function_params, timeout=10):
# Fire a message to the function
evt = SubmitTestFunction("fn_google_maps_directions", function_params)
circuits.manager.fire(evt)
event = circuits.watcher.wait("fn_google_maps_directions_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestFnGoogleMapsDirections:
""" Tests for the fn_google_maps_directions function"""
inputs = ['IBM, Armonk, New York', 'IBM Resilient, Cambridge, Boston, MA']
outputs = [
{
"success": True,
"directions_link": "https://www.google.com/maps/dir/?api=1&origin=IBM%2C%20Armonk%2C%20New%20York&destination=IBM%20Resilient%2C%20Cambridge%2C%20Boston%2C%20MA",
"inputs": {
"google_maps_origin": inputs[0],
"google_maps_destination": inputs[1]
}
}
]
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
@pytest.mark.parametrize("google_maps_origin, google_maps_destination, expected_results", [
(inputs[0], inputs[1], outputs[0])
])
def test_success(self, circuits_app, google_maps_origin, google_maps_destination, expected_results):
""" Test calling with sample values for the parameters """
function_params = {
"google_maps_origin": google_maps_origin,
"google_maps_destination": google_maps_destination
}
results = call_fn_google_maps_directions_function(circuits_app, function_params)
assert(expected_results == results)
@pytest.mark.parametrize("google_maps_origin, google_maps_destination, expected_results", [
(inputs[0], inputs[1], outputs[0])
])
def test_result_is_dict(self, circuits_app, google_maps_origin, google_maps_destination, expected_results):
""" Test calling with sample values for the parameters and result is of type dict"""
function_params = {
"google_maps_origin": google_maps_origin,
"google_maps_destination": google_maps_destination
}
results = call_fn_google_maps_directions_function(circuits_app, function_params)
assert (isinstance(results, dict)) | [
"[email protected]"
] | |
7b23637cb362ddb83b9c8ac25dd2b2295942406e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/217/usersdata/274/113591/submittedfiles/av2_p3_m2.py | 11d3b1df8bc4632ec02ca21815a72cd111a8a103 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | # -*- coding: utf-8 -*
n = int(input("Square dimension: "))
while not n >= 3:
    n = int(input("Square dimension: "))
M = []
for i in range(0, n, 1):
    L = []
    for j in range(0, n, 1):
        L.append(int(input("Row element: ")))
    M.append(L)
somaL = []
for i in range(0, n, 1):
    somaL.append(sum(M[i]))
somaC = []
for j in range(0, n, 1):
    C = 0
    for i in range(0, n, 1):
        C = C + M[i][j]
    somaC.append(C)
# scan the row sums for the one value that differs from the first
b = [somaL[0]]
ct = 0
k = 0
VE = 0
VC = 0
for i in range(0, n, 1):
    if somaL[i] in b:
        continue
    else:
        ct = ct + 1
        k = i
if ct == 1:
    VE = somaL[k]
    VC = somaL[0]
if ct != 1:
    VE = somaL[0]
    VC = somaL[1]
    k = 0
# same scan over the column sums
b1 = [somaC[0]]
ct2 = 0
k1 = 0
VE1 = 0
for i in range(0, n, 1):
    if somaC[i] in b1:
        continue
    else:
        ct2 = ct2 + 1
        k1 = i
if ct2 == 1:
    VE1 = somaC[k1]
    VC = somaC[0]
if ct2 != 1:
    VE1 = somaC[0]
    VC = somaC[1]
    k1 = 0
| [
"[email protected]"
] | |
dc2c448b2f4ba78a00a1c9045e74ad0ae468900e | 4a5056686e604e5af78d4653c7c51ac3769b6b38 | /server/view.py | e7de73a06c0760f65ebc1e704baa315ae65457bc | [
"MIT"
] | permissive | SangminOut/Short-URL | 673e164cf0e9c471d5e509ccd9ae5fe604ddf2f5 | 6269049442e0c499764eb1061631b6399bdbc919 | refs/heads/master | 2020-04-11T04:01:38.015376 | 2018-12-16T12:22:53 | 2018-12-16T12:22:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | from flask import request, redirect, Response, jsonify, current_app
from model import UrlModel
def save_url():
original_url = request.json['url']
url_key = UrlModel(original_url).save_url()
if current_app.config['SERVER_NAME'] is not None:
base_url = current_app.config['SERVER_NAME'] + '/'
else:
base_url = 'localhost/'
return jsonify({'url': 'http://' + base_url + url_key}), 201
def get_url(key) -> Response:
url = UrlModel.get_url(key)
if url is None:
return Response('', 204)
return redirect(url)
| [
"[email protected]"
] | |
188c4c56d93bad2c21c59daa92790577ed1fbaaa | a3d32e0ff84958d194ced642441f5379c0032465 | /tfmsarest/views/common_schema.py | 2e068f8dc9fbea974ec8cf611c0b9e4f16d9026b | [] | no_license | TensorMSA/tensormsa_old | 406755511d05d4ec179c085337a05f73c0dde80a | ef058737f391de817c74398ef9a5d3a28f973c98 | refs/heads/master | 2021-06-18T11:58:29.349060 | 2017-04-20T10:17:43 | 2017-04-20T10:17:43 | 67,384,681 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | from tfmsacore import netconf
import json, unicodedata
from rest_framework.response import Response
from rest_framework.views import APIView
from tfmsacore.utils.json_conv import JsonDataConverter as jc
class CommonSchema(APIView):
"""
"""
def get(self, request, datatype, preprocess, category, subcategory ):
"""
- desc : return nn_info data
"""
try:
result = netconf.get_namespace(datatype, preprocess, category, subcategory)
return_data = {"status": "200", "result": result}
return Response(json.dumps(return_data))
except Exception as e:
return_data = {"status": "400", "result": str(e)}
return Response(json.dumps(return_data)) | [
"[email protected]"
] | |
69bb43586bb37d90d4904525583972a1c1353d74 | f6f632bee57875e76e1a2aa713fdbe9f25e18d66 | /python/_0501_1000/0631_design-excel-sum-formula.py | f5ab87115d8d3a0a794edc0aec9f0d2c163b354e | [] | no_license | Wang-Yann/LeetCodeMe | b50ee60beeeb3661869bb948bef4fbe21fc6d904 | 44765a7d89423b7ec2c159f70b1a6f6e446523c2 | refs/heads/master | 2023-08-07T05:31:23.428240 | 2021-09-30T15:33:53 | 2021-09-30T15:33:53 | 253,497,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,890 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Rock Wayne
# @Created : 2020-07-24 09:36:33
# @Last Modified : 2020-07-24 09:36:33
# @Mail : [email protected]
# @Version : alpha-1.0
"""
# Your task is to implement Excel's sum formula feature. The operations are as
# follows:
#
# Excel(int H, char W): a constructor whose input gives the height and width of
# the Excel sheet. H is a positive integer from 1 to 26 representing the height.
# W is a character from 'A' to 'Z'; the width equals the number of letters from
# 'A' to W. The sheet is a height * width 2-D integer array whose elements are
# initialized to 0. Row indices start at 1 and column indices start at 'A'.
#
#
# void Set(int row, char column, int val): sets the value of C(row, column) to
# val.
#
#
# int Get(int row, char column): returns the value in C(row, column).
#
#
# int Sum(int row, char column, List of Strings : numbers): stores into C(row,
# column) the sum of every cell represented in numbers, and also returns that
# sum. The sum formula keeps its result up to date until the formula is
# overwritten by another value or formula.
#
# numbers is a collection of strings, each representing either a single cell or
# a range. A single cell has the format ColRow, e.g. "F7" denotes the cell
# (7, F). A range has the format ColRow1:ColRow2 and denotes the rectangle whose
# top-left corner is ColRow1 and whose bottom-right corner is ColRow2.
#
#
# Example 1:
#
#
# Excel(3,"C");
# // Construct a 3*3 2-D array, initialized to all 0.
# //   A B C
# // 1 0 0 0
# // 2 0 0 0
# // 3 0 0 0
#
# Set(1, "A", 2);
# // Set C(1,"A") to 2.
# //   A B C
# // 1 2 0 0
# // 2 0 0 0
# // 3 0 0 0
#
# Sum(3, "C", ["A1", "A1:B2"]);
# // Set C(3,"C") to the sum of the single cell C(1,"A") plus the rectangle
# // with top-left corner C(1,"A") and bottom-right corner C(2,"B"). Returns 4.
#
# //   A B C
# // 1 2 0 0
# // 2 0 0 0
# // 3 0 0 4
#
# Set(2, "B", 2);
# // Set C(2,"B") to 2. Note that the value of C(3,"C") changes as well.
# //   A B C
# // 1 2 0 0
# // 2 0 2 0
# // 3 0 0 6
#
#
# Notes:
#
#
# You may assume no circular sum definitions occur, such as A1 = sum(B1) with
# B1 = sum(A1).
# In the test data, letters are quoted with double quotes.
# Remember to reset the variables in the Excel class, because static and class
# variables would otherwise persist across multiple test cases. See here for
# details.
#
# Related Topics: Design
# 👍 16 👎 0
"""
from typing import List
import pytest
# leetcode submit region begin(Prohibit modification and deletion)
class Excel:
def __init__(self, H: int, W: str):
self.m = {}
self.mat = [[0] * (self.col_idx(W) + 1) for _ in range(H)]
def col_idx(self, char):
return ord(char) - ord("A")
def set(self, r: int, c: str, v: int) -> None:
if (r, c) in self.m:
self.m.pop((r, c))
self.mat[r - 1][self.col_idx(c)] = v
def get(self, r: int, c: str) -> int:
if (r, c) in self.m:
return self.sum(r, c, self.m[(r, c)])
return self.mat[r - 1][self.col_idx(c)]
def sum(self, r: int, c: str, strs: List[str]) -> int:
res = 0
for s in strs:
if ":" not in s:
y = s[0]
x = int(s[1:])
res += self.get(x, y)
else:
f, t = s.split(":")
for i in range(int(f[1:]), int(t[1:]) + 1):
for j in range(ord(f[0]), ord(t[0]) + 1):
res += self.get(i, chr(j))
self.m[r, c] = strs
# print(self.m)
return res
# Your Excel object will be instantiated and called as such:
# obj = Excel(H, W)
# obj.set(r,c,v)
# param_2 = obj.get(r,c)
# param_3 = obj.sum(r,c,strs)
# leetcode submit region end(Prohibit modification and deletion)
def test_solution():
ex = Excel(3, "C")
ex.set(1, "A", 2)
assert ex.sum(3, "C", ["A1", "A1:B2"]) == 4
ex.set(2, "B", 2)
assert ex.get(3, "C") == 6
def test1():
ops = ["Excel", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set",
"set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "set", "sum",
"sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum",
"sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum", "sum"]
args = [[26, "Z"], [1, "A", 0], [1, "B", 1], [1, "C", 2], [1, "D", 3], [1, "E", 4], [1, "F", 5], [1, "G", 6],
[1, "H", 7], [1, "I", 8], [1, "J", 9], [1, "K", 10], [1, "L", 11], [1, "M", 12], [1, "N", 13],
[1, "O", 14], [1, "P", 15], [1, "Q", 16], [1, "R", 17], [1, "S", 18], [1, "T", 19],
[1, "U", 20], [1, "V", 21], [1, "W", 22],
[1, "X", 23], [1, "Y", 24], [1, "Z", 25], [2, "A", ["A1:A1"]], [2, "B", ["A1:B1"]], [2, "C", ["A1:C1"]],
[2, "D", ["A1:D1"]], [2, "E", ["A1:E1"]], [2, "F", ["A1:F1"]], [2, "G", ["A1:G1"]], [2, "H", ["A1:H1"]],
[2, "I", ["A1:I1"]], [2, "J", ["A1:J1"]], [2, "K", ["A1:K1"]], [2, "L", ["A1:L1"]], [2, "M", ["A1:M1"]],
[2, "N", ["A1:N1"]], [2, "O", ["A1:O1"]], [2, "P", ["A1:P1"]], [2, "Q", ["A1:Q1"]], [2, "R", ["A1:R1"]],
[2, "S", ["A1:S1"]], [2, "T", ["A1:T1"]], [2, "U", ["A1:U1"]], [2, "V", ["A1:V1"]], [2, "W", ["A1:W1"]],
[2, "X", ["A1:X1"]], [2, "Y", ["A1:Y1"]], [2, "Z", ["A1:Z1"]]]
ex = Excel(26, "Z")
for op, arg in zip(ops[1:], args[1:]):
x = getattr(ex, op)(*arg)
if op != "set":
print(x)
if __name__ == '__main__':
pytest.main(["-q", "--color=yes", "--capture=no", __file__])
| [
"[email protected]"
] | |
28e9cadbf5eb0e4047f49800c21759742600c50c | c76b54198bcfbf0eb3427db86270053b05c34531 | /main.py | 7bc62005369f9fa349970a9e699ccc71fa56957f | [] | no_license | tang1323/ArticleSpider | fd2f97c4b6cb77adf3439e993c641f345e9fe07e | 7eb99c98a24fc5b6c2f2e54c6322c07b15bb7eed | refs/heads/master | 2023-04-04T02:03:29.289456 | 2021-04-15T01:43:18 | 2021-04-15T01:43:18 | 358,093,260 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py |
from scrapy.cmdline import execute
import sys
import os
# print(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# execute(["scrapy", "crawl", "cnblogs"])
# execute(["scrapy", "crawl", "zhihu_sel"])
# execute(["scrapy", "crawl", "lagou"])
execute(["scrapy", "crawl", "lagou_cooike_pool"])
| [
"[email protected]"
] | |
8f210ef53d44be2eb134aa20494b2b5844f64165 | 045eeaf9af3470b6ca0aca47cbdbeaf227d40e14 | /tests/test_rest_ft_fansid.py | 7f3313129ec66da43c5730cf4f23446409176e57 | [
"Apache-2.0"
] | permissive | ashutoshshanker/ops-restd | aa07ff0dbf0845bc745876e93a7092cc70c062e0 | 0afb98d3fd505bfe5251bae7550dc0ea7525edf5 | refs/heads/master | 2020-12-28T23:23:32.524833 | 2016-08-02T20:43:25 | 2016-08-02T22:40:57 | 65,635,684 | 0 | 1 | null | 2016-08-13T20:30:32 | 2016-08-13T20:30:31 | null | UTF-8 | Python | false | false | 11,595 | py | # (C) Copyright 2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import pytest
import json
from opstestfw.switch.CLI import *
from opstestfw import *
topoDict = {"topoType": "physical",
"topoExecution": 3000,
"topoDevices": "dut01 wrkston01",
"topoLinks": "lnk01:dut01:wrkston01",
"topoFilters": "dut01:system-category:switch,\
wrkston01:system-category:workstation,\
wrkston01:docker-image:host/freeradius-ubuntu",
"topoLinkFilter": "lnk01:dut01:interface:eth0"}
switchMgmtAddr = "10.10.10.2"
restClientAddr = "10.10.10.3"
def switch_reboot(dut01):
# Reboot switch
LogOutput('info', '### Reboot switch ###\n')
dut01.Reboot()
rebootRetStruct = returnStruct(returnCode=0)
return rebootRetStruct
def config_rest_environment(dut01, wrkston01):
global switchMgmtAddr
global restClientAddr
retStruct = GetLinuxInterfaceIp(deviceObj=dut01)
assert retStruct.returnCode() == 0, 'Failed to get linux interface\
ip on switch'
LogOutput('info', '### Successful in getting linux interface ip on\
the switch ###\n')
switchIpAddr = retStruct.data
retStruct = InterfaceIpConfig(deviceObj=dut01,
interface="mgmt",
addr=switchMgmtAddr, mask=24, config=True)
assert retStruct.returnCode() == 0, 'Failed to configure IP on switchport'
LogOutput('info', '### Successfully configured ip on switch port ###\n')
cmdOut = dut01.cmdVtysh(command="show run")
LogOutput('info', '### Running config of the switch:\n' + cmdOut + '\
###\n')
LogOutput('info', '### Configuring workstations ###\n')
retStruct = wrkston01.NetworkConfig(
ipAddr=restClientAddr,
netMask="255.255.255.0",
broadcast="140.1.2.255",
interface=wrkston01.linkPortMapping['lnk01'],
config=True)
assert retStruct.returnCode() == 0, 'Failed to configure IP on\
workstation'
LogOutput('info', '### Successfully configured IP on workstation ###\n')
cmdOut = wrkston01.cmd("ifconfig " + wrkston01.linkPortMapping['lnk01'])
LogOutput('info', '### Ifconfig info for workstation 1:\n' + cmdOut + '\
###\n')
retStruct = GetLinuxInterfaceIp(deviceObj=wrkston01)
assert retStruct.returnCode() == 0, 'Failed to get linux interface\
ip on switch'
LogOutput('info', '### Successful in getting linux interface ip on the\
workstation ###\n')
switchIpAddr = retStruct.data
retStruct = returnStruct(returnCode=0)
return retStruct
def deviceCleanup(dut01, wrkston01):
retStruct = wrkston01.NetworkConfig(
ipAddr=restClientAddr,
netMask="255.255.255.0",
broadcast="140.1.2.255",
interface=wrkston01.linkPortMapping['lnk01'],
config=False)
assert retStruct.returnCode() == 0, 'Failed to unconfigure IP address\
on workstation 1'
LogOutput('info', '### Successfully unconfigured ip on Workstation ###\n')
cmdOut = wrkston01.cmd("ifconfig " + wrkston01.linkPortMapping['lnk01'])
LogOutput('info', '### Ifconfig info for workstation 1:\n' + cmdOut + '\
###')
retStruct = InterfaceIpConfig(deviceObj=dut01,
interface="mgmt",
addr=switchMgmtAddr, mask=24, config=False)
assert retStruct.returnCode() == 0, 'Failed to unconfigure IP address\
on dut01 port'
LogOutput('info', '### Unconfigured IP address on dut01 port ###\n')
cmdOut = dut01.cmdVtysh(command="show run")
LogOutput('info', 'Running config of the switch:\n' + cmdOut)
retStruct = returnStruct(returnCode=0)
return retStruct
def restTestFansid(wrkston01):
data = {
"configuration": {
"direction": "f2b",
"other_config": {
"key": "fan_speed_override"},
"speed": "fast",
"external_ids": {}}}
retStruct = wrkston01.RestCmd(
switch_ip=switchMgmtAddr,
url="/rest/v1/system/subsystems/base/fans/base-2R",
method="PUT",
data=data)
assert retStruct.returnCode(
) == 0, 'Failed to Execute rest command \
"PUT for url=/rest/v1/system/subsystems/base/fans/base-2R"'
LogOutput('info', '### Success in executing the rest command "PUT \
for url=/rest/v1/system/subsystems/base/fans/base-2R" ###\n')
LogOutput('info', 'http return code ' + retStruct.data['http_retcode'])
assert retStruct.data['http_retcode'].find(
'200') != -1, 'Rest PUT Fanid Failed for base-2R\n' +\
retStruct.data['response_body']
LogOutput('info', '### Success in Rest Fanid PUT method for base-2R###\n')
LogOutput('info', '###' + retStruct.data['response_body'] + '###\n')
retStruct = wrkston01.RestCmd(
switch_ip=switchMgmtAddr,
url="/rest/v1/system/subsystems/base/fans/base-2R",
method="GET")
assert retStruct.returnCode(
) == 0, 'Failed to Execute rest command \
"GET for url=/rest/v1/system/subsystems/base/fans/base-2R"'
LogOutput('info', '### Success in executing the rest command \
"GET for url=/rest/v1/system/subsystems/base/fans/base-2R" ###\n')
LogOutput('info', 'http return code ' + retStruct.data['http_retcode'])
assert retStruct.data['http_retcode'].find(
'200') != -1, 'Rest GET Fansid Failed\n' +\
retStruct.data['response_body']
LogOutput('info', '### Success in Rest GET Fansid ###\n')
LogOutput('info', '###' + retStruct.data['response_body'] + '###\n')
json_data = retStruct.data['response_body']
data_dict = json.loads(json_data)
data_config = data_dict["configuration"]
data_otherconfig = data_config["other_config"]
assert data_config[
"speed"] == 'normal', 'Failed in checking the GET METHOD JSON\
response validation for Fan speed'
LogOutput('info', '### Success in Rest GET system for Fan speed ###\n')
assert data_otherconfig[
"key"] == 'fan_speed_override', 'Failed in checking the GET METHOD\
JSON response validation for Fan Key'
LogOutput('info', '### Success in Rest GET system for Fan Key ###\n')
assert data_config[
"direction"] == 'f2b', 'Failed in checking the GET METHOD JSON\
response validation for Fan Direction'
LogOutput('info', '### Success in Rest GET system for Fan Direction ###\n')
retStruct = wrkston01.RestCmd(
switch_ip=switchMgmtAddr,
url="/rest/v1/system/subsystems/base/fans/base-2R",
method="DELETE")
assert retStruct.returnCode(
) == 0, 'Failed to Execute rest command \
"DELET for url=/rest/v1/system/subsystems/base/fans/base-2R"'
LogOutput('info', '### Success in executing the rest command \
"DELETE for url=/rest/v1/system/subsystems/base/fans/base-2R" ###\n')
LogOutput('info', 'http return code ' + retStruct.data['http_retcode'])
assert retStruct.data['http_retcode'].find(
'204') != -1, 'Rest DELETE Fansid Failed\n' +\
retStruct.data['response_body']
LogOutput('info', '### Success in Rest DELETE Fansid ###\n')
LogOutput('info', '###' + retStruct.data['response_body'] + '###\n')
retStruct = wrkston01.RestCmd(
switch_ip=switchMgmtAddr,
url="/rest/v1/system/subsystems/base/fans/base-2R",
method="GET")
assert retStruct.returnCode(
) == 0, '"Success in executing the rest command" \
"GET as not expected url=/rest/v1/system/subsystems/base/fans/base-2RL"'
LogOutput('info', '### Failed to Execute rest command \
"GET as expected url=/rest/v1/system/subsystems/base/fans/base-2R" ###\n')
LogOutput('info', 'http return code ' + retStruct.data['http_retcode'])
assert retStruct.data['http_retcode'].find(
'404') != -1, 'Rest GET method not return code 404 Fansid Failed\n' +\
retStruct.data['response_body']
LogOutput('info', '### Success in Rest GET method with http code 404\
Fansid ###\n')
LogOutput('info', '###' + retStruct.data['response_body'] + '###\n')
retStruct = returnStruct(returnCode=0)
return retStruct
class Test_ft_framework_rest:
def setup_class(cls):
# Create Topology object and connect to devices
Test_ft_framework_rest.testObj = testEnviron(topoDict=topoDict)
Test_ft_framework_rest.topoObj = \
Test_ft_framework_rest.testObj.topoObjGet()
wrkston01Obj = Test_ft_framework_rest.topoObj.deviceObjGet(
device="wrkston01")
wrkston01Obj.CreateRestEnviron()
def teardown_class(cls):
# Terminate all nodes
Test_ft_framework_rest.topoObj.terminate_nodes()
def test_reboot_switch(self):
LogOutput('info', '##############################################\n')
LogOutput('info', '### Reboot the switch ###\n')
LogOutput('info', '##############################################\n')
dut01Obj = self.topoObj.deviceObjGet(device="dut01")
retStruct = switch_reboot(dut01Obj)
assert retStruct.returnCode() == 0, 'Failed to reboot Switch'
LogOutput('info', '### Successful in Switch Reboot piece ###\n')
def test_config_rest_environment(self):
LogOutput('info', '##############################################\n')
LogOutput('info', '### Configure REST environment ###\n')
LogOutput('info', '##############################################\n')
dut01Obj = self.topoObj.deviceObjGet(device="dut01")
wrkston01Obj = self.topoObj.deviceObjGet(device="wrkston01")
retStruct = config_rest_environment(dut01Obj, wrkston01Obj)
assert retStruct.returnCode() == 0, 'Failed to config REST environment'
LogOutput('info', '### Successful in config REST environment ###\n')
def test_restTestFansid(self):
LogOutput('info', '##############################################\n')
LogOutput('info', '### Testing REST Fanid basic functionality ###\n')
LogOutput('info', '##############################################\n')
wrkston01Obj = self.topoObj.deviceObjGet(device="wrkston01")
retStruct = restTestFansid(wrkston01Obj)
assert retStruct.returnCode() == 0, 'Failed to test rest Fansid'
LogOutput('info', '### Successful in test rest Fansid ###\n')
def test_clean_up_devices(self):
LogOutput('info', '##############################################\n')
LogOutput('info', '### Device Cleanup - rolling back config ###\n')
LogOutput('info', '##############################################\n')
dut01Obj = self.topoObj.deviceObjGet(device="dut01")
wrkston01Obj = self.topoObj.deviceObjGet(device="wrkston01")
retStruct = deviceCleanup(dut01Obj, wrkston01Obj)
assert retStruct.returnCode() == 0, 'Failed to cleanup device'
LogOutput('info', '### Successfully Cleaned up devices ###\n')
| [
"[email protected]"
] | |
ea2cc4a918ce7233d5fd7fc4ced1465a7724a886 | 5376007035bf5aebb57c4d4f788098c9706ebe44 | /api/serializers/store.py | bf4b7d5c0a1d2081c3fe855a0077f072ecdf34a1 | [] | no_license | oyeolamilekan/monoapp | 6c0f49cc12a167bc0343648ae63f4e9c864bb130 | 939de5a1bb65e9bc48e48662f2ccffef280ffe10 | refs/heads/master | 2022-12-12T04:26:49.515305 | 2019-09-07T11:28:51 | 2019-09-07T11:28:51 | 192,071,474 | 0 | 0 | null | 2022-12-08T03:03:11 | 2019-06-15T11:35:50 | Python | UTF-8 | Python | false | false | 593 | py | """
This serializer handles the serialization of the store object
"""
from rest_framework.serializers import ModelSerializer
from shop.models import Shop
class ShopSerializer(ModelSerializer):
"""[the shop info]
Arguments:
{[ inherits from serializer class rest framework]} -- [description]
"""
class Meta:
model = Shop
fields = (
"id",
"user",
"slug",
"title",
"categories",
"phone_number",
"address",
"description",
"logo",
)
| [
"[email protected]"
] | |
24b60ba993defbd5d007b35259063ce0e9692859 | f4cf6af590a5b680bbd974d9b201f70dafebbe71 | One/__init__.py | 541f302c256956fbc75964ed9abbc484fcfbbc63 | [] | no_license | Felixshao/Drawhook-1 | ffed4d2d8e297e3bcd9e6ab94fd0d4e5f032c6ea | b7a6a97d3d5cd6375ace1cf951a3716c84779060 | refs/heads/master | 2023-01-28T11:56:58.995120 | 2021-03-06T06:56:17 | 2021-03-06T06:56:17 | 345,023,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | # _*_ coding: utf-8 _*_
#
| [
"[email protected]"
] | |
e0a16314f9011bec7baf07a499ee6dc14a3fd84c | d838bed08a00114c92b73982a74d96c15166a49e | /docs/data/learn/Bioinformatics/input/ch7_code/src/Stepik.7.3.ExerciseBreak.ReduceMatrixAndTreeSizeForDegreeGreaterThan3.py | 8c78a252e9bb67b680c446f51bfc4e3cd9a48b02 | [] | no_license | offbynull/offbynull.github.io | 4911f53d77f6c59e7a453ee271b1e04e613862bc | 754a85f43159738b89dd2bde1ad6ba0d75f34b98 | refs/heads/master | 2023-07-04T00:39:50.013571 | 2023-06-17T20:27:05 | 2023-06-17T23:27:00 | 308,482,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,617 | py | # EXERCISE BREAK
#
# Exercise Break: We have just described how to reduce the size of the tree as well as the dimension of the distance
# matrix D if the parent node (m) has degree 3. Design a similar approach in the case that the degree of m is larger
# than 3.
# Recall that for m having a degree of 3...
#
# i * * k
# \ /
# m *-------*
# / \
# j * * l
#
# | | i | j | k | l |
# |---|---|---|---|---|
# | i | ? | ? | ? | ? |
# | j | ? | ? | ? | ? |
# | k | ? | ? | ? | ? |
# | l | ? | ? | ? | ? |
#
# ... you can remove the leaf nodes i and j, leaving you with a new leaf node of just m. This is possible because the
# distance between m and other leaf nodes may be derived just from the weights in the original matrix...
#
# dist(m,k) = (dist(i,k) + dist(j,k) - dist(i,j)) / 2
# dist(m,l) = (dist(i,l) + dist(j,l) - dist(i,j)) / 2
#
# Once these distances have been calculated, you can remove i and j from the tree and replace them in the distance
# matrix with just m...
#
# * k
# /
# m *-------*
# \
# * l
#
# | | m | k | l |
# |---|---|---|---|
# | m | ? | ? | ? |
# | k | ? | ? | ? |
# | l | ? | ? | ? |
#
# ANSWER
#
# How would this work if m had a degree of 4?
#
# * a
# /
# *
# i * / \
# \ / * b
# m *
# / \ * c
# j * \ /
# *
# \
# * d
#
# | | i | j | a | b | c | d |
# |---|---|---|---|---|---|---|
# | i | ? | ? | ? | ? | ? | ? |
# | j | ? | ? | ? | ? | ? | ? |
# | a | ? | ? | ? | ? | ? | ? |
# | b | ? | ? | ? | ? | ? | ? |
# | c | ? | ? | ? | ? | ? | ? |
# | d | ? | ? | ? | ? | ? | ? |
#
# Do the same thing as before: Calculate the distance from m to every other leaf node just as you did for degree = 3...
#
# dist(m,a) = (dist(i,a) + dist(j,a) - dist(i,j)) / 2
# dist(m,b) = (dist(i,b) + dist(j,b) - dist(i,j)) / 2
# dist(m,c) = (dist(i,c) + dist(j,c) - dist(i,j)) / 2
# dist(m,d) = (dist(i,d) + dist(j,d) - dist(i,j)) / 2
#
# ... which ends up resulting in...
#
# * a
# /
# *
# / \
# / * b
# m *
# \ * c
# \ /
# *
# \
# * d
#
# | | m | a | b | c | d |
# |---|---|---|---|---|---|
# | m | ? | ? | ? | ? | ? |
# | a | ? | ? | ? | ? | ? |
# | b | ? | ? | ? | ? | ? |
# | c | ? | ? | ? | ? | ? |
# | d | ? | ? | ? | ? | ? |
#
# I'm fairly certain this is right, but I haven't tested it out.
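#
# A minimal sketch of the reduction in code. The helper below is hypothetical
# (not part of the exercise); D is assumed to be a dict-of-dicts distance
# matrix keyed by leaf name, and i, j are the two leaves hanging off parent m.


def merge_leaves_into_parent(D, i, j, m):
    dist_ij = D[i][j]
    # dist(m,k) = (dist(i,k) + dist(j,k) - dist(i,j)) / 2 for every other leaf k
    D[m] = {k: (D[i][k] + D[j][k] - dist_ij) / 2
            for k in D if k not in (i, j, m)}
    for k, d in D[m].items():
        D[k][m] = d
    # remove i and j from both axes of the matrix
    for leaf in (i, j):
        del D[leaf]
    for row in D.values():
        row.pop(i, None)
        row.pop(j, None)
    return D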
| [
"[email protected]"
] | |
81dc0357d63777f89ea036e852b3efe56feaf9ab | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/205/usersdata/273/87079/submittedfiles/questao2_av1.py | bf918e1ddadf59e5328db54d5ee0e1da3c292262 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | # -*- coding: utf-8 -*-
# START HERE BELOW
a = int(input('Enter a chosen number: '))
b = int(input('Enter a chosen number: '))
c = int(input('Enter a chosen number: '))
d = int(input('Enter a chosen number: '))
e = int(input('Enter a chosen number: '))
f = int(input('Enter a chosen number: '))
asort = int(input('Enter a drawn number: '))
bsort = int(input('Enter a drawn number: '))
csort = int(input('Enter a drawn number: '))
dsort = int(input('Enter a drawn number: '))
esort = int(input('Enter a drawn number: '))
fsort = int(input('Enter a drawn number: '))
# Count how many chosen numbers were drawn. The original while loop never
# executed, because n1 was set to a immediately before testing a != n1.
cont = 0
for n in (a, b, c, d, e, f):
    if n == asort or n == bsort or n == csort or n == dsort or n == esort or n == fsort:
        cont = cont + 1
if cont == 3:
    print('Terno')
elif cont == 4:
    print('Quadra')
elif cont == 5:
    print('Quina')
elif cont == 6:
    print('Sena')
else:
    print('No luck')
| [
"[email protected]"
] | |
22d3fe7a3b1adedf2070a101426b7e1d4a883601 | 6320fef2ea7376c2b35f97f1a5af004e90f09098 | /1-2주차 실습(복습)/venv/Lib/site-packages/pyloco/plxtask.py | c7e5c709a9b0287db15a28f62f3226fa92395ce8 | [] | no_license | Dplo1514/ploaistudy | 7aa08d7f71653748a9e32dcc09ee8f6cec0aaed9 | e35e42b1e5f0c90cc1e2a59993a1ef73d8872d0c | refs/heads/master | 2023-09-03T00:45:55.601651 | 2021-10-24T12:19:38 | 2021-10-24T12:19:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,310 | py | # -*- coding: utf-8 -*-
"""plx module."""
from __future__ import unicode_literals
import os
import re
import pydoc
from pyloco.task import Task
from pyloco.error import UsageError
from pyloco.util import parse_param_option, pyloco_formatter, system, pyloco_import
from pyloco.base import pyloco_builtins
_pat_section = r"^\s*\[(?P<sec>.+)\]\s*\Z"
_pat_attr_section = r"^attribute\s*\*\Z"
_pat_comment = r"^\s*#"
_pat_continue = r".*\\\s*\Z"
_pat_ident = r"(?P<ident>[^\d\W]\w*)"
_pat_id = r"[^\d\W]\w*"
_re_section = re.compile(_pat_section)
_re_attr_section = re.compile(_pat_attr_section)
_re_comment = re.compile(_pat_comment)
_re_continue = re.compile(_pat_continue)
# _re_ident = re.compile(_pat_ident)
_re_plxcmd = re.compile(r"^[^@]+(@\s*"+_pat_id+r"\s*)+\s*=")
def read_plx(path):
entry_section = ("__entry__", [])
attr_section = ("attribute*", [])
section = entry_section
plx_sections = []
with open(path) as fplx:
contflag = False
item = ""
for line in fplx.read().splitlines():
if _re_comment.match(line):
contflag = False
item = ""
continue
if contflag:
contflag = False
if _re_continue.match(line):
item += line.rstrip()[:-1]
contflag = True
continue
item += line
if item:
match = _re_section.match(item)
if match:
header = match.group("sec")
section = (header.strip(), [])
if _re_attr_section.match(section[0]):
attr_section = section
else:
plx_sections.append(section)
else:
section[1].append(item)
item = ""
else:
section[1].append(item)
return entry_section, attr_section, plx_sections
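# A rough sketch of the file shape read_plx() accepts, inferred from the
# regexes above (the exact grammar here is an assumption, not documented):
#
#     # comment lines are skipped
#     name @ arg = --input value      <- a command line, caught by _re_plxcmd
#     [attribute*]                    <- '[...]' headers open a new section;
#     [forward*]                         a trailing '*' marks it special
#     item = first half \             <- a trailing backslash continues the
#         second half                    item onto the next line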
def collect_plx_command(item):
line = None
plx_cmd = None
match = _re_plxcmd.match(item)
if match:
args = item[:match.span()[1]-1]
opt = parse_param_option(args, False, None)
plx_cmd = (opt, item[match.span()[1]:])
else:
line = item
return plx_cmd, line
class PlXTask(Task):
"""PlX task
PlX task merges the strengths of Python, Shell script,
and INI file syntax
"""
_version_ = "0.1.0"
def _setup(self, taskpath):
self.plx_entry_section, self.plx_attr_section, self.plx_sections = \
read_plx(taskpath)
self.plx_entry_body, self.plx_argdefs, self.plx_fwddefs, \
self.plx_shrdefs = [], {}, {}, {}
self._env["__file__"] = os.path.abspath(taskpath)
for line in self.plx_entry_section[1]:
_match, _line = collect_plx_command(line)
if _match:
opt = _match[0]
name = opt.vargs[0]
ctx = opt.context[0]
if ctx == "arg":
rhs = parse_param_option(_match[1], True, None)
self.plx_argdefs[name] = rhs
elif ctx == "forward":
rhs = parse_param_option(_match[1], True, None)
self.plx_fwddefs[name] = rhs
elif ctx == "shared":
rhs = parse_param_option(_match[1], True, None)
self.plx_shrdefs[name] = rhs
else:
self.plx_entry_body.append(line)
if _line is not None:
self.plx_entry_body.append(_line)
for opt in self.plx_argdefs.values():
if opt.vargs:
if opt.vargs[0].startswith("-"):
self.add_option_argument(*opt.vargs, **opt.kwargs)
else:
self.add_data_argument(*opt.vargs, **opt.kwargs)
for fwd in self.plx_fwddefs.values():
self.register_forward(*fwd.vargs, **fwd.kwargs)
for shr in self.plx_shrdefs.values():
            self.add_shared(*shr.vargs, **shr.kwargs)
self._section_handlers = {
"forward": self.run_forward_section,
"shared": self.run_shared_section,
}
self._command_handlers = {
"shell": self.run_shell_command,
"manager": self.run_manager_command,
}
lenv = {}
if self.plx_attr_section:
self.run_section(self.plx_attr_section[1], lenv=lenv)
for key, value in lenv.items():
if key in ("_doc_",):
setattr(self, "_%s_" % key, value)
setattr(self, key, value)
if "_name_" not in lenv:
_, self._name_ = os.path.split(taskpath)
def run(self, argv, subargv=None, forward=None):
if not argv:
raise UsageError("PlX Task is not found."
" Please check plx path.")
elif not os.path.isfile(argv[0]):
raise UsageError("PlX Task '%s' is not found."
" Please check plx path." % str(argv[0]))
self._setup(argv[0])
prog = os.path.basename(getattr(self, "_path_", self._name_))
self._parser.prog = self.get_mgrname() + " " + prog[-20:]
if hasattr(self, "__doc__") and self.__doc__:
self._parser.desc, self._parser.long_desc = pydoc.splitdoc(
self.__doc__)
return super(PlXTask, self).run(argv[1:], subargv=subargv,
forward=forward)
def perform(self, targs):
for plx_dest, opt in self.plx_argdefs.items():
dest = opt.kwargs.get("dest", opt.vargs[0])
if dest in self._env["__arguments__"]:
argval = self._env["__arguments__"].pop(dest, None)
self._env["__arguments__"][plx_dest] = argval
out = self.run_section(self.plx_entry_body)
if out == 0:
for hdr, body in self.plx_sections:
# check hdr
if hdr.endswith("*"):
special_sec = True
opt = parse_param_option(hdr[:-1], False,
self.parent.shared)
else:
special_sec = False
opt = parse_param_option(hdr, False, self.parent.shared)
env = dict(self.parent.shared)
env["__builtins__"] = pyloco_builtins
sec_check = all([eval(c, env) for c in opt.context])
if sec_check:
sec_name = opt.vargs[0]
# find sec_handler
if special_sec:
if sec_name in self._section_handlers:
sec_handler = self._section_handlers[sec_name]
else:
raise UsageError(
"Special section '%s' is not registered."
" Please register first." % sec_name
)
else:
sec_handler = self.run_section
out = sec_handler(body, *opt.vargs[1:], **opt.kwargs)
return out
def run_section(self, body, *vargs, **kwargs):
env = dict(self._env)
env.update(self.parent.shared)
lenv = kwargs.get("lenv", {})
lines = []
hidx = 0
for b in body:
l1 = b.replace("{", "{{").replace("}", "}}")
l2 = l1.replace("__{{", "{").replace("}}__", "}")
l3 = pyloco_formatter.vformat(l2, [], env)
_match, _line = collect_plx_command(l3)
if _match:
opt = _match[0]
name = opt.vargs[0]
cmd_handler = None
for ctx in opt.context:
if ctx in self._command_handlers:
cmd_handler = self._command_handlers[ctx]
break
# rhs = parse_param_option(_match[1], True, None)
if cmd_handler:
idx_space = l3.find(name)
fname = "__plx_cmd_handler%d__" % hidx
hidx += 1
env[fname] = cmd_handler
vargs = ", ".join(opt.vargs[1:])
kwargs = ", ".join(["%s=%s" % (k, v) for k, v in
opt.kwargs.items()])
if kwargs:
args = "%s, %s" % (vargs, kwargs)
else:
args = vargs
cmd = (_match[1].replace('\\"', '__EDQ__')
.replace('"', '\\"').replace('__EDQ__', '\\\\\\"'))
cmd = cmd.strip()
if args:
lines.append(l3[:idx_space] + "%s = " % name + fname +
'("%s", %s)' % (cmd, args))
else:
lines.append(l3[:idx_space] + "%s = " % name + fname +
'("%s")' % cmd)
else:
raise UsageError("command handler for '%s' is not found." %
opt.context[0])
else:
lines.append(l3)
exec("\n".join(lines), env, lenv)
self.parent.shared.update(lenv)
return lenv["out"][0] if "out" in lenv else 0
def run_forward_section(self, body, *vargs, **kwargs):
lenv = {}
if self.run_section(body, lenv=lenv) == 0:
fwds = {}
for fwd, opt in self.plx_fwddefs.items():
if fwd in lenv:
fwds[opt.vargs[0]] = lenv[fwd]
self.add_forward(**fwds)
return 0
return -1
def run_shared_section(self, body, *vargs, **kwargs):
lenv = {}
if self.run_section(body, lenv=lenv) == 0:
shrs = {}
for shr, opt in self.plx_shrdefs.items():
if shr in lenv:
shrs[opt.vargs[0]] = lenv[shr]
self.add_shared(**shrs)
return 0
return -1
def run_shell_command(self, cmd, *vargs, **kwargs):
return system(cmd)
def run_manager_command(self, cmd, *vargs, **kwargs):
mgr = pyloco_import(self.get_mgrname())
if not cmd:
return (-1, None)
if cmd.startswith("-"):
if cmd.startswith("-- "):
return mgr.perform("", "", cmd[3:])
else:
idx = cmd.find("-- ")
if idx > 0:
return mgr.perform("", cmd[:idx], cmd[idx+3:])
else:
return mgr.perform("", cmd)
else:
return mgr.perform("", "", cmd)
| [
"[email protected]"
] | |
edd67f99568d16c9b74838ae193917ba2ac2bd67 | b8a803694c283a5acd13ab6760a36710884ab24f | /llvm/tests/test_operands.py | ef68b9eebfa0080d14e29767806e6305f85e337b | [
"NCSA",
"BSD-3-Clause"
] | permissive | llvmpy/llvmpy | 8a4c31e731364ead802231b97e058b8f8c444f96 | 13130fe35f1fb03a7051ad46c36146002391a6fa | refs/heads/master | 2016-09-05T16:48:54.694686 | 2015-04-28T16:21:34 | 2015-04-28T16:21:34 | 3,375,197 | 155 | 13 | null | 2015-05-27T18:36:45 | 2012-02-07T07:09:59 | HTML | UTF-8 | Python | false | false | 2,076 | py | import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from llvm.core import Module
from .support import TestCase, tests
class TestOperands(TestCase):
# implement a test function
test_module = """
define i32 @prod(i32, i32) {
entry:
%2 = mul i32 %0, %1
ret i32 %2
}
define i32 @test_func(i32, i32, i32) {
entry:
%tmp1 = call i32 @prod(i32 %0, i32 %1)
%tmp2 = add i32 %tmp1, %2
%tmp3 = add i32 %tmp2, 1
%tmp4 = add i32 %tmp3, -1
%tmp5 = add i64 -81985529216486895, 12297829382473034410
ret i32 %tmp4
}
"""
def test_operands(self):
m = Module.from_assembly(StringIO(self.test_module))
test_func = m.get_function_named("test_func")
prod = m.get_function_named("prod")
# test operands
i1 = test_func.basic_blocks[0].instructions[0]
i2 = test_func.basic_blocks[0].instructions[1]
i3 = test_func.basic_blocks[0].instructions[2]
i4 = test_func.basic_blocks[0].instructions[3]
i5 = test_func.basic_blocks[0].instructions[4]
self.assertEqual(i1.operand_count, 3)
self.assertEqual(i2.operand_count, 2)
self.assertEqual(i3.operands[1].z_ext_value, 1)
self.assertEqual(i3.operands[1].s_ext_value, 1)
self.assertEqual(i4.operands[1].z_ext_value, 0xffffffff)
self.assertEqual(i4.operands[1].s_ext_value, -1)
self.assertEqual(i5.operands[0].s_ext_value, -81985529216486895)
self.assertEqual(i5.operands[1].z_ext_value, 12297829382473034410)
self.assert_(i1.operands[-1] is prod)
self.assert_(i1.operands[0] is test_func.args[0])
self.assert_(i1.operands[1] is test_func.args[1])
self.assert_(i2.operands[0] is i1)
self.assert_(i2.operands[1] is test_func.args[2])
self.assertEqual(len(i1.operands), 3)
self.assertEqual(len(i2.operands), 2)
self.assert_(i1.called_function is prod)
tests.append(TestOperands)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
ef79cfd8757f6d078443dd0489deeb6ba381380c | ad6ad38511b214d1d6cc183535d0d4ff463eadf9 | /vulscan_Project/pageUtil.py | 83b026b6a28da1424c2632eaf0628ffc050d2d5e | [] | no_license | 0ps/VulScanner | abf8417f00dec8f7485246fc208a157d96207180 | de2519655c214ebfbe56c0278e6230afaae72559 | refs/heads/master | 2023-08-30T18:05:44.620650 | 2021-10-22T07:25:18 | 2021-10-22T07:25:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,428 | py | import re
def get_pages(page, last_page):
page_list = []
if last_page < 9:
for i in range(1, last_page + 1):
page_list.append(i)
return page_list
if page < 7:
for i in range(1, 9):
page_list.append(i)
page_list.extend(["...", last_page - 1, last_page])
elif page < last_page - 4:
page_list.extend([1, 2, "..."])
for i in range(-3, 3):
page_list.append(page + i)
page_list.extend(["...", last_page - 1, last_page])
else:
page_list.extend([1, 2, "..."])
for i in range(last_page - 7, last_page + 1):
page_list.append(i)
return page_list
def get_lastpage(count, each_num):
if count == 0:
return 1
if count % each_num == 0:
return int(count / each_num)
else:
return int(count / each_num) + 1
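# A quick worked example of the two helpers above (values are hypothetical):
#   get_lastpage(count=95, each_num=10)  ->  10  (int(95 / 10) + 1)
#   get_pages(page=5, last_page=10)      ->  [1, 2, 3, 4, 5, 6, 7, 8, '...', 9, 10]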
def get_ctx(ctx, list_name, all_list, page, last_page, query, base_path):
ctx[list_name] = all_list # 本页显示总列表
ctx["count"] = len(all_list)
ctx["page"] = page # 当前页数
ctx['notfirst'] = 0 if page == 1 else -1
ctx['notlast'] = 0 if page == last_page else 1
ctx['pages'] = get_pages(page, last_page)
ctx["last_page"] = last_page
ctx["query"] = query
base_path = re.sub(r"((\?)?(&)?page=\w*)", "", base_path)
ctx["page_url"] = base_path + ("&page=" if "?" in base_path else "?page=")
    return ctx
| [
"[email protected]"
] | |
661644011cb7fd52e289e875561f343ed8320b55 | 7b74696ff2ab729396cba6c203984fce5cd0ff83 | /analysis/migrations/0020_auto_20200524_0933.py | 80926a6c092f0a2ecf1c00f9e2138a97d0a500d0 | [
"MIT"
] | permissive | webclinic017/investtrack | e9e9a7a8caeecaceebcd79111c32b334c4e1c1d0 | 4aa204b608e99dfec3dd575e72b64a6002def3be | refs/heads/master | 2023-06-18T12:57:32.417414 | 2021-07-10T14:26:53 | 2021-07-10T14:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | # Generated by Django 3.0.2 on 2020-05-24 01:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('analysis', '0019_auto_20200524_0859'),
]
operations = [
migrations.AlterField(
model_name='stockstrategytestlog',
name='event_type',
field=models.CharField(choices=[('MARK_CP', '标记临界点'), ('DOWNLOAD', '下载历史交易'), ('MARK_EXP_PCT', '标记预期涨幅'), ('UPD_CP', '更新临界点'), ('UPD_DOWNLOAD', '更新下载历史交易'), ('UPD_EXP_PCT', '更新预期涨幅'), ('MARK_LH_PCT', '标记高低点涨幅'), ('UPD_LH_PCT', '更新高低点涨幅')], max_length=50, verbose_name='日志类型'),
),
migrations.AlterField(
model_name='tradestrategystat',
name='applied_period',
field=models.CharField(blank=True, choices=[('mm', '月线'), ('30', '30分钟'), ('dd', '日线'), ('60', '60分钟'), ('wk', '周线'), ('15', '15分钟')], default='60', max_length=2, verbose_name='应用周期'),
),
]
| [
"[email protected]"
] | |
3e7d5739d2701988a82fde01995009a2af4221b8 | d152aa407b78640648cdafc005c8c2f9ee722dd6 | /lib/project_data.py | b8ffd2ff7bb359fb198b434c953edde43acf4ce7 | [] | no_license | bsmith89/sc-validate-haplotypes | 7bc4033d853f5529b9159d5157484deb85832252 | 1e9325db95330b5cf3c37f9270382680876afcc1 | refs/heads/main | 2023-09-03T13:22:17.657279 | 2021-11-09T17:20:42 | 2021-11-09T17:20:42 | 401,805,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | def metagenotype_db_to_xarray(df):
"""Convert from project database schema to a StrainFacts metagenotype.
"""
return (
df.rename_axis(columns="allele")
.rename(columns=dict(alternative_tally="alt", reference_tally="ref"))
.rename_axis(index=dict(lib_id="sample", species_position="position"))
.stack()
.to_xarray()
.fillna(0)
.sortby("allele")
)
| [
"[email protected]"
] | |
4d8b302264ff7450e02482b29129980a08e00304 | 997551673e3f08d83b966e35bb55c192f35c44c6 | /tests/test_schemas.py | 1dbcc7fbf49e7c01cc2445106c6ad0bd1451aece | [
"MIT"
] | permissive | lockefox/pyRobinhood | 6069f533f8733e8199bd22eaf9fce8da5b345aac | 8cfd9adf384d2da9d61287f483b0038195e2f476 | refs/heads/master | 2021-09-06T22:42:04.548824 | 2018-02-12T20:09:28 | 2018-02-12T20:09:28 | 116,594,721 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,771 | py | """test_schemas.py: validate responses from live Robinhood.com endpoints"""
import pytest
import jsonschema
import requests
import helpers
def test_api_root():
"""validate / has all expected routes"""
endpoint_list = {
'mfa': 'https://api.robinhood.com/mfa/',
'margin_interest_charges': 'https://api.robinhood.com/cash_journal/margin_interest_charges/',
'margin_upgrades': 'https://api.robinhood.com/margin/upgrades/',
'instruments': 'https://api.robinhood.com/instruments/',
'quotes': 'https://api.robinhood.com/quotes/',
'accounts': 'https://api.robinhood.com/accounts/',
'orders': 'https://api.robinhood.com/orders/',
'subscription_fees': 'https://api.robinhood.com/subscription/subscription_fees/',
'id_documents': 'https://api.robinhood.com/upload/photo_ids/',
'portfolios': 'https://api.robinhood.com/portfolios/',
'markets': 'https://api.robinhood.com/markets/',
'wire_relationships': 'https://api.robinhood.com/wire/relationships/',
'ach_queued_deposit': 'https://api.robinhood.com/ach/queued_deposit/',
'subscriptions': 'https://api.robinhood.com/subscription/subscriptions/',
'wire_transfers': 'https://api.robinhood.com/wire/transfers/',
'dividends': 'https://api.robinhood.com/dividends/',
'notification_settings': 'https://api.robinhood.com/settings/notifications/',
'applications': 'https://api.robinhood.com/applications/',
'user': 'https://api.robinhood.com/user/',
'ach_relationships': 'https://api.robinhood.com/ach/relationships/',
'ach_deposit_schedules': 'https://api.robinhood.com/ach/deposit_schedules/',
'ach_iav_auth': 'https://api.robinhood.com/ach/iav/auth/',
'notifications': 'https://api.robinhood.com/notifications/',
'ach_transfers': 'https://api.robinhood.com/ach/transfers/',
'positions': 'https://api.robinhood.com/positions/',
'watchlists': 'https://api.robinhood.com/watchlists/',
'document_requests': 'https://api.robinhood.com/upload/document_requests/',
'edocuments': 'https://api.robinhood.com/documents/',
'password_reset': 'https://api.robinhood.com/password_reset/request/',
'password_change': 'https://api.robinhood.com/password_change/',
}
req = requests.get('https://api.robinhood.com/')
req.raise_for_status()
data = req.json()
assert data == endpoint_list
@pytest.mark.auth
def test_accounts_schema():
"""validate /accounts endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='accounts',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('accounts.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_applications_schema():
"""validate /applications endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='applications',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('applications.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_dividends_schema():
"""validate /dividends endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='dividends',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('dividends.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_documents_schema():
"""validate /documents endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='edocuments',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('documents.schema')
jsonschema.validate(result, schema)
def test_fundamentals_schema():
"""validate /fundamentals endpoint"""
result = helpers.raw_request_get(
endpoint_url='https://api.robinhood.com/fundamentals/',
params={'symbols': helpers.CONFIG.get('tests', 'good_stock_list')},
)
schema = helpers.load_schema('fundamentals.schema')
jsonschema.validate(result, schema)
def test_instruments_schema():
"""validate /instruments endpoint"""
# TODO: instruments from API ROOT
result = helpers.raw_request_get(
endpoint='instruments'
)
schema = helpers.load_schema('instruments.schema')
jsonschema.validate(result, schema)
def test_markets_schema():
"""validate /markets endpoint"""
result = helpers.raw_request_get(
endpoint='markets'
)
schema = helpers.load_schema('markets.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_mfa_schema():
"""validate /mfa endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='mfa',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('mfa.schema')
jsonschema.validate(result, schema)
def test_news_schema():
"""validate /midlands/news/ endpoint"""
# TODO: not on API ROOT?
result = helpers.raw_request_get(
endpoint_url='https://api.robinhood.com/midlands/news/',
params={'symbol': helpers.CONFIG.get('tests', 'good_stock')}
)
schema = helpers.load_schema('news.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_orders_schema():
"""validate /orders endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='orders',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('orders.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_portfolios_schema():
"""validate /orders endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='portfolios',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('portfolios.schema')
jsonschema.validate(result, schema)
def test_quotes_schema():
"""validate /quotes endpoint"""
# TODO: not on API ROOT?
result = helpers.raw_request_get(
endpoint='quotes',
params={'symbols': helpers.CONFIG.get('tests', 'good_stock_list')}
)
schema = helpers.load_schema('quotes.schema')
jsonschema.validate(result, schema)
@pytest.mark.auth
def test_user_schema():
"""validate /orders endpoint"""
token = helpers.xfail_can_auth()
result = helpers.raw_request_get(
endpoint='user',
headers={'Authorization': 'Token ' + token},
)
schema = helpers.load_schema('user.schema')
jsonschema.validate(result, schema)
| [
"[email protected]"
] | |
42b996ecf34bba801d2aeae1cf4adb58ea5b7312 | 9e7b9e91b8425061a5ad36e0dd630a799ec79f6f | /tensorflow_cookbook3.py | 1e58ae2fab2e6b25d2331356b42bf5460979e68e | [] | no_license | OlgaBelitskaya/colab_notebooks | c27fad60f7e4ca35287e2561487b5d9d82efde43 | d568149c8bcfb0025f7b09120ca44f639ac40efe | refs/heads/master | 2023-07-07T23:02:49.289280 | 2021-08-14T08:16:38 | 2021-08-14T08:16:38 | 158,067,383 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,894 | py | # -*- coding: utf-8 -*-
"""tensorflow_cookbook3.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1UXD9nxTS9s2EGkiRst59NREqX9Eiw3Zp
"""
# Commented out IPython magic to ensure Python compatibility.
from IPython.display import display,HTML
def dhtml(str):
display(HTML("""<style>
@import url('https://fonts.googleapis.com/css?family=Ewert&effect=3d');
</style><h1 class='font-effect-3d' style='font-family:Ewert; color:#ff355e'>
%s</h1>"""%str))
dhtml('Code Modules & Helpful Functions')
import numpy as np,pylab as pl,pandas as pd
import sys,h5py,urllib,zipfile
import tensorflow as tf
import tensorflow_hub as th
from sklearn.model_selection import train_test_split
fpath='https://olgabelitskaya.github.io/'
fw='weights.best.hdf5'
def prepro(x_train,y_train,x_test,y_test,n_class):
n=int(len(x_test)/2)
x_valid,y_valid=x_test[:n],y_test[:n]
x_test,y_test=x_test[n:],y_test[n:]
cy_train=tf.keras.utils.to_categorical(y_train,n_class)
cy_valid=tf.keras.utils.to_categorical(y_valid,n_class)
cy_test=tf.keras.utils.to_categorical(y_test,n_class)
df=pd.DataFrame([[x_train.shape,x_valid.shape,x_test.shape],
[y_train.shape,y_valid.shape,y_test.shape],
[cy_train.shape,cy_valid.shape,cy_test.shape]],
columns=['train','valid','test'],
index=['images','labels','encoded labels'])
display(df)
return [[x_train,x_valid,x_test],
[y_train,y_valid,y_test],
[cy_train,cy_valid,cy_test]]
def cb(fw):
early_stopping=tf.keras.callbacks\
.EarlyStopping(monitor='val_loss',patience=20,verbose=2)
checkpointer=tf.keras.callbacks\
.ModelCheckpoint(filepath=fw,save_best_only=True,verbose=2)
lr_reduction=tf.keras.callbacks\
.ReduceLROnPlateau(monitor='val_loss',verbose=2,
patience=5,factor=.8)
return [checkpointer,early_stopping,lr_reduction]
def display_resize(x_train,x_valid,x_test,
y_valid,cy_valid,pixels):
x_train=tf.image.resize(x_train,[pixels,pixels])
x_valid=tf.image.resize(x_valid,[pixels,pixels])
x_test=tf.image.resize(x_test,[pixels,pixels])
img=x_valid[1]
lbl='one example of resized images \nlabel: '+\
str(y_valid[1][0])+'=>'+str(cy_valid[1])+\
'\nshape: '+str(img.shape)
pl.imshow(img); pl.title(lbl)
return [x_train,x_valid,x_test]
def premodel(pixels,dense,mh,labels):
model=tf.keras.Sequential([
tf.keras.layers.Input((pixels,pixels,3),
name='input'),
th.KerasLayer(mh,trainable=True),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(dense,activation='relu'),
tf.keras.layers.Dropout(rate=.5),
tf.keras.layers.Dense(labels,activation='softmax')])
model.compile(optimizer='adam',metrics=['accuracy'],
loss='categorical_crossentropy')
display(model.summary())
return model
dhtml('Data Loading & Preprocessing')
zf='DecorColorImages.h5.zip'
input_file=urllib.request.urlopen(fpath+zf)
output_file=open(zf,'wb');
output_file.write(input_file.read())
output_file.close(); input_file.close()
zipf=zipfile.ZipFile(zf,'r')
zipf.extractall(''); zipf.close()
f=h5py.File(zf[:-4],'r')
keys=list(f.keys()); print(keys)
images=np.array(f[keys[2]])/255
labels=np.array(f[keys[1]]).astype('int').reshape(-1,1)-1
x_train1,x_test1,y_train1,y_test1=\
train_test_split(images,labels,test_size=.2,random_state=1)
del images,labels
[[x_train1,x_valid1,x_test1],
[y_train1,y_valid1,y_test1],
[cy_train1,cy_valid1,cy_test1]]=\
prepro(x_train1,y_train1,x_test1,y_test1,7)
(x_train2,y_train2),(x_test2,y_test2)=\
tf.keras.datasets.cifar10.load_data()
[[x_train2,x_valid2,x_test2],
[y_train2,y_valid2,y_test2],
[cy_train2,cy_valid2,cy_test2]]=\
prepro(x_train2/255,y_train2,x_test2/255,y_test2,10)
zf='LetterColorImages_123.h5.zip'
input_file=urllib.request.urlopen(fpath+zf)
output_file=open(zf,'wb');
output_file.write(input_file.read())
output_file.close(); input_file.close()
zipf=zipfile.ZipFile(zf,'r')
zipf.extractall(''); zipf.close()
f=h5py.File(zf[:-4],'r')
keys=list(f.keys()); print(keys)
images=np.array(f[keys[1]])/255
labels=np.array(f[keys[2]]).astype('int').reshape(-1,1)-1
x_train3,x_test3,y_train3,y_test3=\
train_test_split(images,labels,test_size=.2,random_state=1)
del images,labels
[[x_train3,x_valid3,x_test3],
[y_train3,y_valid3,y_test3],
[cy_train3,cy_valid3,cy_test3]]=\
prepro(x_train3,y_train3,x_test3,y_test3,33)
zf='FlowerColorImages.h5.zip'
input_file=urllib.request.urlopen(fpath+zf)
output_file=open(zf,'wb');
output_file.write(input_file.read())
output_file.close(); input_file.close()
zipf=zipfile.ZipFile(zf,'r')
zipf.extractall(''); zipf.close()
f=h5py.File(zf[:-4],'r')
keys=list(f.keys())
images=np.array(f[keys[0]])/255
labels=np.array(f[keys[1]]).astype('int').reshape(-1,1)
x_train4,x_test4,y_train4,y_test4=\
train_test_split(images,labels,test_size=.2,random_state=1)
del images,labels
[[x_train4,x_valid4,x_test4],
[y_train4,y_valid4,y_test4],
[cy_train4,cy_valid4,cy_test4]]=\
prepro(x_train4,y_train4,x_test4,y_test4,10)
dhtml('Pre-Trained Saved Models')
dhtml('#1')
[handle_base,pixels]=["mobilenet_v2_100_192",192]
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
[x_train1,x_valid1,x_test1]=\
display_resize(x_train1,x_valid1,x_test1,
y_valid1,cy_valid1,pixels)
model=premodel(pixels,1024,mhandle,7)
history=model.fit(x=x_train1,y=cy_train1,batch_size=16,
epochs=50,callbacks=cb(fw),
validation_data=(x_valid1,cy_valid1))
model.load_weights(fw)
model.evaluate(x_test1,cy_test1)
[handle_base,pixels]=["mobilenet_v2_140_224",224]
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
[x_train1,x_valid1,x_test1]=\
display_resize(x_train1,x_valid1,x_test1,
y_valid1,cy_valid1,pixels)
model=premodel(pixels,1024,mhandle,7)
history=model.fit(x=x_train1,y=cy_train1,batch_size=16,
epochs=50,callbacks=cb(fw),
validation_data=(x_valid1,cy_valid1))
model.load_weights(fw)
model.evaluate(x_test1,cy_test1)
del x_train1,x_valid1,x_test1,\
y_train1,y_valid1,y_test1,\
cy_train1,cy_valid1,cy_test1
dhtml('#2')
[handle_base,pixels]=["mobilenet_v2_050_96",96]
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
[x_train2,x_valid2,x_test2]=\
display_resize(x_train2,x_valid2,x_test2,
y_valid2,cy_valid2,pixels)
model=premodel(pixels,512,mhandle,10)
history=model.fit(x=x_train2,y=cy_train2,batch_size=64,
epochs=10,callbacks=cb(fw),
validation_data=(x_valid2,cy_valid2))
model.load_weights(fw)
model.evaluate(x_test2,cy_test2)
[handle_base,pixels]=["mobilenet_v2_075_96",96]
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
model=premodel(pixels,1024,mhandle,10)
history=model.fit(x=x_train2,y=cy_train2,batch_size=64,
epochs=10,callbacks=cb(fw),
validation_data=(x_valid2,cy_valid2))
model.load_weights(fw)
model.evaluate(x_test2,cy_test2)
del x_train2,x_valid2,x_test2,\
y_train2,y_valid2,y_test2,\
cy_train2,cy_valid2,cy_test2
dhtml('#3')
[handle_base,pixels]=["mobilenet_v2_050_96",96]
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
[x_train3,x_valid3,x_test3]=\
display_resize(x_train3,x_valid3,x_test3,
y_valid3,cy_valid3,pixels)
model=premodel(pixels,512,mhandle,33)
history=model.fit(x=x_train3,y=cy_train3,batch_size=64,
epochs=50,callbacks=cb(fw),
validation_data=(x_valid3,cy_valid3))
model.load_weights(fw)
model.evaluate(x_test3,cy_test3)
del x_train3,x_valid3,x_test3,\
y_train3,y_valid3,y_test3,\
cy_train3,cy_valid3,cy_test3
dhtml('#4')
[handle_base,pixels]=["mobilenet_v1_100_128",128]
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
model=premodel(pixels,512,mhandle,10)
history=model.fit(x=x_train4,y=cy_train4,batch_size=8,
epochs=50,callbacks=cb(fw),
validation_data=(x_valid4,cy_valid4))
model.load_weights(fw)
model.evaluate(x_test4,cy_test4)
[handle_base,pixels]=["mobilenet_v2_130_224",224]
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
[x_train4,x_valid4,x_test4]=\
display_resize(x_train4,x_valid4,x_test4,
y_valid4,cy_valid4,pixels)
model=premodel(pixels,512,mhandle,10)
history=model.fit(x=x_train4,y=cy_train4,batch_size=8,
epochs=50,callbacks=cb(fw),
validation_data=(x_valid4,cy_valid4))
model.load_weights(fw)
model.evaluate(x_test4,cy_test4)
| [
"[email protected]"
] | |
ba1050f4f449533e84541451c37cbe5468b1e375 | be84495751737bbf0a8b7d8db2fb737cbd9c297c | /tests2/materials/test_ndir_btdf.py | 68d82a3eb938def30d40fded9d7f0910a8055d81 | [] | no_license | mario007/renmas | 5e38ff66cffb27b3edc59e95b7cf88906ccc03c9 | bfb4e1defc88eb514e58bdff7082d722fc885e64 | refs/heads/master | 2021-01-10T21:29:35.019792 | 2014-08-17T19:11:51 | 2014-08-17T19:11:51 | 1,688,798 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,441 | py |
import unittest
from random import random
from tdasm import Runtime
import renmas2
class TransmissionSampling(unittest.TestCase):
def setUp(self):
pass
def asm_code1(self, ren):
code = """
#DATA
"""
code += ren.structures.structs(("hitpoint",)) + """
uint32 next_dir_ptr
hitpoint hp
#CODE
; call next direction of material
mov eax, hp
call dword [next_dir_ptr]
#END
"""
return code
def test_transmission_sampling(self):
factory = renmas2.Factory()
ren = renmas2.Renderer()
runtime = Runtime()
mat = renmas2.core.material.Material(ren.converter.zero_spectrum())
eta_in = 1.3
eta_out = 1.0
sampling = renmas2.materials.PerfectTransmissionSampling(eta_in, eta_out)
mat.add(sampling)
eta_in = ren.converter.zero_spectrum().set(1.3)
eta_out = ren.converter.zero_spectrum().set(1.0)
fresnel = renmas2.materials.FresnelDielectric(eta_in, eta_out)
spec = ren.converter.create_spectrum((0.5, 0.5, 0.5))
perf_spec = renmas2.materials.PerfectTransmission(spec, fresnel, 1.0)
mat.add(perf_spec)
normal = factory.vector(2, 4.5, 5)
normal.normalize()
hit_point = factory.vector(3, 5, 6)
wo = factory.vector(-2, 1, 0)
wo.normalize()
hp = renmas2.shapes.HitPoint(1.5, hit_point, normal, 0)
hp.wo = wo
hp.fliped = False
ren.macro_call.set_runtimes([runtime])
mat.next_direction_btdf_asm([runtime], ren.structures, ren.assembler)
mc = ren.assembler.assemble(self.asm_code1(ren))
ds = runtime.load("test", mc)
ds["next_dir_ptr"] = runtime.address_module(mat.nd_asm_name)
ds["hp.normal"] = (normal.x, normal.y, normal.z, 0.0)
ds["hp.t"] = 1.5
ds["hp.hit"] = (hit_point.x, hit_point.y, hit_point.z, 0.0)
ds["hp.wo"] = (wo.x, wo.y, wo.z, 0.0)
ds["hp.fliped"] = 0
runtime.run("test")
mat.next_direction_btdf(hp)
print ("Python")
print (hp.wi)
print (hp.ndotwi)
print (hp.specular)
print (hp.f_spectrum)
print ("ASM")
print (ds["hp.wi"])
print (ds["hp.ndotwi"])
print (ds["hp.specular"])
print (ds["hp.f_spectrum.values"])
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
f0a94d9d2d473018865e32f6034970de6d891486 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc080/B/3769232.py | ebaf3d3a32aa808a1603111549af9d0b2c17c8ed | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | import sys,collections
def solve():
H,W = map(int,input().split())
N = int(input())
L = list(map(int,input().split()))
ans = [["" for _ in range(W)] for _ in range(H)]
h,w = 0,0
for i,v in enumerate(L):
while v != 0:
if h % 2 == 0:
ans[h][w] = str(i+1)
v -= 1
if w == W-1:
h += 1
else:
w += 1
else:
ans[h][w] = str(i+1)
v -= 1
if w == 0:
h += 1
else:
w -= 1
for v in ans:
print(" ".join(v))
solve()
| [
"[email protected]"
] | |
0550a909ac90ff9e96eb663552ef66bb57941547 | b39d9ef9175077ac6f03b66d97b073d85b6bc4d0 | /Questran_LOC_pow_f_oral_susp_SmPC.py | 972f617ceafb8aa344c064ee17a6c67a8eae2cac | [] | no_license | urudaro/data-ue | 2d840fdce8ba7e759b5551cb3ee277d046464fe0 | 176c57533b66754ee05a96a7429c3e610188e4aa | refs/heads/master | 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | {'_data': [['Unknown',
[['Investigations',
u' f\xf6rkortad protrombin tid, f\xf6rl\xe4ngd protrombin tid, vikt\xf6kning, viktminskning.'],
['Blood',
u' Bl\xf6dningsben\xe4genhet, hypoprotrombinemi, anemi, lymfadenopati, ekkymos.'],
['Nervous system',
u' huvudv\xe4rk, yrsel, synkop\xe9, s\xf6mnighet, neuralgi, parestesier, dysgeusi.'],
['Eye', u' nattblindhet (med vitamin A brist), uveit.'],
['Ear', u' tinnitus, yrsel.'],
['Respiratory', u' astma, v\xe4sning, dyspn\xe9, hicka.'],
['GI',
u' f\xf6rstoppning, pankreatit, abdominal obehagsk\xe4nsla, flatulens, illam\xe5ende, kr\xe4kningar, diarr\xe9, dyspepsi, steatorr\xe9, glossit, anorektalt besv\xe4r, gastrointestinal bl\xf6dning, rektal bl\xf6dning, missf\xe4rgning av faeces, hemorroidal bl\xf6dning, duodenal ulcer bl\xf6dning, dysfagi, ulcus, proktalgi, rapning, akut buk, karies, bl\xf6dningar i munnen och intestinal obstruktion (inklusive 2 d\xf6dsfall i pediatriska patienter), divertikulit.'],
['Renal', u' hematuri, dysuri, onormal urinod\xf6r, polyuri.'],
['Skin', u' rodnad, hudirritation, n\xe4sselutslag.'],
['Musculoskeletal', u' osteoporos, ryggont, myalgi, artralgi, artrit.'],
['Metabolism',
u' vitamin A brist, vitamin K brist, vitamin D brist, acidos, hyperkloremi (hos barn) anorexi.'],
['General', u' tr\xf6tthet, \xf6dem. '],
['Hepato',
u' kolelitiasis, kalcifiering av gallbl\xe5san, gallkolik, onormala leverfunktionsv\xe4rden'],
['Psychiatric', u' \xf6kat libido, \xe5ngest.']]]],
'_pages': [3, 4],
u'_rank': 14,
 u'_type': u'LSFU2'}
| [
"[email protected]"
] | |
38d538d07d33b4c8a2d6259cc1bca71f2d0d91b8 | d7f45fac46598da9825a404d7511df7474237e4a | /ex.099.py | d39572b9a7a98a885df5df8b8c4d938d841ec59a | [] | no_license | MarceloBCS/Exercicios_Curso_em_video | b4a8cbc8573e1303065c0cf1baad25c47d5a2fd8 | a90fd67d83cf154f3554f962815fb791d3508d0c | refs/heads/master | 2022-12-29T19:15:50.007022 | 2020-10-13T05:09:28 | 2020-10-13T05:09:28 | 303,592,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | from random import sample
from datetime import date
from time import sleep
cor = {'red':'\033[1;31m', 'limp':'\033[m', 'az':'\033[1;34m'}
def maior(k):
    c = len('Analyzing the given values...')
    print('=' * c)
    print('Analyzing the given values...')
if k == 0:
n = []
if k > 0:
n = sample(range(0, 10), k)
for x in range(0, len(n)):
sleep(.2)
print(cor['red'], f'{n[x]}', cor['limp'], end='|')
    print(f'\nA total of {cor["az"]}{len(n)} values{cor["limp"]} were given')
    print('The {}highest{} value generated was'.format(cor['az'], cor['limp']),
f'{cor["az"]}{max(n)}{cor["limp"]}' if k > 0 else f'{cor["az"]}0{cor["limp"]}')
print()
maior(6)
maior(3)
maior(2)
maior(1)
maior(0)
print(f'\nProcessed on {date.today()}')
| [
"[email protected]"
] | |
201e8ccf63e246f942be772b74b423a6ee42fcc3 | b57b0a14df5c6841f04cccb7b02ad04afbca18f8 | /linkerd/tests/common.py | ab3716c34a505403b0d2f2704985e95f6a23c1eb | [
"AFL-3.0",
"BSD-3-Clause-Modification",
"LGPL-3.0-only",
"Unlicense",
"LGPL-2.1-only",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | zeroc0d3/integrations-core | d9c99803c049668b7f9f9c796d338e343d3d46ee | 634d567f3c38d32aabb3f4c16b50bcfa8a4ae0fb | refs/heads/master | 2021-09-28T18:37:00.650406 | 2021-09-13T11:59:45 | 2021-09-13T11:59:45 | 199,758,958 | 0 | 0 | BSD-3-Clause | 2019-07-31T02:01:25 | 2019-07-31T02:01:24 | null | UTF-8 | Python | false | false | 28,974 | py | from datadog_checks.base.stubs import aggregator
from datadog_checks.dev import get_here
from datadog_checks.linkerd.metrics import construct_metrics_config
HERE = get_here()
LINKERD_FIXTURE_METRICS = {
'jvm:start_time': 'jvm.start_time',
'jvm:application_time_millis': 'jvm.application_time_millis',
'jvm:classes:total_loaded': 'jvm.classes.total_loaded',
'jvm:classes:current_loaded': 'jvm.classes.current_loaded',
'jvm:classes:total_unloaded': 'jvm.classes.total_unloaded',
'jvm:postGC:Par_Survivor_Space:max': 'jvm.postGC.Par_Survivor_Space.max',
'jvm:postGC:Par_Survivor_Space:used': 'jvm.postGC.Par_Survivor_Space.used',
'jvm:postGC:CMS_Old_Gen:max': 'jvm.postGC.CMS_Old_Gen.max',
'jvm:postGC:CMS_Old_Gen:used': 'jvm.postGC.CMS_Old_Gen.used',
'jvm:postGC:Par_Eden_Space:max': 'jvm.postGC.Par_Eden_Space.max',
'jvm:postGC:Par_Eden_Space:used': 'jvm.postGC.Par_Eden_Space.used',
'jvm:postGC:used': 'jvm.postGC.used',
'jvm:nonheap:committed': 'jvm.nonheap.committed',
'jvm:nonheap:max': 'jvm.nonheap.max',
'jvm:nonheap:used': 'jvm.nonheap.used',
'jvm:tenuring_threshold': 'jvm.tenuring_threshold',
'jvm:thread:daemon_count': 'jvm.thread.daemon_count',
'jvm:thread:count': 'jvm.thread.count',
'jvm:thread:peak_count': 'jvm.thread.peak_count',
'jvm:mem:postGC:Par_Survivor_Space:max': 'jvm.mem.postGC.Par_Survivor_Space.max',
'jvm:mem:postGC:Par_Survivor_Space:used': 'jvm.mem.postGC.Par_Survivor_Space.used',
'jvm:mem:postGC:CMS_Old_Gen:max': 'jvm.mem.postGC.CMS_Old_Gen.max',
'jvm:mem:postGC:CMS_Old_Gen:used': 'jvm.mem.postGC.CMS_Old_Gen.used',
'jvm:mem:postGC:Par_Eden_Space:max': 'jvm.mem.postGC.Par_Eden_Space.max',
'jvm:mem:postGC:Par_Eden_Space:used': 'jvm.mem.postGC.Par_Eden_Space.used',
'jvm:mem:postGC:used': 'jvm.mem.postGC.used',
'jvm:mem:metaspace:max_capacity': 'jvm.mem.metaspace.max_capacity',
'jvm:mem:buffer:direct:max': 'jvm.mem.buffer.direct.max',
'jvm:mem:buffer:direct:count': 'jvm.mem.buffer.direct.count',
'jvm:mem:buffer:direct:used': 'jvm.mem.buffer.direct.used',
'jvm:mem:buffer:mapped:max': 'jvm.mem.buffer.mapped.max',
'jvm:mem:buffer:mapped:count': 'jvm.mem.buffer.mapped.count',
'jvm:mem:buffer:mapped:used': 'jvm.mem.buffer.mapped.used',
'jvm:mem:allocations:eden:bytes': 'jvm.mem.allocations.eden.bytes',
'jvm:mem:current:used': 'jvm.mem.current.used',
'jvm:mem:current:CMS_Old_Gen:max': 'jvm.mem.current.CMS_Old_Gen.max',
'jvm:mem:current:CMS_Old_Gen:used': 'jvm.mem.current.CMS_Old_Gen.used',
'jvm:mem:current:Metaspace:max': 'jvm.mem.current.Metaspace.max',
'jvm:mem:current:Metaspace:used': 'jvm.mem.current.Metaspace.used',
'jvm:mem:current:Par_Eden_Space:max': 'jvm.mem.current.Par_Eden_Space.max',
'jvm:mem:current:Par_Eden_Space:used': 'jvm.mem.current.Par_Eden_Space.used',
'jvm:mem:current:Par_Survivor_Space:max': 'jvm.mem.current.Par_Survivor_Space.max',
'jvm:mem:current:Par_Survivor_Space:used': 'jvm.mem.current.Par_Survivor_Space.used',
'jvm:mem:current:Compressed_Class_Space:max': 'jvm.mem.current.Compressed_Class_Space.max',
'jvm:mem:current:Compressed_Class_Space:used': 'jvm.mem.current.Compressed_Class_Space.used',
'jvm:mem:current:Code_Cache:max': 'jvm.mem.current.Code_Cache.max',
'jvm:mem:current:Code_Cache:used': 'jvm.mem.current.Code_Cache.used',
'jvm:num_cpus': 'jvm.num_cpus',
'jvm:gc:msec': 'jvm.gc.msec',
'jvm:gc:ParNew:msec': 'jvm.gc.ParNew.msec',
'jvm:gc:ParNew:cycles': 'jvm.gc.ParNew.cycles',
'jvm:gc:ConcurrentMarkSweep:msec': 'jvm.gc.ConcurrentMarkSweep.msec',
'jvm:gc:ConcurrentMarkSweep:cycles': 'jvm.gc.ConcurrentMarkSweep.cycles',
'jvm:gc:cycles': 'jvm.gc.cycles',
'jvm:fd_limit': 'jvm.fd_limit',
'jvm:compilation:time_msec': 'jvm.compilation.time_msec',
'jvm:uptime': 'jvm.uptime',
'jvm:safepoint:sync_time_millis': 'jvm.safepoint.sync_time_millis',
'jvm:safepoint:total_time_millis': 'jvm.safepoint.total_time_millis',
'jvm:safepoint:count': 'jvm.safepoint.count',
'jvm:heap:committed': 'jvm.heap.committed',
'jvm:heap:max': 'jvm.heap.max',
'jvm:heap:used': 'jvm.heap.used',
'jvm:fd_count': 'jvm.fd_count',
'rt:server:sent_bytes': 'rt.server.sent_bytes',
'rt:server:connects': 'rt.server.connects',
'rt:server:success': 'rt.server.success',
'rt:server:received_bytes': 'rt.server.received_bytes',
'rt:server:read_timeout': 'rt.server.read_timeout',
'rt:server:write_timeout': 'rt.server.write_timeout',
'rt:server:nacks': 'rt.server.nacks',
'rt:server:thread_usage:requests:mean': 'rt.server.thread_usage.requests.mean',
'rt:server:thread_usage:requests:relative_stddev': 'rt.server.thread_usage.requests.relative_stddev',
'rt:server:thread_usage:requests:stddev': 'rt.server.thread_usage.requests.stddev',
'rt:server:socket_unwritable_ms': 'rt.server.socket_unwritable_ms',
'rt:server:closes': 'rt.server.closes',
'rt:server:status:1XX': 'rt.server.status.1XX',
'rt:server:status:4XX': 'rt.server.status.4XX',
'rt:server:status:2XX': 'rt.server.status.2XX',
'rt:server:status:error': 'rt.server.status.error',
'rt:server:status:3XX': 'rt.server.status.3XX',
'rt:server:status:5XX': 'rt.server.status.5XX',
'rt:server:status:200': 'rt.server.status.200',
'rt:server:nonretryable_nacks': 'rt.server.nonretryable_nacks',
'rt:server:socket_writable_ms': 'rt.server.socket_writable_ms',
'rt:server:requests': 'rt.server.requests',
'rt:server:pending': 'rt.server.pending',
'rt:server:connections': 'rt.server.connections',
'rt:service:success': 'rt.service.success',
'rt:service:retries:total': 'rt.service.retries.total',
'rt:service:retries:budget_exhausted': 'rt.service.retries.budget_exhausted',
'rt:service:retries:budget': 'rt.service.retries.budget',
'rt:service:requests': 'rt.service.requests',
'rt:service:pending': 'rt.service.pending',
'rt:client:sent_bytes': 'rt.client.sent_bytes',
'rt:client:failure_accrual:removals': 'rt.client.failure_accrual.removals',
'rt:client:failure_accrual:probes': 'rt.client.failure_accrual.probes',
'rt:client:failure_accrual:removed_for_ms': 'rt.client.failure_accrual.removed_for_ms',
'rt:client:failure_accrual:revivals': 'rt.client.failure_accrual.revivals',
'rt:client:connects': 'rt.client.connects',
'rt:client:pool_num_waited': 'rt.client.pool_num_waited',
'rt:client:success': 'rt.client.success',
'rt:client:pool_waiters': 'rt.client.pool_waiters',
'rt:client:retries:request_limit': 'rt.client.retries.request_limit',
'rt:client:retries:budget_exhausted': 'rt.client.retries.budget_exhausted',
'rt:client:retries:cannot_retry': 'rt.client.retries.cannot_retry',
'rt:client:retries:not_open': 'rt.client.retries.not_open',
'rt:client:retries:budget': 'rt.client.retries.budget',
'rt:client:retries:requeues': 'rt.client.retries.requeues',
'rt:client:received_bytes': 'rt.client.received_bytes',
'rt:client:read_timeout': 'rt.client.read_timeout',
'rt:client:write_timeout': 'rt.client.write_timeout',
'rt:client:service:success': 'rt.client.service.success',
'rt:client:service:pending': 'rt.client.service.pending',
'rt:client:service:requests': 'rt.client.service.requests',
'rt:client:pool_num_too_many_waiters': 'rt.client.pool_num_too_many_waiters',
'rt:client:socket_unwritable_ms': 'rt.client.socket_unwritable_ms',
'rt:client:closes': 'rt.client.closes',
'rt:client:pool_cached': 'rt.client.pool_cached',
'rt:client:nack_admission_control:dropped_requests': 'rt.client.nack_admission_control.dropped_requests',
'rt:client:status:1XX': 'rt.client.status.1XX',
'rt:client:status:4XX': 'rt.client.status.4XX',
'rt:client:status:2XX': 'rt.client.status.2XX',
'rt:client:status:error': 'rt.client.status.error',
'rt:client:status:3XX': 'rt.client.status.3XX',
'rt:client:status:5XX': 'rt.client.status.5XX',
'rt:client:status:200': 'rt.client.status.200',
'rt:client:pool_size': 'rt.client.pool_size',
'rt:client:available': 'rt.client.available',
'rt:client:socket_writable_ms': 'rt.client.socket_writable_ms',
'rt:client:cancelled_connects': 'rt.client.cancelled_connects',
'rt:client:requests': 'rt.client.requests',
'rt:client:loadbalancer:size': 'rt.client.loadbalancer.size',
'rt:client:loadbalancer:rebuilds': 'rt.client.loadbalancer.rebuilds',
'rt:client:loadbalancer:closed': 'rt.client.loadbalancer.closed',
'rt:client:loadbalancer:load': 'rt.client.loadbalancer.load',
'rt:client:loadbalancer:meanweight': 'rt.client.loadbalancer.meanweight',
'rt:client:loadbalancer:adds': 'rt.client.loadbalancer.adds',
'rt:client:loadbalancer:updates': 'rt.client.loadbalancer.updates',
'rt:client:loadbalancer:algorithm:p2c_least_loaded': 'rt.client.loadbalancer.algorithm.p2c_least_loaded',
'rt:client:loadbalancer:available': 'rt.client.loadbalancer.available',
'rt:client:loadbalancer:max_effort_exhausted': 'rt.client.loadbalancer.max_effort_exhausted',
'rt:client:loadbalancer:busy': 'rt.client.loadbalancer.busy',
'rt:client:loadbalancer:removes': 'rt.client.loadbalancer.removes',
'rt:client:pending': 'rt.client.pending',
'rt:client:dispatcher:serial:queue_size': 'rt.client.dispatcher.serial.queue_size',
'rt:client:connections': 'rt.client.connections',
'rt:bindcache:path:expires': 'rt.bindcache.path.expires',
'rt:bindcache:path:evicts': 'rt.bindcache.path.evicts',
'rt:bindcache:path:misses': 'rt.bindcache.path.misses',
'rt:bindcache:path:oneshots': 'rt.bindcache.path.oneshots',
'rt:bindcache:bound:expires': 'rt.bindcache.bound.expires',
'rt:bindcache:bound:evicts': 'rt.bindcache.bound.evicts',
'rt:bindcache:bound:misses': 'rt.bindcache.bound.misses',
'rt:bindcache:bound:oneshots': 'rt.bindcache.bound.oneshots',
'rt:bindcache:tree:expires': 'rt.bindcache.tree.expires',
'rt:bindcache:tree:evicts': 'rt.bindcache.tree.evicts',
'rt:bindcache:tree:misses': 'rt.bindcache.tree.misses',
'rt:bindcache:tree:oneshots': 'rt.bindcache.tree.oneshots',
'rt:bindcache:client:expires': 'rt.bindcache.client.expires',
'rt:bindcache:client:evicts': 'rt.bindcache.client.evicts',
'rt:bindcache:client:misses': 'rt.bindcache.client.misses',
'rt:bindcache:client:oneshots': 'rt.bindcache.client.oneshots',
}
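
# Every entry above is just the Prometheus metric name with ':' replaced by
# '.'; a hypothetical one-liner that would regenerate the same mapping:
#   {name: name.replace(':', '.') for name in LINKERD_FIXTURE_METRICS}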
LINKERD_FIXTURE_TYPES = {
'jvm:start_time': 'gauge',
'jvm:application_time_millis': 'gauge',
'jvm:classes:total_loaded': 'gauge',
'jvm:classes:current_loaded': 'gauge',
'jvm:classes:total_unloaded': 'gauge',
'jvm:postGC:Par_Survivor_Space:max': 'gauge',
'jvm:postGC:Par_Survivor_Space:used': 'gauge',
'jvm:postGC:CMS_Old_Gen:max': 'gauge',
'jvm:postGC:CMS_Old_Gen:used': 'gauge',
'jvm:postGC:Par_Eden_Space:max': 'gauge',
'jvm:postGC:Par_Eden_Space:used': 'gauge',
'jvm:postGC:used': 'gauge',
'jvm:nonheap:committed': 'gauge',
'jvm:nonheap:max': 'gauge',
'jvm:nonheap:used': 'gauge',
'jvm:tenuring_threshold': 'gauge',
'jvm:thread:daemon_count': 'gauge',
'jvm:thread:count': 'gauge',
'jvm:thread:peak_count': 'gauge',
'jvm:mem:postGC:Par_Survivor_Space:max': 'gauge',
'jvm:mem:postGC:Par_Survivor_Space:used': 'gauge',
'jvm:mem:postGC:CMS_Old_Gen:max': 'gauge',
'jvm:mem:postGC:CMS_Old_Gen:used': 'gauge',
'jvm:mem:postGC:Par_Eden_Space:max': 'gauge',
'jvm:mem:postGC:Par_Eden_Space:used': 'gauge',
'jvm:mem:postGC:used': 'gauge',
'jvm:mem:metaspace:max_capacity': 'gauge',
'jvm:mem:buffer:direct:max': 'gauge',
'jvm:mem:buffer:direct:count': 'gauge',
'jvm:mem:buffer:direct:used': 'gauge',
'jvm:mem:buffer:mapped:max': 'gauge',
'jvm:mem:buffer:mapped:count': 'gauge',
'jvm:mem:buffer:mapped:used': 'gauge',
'jvm:mem:allocations:eden:bytes': 'gauge',
'jvm:mem:current:used': 'gauge',
'jvm:mem:current:CMS_Old_Gen:max': 'gauge',
'jvm:mem:current:CMS_Old_Gen:used': 'gauge',
'jvm:mem:current:Metaspace:max': 'gauge',
'jvm:mem:current:Metaspace:used': 'gauge',
'jvm:mem:current:Par_Eden_Space:max': 'gauge',
'jvm:mem:current:Par_Eden_Space:used': 'gauge',
'jvm:mem:current:Par_Survivor_Space:max': 'gauge',
'jvm:mem:current:Par_Survivor_Space:used': 'gauge',
'jvm:mem:current:Compressed_Class_Space:max': 'gauge',
'jvm:mem:current:Compressed_Class_Space:used': 'gauge',
'jvm:mem:current:Code_Cache:max': 'gauge',
'jvm:mem:current:Code_Cache:used': 'gauge',
'jvm:num_cpus': 'gauge',
'jvm:gc:msec': 'gauge',
'jvm:gc:ParNew:msec': 'gauge',
'jvm:gc:ParNew:cycles': 'gauge',
'jvm:gc:ConcurrentMarkSweep:msec': 'gauge',
'jvm:gc:ConcurrentMarkSweep:cycles': 'gauge',
'jvm:gc:cycles': 'gauge',
'jvm:fd_limit': 'gauge',
'jvm:compilation:time_msec': 'gauge',
'jvm:uptime': 'gauge',
'jvm:safepoint:sync_time_millis': 'gauge',
'jvm:safepoint:total_time_millis': 'gauge',
'jvm:safepoint:count': 'gauge',
'jvm:heap:committed': 'gauge',
'jvm:heap:max': 'gauge',
'jvm:heap:used': 'gauge',
'jvm:fd_count': 'gauge',
'rt:server:sent_bytes': 'gauge',
'rt:server:connects': 'gauge',
'rt:server:success': 'gauge',
'rt:server:received_bytes': 'gauge',
'rt:server:read_timeout': 'gauge',
'rt:server:write_timeout': 'gauge',
'rt:server:nacks': 'gauge',
'rt:server:thread_usage:requests:mean': 'gauge',
'rt:server:thread_usage:requests:relative_stddev': 'gauge',
'rt:server:thread_usage:requests:stddev': 'gauge',
'rt:server:socket_unwritable_ms': 'gauge',
'rt:server:closes': 'gauge',
'rt:server:status:1XX': 'gauge',
'rt:server:status:4XX': 'gauge',
'rt:server:status:2XX': 'gauge',
'rt:server:status:error': 'gauge',
'rt:server:status:3XX': 'gauge',
'rt:server:status:5XX': 'gauge',
'rt:server:status:200': 'gauge',
'rt:server:nonretryable_nacks': 'gauge',
'rt:server:socket_writable_ms': 'gauge',
'rt:server:requests': 'gauge',
'rt:server:pending': 'gauge',
'rt:server:connections': 'gauge',
'rt:service:success': 'gauge',
'rt:service:retries:total': 'gauge',
'rt:service:retries:budget_exhausted': 'gauge',
'rt:service:retries:budget': 'gauge',
'rt:service:requests': 'gauge',
'rt:service:pending': 'gauge',
'rt:client:sent_bytes': 'gauge',
'rt:client:failure_accrual:removals': 'gauge',
'rt:client:failure_accrual:probes': 'gauge',
'rt:client:failure_accrual:removed_for_ms': 'gauge',
'rt:client:failure_accrual:revivals': 'gauge',
'rt:client:connects': 'gauge',
'rt:client:pool_num_waited': 'gauge',
'rt:client:success': 'gauge',
'rt:client:pool_waiters': 'gauge',
'rt:client:retries:request_limit': 'gauge',
'rt:client:retries:budget_exhausted': 'gauge',
'rt:client:retries:cannot_retry': 'gauge',
'rt:client:retries:not_open': 'gauge',
'rt:client:retries:budget': 'gauge',
'rt:client:retries:requeues': 'gauge',
'rt:client:received_bytes': 'gauge',
'rt:client:read_timeout': 'gauge',
'rt:client:write_timeout': 'gauge',
'rt:client:service:success': 'gauge',
'rt:client:service:pending': 'gauge',
'rt:client:service:requests': 'gauge',
'rt:client:pool_num_too_many_waiters': 'gauge',
'rt:client:socket_unwritable_ms': 'gauge',
'rt:client:closes': 'gauge',
'rt:client:pool_cached': 'gauge',
'rt:client:nack_admission_control:dropped_requests': 'gauge',
'rt:client:status:1XX': 'gauge',
'rt:client:status:4XX': 'gauge',
'rt:client:status:2XX': 'gauge',
'rt:client:status:error': 'gauge',
'rt:client:status:3XX': 'gauge',
'rt:client:status:5XX': 'gauge',
'rt:client:status:200': 'gauge',
'rt:client:pool_size': 'gauge',
'rt:client:available': 'gauge',
'rt:client:socket_writable_ms': 'gauge',
'rt:client:cancelled_connects': 'gauge',
'rt:client:requests': 'gauge',
'rt:client:loadbalancer:size': 'gauge',
'rt:client:loadbalancer:rebuilds': 'gauge',
'rt:client:loadbalancer:closed': 'gauge',
'rt:client:loadbalancer:load': 'gauge',
'rt:client:loadbalancer:meanweight': 'gauge',
'rt:client:loadbalancer:adds': 'gauge',
'rt:client:loadbalancer:updates': 'gauge',
'rt:client:loadbalancer:algorithm:p2c_least_loaded': 'gauge',
'rt:client:loadbalancer:available': 'gauge',
'rt:client:loadbalancer:max_effort_exhausted': 'gauge',
'rt:client:loadbalancer:busy': 'gauge',
'rt:client:loadbalancer:removes': 'gauge',
'rt:client:pending': 'gauge',
'rt:client:dispatcher:serial:queue_size': 'gauge',
'rt:client:connections': 'gauge',
'rt:bindcache:path:expires': 'gauge',
'rt:bindcache:path:evicts': 'gauge',
'rt:bindcache:path:misses': 'gauge',
'rt:bindcache:path:oneshots': 'gauge',
'rt:bindcache:bound:expires': 'gauge',
'rt:bindcache:bound:evicts': 'gauge',
'rt:bindcache:bound:misses': 'gauge',
'rt:bindcache:bound:oneshots': 'gauge',
'rt:bindcache:tree:expires': 'gauge',
'rt:bindcache:tree:evicts': 'gauge',
'rt:bindcache:tree:misses': 'gauge',
'rt:bindcache:tree:oneshots': 'gauge',
'rt:bindcache:client:expires': 'gauge',
'rt:bindcache:client:evicts': 'gauge',
'rt:bindcache:client:misses': 'gauge',
'rt:bindcache:client:oneshots': 'gauge',
}
MOCK_INSTANCE = {
'prometheus_url': 'http://fake.tld/prometheus',
'metrics': [LINKERD_FIXTURE_METRICS],
'type_overrides': LINKERD_FIXTURE_TYPES,
}
MOCK_INSTANCE_NEW = {
'openmetrics_endpoint': 'http://fake.tld/prometheus',
'extra_metrics': construct_metrics_config(LINKERD_FIXTURE_METRICS, LINKERD_FIXTURE_TYPES),
}
LINKERD_FIXTURE_VALUES = {
'linkerd.jvm.start_time': 1.52103079e12,
'linkerd.jvm.application_time_millis': 52340.887,
'linkerd.jvm.classes.total_loaded': 8842.0,
'linkerd.jvm.classes.current_loaded': 8815.0,
'linkerd.jvm.classes.total_unloaded': 27.0,
'linkerd.jvm.postGC.Par_Survivor_Space.max': 1.7432576e7,
'linkerd.jvm.postGC.Par_Survivor_Space.used': 200736.0,
'linkerd.jvm.postGC.CMS_Old_Gen.max': 8.9928499e8,
'linkerd.jvm.postGC.CMS_Old_Gen.used': 2.0269128e7,
'linkerd.jvm.postGC.Par_Eden_Space.max': 1.3959168e8,
'linkerd.jvm.postGC.Par_Eden_Space.used': 0.0,
'linkerd.jvm.postGC.used': 2.0469864e7,
'linkerd.jvm.nonheap.committed': 7.122944e7,
'linkerd.jvm.nonheap.max': -1.0,
'linkerd.jvm.nonheap.used': 6.564336e7,
'linkerd.jvm.tenuring_threshold': 6.0,
'linkerd.jvm.thread.daemon_count': 22.0,
'linkerd.jvm.thread.count': 23.0,
'linkerd.jvm.thread.peak_count': 24.0,
'linkerd.jvm.mem.postGC.Par_Survivor_Space.max': 1.7432576e7,
'linkerd.jvm.mem.postGC.Par_Survivor_Space.used': 200736.0,
'linkerd.jvm.mem.postGC.CMS_Old_Gen.max': 8.9928499e8,
'linkerd.jvm.mem.postGC.CMS_Old_Gen.used': 2.0269128e7,
'linkerd.jvm.mem.postGC.Par_Eden_Space.max': 1.3959168e8,
'linkerd.jvm.mem.postGC.Par_Eden_Space.used': 0.0,
'linkerd.jvm.mem.postGC.used': 2.0469864e7,
'linkerd.jvm.mem.metaspace.max_capacity': 1.10729626e9,
'linkerd.jvm.mem.buffer.direct.max': 0.0,
'linkerd.jvm.mem.buffer.direct.count': 1.0,
'linkerd.jvm.mem.buffer.direct.used': 1.0,
'linkerd.jvm.mem.buffer.mapped.max': 0.0,
'linkerd.jvm.mem.buffer.mapped.count': 0.0,
'linkerd.jvm.mem.buffer.mapped.used': 0.0,
'linkerd.jvm.mem.allocations.eden.bytes': 1.22551552e9,
'linkerd.jvm.mem.current.used': 9.0179664e7,
'linkerd.jvm.mem.current.CMS_Old_Gen.max': 8.9928499e8,
'linkerd.jvm.mem.current.CMS_Old_Gen.used': 2.2799416e7,
'linkerd.jvm.mem.current.Metaspace.max': -1.0,
'linkerd.jvm.mem.current.Metaspace.used': 5.1355408e7,
'linkerd.jvm.mem.current.Par_Eden_Space.max': 1.3959168e8,
'linkerd.jvm.mem.current.Par_Eden_Space.used': 1586640.0,
'linkerd.jvm.mem.current.Par_Survivor_Space.max': 1.7432576e7,
'linkerd.jvm.mem.current.Par_Survivor_Space.used': 200736.0,
'linkerd.jvm.mem.current.Compressed_Class_Space.max': 1.07374182e9,
'linkerd.jvm.mem.current.Compressed_Class_Space.used': 8188496.0,
'linkerd.jvm.mem.current.Code_Cache.max': 5.0331648e7,
'linkerd.jvm.mem.current.Code_Cache.used': 6099456.0,
'linkerd.jvm.num_cpus': 2.0,
'linkerd.jvm.gc.msec': 674.0,
'linkerd.jvm.gc.ParNew.msec': 561.0,
'linkerd.jvm.gc.ParNew.cycles': 163.0,
'linkerd.jvm.gc.ConcurrentMarkSweep.msec': 113.0,
'linkerd.jvm.gc.ConcurrentMarkSweep.cycles': 6.0,
'linkerd.jvm.gc.cycles': 169.0,
'linkerd.jvm.fd_limit': 1048576.0,
'linkerd.jvm.compilation.time_msec': 18540.0,
'linkerd.jvm.uptime': 53922.0,
'linkerd.jvm.safepoint.sync_time_millis': 557.0,
'linkerd.jvm.safepoint.total_time_millis': 1295.0,
'linkerd.jvm.safepoint.count': 592.0,
'linkerd.jvm.heap.committed': 4.3810816e7,
'linkerd.jvm.heap.max': 1.05630925e9,
'linkerd.jvm.heap.used': 2.5757896e7,
'linkerd.jvm.fd_count': 165.0,
'linkerd.rt.server.sent_bytes': 2901160,
'linkerd.rt.server.connects': 50,
'linkerd.rt.server.success': 17694,
'linkerd.rt.server.received_bytes': 1565173,
'linkerd.rt.server.read_timeout': 0,
'linkerd.rt.server.write_timeout': 0,
'linkerd.rt.server.nacks': 0,
'linkerd.rt.server.thread_usage.requests.mean': 0.0,
'linkerd.rt.server.thread_usage.requests.relative_stddev': 0.0,
'linkerd.rt.server.thread_usage.requests.stddev': 0.0,
'linkerd.rt.server.socket_unwritable_ms': 0,
'linkerd.rt.server.closes': 0,
'linkerd.rt.server.status.1XX': 0,
'linkerd.rt.server.status.4XX': 0,
'linkerd.rt.server.status.2XX': 17697,
'linkerd.rt.server.status.error': 0,
'linkerd.rt.server.status.3XX': 0,
'linkerd.rt.server.status.5XX': 0,
'linkerd.rt.server.status.200': 17697,
'linkerd.rt.server.nonretryable_nacks': 0,
'linkerd.rt.server.socket_writable_ms': 0,
'linkerd.rt.server.requests': 17700,
'linkerd.rt.server.pending': 12.0,
'linkerd.rt.server.connections': 50.0,
'linkerd.rt.service.success': 17700,
'linkerd.rt.service.retries.total': 0,
'linkerd.rt.service.retries.budget_exhausted': 0,
'linkerd.rt.service.retries.budget': 1081.0,
'linkerd.rt.service.requests': 17700,
'linkerd.rt.service.pending': 12.0,
'linkerd.rt.client.sent_bytes': 4715595,
'linkerd.rt.client.failure_accrual.removals': 0,
'linkerd.rt.client.failure_accrual.probes': 0,
'linkerd.rt.client.failure_accrual.removed_for_ms': 0,
'linkerd.rt.client.failure_accrual.revivals': 0,
'linkerd.rt.client.connects': 65,
'linkerd.rt.client.pool_num_waited': 0,
'linkerd.rt.client.success': 17703,
'linkerd.rt.client.pool_waiters': 0.0,
'linkerd.rt.client.retries.request_limit': 0,
'linkerd.rt.client.retries.budget_exhausted': 0,
'linkerd.rt.client.retries.cannot_retry': 0,
'linkerd.rt.client.retries.not_open': 0,
'linkerd.rt.client.retries.budget': 944.0,
'linkerd.rt.client.retries.requeues': 0,
'linkerd.rt.client.received_bytes': 2159766,
'linkerd.rt.client.read_timeout': 0,
'linkerd.rt.client.write_timeout': 0,
'linkerd.rt.client.service.success': 17703,
'linkerd.rt.client.service.pending': 1.0,
'linkerd.rt.client.service.requests': 17703,
'linkerd.rt.client.pool_num_too_many_waiters': 0,
'linkerd.rt.client.socket_unwritable_ms': 0,
'linkerd.rt.client.closes': 0,
'linkerd.rt.client.pool_cached': 50.0,
'linkerd.rt.client.nack_admission_control.dropped_requests': 0,
'linkerd.rt.client.status.1XX': 0,
'linkerd.rt.client.status.4XX': 0,
'linkerd.rt.client.status.2XX': 17703,
'linkerd.rt.client.status.error': 0,
'linkerd.rt.client.status.3XX': 0,
'linkerd.rt.client.status.5XX': 0,
'linkerd.rt.client.status.200': 17703,
'linkerd.rt.client.pool_size': 10.0,
'linkerd.rt.client.available': 10.0,
'linkerd.rt.client.socket_writable_ms': 0,
'linkerd.rt.client.cancelled_connects': 0,
'linkerd.rt.client.requests': 17703,
'linkerd.rt.client.loadbalancer.size': 10.0,
'linkerd.rt.client.loadbalancer.rebuilds': 1,
'linkerd.rt.client.loadbalancer.closed': 0.0,
'linkerd.rt.client.loadbalancer.load': 11.0,
'linkerd.rt.client.loadbalancer.meanweight': 1.0,
'linkerd.rt.client.loadbalancer.adds': 10,
'linkerd.rt.client.loadbalancer.updates': 1,
'linkerd.rt.client.loadbalancer.algorithm.p2c_least_loaded': 1.0,
'linkerd.rt.client.loadbalancer.available': 10.0,
'linkerd.rt.client.loadbalancer.max_effort_exhausted': 0,
'linkerd.rt.client.loadbalancer.busy': 0.0,
'linkerd.rt.client.loadbalancer.removes': 0,
'linkerd.rt.client.pending': 10.0,
'linkerd.rt.client.dispatcher.serial.queue_size': 0.0,
'linkerd.rt.client.connections': 52.0,
'linkerd.rt.bindcache.path.expires': 0,
'linkerd.rt.bindcache.path.evicts': 0,
'linkerd.rt.bindcache.path.misses': 1,
'linkerd.rt.bindcache.path.oneshots': 0,
'linkerd.rt.bindcache.bound.expires': 0,
'linkerd.rt.bindcache.bound.evicts': 0,
'linkerd.rt.bindcache.bound.misses': 1,
'linkerd.rt.bindcache.bound.oneshots': 0,
'linkerd.rt.bindcache.tree.expires': 0,
'linkerd.rt.bindcache.tree.evicts': 0,
'linkerd.rt.bindcache.tree.misses': 1,
'linkerd.rt.bindcache.tree.oneshots': 0,
'linkerd.rt.bindcache.client.expires': 0,
'linkerd.rt.bindcache.client.evicts': 0,
'linkerd.rt.bindcache.client.misses': 1,
'linkerd.rt.bindcache.client.oneshots': 0,
}
EXPECTED_METRICS_V2_BASE = {
'linkerd.request_total': aggregator.MONOTONIC_COUNT,
'linkerd.response_total': aggregator.MONOTONIC_COUNT,
'linkerd.response_latency.count': aggregator.GAUGE,
'linkerd.response_latency.sum': aggregator.GAUGE,
'linkerd.route.request_total': aggregator.MONOTONIC_COUNT,
'linkerd.route.response_latency.count': aggregator.GAUGE,
'linkerd.route.response_latency.sum': aggregator.GAUGE,
'linkerd.route.response_total': aggregator.MONOTONIC_COUNT,
'linkerd.route.actual_request_total': aggregator.MONOTONIC_COUNT,
'linkerd.tcp.open_total': aggregator.MONOTONIC_COUNT,
'linkerd.tcp.open_connections': aggregator.GAUGE,
'linkerd.tcp.read_bytes_total': aggregator.MONOTONIC_COUNT,
'linkerd.tcp.write_bytes_total': aggregator.MONOTONIC_COUNT,
'linkerd.tcp.close_total': aggregator.MONOTONIC_COUNT,
'linkerd.control.request_total': aggregator.MONOTONIC_COUNT,
'linkerd.control.response_latency.count': aggregator.GAUGE,
'linkerd.control.response_latency.sum': aggregator.GAUGE,
'linkerd.control.response_total': aggregator.MONOTONIC_COUNT,
'linkerd.process.start_time': aggregator.GAUGE,
'linkerd.process.cpu_seconds_total': aggregator.MONOTONIC_COUNT,
'linkerd.process.open_fds': aggregator.GAUGE,
'linkerd.process.max_fds': aggregator.GAUGE,
'linkerd.process.virtual_memory': aggregator.GAUGE,
'linkerd.process.resident_memory': aggregator.GAUGE,
'linkerd.prometheus.health': aggregator.GAUGE,
}
# These metrics no longer reliably report on latest linkerd installs
EXPECTED_METRICS_V2_EXTENDED = {
'linkerd.route.actual_response_latency.count': aggregator.GAUGE,
'linkerd.route.actual_response_latency.sum': aggregator.GAUGE,
'linkerd.route.actual_response_total': aggregator.MONOTONIC_COUNT,
'linkerd.tcp.connection_duration.count': aggregator.GAUGE,
'linkerd.tcp.connection_duration.sum': aggregator.GAUGE,
}
EXPECTED_METRICS_V2 = EXPECTED_METRICS_V2_BASE.copy()
EXPECTED_METRICS_V2.update(EXPECTED_METRICS_V2_EXTENDED)
EXPECTED_METRICS_V2_E2E = {
k: aggregator.COUNT if v == aggregator.MONOTONIC_COUNT else v for k, v in EXPECTED_METRICS_V2_BASE.items()
}
EXPECTED_METRICS_V2_NEW = {}
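# Translate the legacy metric names/types to the new openmetrics-based naming:
# the health service check is renamed, `_total` counters become `.count`
# monotonic counts, histogram `.sum`/`.count` series become monotonic counts,
# and the known latency/duration histograms additionally expose `.bucket`.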
for metric_name, metric_type in list(EXPECTED_METRICS_V2.items()):
if metric_name == 'linkerd.prometheus.health':
EXPECTED_METRICS_V2_NEW['linkerd.openmetrics.health'] = metric_type
elif metric_name.endswith('_total'):
EXPECTED_METRICS_V2_NEW['{}.count'.format(metric_name[:-6])] = aggregator.MONOTONIC_COUNT
elif metric_name.endswith('.sum'):
EXPECTED_METRICS_V2_NEW[metric_name] = aggregator.MONOTONIC_COUNT
elif metric_name.endswith('.count'):
EXPECTED_METRICS_V2_NEW[metric_name] = aggregator.MONOTONIC_COUNT
metric_prefix = metric_name[:-6]
# Histogram buckets
if metric_prefix in (
'linkerd.control.response_latency',
'linkerd.response_latency',
'linkerd.route.actual_response_latency',
'linkerd.route.response_latency',
'linkerd.tcp.connection_duration',
):
EXPECTED_METRICS_V2_NEW['{}.bucket'.format(metric_prefix)] = aggregator.MONOTONIC_COUNT
else:
EXPECTED_METRICS_V2_NEW[metric_name] = metric_type
| [
"[email protected]"
] | |
8f6a56a0cb2adb1b054194442eca236cf9d057df | 48e4aedd813ab55fefd137ef22b2af3242012c19 | /lib/readConfig.py | 2b86849f4c882e1d7726174e98f56ffaede9d2e2 | [
"MIT"
] | permissive | philip-shen/MongoDB_TSEOTC_Crawler | 0b307917846606833ee804ea2f7061c4f0cb55df | 87d8dded2557eaca541499ccce6c8942476d3741 | refs/heads/master | 2020-04-17T16:47:53.638885 | 2019-01-29T13:30:26 | 2019-01-29T13:30:26 | 166,756,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | import os
import codecs
import configparser
class ReadConfig:
def __init__(self,configPath):
self.configPath=configPath
#fd = open(self.configPath)
fd = open(self.configPath, encoding='utf-8')
data = fd.read()
        # remove BOM (in text mode a UTF-8 BOM decodes to U+FEFF, so compare
        # against that instead of the raw codecs.BOM_UTF8 bytes)
        if data.startswith('\ufeff'):
            data = data[1:]
            file = codecs.open(configPath, "w", encoding='utf-8')
            file.write(data)
            file.close()
fd.close()
self.cf = configparser.ConfigParser()
#self.cf.read(self.configPath)
self.cf.read(self.configPath,encoding='utf-8')
def get_MongoDB(self, name):
value = self.cf.get("MONGODB", name)
return value
def get_SeymourExcel(self,name):
value = self.cf.get("SeymourExcel", name)
return value | [
"[email protected]"
] | |
1de34dbf8faa35f48aada6bff23b5847c55c0a3b | 72b18602ac21c9a1f1474a52c99b45ce9b825288 | /apptools/apptools-android-tests/apptools/versionCode.py | 0f202eff480f6e949bb09c7a5db82b022f5fabfd | [
"BSD-3-Clause"
] | permissive | anawhj/crosswalk-test-suite | 3f3b4cf2de2b3dfdfd15a7a2bbf45d0827f062f7 | 9fa4c96578f6bc95ae884ee943845c8d9a62fc17 | refs/heads/master | 2021-01-16T17:44:21.266562 | 2015-08-04T09:38:28 | 2015-08-04T09:38:28 | 40,181,956 | 0 | 0 | null | 2015-08-04T11:57:27 | 2015-08-04T11:57:27 | null | UTF-8 | Python | false | false | 7,418 | py | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, Yun <[email protected]>
import unittest
import os
import comm
from xml.etree import ElementTree
import json
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def test_versionCode_normal(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.popen(buildcmd).readlines()
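        # Walk the build output backwards to the last line that mentions
        # "Using android:versionCode", then strip the surrounding decoration
        # so only the quoted value remains.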
index = 0
for x in range(len(buildstatus),0,-1):
index = x -1
if buildstatus[index].find("Using android:versionCode") != -1:
break
versionCode = buildstatus[index].strip(" *\nUsing android:versionCode")[1:-1]
root = ElementTree.parse(comm.ConstPath + "/../tools/org.xwalk.test/prj/android/AndroidManifest.xml").getroot()
attributes = root.attrib
for x in attributes.keys():
if x.find("versionCode") != -1:
versionCode_xml = attributes[x]
break
comm.clear("org.xwalk.test")
self.assertEquals(versionCode, versionCode_xml)
def test_update_app_version_onedot(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["crosswalk_app_version"] = "0.1"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
with open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json") as json_file:
data = json.load(json_file)
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.popen(buildcmd).readlines()
index = 0
for x in range(len(buildstatus),0,-1):
index = x -1
if buildstatus[index].find("Using android:versionCode") != -1:
break
versionCode = buildstatus[index].strip(" *\nUsing android:versionCode")[1:-1]
root = ElementTree.parse(comm.ConstPath + "/../tools/org.xwalk.test/prj/android/AndroidManifest.xml").getroot()
attributes = root.attrib
for x in attributes.keys():
if x.find("versionCode") != -1:
versionCode_xml = attributes[x]
break
comm.clear("org.xwalk.test")
self.assertEquals(data['crosswalk_app_version'].strip(os.linesep), "0.1")
self.assertEquals(versionCode, versionCode_xml)
def test_update_app_version_twodot(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["crosswalk_app_version"] = "0.0.1"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
with open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json") as json_file:
data = json.load(json_file)
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.popen(buildcmd).readlines()
index = 0
for x in range(len(buildstatus),0,-1):
index = x -1
if buildstatus[index].find("Using android:versionCode") != -1:
break
versionCode = buildstatus[index].strip(" *\nUsing android:versionCode")[1:-1]
root = ElementTree.parse(comm.ConstPath + "/../tools/org.xwalk.test/prj/android/AndroidManifest.xml").getroot()
attributes = root.attrib
for x in attributes.keys():
if x.find("versionCode") != -1:
versionCode_xml = attributes[x]
break
comm.clear("org.xwalk.test")
self.assertEquals(data['crosswalk_app_version'].strip(os.linesep), "0.0.1")
self.assertEquals(versionCode, versionCode_xml)
def test_update_app_version_threedot(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["crosswalk_app_version"] = "0.0.0.1"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
with open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json") as json_file:
data = json.load(json_file)
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
return_code = os.system(buildcmd)
comm.clear("org.xwalk.test")
self.assertEquals(data['crosswalk_app_version'].strip(os.linesep), "0.0.0.1")
self.assertNotEquals(return_code, 0)
def test_update_app_version_out_of_range(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["crosswalk_app_version"] = "1000"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
with open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json") as json_file:
data = json.load(json_file)
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
return_code = os.system(buildcmd)
comm.clear("org.xwalk.test")
self.assertEquals(data['crosswalk_app_version'].strip(os.linesep), "1000")
self.assertNotEquals(return_code, 0)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
4fef22836d27d35cc4f8d07acc6629da007ba6d3 | e29f8c29a993156b7de7b0451d63ad8cca51c9a6 | /zajecia10/zadanie_domowe/zadanie_domowe6.py | a968a7b4bbf1f9418fa17abbe7a86929bd3704a6 | [] | no_license | remekwilk/python_basic | d898ad26aba809eb14ebed9d94bd93db69154ffa | af145a9711dabca232dc5f5be8fe4c407a5fda54 | refs/heads/master | 2020-05-03T14:42:19.523070 | 2020-04-11T20:42:03 | 2020-04-11T20:42:03 | 176,701,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | from loteria_funkcje import chybil_trafil, dodaj_zaklad, czy_jest_zwyciezca
wszystkie_zaklady = []
moj_zaklad = [1, 2, 3, 4]
dodaj_zaklad(moj_zaklad, wszystkie_zaklady)
for i in range(100):
losowy_zaklad = chybil_trafil()
dodaj_zaklad(losowy_zaklad, wszystkie_zaklady)
print(wszystkie_zaklady)
zwycieskie_liczby = chybil_trafil()
print('Zwycięskie liczby to dziś:', zwycieskie_liczby)
czy_jest_zwyciezca(zwycieskie_liczby, wszystkie_zaklady)
if moj_zaklad == zwycieskie_liczby:
print("HURRA! Moje liczby padły!")
| [
"[email protected]"
] | |
d7a32db31d3c7a9da78a58a494c7e6c264a6e10c | 22671693f8bf7e11b6ec2571f187eaba94810a35 | /day-01/day-01.py | 66dcca8960eec6cfb05bb09e60f16a0315304f4f | [] | no_license | timbook/advent-of-code-2018 | 9d4679d345ba9e108286b85134904ba8613e3327 | f9b62c3528cae66b4e9e60735d83dda43ea9cc05 | refs/heads/master | 2022-09-29T09:05:39.191564 | 2022-08-25T02:19:16 | 2022-08-25T02:19:16 | 159,948,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import sys
import os
with open("../data/01-input.txt", 'r') as f:
lines = f.readlines()
items = [int(n.strip()) for n in lines]
print("::: PART A")
print(f"FREQUENCY: {sum(items)}\n")
record = set()
freq = 0
done = False
while True:
for i in items:
freq += i
if freq in record:
print("::: PART B")
print(f"FREQUENCY: {freq}")
sys.exit(0)
else:
record.add(freq)
| [
"[email protected]"
] | |
747133d86e0a598e64be92d208d535120b549cf3 | 6ebf510d0b4aa4263e47257ba4746550abefa368 | /warehouse/packaging/models.py | d68bcb74f79ad51980ac9bc53870b5bfcb57ae15 | [
"Apache-2.0"
] | permissive | umeshksingla/warehouse | abc8cf3a330107a0f50f7cd6a1e5ba13270855d4 | 17ca6b4e641481c3672ff9f991f90cdacb5b9713 | refs/heads/master | 2021-01-22T16:56:38.814173 | 2016-03-04T01:51:19 | 2016-03-04T01:51:19 | 48,956,080 | 1 | 0 | null | 2016-03-04T01:51:20 | 2016-01-03T18:23:12 | Python | UTF-8 | Python | false | false | 13,922 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from collections import OrderedDict
from citext import CIText
from pyramid.security import Allow
from pyramid.threadlocal import get_current_request
from sqlalchemy import (
CheckConstraint, Column, Enum, ForeignKey, ForeignKeyConstraint, Index,
Boolean, DateTime, Integer, Table, Text,
)
from sqlalchemy import func, orm, sql
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from warehouse import db
from warehouse.accounts.models import User
from warehouse.classifiers.models import Classifier
from warehouse.sitemap.models import SitemapMixin
from warehouse.utils.attrs import make_repr
class Role(db.Model):
__tablename__ = "roles"
__table_args__ = (
Index("roles_pack_name_idx", "package_name"),
Index("roles_user_name_idx", "user_name"),
)
__repr__ = make_repr("role_name", "user_name", "package_name")
role_name = Column(Text)
user_name = Column(
CIText,
ForeignKey("accounts_user.username", onupdate="CASCADE"),
)
package_name = Column(
Text,
ForeignKey("packages.name", onupdate="CASCADE"),
)
user = orm.relationship(User, lazy=False)
project = orm.relationship("Project", lazy=False)
class ProjectFactory:
def __init__(self, request):
self.request = request
def __getitem__(self, project):
try:
return self.request.db.query(Project).filter(
Project.normalized_name == func.normalize_pep426_name(project)
).one()
except NoResultFound:
raise KeyError from None
class Project(SitemapMixin, db.ModelBase):
__tablename__ = "packages"
__table_args__ = (
CheckConstraint(
"name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
name="packages_valid_name",
),
)
__repr__ = make_repr("name")
name = Column(Text, primary_key=True, nullable=False)
normalized_name = orm.column_property(func.normalize_pep426_name(name))
stable_version = Column(Text)
autohide = Column(Boolean, server_default=sql.true())
comments = Column(Boolean, server_default=sql.true())
bugtrack_url = Column(Text)
hosting_mode = Column(Text, nullable=False, server_default="pypi-only")
created = Column(
DateTime(timezone=False),
nullable=False,
server_default=sql.func.now(),
)
has_docs = Column(Boolean)
upload_limit = Column(Integer, nullable=True)
users = orm.relationship(
User,
secondary=Role.__table__,
backref="projects",
)
releases = orm.relationship(
"Release",
backref="project",
cascade="all, delete-orphan",
order_by=lambda: Release._pypi_ordering.desc(),
)
def __getitem__(self, version):
session = orm.object_session(self)
try:
return (
session.query(Release)
.filter((Release.project == self) &
(Release.version == version))
.one()
)
except NoResultFound:
raise KeyError from None
def __acl__(self):
session = orm.object_session(self)
acls = []
# Get all of the users for this project.
query = session.query(Role).filter(Role.project == self)
query = query.options(orm.lazyload("project"))
query = query.options(orm.joinedload("user").lazyload("emails"))
for role in sorted(
query.all(),
key=lambda x: ["Owner", "Maintainer"].index(x.role_name)):
acls.append((Allow, role.user.id, ["upload"]))
return acls
@property
def documentation_url(self):
# TODO: Move this into the database and elimnate the use of the
# threadlocal here.
request = get_current_request()
# If the project doesn't have docs, then we'll just return a None here.
if not self.has_docs:
return
return request.route_url("legacy.docs", project=self.name)
class DependencyKind(enum.IntEnum):
requires = 1
provides = 2
obsoletes = 3
requires_dist = 4
provides_dist = 5
obsoletes_dist = 6
requires_external = 7
# TODO: Move project URLs into their own table, since they are not actually
# a "dependency".
project_url = 8
class Dependency(db.Model):
__tablename__ = "release_dependencies"
__table_args__ = (
Index("rel_dep_name_idx", "name"),
Index("rel_dep_name_version_idx", "name", "version"),
Index("rel_dep_name_version_kind_idx", "name", "version", "kind"),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
)
__repr__ = make_repr("name", "version", "kind", "specifier")
name = Column(Text)
version = Column(Text)
kind = Column(Integer)
specifier = Column(Text)
def _dependency_relation(kind):
return orm.relationship(
"Dependency",
primaryjoin=lambda: sql.and_(
Release.name == Dependency.name,
Release.version == Dependency.version,
Dependency.kind == kind.value,
),
viewonly=True,
)
class Release(db.ModelBase):
__tablename__ = "releases"
@declared_attr
def __table_args__(cls): # noqa
return (
Index("release_created_idx", cls.created.desc()),
Index("release_name_created_idx", cls.name, cls.created.desc()),
Index("release_name_idx", cls.name),
Index("release_pypi_hidden_idx", cls._pypi_hidden),
Index("release_version_idx", cls.version),
)
__repr__ = make_repr("name", "version")
name = Column(
Text,
ForeignKey("packages.name", onupdate="CASCADE"),
primary_key=True,
)
version = Column(Text, primary_key=True)
author = Column(Text)
author_email = Column(Text)
maintainer = Column(Text)
maintainer_email = Column(Text)
home_page = Column(Text)
license = Column(Text)
summary = Column(Text)
description = Column(Text)
keywords = Column(Text)
platform = Column(Text)
download_url = Column(Text)
_pypi_ordering = Column(Integer)
_pypi_hidden = Column(Boolean)
cheesecake_installability_id = Column(
Integer,
ForeignKey("cheesecake_main_indices.id"),
)
cheesecake_documentation_id = Column(
Integer,
ForeignKey("cheesecake_main_indices.id"),
)
cheesecake_code_kwalitee_id = Column(
Integer,
ForeignKey("cheesecake_main_indices.id"),
)
requires_python = Column(Text)
description_from_readme = Column(Boolean)
created = Column(
DateTime(timezone=False),
nullable=False,
server_default=sql.func.now(),
)
_classifiers = orm.relationship(
Classifier,
backref="project_releases",
secondary=lambda: release_classifiers,
order_by=Classifier.classifier,
)
classifiers = association_proxy("_classifiers", "classifier")
files = orm.relationship(
"File",
backref="release",
cascade="all, delete-orphan",
lazy="dynamic",
order_by=lambda: File.filename,
)
dependencies = orm.relationship("Dependency")
_requires = _dependency_relation(DependencyKind.requires)
requires = association_proxy("_requires", "specifier")
_provides = _dependency_relation(DependencyKind.provides)
provides = association_proxy("_provides", "specifier")
_obsoletes = _dependency_relation(DependencyKind.obsoletes)
obsoletes = association_proxy("_obsoletes", "specifier")
_requires_dist = _dependency_relation(DependencyKind.requires_dist)
requires_dist = association_proxy("_requires_dist", "specifier")
_provides_dist = _dependency_relation(DependencyKind.provides_dist)
provides_dist = association_proxy("_provides_dist", "specifier")
_obsoletes_dist = _dependency_relation(DependencyKind.obsoletes_dist)
obsoletes_dist = association_proxy("_obsoletes_dist", "specifier")
_requires_external = _dependency_relation(DependencyKind.requires_external)
requires_external = association_proxy("_requires_external", "specifier")
_project_urls = _dependency_relation(DependencyKind.project_url)
project_urls = association_proxy("_project_urls", "specifier")
uploader = orm.relationship(
"User",
secondary=lambda: JournalEntry.__table__,
primaryjoin=lambda: (
(JournalEntry.name == orm.foreign(Release.name)) &
(JournalEntry.version == orm.foreign(Release.version)) &
(JournalEntry.action == "new release")),
secondaryjoin=lambda: (
(User.username == orm.foreign(JournalEntry._submitted_by))
),
order_by=lambda: JournalEntry.submitted_date.desc(),
# TODO: We have uselist=False here which raises a warning because
# multiple items were returned. This should only be temporary because
# we should add a nullable FK to JournalEntry so we don't need to rely
# on ordering and implicitly selecting the first object to make this
        # happen.
uselist=False,
viewonly=True,
)
@property
def urls(self):
_urls = OrderedDict()
if self.home_page:
_urls["Homepage"] = self.home_page
for urlspec in self.project_urls:
name, url = urlspec.split(",", 1)
_urls[name] = url
if self.download_url and "Download" not in _urls:
_urls["Download"] = self.download_url
return _urls
@property
def has_meta(self):
return any([self.keywords])
class File(db.Model):
__tablename__ = "release_files"
__table_args__ = (
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
CheckConstraint("sha256_digest ~* '^[A-F0-9]{64}$'"),
Index("release_files_name_idx", "name"),
Index("release_files_name_version_idx", "name", "version"),
Index("release_files_packagetype_idx", "packagetype"),
Index("release_files_version_idx", "version"),
)
name = Column(Text)
version = Column(Text)
python_version = Column(Text)
packagetype = Column(
Enum(
"bdist_dmg", "bdist_dumb", "bdist_egg", "bdist_msi", "bdist_rpm",
"bdist_wheel", "bdist_wininst", "sdist",
),
)
comment_text = Column(Text)
filename = Column(Text, unique=True)
path = Column(Text, unique=True, nullable=False)
size = Column(Integer)
has_signature = Column(Boolean)
md5_digest = Column(Text, unique=True)
sha256_digest = Column(CIText, unique=True, nullable=False)
downloads = Column(Integer, server_default=sql.text("0"))
upload_time = Column(DateTime(timezone=False), server_default=func.now())
@hybrid_property
def pgp_path(self):
return self.path + ".asc"
@pgp_path.expression
def pgp_path(self):
return func.concat(self.path, ".asc")
class Filename(db.ModelBase):
__tablename__ = "file_registry"
id = Column(Integer, primary_key=True, nullable=False)
filename = Column(Text, unique=True, nullable=False)
release_classifiers = Table(
"release_classifiers",
db.metadata,
Column("name", Text()),
Column("version", Text()),
Column("trove_id", Integer(), ForeignKey("trove_classifiers.id")),
ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
Index("rel_class_name_idx", "name"),
Index("rel_class_name_version_idx", "name", "version"),
Index("rel_class_trove_id_idx", "trove_id"),
Index("rel_class_version_id_idx", "version"),
)
class JournalEntry(db.ModelBase):
__tablename__ = "journals"
@declared_attr
def __table_args__(cls): # noqa
return (
Index(
"journals_changelog",
"submitted_date", "name", "version", "action",
),
Index("journals_id_idx", "id"),
Index("journals_name_idx", "name"),
Index("journals_version_idx", "version"),
Index(
"journals_latest_releases",
"submitted_date", "name", "version",
postgresql_where=(
(cls.version != None) & (cls.action == "new release") # noqa
),
),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Text)
version = Column(Text)
action = Column(Text)
submitted_date = Column(
DateTime(timezone=False),
nullable=False,
server_default=sql.func.now(),
)
_submitted_by = Column(
"submitted_by",
CIText,
ForeignKey(
"accounts_user.username",
onupdate="CASCADE",
),
)
submitted_by = orm.relationship(User)
submitted_from = Column(Text)
| [
"[email protected]"
] | |
33136c62dddf0c19cc065664f31a3052d8fec753 | 8a485c4ab5b12dd5cd7db5de337fc1aedafe9247 | /belajarStaticFileCss/belajarStaticFileCss/urls.py | c022ea22454ff90ef3df75e116fc23c9e91728c7 | [] | no_license | frestea09/django | 91544adbc1d4a63bce54912e027ae7f30e325200 | 0bac6d82a2b694567abca863b4610e540140c50d | refs/heads/master | 2020-04-29T18:10:47.401557 | 2019-04-19T06:48:48 | 2019-04-19T06:48:48 | 176,316,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | """belajarStaticFileCss URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,re_path,include
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('',views.index,name='main_view'),
path('home/',views.index,name='main_view'),
    # patterns handed to include() must not be anchored with a trailing '$'
    re_path(r'^introduction/', include('introduction.urls')),
    re_path(r'^music/', include('music.urls')),
    re_path(r'^about/', include('about.urls')),
    re_path(r'^blog/', include('blog.urls')),
]
| [
"[email protected]"
] | |
0cfdb3d99d88dfe1c04b0ad5e0a4aa09ee652000 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/common/Lib/distutils/unixccompiler.py | 24a787da6e3d7bee12deb2c92f6079e414adaabd | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 8,204 | py | # 2017.02.03 21:58:03 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/distutils/unixccompiler.py
"""distutils.unixccompiler
Contains the UnixCCompiler class, a subclass of CCompiler that handles
the "typical" Unix-style command-line C compiler:
* macros defined with -Dname[=value]
* macros undefined with -Uname
* include search directories specified with -Idir
* libraries specified with -lllib
* library search directories specified with -Ldir
* compile handled by 'cc' (or similar) executable with -c option:
compiles .c to .o
* link static library handled by 'ar' command (possibly with 'ranlib')
* link shared library handled by 'cc -shared'
"""
__revision__ = '$Id$'
import os, sys, re
from types import StringType, NoneType
from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import CCompiler, gen_preprocess_options, gen_lib_options
from distutils.errors import DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
if sys.platform == 'darwin':
import _osx_support
class UnixCCompiler(CCompiler):
compiler_type = 'unix'
executables = {'preprocessor': None,
'compiler': ['cc'],
'compiler_so': ['cc'],
'compiler_cxx': ['cc'],
'linker_so': ['cc', '-shared'],
'linker_exe': ['cc'],
'archiver': ['ar', '-cr'],
'ranlib': None}
if sys.platform[:6] == 'darwin':
executables['ranlib'] = ['ranlib']
src_extensions = ['.c',
'.C',
'.cc',
'.cxx',
'.cpp',
'.m']
obj_extension = '.o'
static_lib_extension = '.a'
shared_lib_extension = '.so'
dylib_lib_extension = '.dylib'
static_lib_format = shared_lib_format = dylib_lib_format = 'lib%s%s'
if sys.platform == 'cygwin':
exe_extension = '.exe'
def preprocess(self, source, output_file = None, macros = None, include_dirs = None, extra_preargs = None, extra_postargs = None):
ignore, macros, include_dirs = self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = self.preprocessor + pp_opts
if output_file:
pp_args.extend(['-o', output_file])
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError as msg:
raise CompileError, msg
return
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_so = self.compiler_so
if sys.platform == 'darwin':
compiler_so = _osx_support.compiler_fixup(compiler_so, cc_args + extra_postargs)
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] + extra_postargs)
except DistutilsExecError as msg:
raise CompileError, msg
def create_static_lib(self, objects, output_libname, output_dir = None, debug = 0, target_lang = None):
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
self.mkpath(os.path.dirname(output_filename))
self.spawn(self.archiver + [output_filename] + objects + self.objects)
if self.ranlib:
try:
self.spawn(self.ranlib + [output_filename])
except DistutilsExecError as msg:
raise LibError, msg
else:
log.debug('skipping %s (up-to-date)', output_filename)
def link(self, target_desc, objects, output_filename, output_dir = None, libraries = None, library_dirs = None, runtime_library_dirs = None, export_symbols = None, debug = 0, extra_preargs = None, extra_postargs = None, build_temp = None, target_lang = None):
objects, output_dir = self._fix_object_args(objects, output_dir)
libraries, library_dirs, runtime_library_dirs = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries)
if type(output_dir) not in (StringType, NoneType):
raise TypeError, "'output_dir' must be a string or None"
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ld_args = objects + self.objects + lib_opts + ['-o', output_filename]
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
if target_lang == 'c++' and self.compiler_cxx:
i = 0
if os.path.basename(linker[0]) == 'env':
i = 1
while '=' in linker[i]:
i = i + 1
linker[i] = self.compiler_cxx[i]
if sys.platform == 'darwin':
linker = _osx_support.compiler_fixup(linker, ld_args)
self.spawn(linker + ld_args)
except DistutilsExecError as msg:
raise LinkError, msg
else:
log.debug('skipping %s (up-to-date)', output_filename)
return
def library_dir_option(self, dir):
return '-L' + dir
def _is_gcc(self, compiler_name):
return 'gcc' in compiler_name or 'g++' in compiler_name
def runtime_library_dir_option(self, dir):
compiler = os.path.basename(sysconfig.get_config_var('CC'))
if sys.platform[:6] == 'darwin':
return '-L' + dir
elif sys.platform[:5] == 'hp-ux':
if self._is_gcc(compiler):
return ['-Wl,+s', '-L' + dir]
return ['+s', '-L' + dir]
elif sys.platform[:7] == 'irix646' or sys.platform[:6] == 'osf1V5':
return ['-rpath', dir]
elif self._is_gcc(compiler):
return '-Wl,-R' + dir
else:
return '-R' + dir
def library_option(self, lib):
return '-l' + lib
def find_library_file(self, dirs, lib, debug = 0):
shared_f = self.library_filename(lib, lib_type='shared')
dylib_f = self.library_filename(lib, lib_type='dylib')
static_f = self.library_filename(lib, lib_type='static')
if sys.platform == 'darwin':
cflags = sysconfig.get_config_var('CFLAGS')
m = re.search('-isysroot\\s+(\\S+)', cflags)
if m is None:
sysroot = '/'
else:
sysroot = m.group(1)
for dir in dirs:
shared = os.path.join(dir, shared_f)
dylib = os.path.join(dir, dylib_f)
static = os.path.join(dir, static_f)
if sys.platform == 'darwin' and (dir.startswith('/System/') or dir.startswith('/usr/') and not dir.startswith('/usr/local/')):
shared = os.path.join(sysroot, dir[1:], shared_f)
dylib = os.path.join(sysroot, dir[1:], dylib_f)
static = os.path.join(sysroot, dir[1:], static_f)
if os.path.exists(dylib):
return dylib
if os.path.exists(shared):
return shared
if os.path.exists(static):
return static
return
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\common\Lib\distutils\unixccompiler.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:58:03 Central Europe (Standard Time)
| [
"[email protected]"
] | |
fbfc5a0933a68da21f882740ee160c58c51967f9 | 2a63d9e571323351868f72caee8a215d1a5a3617 | /cracking-the-coding-interview/ctci-bubble-sort.py | f95afbbc30efbe9aad7bbcb125a59d1ed0e049af | [] | no_license | JavierCabezas/hackerrank | eb66b16904d452e69f84fc6c67beb8fa60544e88 | 2c32093e7545d621c9158fd0422e91d5955b16d9 | refs/heads/master | 2021-01-14T07:46:35.179276 | 2020-03-02T00:07:08 | 2020-03-02T00:07:08 | 81,884,845 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | n = int(input().strip())
a = list(map(int, input().strip().split(' ')))
number_swaps = 0
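# Classic bubble sort: each pass bubbles the largest remaining element toward
# the end of the list; every adjacent swap is counted for the final report.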
for i in range(n):
for j in range(n - 1):
if a[j+1] < a[j]:
a[j + 1], a[j] = a[j], a[j + 1]
number_swaps += 1
print("Array is sorted in {0} swaps.".format(number_swaps))
print("First Element: {0}".format(a[0]))
print("Last Element: {0}".format(a[-1]))
| [
"[email protected]"
] | |
cec26d6da00096d0b6d4ca3b722c3252ef390eb5 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/157_v5/test_accents.py | 00e47c47c698345f81005a2762a0d7f4f640cf19 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,103 | py | # _______ p__
#
# ____ ? _______ ?
#
# # texts taken from:
# # https://losviajesdedomi.com/las-15-ciudades-mas-bonitas-de-espana/
# # and:
# # https://www2.rocketlanguages.com/french/lessons/french-accents/
# texts
# ("Denominada en Euskera como Donostia, está "
# "situada en el Golfo de Vizcaya en la provincia "
# "de Guipúzcoa. San Sebastián no es solo conocida "
# "por su afamado festival de cine, sino también "
# "por la belleza de sus calles, las cuales tienen "
# "un corte francés y aburguesado que atraen cada "
# "año a centenares de turistas."),
# ("La capital de Cataluña, es la ciudad más visitada "
# "de España y la segunda más poblada. Barcelona es "
# "también una de las ciudades europeas más "
# "cosmopolitas y todo un símbolo cultural, "
# "financiero, comercial y turístico. Para muchos "
# "Barcelona es la ciudad más atractiva de España y "
# "una de las más bonitas."),
# ("Sevilla es la capital de Andalucía, y para muchos, "
# "la ciudad más bonita de España. Pasear por sus calles, "
# "contemplar la Giralda, la Catedral o la Torre del Oro "
# "es una auténtica gozada. En primavera el olor a azahar "
# "lo envuelve todo. Al igual que Granada, toda la ciudad "
# "es una auténtica delicia. Su clima hace propensa la "
# "visita en casi cualquier época del año."),
# ("The 5 French accents;"
# "The cédille (cedilla) Ç ..."
# "The accent aigu (acute accent) é ..."
# "The accent circonflexe (circumflex) â, ê, î, ô, û ..."
# "The accent grave (grave accent) à, è, ù ..."
# "The accent tréma (dieresis/umlaut) ë, ï, ü"),
#
# e..
# 'á', 'é', 'ñ', 'ú' ,
# 'á', 'é', 'í', 'ñ' ,
# 'á', 'é', 'í', 'ñ' ,
# 'à', 'â', 'ç', 'è', 'é', 'ê', 'ë', 'î', 'ï', 'ô', 'ù', 'û', 'ü' ,
#
#
#
# ?p__.m__.p. "text, expected", z.. ? e..
# ___ test_filter_accents text e..
# # get rid of duplicates and sort results
# result ? ?
# a.. s.. l.. s.. ?
# ... a.. __ e.. | [
"[email protected]"
] | |
a9dbe3d85b795366c42a2dca4450d111ad924806 | f26d67e3e9f8b90e5d6243279a1c2ce87fa41d46 | /src/prodstats/db/__init__.py | ccce13712ed04ac410ff119837cc10ccc21f8acd | [
"MIT"
] | permissive | OCB-DS/prodstats | cf554e3abee651463e9f81606d4b633f464658a7 | 4ff5a6e0b0d6152af2d7e1f3844ede2d33ad4824 | refs/heads/master | 2022-11-25T15:30:06.988683 | 2020-08-02T16:08:05 | 2020-08-02T16:08:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,529 | py | # flake8: noqa
import logging
import gino
from config import DATABASE_CONFIG, DATABASE_POOL_SIZE_MAX, DATABASE_POOL_SIZE_MIN
logger = logging.getLogger(__name__)
db: gino.Gino = gino.Gino(
naming_convention={ # passed to sqlalchemy.MetaData
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s",
}
)
async def startup(
pool_min_size: int = DATABASE_POOL_SIZE_MIN,
pool_max_size: int = DATABASE_POOL_SIZE_MAX,
): # nocover (implicitly tested with test client)
if not db.is_bound():
await db.set_bind(db.url, min_size=pool_min_size, max_size=pool_max_size)
logger.debug(f"Connected to {db.url.__to_string__(hide_password=True)}")
async def shutdown(): # nocover (implicitly tested with test client)
await db.pop_bind().close()
logger.debug(f"Disconnected from {db.url.__to_string__(hide_password=True)}")
async def create_engine() -> gino.GinoEngine:
    engine = await gino.create_engine(db.url)
    logger.debug(f"Created engine for {db.url.__to_string__(hide_password=True)}")
    return engine
def qsize():
""" Get current number of connections """
return db.bind.raw_pool._queue.qsize()
# set some properties for convenience
db.qsize, db.startup, db.shutdown, db.create_engine, db.url = (
qsize,
startup,
shutdown,
create_engine,
DATABASE_CONFIG.url,
)
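# Minimal usage sketch (illustrative; assumes this package is imported as `db`
# from an asyncio entrypoint):
#
#   async def main():
#       await db.startup()
#       try:
#           engine = await db.create_engine()
#       finally:
#           await db.shutdown()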
| [
"[email protected]"
] | |
be5f84bdd762a47bd2e8d8f8e6560fa22e3d7252 | 0b20f4ce14b9ff77c84cedbecbaa29831335920d | /tests/common/goget/test_goget_base.py | 6fceebcf63ce6748fcc30c3fc5158fd279e5fee7 | [
"Apache-2.0"
] | permissive | sergesec488/checkov | 219c1b3864ab4f70b39a4cd79b041e98f3145364 | 56008e1c531b3626f14716067731be6e673040bc | refs/heads/master | 2023-04-10T12:26:49.749864 | 2021-02-26T18:36:52 | 2021-02-26T18:40:58 | 342,883,133 | 0 | 1 | Apache-2.0 | 2023-03-30T13:31:25 | 2021-02-27T15:01:08 | null | UTF-8 | Python | false | false | 483 | py | import os
import unittest
from tests.common.goget.local_getter import LocalGetter
class TestBaseGetter(unittest.TestCase):
def test_directory_creation(self):
current_dir = os.getcwd()
getter = LocalGetter(current_dir)
result_dir = getter.get()
print(current_dir)
print(result_dir)
self.assertTrue(current_dir in result_dir)
# Cleanup
os.rmdir(getter.temp_dir)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0b8c50f09cfd7e557aa018d9e64bb975e34ee8b2 | acbe6bd6cefaf8b12070d7258dab30e4f7fcebed | /tests/introspect/testInitNonFunction.py | 5ee1859253d354f127fef821f61f730071e1c884 | [
"MIT"
] | permissive | RogueScholar/debreate | 02c98c5a78d33041798410f0e3b99e80fda65d00 | dfe9bcac7333a53082b3a2ae169806cf604d59f6 | refs/heads/master | 2023-06-07T11:49:03.821969 | 2023-04-28T02:14:25 | 2023-04-28T02:14:25 | 253,707,766 | 0 | 0 | MIT | 2023-05-28T15:24:17 | 2020-04-07T06:34:47 | Python | UTF-8 | Python | false | false | 432 | py |
# ****************************************************
# * Copyright (C) 2023 - Jordan Irwin (AntumDeluge) *
# ****************************************************
# * This software is licensed under the MIT license. *
# * See: docs/LICENSE.txt for details. *
# ****************************************************
from libdbr.logger import Logger
logger = Logger()
logger.info("'init' is string ...")
init = ""
| [
"[email protected]"
] | |
4f57b1562622adb44ba012675f1ebb0744e9adb2 | b127b5947835293d57a355aa94688bdbd0702a27 | /cs-extract-key.py | 5e6860ba75e9bb4a1f07b37a3b7b50150b4dd7b8 | [] | no_license | DidierStevens/Beta | 87e17ee145e88284956a70bd6c52b8ca5933c43b | 5f10c6d2e906852f6e522e5a93ac92c86bc8bc7b | refs/heads/master | 2023-04-14T16:59:32.661442 | 2023-04-02T11:20:54 | 2023-04-02T11:20:54 | 69,382,206 | 231 | 77 | null | 2021-04-18T17:51:33 | 2016-09-27T17:34:53 | Python | UTF-8 | Python | false | false | 72,321 | py | #!/usr/bin/env python
from __future__ import print_function
__description__ = 'Extract cryptographic keys from Cobalt Strike beacon process dump'
__author__ = 'Didier Stevens'
__version__ = '0.0.2'
__date__ = '2021/11/02'
"""
Source code put in the public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
History:
2021/04/19: start
2021/04/21: version 3 and 4
2021/04/22: codepages
2021/04/25: option -c
2021/10/07: 0.0.2 updated missing modules logic
2021/10/17: made some key search space improvements
2021/10/28: added option -f
2021/10/29: added option -t
2021/10/31: changes to output
2021/11/02: man page
Todo:
Document flag arguments in man page
"""
import optparse
import sys
import os
import zipfile
import binascii
import random
import gzip
import collections
import glob
import textwrap
import re
import struct
import string
import math
import fnmatch
import json
import time
import csv
import hashlib
import hmac
try:
import Crypto.Cipher.AES
except ImportError:
print('Crypto.Cipher.AES module required: pip install pycryptodome')
exit(-1)
if sys.version_info[0] >= 3:
from io import BytesIO as DataIO
else:
from cStringIO import StringIO as DataIO
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
def PrintManual():
manual = r'''
Manual:
This tool extracts cryptographic keys, used by Cobalt Strike beacons and team servers to encrypt their C2 communication, from the beacon's process memory.
This tool takes one or more process memory dump files as input. These files will typically be minidump (.dmp) files created with Sysinternals' tools, but they can actually be any memory dump, as long as it fits in Python's memory.
The process memory dump files are handled by this tool as flat, raw data. This tool will not parse minidump file structures (this is foreseen as a feature in another upcoming tool).
As long as the data artifacts processed by this tool are not compressed or encoded, any memory dump is OK.
These data artifacts are found in writable process memory, so a minidump with only writable process memory is sufficient (procdump's option -mp).
When provided with a process memory dump, without using any option, this tool will look for unencrypted Beacon metadata inside the process memory dump.
This metadata starts with a sequence of 4 bytes: 00 00 BE EF. This tool searches for this sequence and parses the metadata that follows it.
Here is an example:
Command: cs-extract-key.py beacon.dmp
Output:
File: beacon.dmp
Position: 0x009094d0
Header: 0000beef
Datasize: 00000045
Raw key: 61f9880e25408a44ae43cdedb642f099
aeskey: 9f71c29ed793f778189a10aae54563cb
hmackey: e236ddf9d75b440484a0550e1ca3e2e8
charset: 04e4 ANSI Latin 1; Western European (Windows)
charset_oem: 01b5 OEM United States
Field: b'65153'
Field: b'2744'
Field: b'6.1'
Field: b'127.0.0.1'
Field: b'WIN-DFGODF5ES9C'
Field: b'root'
Field: b'1'
Field: b'1'
AES key:
Position: 0x001908e0
HMAC key:
Position: 0x001908f0
Right after the header (0x0000BEEF) is an integer with the datasize of the metadata. This should not be too long, very rarely will it be longer than 200 bytes. If it is 1000 or more bytes, then this is a clear indication that this is a false positive: that the found sequence 0x0000BEEF is not the start of a metadata byte sequence.
The "raw key" decoded from the metadata, is a random sequence of 16 bytes generated by the beacon (unique for each process) that serves to generate the HMAC and AES keys used by the beacon and team server to encrypt their communication.
The algorithm is: calculate the SHA256 hash of the raw key, take the first half of the SHA256 as the HMAC key, and take the second half as the AES key.
Thus the HMAC key and AES key itself are not contained inside the metadata, but are calculated from the raw key that is inside the metadata.
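This derivation is easy to reproduce. Here is a minimal Python sketch (illustrative only, not the tool's internal code; the sample raw key is the one from the output above) that follows the split described in the previous paragraph:

 import binascii
 import hashlib

 def DeriveKeys(rawkey):
     # SHA256 the 16-byte raw key and split the digest in two 16-byte halves
     digest = hashlib.sha256(rawkey).digest()
     hmackey = digest[:16]
     aeskey = digest[16:]
     return hmackey, aeskey

 hmackey, aeskey = DeriveKeys(binascii.unhexlify('61f9880e25408a44ae43cdedb642f099'))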
Several fields are found after the raw key, like the computername and username running the beacon. These fields can be used to validate that valid metadata was found. If these fields don't look like expected computernames and usernames, then we are most likely dealing with a false positive, that is best ignored.
After deriving the HMAC and AES key from the raw key, this tool will try to find these 2 keys in process memory. These are the Position: entries found in the output example above. If these HMAC and AES keys are not found inside process memory, then we are also most likely dealing with a false positive.
In our experience, detection of metadata as explained above is only successful with Cobalt Strike version 3.x beacons, and preferably with process memory dumps taken early in the lifespan of a running beacon.
To extract cryptographic keys from process memory of Cobalt Strike version 4.x beacons, another method must be followed.
This second method requires encrypted data obtained from a network capture file (extracted manually or with tool cs-parse-http-traffic.py).
Unlike metadata, HMAC and AES keys have no features that distinguish them from other data in process memory. They are just 16 bytes of data that look random.
To detect HMAC and AES keys inside process memory, the tool will proceed with a kind of dictionary attack: all possible 16-byte long, non-null byte sequences obtained from process memory, are considered as potential keys and used in attempts to decrypt and validate the encrypted data.
If this succeeds, a valid key was found.
I have observed that the HMAC and AES keys are often found in process memory, somewhere after string sha256\x00. As a speed optimization tactic, this tool will search for each occurrence of string sha256\x00 and try out the 0x500000 bytes that follow these string occurrences as HMAC and AES keys.
For a typical writable process memory dump of no more than 10MB, this takes a couple of minutes.
If no occurrences of string sha256\x00 are found, then the complete process memory dump is processed. This processing mode can be forced with option -f.
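The candidate generation and the HMAC validation can be sketched as follows (a simplified illustration; helper names are mine and details differ from the tool's implementation):

 import re
 import hmac
 import hashlib

 def GenerateCandidateKeys(data):
     # try the 0x500000 bytes after each sha256\x00 hit, or the whole dump otherwise
     anchors = [oMatch.start() for oMatch in re.finditer(re.escape(b'sha256\x00'), data)]
     windows = [data[anchor:anchor + 0x500000] for anchor in anchors] if anchors else [data]
     for window in windows:
         for position in range(len(window) - 15):
             candidate = window[position:position + 16]
             if candidate != b'\x00' * 16:
                 yield candidate

 def IsHmacKey(candidate, encrypted):
     # Cobalt Strike appends the first 16 bytes of an HMAC-SHA256 to the AES ciphertext
     ciphertext, signature = encrypted[:-16], encrypted[-16:]
     return hmac.new(candidate, ciphertext, hashlib.sha256).digest()[:16] == signature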
There is a small difference between the way that data is encrypted by the beacon and the way that data is encrypted by the team server.
Encrypted data sent by the team server to the beacon, contains tasks to be executed by the beacon. That encrypted data looks completely random. Option -t (task) must be used to provide this encrypted data (as a hexadecimal string) to the tool.
Encrypted data sent by the beacon to the team server, contains output from the tasks executed by the beacon. This output is called a callback. That encrypted data looks almost completely random. The first 4 bytes of that data represent the length of the encrypted data, encoded as an unsigned, 32-bit big-endian integer. Thus the first bytes of a callback are often 00 bytes. Callbacks can be concatenated together in one POST (default) request. Option -c (callback) must be used to provide this encrypted data (as a hexadecimal string) to the tool.
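Because of this length prefix, concatenated encrypted callbacks can be split apart before decryption. A minimal sketch (illustrative only):

 import struct

 def SplitCallbacks(data):
     callbacks = []
     while len(data) >= 4:
         length = struct.unpack('>I', data[:4])[0]  # unsigned 32-bit big-endian
         callbacks.append(data[4:4 + length])
         data = data[4 + length:]
     return callbacks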
Here is an example:
Command: cs-extract-key.py -t d12c14aa698a6b85a8ed3c3c33774fe79acadd0e95fa88f45b66d8751682db734472b2c9c874ccc70afa426fb2f510654df7042aa7d2384229518f26d1e044bd beacon.dmp
Output:
File: beacon.dmp
Searching for AES and HMAC keys
Searching after sha256\x00 string (0x6a8179)
AES key position: 0x006ae6c5
AES Key: 23a79f098615fdd74fbb25116f50aa09
HMAC key position: 0x006b19e5
HMAC Key: 5a8a1e3d8c75f37353937475bd498dfb
SHA256 raw key: 5a8a1e3d8c75f37353937475bd498dfb:23a79f098615fdd74fbb25116f50aa09
Searching for raw key
d12c14aa698a6b85a8ed3c3c33774fe79acadd0e95fa88f45b66d8751682db734472b2c9c874ccc70afa426fb2f510654df7042aa7d2384229518f26d1e044bd is encrypted data (a task), obtained from a network traffic capture file, that is passed to the tool with option -t.
In the example above, the AES and HMAC key are recovered, but not the raw key (the raw key is typically not found with version 4.x beacons).
Recovered HMAC keys are 100% guaranteed to be true positives, since they validate the HMAC signature of the encrypted data.
Recovered AES keys can sometimes be false positives: the decrypted data looks like a task, but it's not actually a task.
But whenever I found an AES key near an HMAC key, they were always true positives. I only had false positives when an AES key was recovered without recovering an HMAC key.
Note that the above method (using option -t or -c) also works for version 3.x beacons.
Beacon process memory can be encoded while the beacon is sleeping. This is done with a configuration option called a sleep mask. Since beacons sleep most of the time, it is very likely that you will take a process dump while a beacon is sleeping. This tool cannot recover cryptographic keys from the process memory of a beacon with a sleep mask. I am working on a tool to decode such process memory; its output can then be used by this tool.
'''
for line in manual.split('\n'):
print(textwrap.fill(line, 79))
DEFAULT_SEPARATOR = ','
QUOTE = '"'
CS_FIXED_IV = b'abcdefghijklmnop'
def PrintError(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
#Convert 2 Bytes If Python 3
def C2BIP3(string):
if sys.version_info[0] > 2:
return bytes([ord(x) for x in string])
else:
return string
#Convert 2 Integer If Python 2
def C2IIP2(data):
if sys.version_info[0] > 2:
return data
else:
return ord(data)
# CIC: Call If Callable
def CIC(expression):
if callable(expression):
return expression()
else:
return expression
# IFF: IF Function
def IFF(expression, valueTrue, valueFalse):
if expression:
return CIC(valueTrue)
else:
return CIC(valueFalse)
#-BEGINCODE cBinaryFile------------------------------------------------------------------------------
#import random
#import binascii
#import zipfile
#import gzip
#import sys
#if sys.version_info[0] >= 3:
# from io import BytesIO as DataIO
#else:
# from cStringIO import StringIO as DataIO
def LoremIpsumSentence(minimum, maximum):
words = ['lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur', 'adipiscing', 'elit', 'etiam', 'tortor', 'metus', 'cursus', 'sed', 'sollicitudin', 'ac', 'sagittis', 'eget', 'massa', 'praesent', 'sem', 'fermentum', 'dignissim', 'in', 'vel', 'augue', 'scelerisque', 'auctor', 'libero', 'nam', 'a', 'gravida', 'odio', 'duis', 'vestibulum', 'vulputate', 'quam', 'nec', 'cras', 'nibh', 'feugiat', 'ut', 'vitae', 'ornare', 'justo', 'orci', 'varius', 'natoque', 'penatibus', 'et', 'magnis', 'dis', 'parturient', 'montes', 'nascetur', 'ridiculus', 'mus', 'curabitur', 'nisl', 'egestas', 'urna', 'iaculis', 'lectus', 'maecenas', 'ultrices', 'velit', 'eu', 'porta', 'hac', 'habitasse', 'platea', 'dictumst', 'integer', 'id', 'commodo', 'mauris', 'interdum', 'malesuada', 'fames', 'ante', 'primis', 'faucibus', 'accumsan', 'pharetra', 'aliquam', 'nunc', 'at', 'est', 'non', 'leo', 'nulla', 'sodales', 'porttitor', 'facilisis', 'aenean', 'condimentum', 'rutrum', 'facilisi', 'tincidunt', 'laoreet', 'ultricies', 'neque', 'diam', 'euismod', 'consequat', 'tempor', 'elementum', 'lobortis', 'erat', 'ligula', 'risus', 'donec', 'phasellus', 'quisque', 'vivamus', 'pellentesque', 'tristique', 'venenatis', 'purus', 'mi', 'dictum', 'posuere', 'fringilla', 'quis', 'magna', 'pretium', 'felis', 'pulvinar', 'lacinia', 'proin', 'viverra', 'lacus', 'suscipit', 'aliquet', 'dui', 'molestie', 'dapibus', 'mollis', 'suspendisse', 'sapien', 'blandit', 'morbi', 'tellus', 'enim', 'maximus', 'semper', 'arcu', 'bibendum', 'convallis', 'hendrerit', 'imperdiet', 'finibus', 'fusce', 'congue', 'ullamcorper', 'placerat', 'nullam', 'eros', 'habitant', 'senectus', 'netus', 'turpis', 'luctus', 'volutpat', 'rhoncus', 'mattis', 'nisi', 'ex', 'tempus', 'eleifend', 'vehicula', 'class', 'aptent', 'taciti', 'sociosqu', 'ad', 'litora', 'torquent', 'per', 'conubia', 'nostra', 'inceptos', 'himenaeos']
sample = random.sample(words, random.randint(minimum, maximum))
sample[0] = sample[0].capitalize()
return ' '.join(sample) + '.'
def LoremIpsum(sentences):
return ' '.join([LoremIpsumSentence(15, 30) for i in range(sentences)])
STATE_START = 0
STATE_IDENTIFIER = 1
STATE_STRING = 2
STATE_SPECIAL_CHAR = 3
STATE_ERROR = 4
FUNCTIONNAME_REPEAT = 'repeat'
FUNCTIONNAME_RANDOM = 'random'
FUNCTIONNAME_CHR = 'chr'
FUNCTIONNAME_LOREMIPSUM = 'loremipsum'
def Tokenize(expression):
result = []
token = ''
state = STATE_START
while expression != '':
char = expression[0]
expression = expression[1:]
if char == "'":
if state == STATE_START:
state = STATE_STRING
elif state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
state = STATE_STRING
token = ''
elif state == STATE_STRING:
result.append([STATE_STRING, token])
state = STATE_START
token = ''
elif char >= '0' and char <= '9' or char.lower() >= 'a' and char.lower() <= 'z':
if state == STATE_START:
token = char
state = STATE_IDENTIFIER
else:
token += char
elif char == ' ':
if state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
elif state == STATE_STRING:
token += char
else:
if state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
result.append([STATE_SPECIAL_CHAR, char])
elif state == STATE_STRING:
token += char
else:
result.append([STATE_SPECIAL_CHAR, char])
token = ''
if state == STATE_IDENTIFIER:
result.append([state, token])
elif state == STATE_STRING:
result = [[STATE_ERROR, 'Error: string not closed', token]]
return result
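# Usage sketch (hypothetical input; values follow from the state machine above):
#   Tokenize("'ABC'+chr(0x41)")
#   returns [[STATE_STRING, 'ABC'], [STATE_SPECIAL_CHAR, '+'], [STATE_IDENTIFIER, 'chr'],
#            [STATE_SPECIAL_CHAR, '('], [STATE_IDENTIFIER, '0x41'], [STATE_SPECIAL_CHAR, ')']]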
def ParseFunction(tokens):
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] == STATE_STRING or tokens[0][0] == STATE_IDENTIFIER and tokens[0][1].startswith('0x'):
return [[FUNCTIONNAME_REPEAT, [[STATE_IDENTIFIER, '1'], tokens[0]]], tokens[1:]]
if tokens[0][0] != STATE_IDENTIFIER:
print('Parsing error')
return None, tokens
function = tokens[0][1]
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '(':
print('Parsing error')
return None, tokens
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
arguments = []
while True:
if tokens[0][0] != STATE_IDENTIFIER and tokens[0][0] != STATE_STRING:
print('Parsing error')
return None, tokens
arguments.append(tokens[0])
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] != STATE_SPECIAL_CHAR or (tokens[0][1] != ',' and tokens[0][1] != ')'):
print('Parsing error')
return None, tokens
if tokens[0][0] == STATE_SPECIAL_CHAR and tokens[0][1] == ')':
tokens = tokens[1:]
break
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
return [[function, arguments], tokens]
def Parse(expression):
tokens = Tokenize(expression)
if len(tokens) == 0:
print('Parsing error')
return None
if tokens[0][0] == STATE_ERROR:
print(tokens[0][1])
print(tokens[0][2])
print(expression)
return None
functioncalls = []
while True:
functioncall, tokens = ParseFunction(tokens)
if functioncall == None:
return None
functioncalls.append(functioncall)
if len(tokens) == 0:
return functioncalls
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '+':
print('Parsing error')
return None
tokens = tokens[1:]
def InterpretInteger(token):
if token[0] != STATE_IDENTIFIER:
return None
try:
return int(token[1])
except:
return None
def Hex2Bytes(hexadecimal):
if len(hexadecimal) % 2 == 1:
hexadecimal = '0' + hexadecimal
try:
return binascii.a2b_hex(hexadecimal)
except:
return None
def InterpretHexInteger(token):
if token[0] != STATE_IDENTIFIER:
return None
if not token[1].startswith('0x'):
return None
bytes = Hex2Bytes(token[1][2:])
if bytes == None:
return None
integer = 0
for byte in bytes:
integer = integer * 0x100 + C2IIP2(byte)
return integer
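# Example (hypothetical token): InterpretHexInteger([STATE_IDENTIFIER, '0x1234'])
# returns 4660, i.e. the bytes 12 34 folded into an integer big-endian style.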
def InterpretNumber(token):
number = InterpretInteger(token)
if number == None:
return InterpretHexInteger(token)
else:
return number
def InterpretBytes(token):
if token[0] == STATE_STRING:
return token[1]
if token[0] != STATE_IDENTIFIER:
return None
if not token[1].startswith('0x'):
return None
return Hex2Bytes(token[1][2:])
def CheckFunction(functionname, arguments, countarguments, maxcountarguments=None):
if maxcountarguments == None:
if countarguments == 0 and len(arguments) != 0:
print('Error: function %s takes no arguments, %d are given' % (functionname, len(arguments)))
return True
if countarguments == 1 and len(arguments) != 1:
print('Error: function %s takes 1 argument, %d are given' % (functionname, len(arguments)))
return True
if countarguments != len(arguments):
print('Error: function %s takes %d arguments, %d are given' % (functionname, countarguments, len(arguments)))
return True
else:
if len(arguments) < countarguments or len(arguments) > maxcountarguments:
print('Error: function %s takes between %d and %d arguments, %d are given' % (functionname, countarguments, maxcountarguments, len(arguments)))
return True
return False
def CheckNumber(argument, minimum=None, maximum=None):
number = InterpretNumber(argument)
if number == None:
print('Error: argument should be a number: %s' % argument[1])
return None
if minimum != None and number < minimum:
print('Error: argument should be minimum %d: %d' % (minimum, number))
return None
if maximum != None and number > maximum:
print('Error: argument should be maximum %d: %d' % (maximum, number))
return None
return number
def Interpret(expression):
functioncalls = Parse(expression)
if functioncalls == None:
return None
decoded = ''
for functioncall in functioncalls:
functionname, arguments = functioncall
if functionname == FUNCTIONNAME_REPEAT:
if CheckFunction(functionname, arguments, 2):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
bytes = InterpretBytes(arguments[1])
if bytes == None:
print('Error: argument should be a byte sequence: %s' % arguments[1][1])
return None
decoded += number * bytes
elif functionname == FUNCTIONNAME_RANDOM:
if CheckFunction(functionname, arguments, 1):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
decoded += ''.join([chr(random.randint(0, 255)) for x in range(number)])
elif functionname == FUNCTIONNAME_LOREMIPSUM:
if CheckFunction(functionname, arguments, 1):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
decoded += LoremIpsum(number)
elif functionname == FUNCTIONNAME_CHR:
if CheckFunction(functionname, arguments, 1, 2):
return None
number = CheckNumber(arguments[0], minimum=0, maximum=255)
if number == None:
return None
if len(arguments) == 1:
decoded += chr(number)
else:
number2 = CheckNumber(arguments[1], minimum=0, maximum=255)
if number2 == None:
return None
if number < number2:
decoded += ''.join([chr(n) for n in range(number, number2 + 1)])
else:
decoded += ''.join([chr(n) for n in range(number, number2 - 1, -1)])
else:
print('Error: unknown function: %s' % functionname)
return None
return decoded
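# Usage sketch (hypothetical expressions, following the grammar implemented above):
#   Interpret("'AB' + chr(0x43) + repeat(2, 'D')")  returns 'ABCDD'
#   Interpret("chr(0x41, 0x44)")                    returns 'ABCD' (inclusive range)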
def ParsePackExpression(data):
try:
packFormat, pythonExpression = data.split('#', 1)
data = struct.pack(packFormat, int(pythonExpression))
return data
except:
return None
FCH_FILENAME = 0
FCH_DATA = 1
FCH_ERROR = 2
def FilenameCheckHash(filename, literalfilename):
if literalfilename:
return FCH_FILENAME, filename
elif filename.startswith('#h#'):
result = Hex2Bytes(filename[3:].replace(' ', ''))
if result == None:
return FCH_ERROR, 'hexadecimal'
else:
return FCH_DATA, result
elif filename.startswith('#b#'):
try:
return FCH_DATA, binascii.a2b_base64(filename[3:])
except:
return FCH_ERROR, 'base64'
elif filename.startswith('#e#'):
result = Interpret(filename[3:])
if result == None:
return FCH_ERROR, 'expression'
else:
return FCH_DATA, C2BIP3(result)
elif filename.startswith('#p#'):
result = ParsePackExpression(filename[3:])
if result == None:
return FCH_ERROR, 'pack'
else:
return FCH_DATA, result
elif filename.startswith('#'):
return FCH_DATA, C2BIP3(filename[1:])
else:
return FCH_FILENAME, filename
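# The filename prefixes handled above turn a "filename" argument into literal data.
# Examples (hypothetical arguments):
#   FilenameCheckHash('#h#414243', False) returns (FCH_DATA, b'ABC')            hexadecimal
#   FilenameCheckHash('#b#QUJD', False)   returns (FCH_DATA, b'ABC')            base64
#   FilenameCheckHash('#p#>I#65', False)  returns (FCH_DATA, b'\x00\x00\x00A')  struct.pack
#   FilenameCheckHash('#ABC', False)      returns (FCH_DATA, b'ABC')            literal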
def AnalyzeFileError(filename):
PrintError('Error opening file %s' % filename)
PrintError(sys.exc_info()[1])
try:
if not os.path.exists(filename):
PrintError('The file does not exist')
elif os.path.isdir(filename):
PrintError('The file is a directory')
elif not os.path.isfile(filename):
PrintError('The file is not a regular file')
except:
pass
class cBinaryFile:
def __init__(self, filename, zippassword='infected', noextraction=False, literalfilename=False):
self.filename = filename
self.zippassword = zippassword
self.noextraction = noextraction
self.literalfilename = literalfilename
self.oZipfile = None
self.extracted = False
self.fIn = None
fch, data = FilenameCheckHash(self.filename, self.literalfilename)
if fch == FCH_ERROR:
line = 'Error %s parsing filename: %s' % (data, self.filename)
raise Exception(line)
try:
if self.filename == '':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
self.fIn = sys.stdin
elif fch == FCH_DATA:
self.fIn = DataIO(data)
elif not self.noextraction and self.filename.lower().endswith('.zip'):
self.oZipfile = zipfile.ZipFile(self.filename, 'r')
if len(self.oZipfile.infolist()) == 1:
self.fIn = self.oZipfile.open(self.oZipfile.infolist()[0], 'r', self.zippassword)
self.extracted = True
else:
self.oZipfile.close()
self.oZipfile = None
self.fIn = open(self.filename, 'rb')
elif not self.noextraction and self.filename.lower().endswith('.gz'):
self.fIn = gzip.GzipFile(self.filename, 'rb')
self.extracted = True
else:
self.fIn = open(self.filename, 'rb')
except:
AnalyzeFileError(self.filename)
raise
def close(self):
if self.fIn != sys.stdin and self.fIn != None:
self.fIn.close()
if self.oZipfile != None:
self.oZipfile.close()
def read(self, size=None):
try:
fRead = self.fIn.buffer
except:
fRead = self.fIn
if size == None:
return fRead.read()
else:
return fRead.read(size)
def Data(self):
data = self.read()
self.close()
return data
#-ENDCODE cBinaryFile--------------------------------------------------------------------------------
def File2Strings(filename):
try:
if filename == '':
f = sys.stdin
else:
f = open(filename, 'r')
except:
return None
try:
        # list() so the result is a real list (not a lazy map object) that callers can concatenate under Python 3
        return list(map(lambda line: line.rstrip('\n'), f.readlines()))
except:
return None
finally:
if f != sys.stdin:
f.close()
def File2String(filename):
try:
f = open(filename, 'rb')
except:
return None
try:
return f.read()
except:
return None
finally:
f.close()
def ProcessAt(argument):
if argument.startswith('@'):
strings = File2Strings(argument[1:])
if strings == None:
raise Exception('Error reading %s' % argument)
else:
return strings
else:
return [argument]
def Glob(filename):
filenames = glob.glob(filename)
if len(filenames) == 0:
return [filename]
else:
return filenames
class cExpandFilenameArguments():
def __init__(self, filenames, literalfilenames=False, recursedir=False, checkfilenames=False, expressionprefix=None, flagprefix=None):
self.containsUnixShellStyleWildcards = False
self.warning = False
self.message = ''
self.filenameexpressionsflags = []
self.expressionprefix = expressionprefix
self.flagprefix = flagprefix
self.literalfilenames = literalfilenames
expression = ''
flag = ''
if len(filenames) == 0:
self.filenameexpressionsflags = [['', '', '']]
elif literalfilenames:
self.filenameexpressionsflags = [[filename, '', ''] for filename in filenames]
elif recursedir:
for dirwildcard in filenames:
if expressionprefix != None and dirwildcard.startswith(expressionprefix):
expression = dirwildcard[len(expressionprefix):]
elif flagprefix != None and dirwildcard.startswith(flagprefix):
flag = dirwildcard[len(flagprefix):]
else:
if dirwildcard.startswith('@'):
for filename in ProcessAt(dirwildcard):
self.filenameexpressionsflags.append([filename, expression, flag])
elif os.path.isfile(dirwildcard):
self.filenameexpressionsflags.append([dirwildcard, expression, flag])
else:
if os.path.isdir(dirwildcard):
dirname = dirwildcard
basename = '*'
else:
dirname, basename = os.path.split(dirwildcard)
if dirname == '':
dirname = '.'
for path, dirs, files in os.walk(dirname):
for filename in fnmatch.filter(files, basename):
self.filenameexpressionsflags.append([os.path.join(path, filename), expression, flag])
else:
for filename in list(collections.OrderedDict.fromkeys(sum(map(self.Glob, sum(map(ProcessAt, filenames), [])), []))):
if expressionprefix != None and filename.startswith(expressionprefix):
expression = filename[len(expressionprefix):]
elif flagprefix != None and filename.startswith(flagprefix):
flag = filename[len(flagprefix):]
else:
self.filenameexpressionsflags.append([filename, expression, flag])
self.warning = self.containsUnixShellStyleWildcards and len(self.filenameexpressionsflags) == 0
if self.warning:
self.message = "Your filename argument(s) contain Unix shell-style wildcards, but no files were matched.\nCheck your wildcard patterns or use option literalfilenames if you don't want wildcard pattern matching."
return
if self.filenameexpressionsflags == [] and (expression != '' or flag != ''):
self.filenameexpressionsflags = [['', expression, flag]]
if checkfilenames:
self.CheckIfFilesAreValid()
def Glob(self, filename):
if not ('?' in filename or '*' in filename or ('[' in filename and ']' in filename)):
return [filename]
self.containsUnixShellStyleWildcards = True
return glob.glob(filename)
def CheckIfFilesAreValid(self):
valid = []
doesnotexist = []
isnotafile = []
for filename, expression, flag in self.filenameexpressionsflags:
hashfile = False
try:
hashfile = FilenameCheckHash(filename, self.literalfilenames)[0] == FCH_DATA
except:
pass
if filename == '' or hashfile:
valid.append([filename, expression, flag])
elif not os.path.exists(filename):
doesnotexist.append(filename)
elif not os.path.isfile(filename):
isnotafile.append(filename)
else:
valid.append([filename, expression, flag])
self.filenameexpressionsflags = valid
if len(doesnotexist) > 0:
self.warning = True
self.message += 'The following files do not exist and will be skipped: ' + ' '.join(doesnotexist) + '\n'
if len(isnotafile) > 0:
self.warning = True
self.message += 'The following files are not regular files and will be skipped: ' + ' '.join(isnotafile) + '\n'
def Filenames(self):
if self.expressionprefix == None:
return [filename for filename, expression, flag in self.filenameexpressionsflags]
else:
return self.filenameexpressionsflags
def CheckJSON(stringJSON):
try:
object = json.loads(stringJSON)
except:
print('Error parsing JSON')
print(sys.exc_info()[1])
return None
if not isinstance(object, dict):
print('Error JSON is not a dictionary')
return None
if not 'version' in object:
print('Error JSON dictionary has no version')
return None
if object['version'] != 2:
print('Error JSON dictionary has wrong version')
return None
if not 'id' in object:
print('Error JSON dictionary has no id')
return None
if object['id'] != 'didierstevens.com':
print('Error JSON dictionary has wrong id')
return None
if not 'type' in object:
print('Error JSON dictionary has no type')
return None
if object['type'] != 'content':
print('Error JSON dictionary has wrong type')
return None
if not 'fields' in object:
print('Error JSON dictionary has no fields')
return None
if not 'name' in object['fields']:
print('Error JSON dictionary has no name field')
return None
if not 'content' in object['fields']:
print('Error JSON dictionary has no content field')
return None
if not 'items' in object:
print('Error JSON dictionary has no items')
return None
for item in object['items']:
item['content'] = binascii.a2b_base64(item['content'])
return object['items']
CUTTERM_NOTHING = 0
CUTTERM_POSITION = 1
CUTTERM_FIND = 2
CUTTERM_LENGTH = 3
def Replace(string, dReplacements):
if string in dReplacements:
return dReplacements[string]
else:
return string
def ParseInteger(argument):
sign = 1
if argument.startswith('+'):
argument = argument[1:]
elif argument.startswith('-'):
argument = argument[1:]
sign = -1
if argument.startswith('0x'):
return sign * int(argument[2:], 16)
else:
return sign * int(argument)
def ParseCutTerm(argument):
if argument == '':
return CUTTERM_NOTHING, None, ''
oMatch = re.match(r'\-?0x([0-9a-f]+)', argument, re.I)
if oMatch == None:
oMatch = re.match(r'\-?(\d+)', argument)
else:
value = int(oMatch.group(1), 16)
if argument.startswith('-'):
value = -value
return CUTTERM_POSITION, value, argument[len(oMatch.group(0)):]
if oMatch == None:
oMatch = re.match(r'\[([0-9a-f]+)\](\d+)?([+-](?:0x[0-9a-f]+|\d+))?', argument, re.I)
else:
value = int(oMatch.group(1))
if argument.startswith('-'):
value = -value
return CUTTERM_POSITION, value, argument[len(oMatch.group(0)):]
if oMatch == None:
oMatch = re.match(r"\[u?\'(.+?)\'\](\d+)?([+-](?:0x[0-9a-f]+|\d+))?", argument)
else:
if len(oMatch.group(1)) % 2 == 1:
raise Exception("Uneven length hexadecimal string")
else:
return CUTTERM_FIND, (binascii.a2b_hex(oMatch.group(1)), int(Replace(oMatch.group(2), {None: '1'})), ParseInteger(Replace(oMatch.group(3), {None: '0'}))), argument[len(oMatch.group(0)):]
if oMatch == None:
return None, None, argument
else:
if argument.startswith("[u'"):
# convert ascii to unicode 16 byte sequence
searchtext = oMatch.group(1).decode('unicode_escape').encode('utf16')[2:]
else:
searchtext = oMatch.group(1)
return CUTTERM_FIND, (searchtext, int(Replace(oMatch.group(2), {None: '1'})), ParseInteger(Replace(oMatch.group(3), {None: '0'}))), argument[len(oMatch.group(0)):]
def ParseCutArgument(argument):
type, value, remainder = ParseCutTerm(argument.strip())
if type == CUTTERM_NOTHING:
return CUTTERM_NOTHING, None, CUTTERM_NOTHING, None
elif type == None:
if remainder.startswith(':'):
typeLeft = CUTTERM_NOTHING
valueLeft = None
remainder = remainder[1:]
else:
return None, None, None, None
else:
typeLeft = type
valueLeft = value
if typeLeft == CUTTERM_POSITION and valueLeft < 0:
return None, None, None, None
if typeLeft == CUTTERM_FIND and valueLeft[1] == 0:
return None, None, None, None
if remainder.startswith(':'):
remainder = remainder[1:]
else:
return None, None, None, None
type, value, remainder = ParseCutTerm(remainder)
if type == CUTTERM_POSITION and remainder == 'l':
return typeLeft, valueLeft, CUTTERM_LENGTH, value
elif type == None or remainder != '':
return None, None, None, None
elif type == CUTTERM_FIND and value[1] == 0:
return None, None, None, None
else:
return typeLeft, valueLeft, type, value
def Find(data, value, nth, startposition=-1):
position = startposition
while nth > 0:
position = data.find(value, position + 1)
if position == -1:
return -1
nth -= 1
return position
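# Example (hypothetical values): Find(b'ABCABCABC', b'ABC', 2) returns 3,
# the start position of the second occurrence of the search value.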
def CutData(stream, cutArgument):
if cutArgument == '':
return [stream, None, None]
typeLeft, valueLeft, typeRight, valueRight = ParseCutArgument(cutArgument)
if typeLeft == None:
return [stream, None, None]
if typeLeft == CUTTERM_NOTHING:
positionBegin = 0
elif typeLeft == CUTTERM_POSITION:
positionBegin = valueLeft
elif typeLeft == CUTTERM_FIND:
positionBegin = Find(stream, valueLeft[0], valueLeft[1])
if positionBegin == -1:
return ['', None, None]
positionBegin += valueLeft[2]
else:
raise Exception("Unknown value typeLeft")
if typeRight == CUTTERM_NOTHING:
positionEnd = len(stream)
elif typeRight == CUTTERM_POSITION and valueRight < 0:
positionEnd = len(stream) + valueRight
elif typeRight == CUTTERM_POSITION:
positionEnd = valueRight + 1
elif typeRight == CUTTERM_LENGTH:
positionEnd = positionBegin + valueRight
elif typeRight == CUTTERM_FIND:
positionEnd = Find(stream, valueRight[0], valueRight[1], positionBegin)
if positionEnd == -1:
return ['', None, None]
else:
positionEnd += len(valueRight[0])
positionEnd += valueRight[2]
else:
raise Exception("Unknown value typeRight")
return [stream[positionBegin:positionEnd], positionBegin, positionEnd]
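# Cut expression sketches (hypothetical input b'ABCDEFGH'):
#   CutData(b'ABCDEFGH', '2:5')  returns [b'CDEF', 2, 6]  (positions 2 through 5, inclusive)
#   CutData(b'ABCDEFGH', '0:3l') returns [b'ABC', 0, 3]   (length 3 starting at position 0)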
#-BEGINCODE cDump------------------------------------------------------------------------------------
#import binascii
#import sys
#if sys.version_info[0] >= 3:
# from io import StringIO
#else:
# from cStringIO import StringIO
class cDump():
def __init__(self, data, prefix='', offset=0, dumplinelength=16):
self.data = data
self.prefix = prefix
self.offset = offset
self.dumplinelength = dumplinelength
def HexDump(self):
oDumpStream = self.cDumpStream(self.prefix)
hexDump = ''
for i, b in enumerate(self.data):
if i % self.dumplinelength == 0 and hexDump != '':
oDumpStream.Addline(hexDump)
hexDump = ''
hexDump += IFF(hexDump == '', '', ' ') + '%02X' % self.C2IIP2(b)
oDumpStream.Addline(hexDump)
return oDumpStream.Content()
def CombineHexAscii(self, hexDump, asciiDump):
if hexDump == '':
return ''
countSpaces = 3 * (self.dumplinelength - len(asciiDump))
if len(asciiDump) <= self.dumplinelength / 2:
countSpaces += 1
return hexDump + ' ' + (' ' * countSpaces) + asciiDump
def HexAsciiDump(self, rle=False):
oDumpStream = self.cDumpStream(self.prefix)
position = ''
hexDump = ''
asciiDump = ''
previousLine = None
countRLE = 0
for i, b in enumerate(self.data):
b = self.C2IIP2(b)
if i % self.dumplinelength == 0:
if hexDump != '':
line = self.CombineHexAscii(hexDump, asciiDump)
if not rle or line != previousLine:
if countRLE > 0:
oDumpStream.Addline('* %d 0x%02x' % (countRLE, countRLE * self.dumplinelength))
oDumpStream.Addline(position + line)
countRLE = 0
else:
countRLE += 1
previousLine = line
position = '%08X:' % (i + self.offset)
hexDump = ''
asciiDump = ''
if i % self.dumplinelength == self.dumplinelength / 2:
hexDump += ' '
hexDump += ' %02X' % b
asciiDump += IFF(b >= 32 and b < 127, chr(b), '.')
if countRLE > 0:
oDumpStream.Addline('* %d 0x%02x' % (countRLE, countRLE * self.dumplinelength))
oDumpStream.Addline(self.CombineHexAscii(position + hexDump, asciiDump))
return oDumpStream.Content()
def Base64Dump(self, nowhitespace=False):
encoded = binascii.b2a_base64(self.data).decode().strip()
if nowhitespace:
return encoded
oDumpStream = self.cDumpStream(self.prefix)
length = 64
for i in range(0, len(encoded), length):
oDumpStream.Addline(encoded[0+i:length+i])
return oDumpStream.Content()
class cDumpStream():
def __init__(self, prefix=''):
self.oStringIO = StringIO()
self.prefix = prefix
def Addline(self, line):
if line != '':
self.oStringIO.write(self.prefix + line + '\n')
def Content(self):
return self.oStringIO.getvalue()
@staticmethod
def C2IIP2(data):
if sys.version_info[0] > 2:
return data
else:
return ord(data)
#-ENDCODE cDump--------------------------------------------------------------------------------------
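# Usage sketch (hypothetical data):
#   cDump(b'ABC').HexDump()                     returns '41 42 43\n'
#   cDump(b'ABC').Base64Dump(nowhitespace=True) returns 'QUJD'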
def IfWIN32SetBinary(io):
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(io.fileno(), os.O_BINARY)
#Fix for http://bugs.python.org/issue11395
def StdoutWriteChunked(data):
if sys.version_info[0] > 2:
if isinstance(data, str):
sys.stdout.write(data)
else:
sys.stdout.buffer.write(data)
else:
while data != '':
sys.stdout.write(data[0:10000])
try:
sys.stdout.flush()
except IOError:
return
data = data[10000:]
class cVariables():
def __init__(self, variablesstring='', separator=DEFAULT_SEPARATOR):
self.dVariables = {}
if variablesstring == '':
return
for variable in variablesstring.split(separator):
name, value = VariableNameValue(variable)
self.dVariables[name] = value
def SetVariable(self, name, value):
self.dVariables[name] = value
def Instantiate(self, astring):
for key, value in self.dVariables.items():
astring = astring.replace('%' + key + '%', value)
return astring
class cOutput():
def __init__(self, filenameOption=None, binary=False):
self.starttime = time.time()
self.filenameOption = filenameOption
self.separateFiles = False
self.progress = False
self.console = False
self.head = False
self.headCounter = 0
self.tail = False
self.tailQueue = []
self.fOut = None
self.oCsvWriter = None
self.rootFilenames = {}
self.binary = binary
if self.binary:
self.fileoptions = 'wb'
else:
self.fileoptions = 'w'
if self.filenameOption:
if self.ParseHash(self.filenameOption):
if not self.separateFiles and self.filename != '':
self.fOut = open(self.filename, self.fileoptions)
elif self.filenameOption != '':
self.fOut = open(self.filenameOption, self.fileoptions)
def ParseHash(self, option):
if option.startswith('#'):
position = self.filenameOption.find('#', 1)
if position > 1:
switches = self.filenameOption[1:position]
self.filename = self.filenameOption[position + 1:]
for switch in switches:
if switch == 's':
self.separateFiles = True
elif switch == 'p':
self.progress = True
elif switch == 'c':
self.console = True
elif switch == 'l':
pass
elif switch == 'g':
if self.filename != '':
extra = self.filename + '-'
else:
extra = ''
self.filename = '%s-%s%s.txt' % (os.path.splitext(os.path.basename(sys.argv[0]))[0], extra, self.FormatTime())
elif switch == 'h':
self.head = True
elif switch == 't':
self.tail = True
else:
return False
return True
return False
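    # Filename option sketch (hypothetical): an output option of '#sc#result-%r%.txt'
    # enables separate files per input (switch s) and console echo (switch c);
    # %r% is expanded per input file by cVariables in Filename() below.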
@staticmethod
def FormatTime(epoch=None):
if epoch == None:
epoch = time.time()
return '%04d%02d%02d-%02d%02d%02d' % time.localtime(epoch)[0:6]
def RootUnique(self, root):
if not root in self.rootFilenames:
self.rootFilenames[root] = None
return root
iter = 1
while True:
newroot = '%s_%04d' % (root, iter)
if not newroot in self.rootFilenames:
self.rootFilenames[newroot] = None
return newroot
iter += 1
def LineSub(self, line, eol):
if self.fOut == None or self.console:
try:
print(line, end=eol)
except UnicodeEncodeError:
encoding = sys.stdout.encoding
print(line.encode(encoding, errors='backslashreplace').decode(encoding), end=eol)
# sys.stdout.flush()
if self.fOut != None:
self.fOut.write(line + '\n')
self.fOut.flush()
def Line(self, line, eol='\n'):
if self.head:
if self.headCounter < 10:
self.LineSub(line, eol)
elif self.tail:
self.tailQueue = self.tailQueue[-9:] + [[line, eol]]
self.headCounter += 1
elif self.tail:
self.tailQueue = self.tailQueue[-9:] + [[line, eol]]
else:
self.LineSub(line, eol)
def LineTimestamped(self, line):
self.Line('%s: %s' % (self.FormatTime(), line))
def WriteBinary(self, data):
if self.fOut != None:
self.fOut.write(data)
self.fOut.flush()
else:
IfWIN32SetBinary(sys.stdout)
StdoutWriteChunked(data)
def CSVWriteRow(self, row):
if self.oCsvWriter == None:
self.StringIOCSV = StringIO()
# self.oCsvWriter = csv.writer(self.fOut)
self.oCsvWriter = csv.writer(self.StringIOCSV)
self.oCsvWriter.writerow(row)
self.Line(self.StringIOCSV.getvalue(), '')
self.StringIOCSV.truncate(0)
self.StringIOCSV.seek(0)
def Filename(self, filename, index, total):
self.separateFilename = filename
if self.progress:
if index == 0:
eta = ''
else:
seconds = int(float((time.time() - self.starttime) / float(index)) * float(total - index))
eta = 'estimation %d seconds left, finished %s ' % (seconds, self.FormatTime(time.time() + seconds))
PrintError('%d/%d %s%s' % (index + 1, total, eta, self.separateFilename))
if self.separateFiles and self.filename != '':
oFilenameVariables = cVariables()
oFilenameVariables.SetVariable('f', self.separateFilename)
basename = os.path.basename(self.separateFilename)
oFilenameVariables.SetVariable('b', basename)
oFilenameVariables.SetVariable('d', os.path.dirname(self.separateFilename))
root, extension = os.path.splitext(basename)
oFilenameVariables.SetVariable('r', root)
oFilenameVariables.SetVariable('ru', self.RootUnique(root))
oFilenameVariables.SetVariable('e', extension)
self.Close()
self.fOut = open(oFilenameVariables.Instantiate(self.filename), self.fileoptions)
def Close(self):
if self.head and self.tail and len(self.tailQueue) > 0:
self.LineSub('...', '\n')
for line, eol in self.tailQueue:
self.LineSub(line, eol)
self.headCounter = 0
self.tailQueue = []
if self.fOut != None:
self.fOut.close()
self.fOut = None
def ToString(value):
if isinstance(value, str):
return value
else:
return str(value)
def Quote(value, separator, quote):
value = ToString(value)
if len(value) > 1 and value[0] == quote and value[-1] == quote:
return value
if separator in value or value == '':
return quote + value + quote
else:
return value
def MakeCSVLine(row, separator, quote):
return separator.join([Quote(value, separator, quote) for value in row])
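# Example (hypothetical values): MakeCSVLine(['a', 'b,c'], ',', '"') returns 'a,"b,c"';
# only values containing the separator (or empty values) get quoted.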
class cLogfile():
def __init__(self, keyword, comment):
self.starttime = time.time()
self.errors = 0
if keyword == '':
self.oOutput = None
else:
self.oOutput = cOutput('%s-%s-%s.log' % (os.path.splitext(os.path.basename(sys.argv[0]))[0], keyword, self.FormatTime()))
self.Line('Start')
self.Line('UTC', '%04d%02d%02d-%02d%02d%02d' % time.gmtime(time.time())[0:6])
self.Line('Comment', comment)
self.Line('Args', repr(sys.argv))
self.Line('Version', __version__)
self.Line('Python', repr(sys.version_info))
self.Line('Platform', sys.platform)
self.Line('CWD', repr(os.getcwd()))
@staticmethod
def FormatTime(epoch=None):
if epoch == None:
epoch = time.time()
return '%04d%02d%02d-%02d%02d%02d' % time.localtime(epoch)[0:6]
def Line(self, *line):
if self.oOutput != None:
self.oOutput.Line(MakeCSVLine((self.FormatTime(), ) + line, DEFAULT_SEPARATOR, QUOTE))
def LineError(self, *line):
self.Line('Error', *line)
self.errors += 1
def Close(self):
if self.oOutput != None:
self.Line('Finish', '%d error(s)' % self.errors, '%d second(s)' % (time.time() - self.starttime))
self.oOutput.Close()
def CalculateByteStatistics(dPrevalence=None, data=None):
averageConsecutiveByteDifference = None
if dPrevalence == None:
dPrevalence = {iter: 0 for iter in range(0x100)}
sumDifferences = 0.0
previous = None
if len(data) > 1:
for byte in data:
byte = C2IIP2(byte)
dPrevalence[byte] += 1
if previous != None:
sumDifferences += abs(byte - previous)
previous = byte
averageConsecutiveByteDifference = sumDifferences /float(len(data)-1)
sumValues = sum(dPrevalence.values())
countNullByte = dPrevalence[0]
countControlBytes = 0
countWhitespaceBytes = 0
countUniqueBytes = 0
for iter in range(1, 0x21):
if chr(iter) in string.whitespace:
countWhitespaceBytes += dPrevalence[iter]
else:
countControlBytes += dPrevalence[iter]
countControlBytes += dPrevalence[0x7F]
countPrintableBytes = 0
for iter in range(0x21, 0x7F):
countPrintableBytes += dPrevalence[iter]
countHighBytes = 0
for iter in range(0x80, 0x100):
countHighBytes += dPrevalence[iter]
countHexadecimalBytes = 0
countBASE64Bytes = 0
for iter in range(0x30, 0x3A):
countHexadecimalBytes += dPrevalence[iter]
countBASE64Bytes += dPrevalence[iter]
for iter in range(0x41, 0x47):
countHexadecimalBytes += dPrevalence[iter]
for iter in range(0x61, 0x67):
countHexadecimalBytes += dPrevalence[iter]
for iter in range(0x41, 0x5B):
countBASE64Bytes += dPrevalence[iter]
for iter in range(0x61, 0x7B):
countBASE64Bytes += dPrevalence[iter]
countBASE64Bytes += dPrevalence[ord('+')] + dPrevalence[ord('/')] + dPrevalence[ord('=')]
entropy = 0.0
for iter in range(0x100):
if dPrevalence[iter] > 0:
prevalence = float(dPrevalence[iter]) / float(sumValues)
entropy += - prevalence * math.log(prevalence, 2)
countUniqueBytes += 1
return sumValues, entropy, countUniqueBytes, countNullByte, countControlBytes, countWhitespaceBytes, countPrintableBytes, countHighBytes, countHexadecimalBytes, countBASE64Bytes, averageConsecutiveByteDifference
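# Example (hypothetical data): CalculateByteStatistics(data=b'AAAA') returns
# (4, 0.0, 1, 0, 0, 0, 4, 0, 4, 4, 0.0): 4 bytes, zero entropy, 1 unique byte,
# all printable, and 'A' counts as both a hexadecimal and a BASE64 character.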
def Unpack(format, data):
size = struct.calcsize(format)
result = list(struct.unpack(format, data[:size]))
result.append(data[size:])
return result
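# Example (hypothetical data): Unpack('>HH', b'\x00\x01\x00\x02tail')
# returns [1, 2, b'tail']: the unpacked fields plus the remaining bytes.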
def InstantiateCOutput(options):
filenameOption = None
if options.output != '':
filenameOption = options.output
return cOutput(filenameOption)
def FindAll(data, sub):
result = []
start = 0
while True:
position = data.find(sub, start)
if position == -1:
return result
result.append(position)
start = position + 1
class cStruct(object):
def __init__(self, data):
self.data = data
self.originaldata = data
def Unpack(self, format):
formatsize = struct.calcsize(format)
if len(self.data) < formatsize:
raise Exception('Not enough data')
tounpack = self.data[:formatsize]
self.data = self.data[formatsize:]
return struct.unpack(format, tounpack)
def Truncate(self, length):
self.data = self.data[:length]
def GetBytes(self, length=None, peek=False):
if length == None:
length = len(self.data)
result = self.data[:length]
if not peek:
self.data = self.data[length:]
return result
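# Usage sketch (hypothetical data):
#   oStruct = cStruct(b'\x00\x00\x00\x2A' + b'rest')
#   oStruct.Unpack('>I')  returns (42,) and consumes 4 bytes
#   oStruct.GetBytes()    returns b'rest'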
#https://msdn.microsoft.com/en-us/library/windows/desktop/dd317756%28v=vs.85%29.aspx
dCodepages = {
37: 'IBM EBCDIC US-Canada',
437: 'OEM United States',
500: 'IBM EBCDIC International',
708: 'Arabic (ASMO 708)',
709: 'Arabic (ASMO-449+, BCON V4)',
710: 'Arabic - Transparent Arabic',
720: 'Arabic (Transparent ASMO); Arabic (DOS)',
737: 'OEM Greek (formerly 437G); Greek (DOS)',
775: 'OEM Baltic; Baltic (DOS)',
850: 'OEM Multilingual Latin 1; Western European (DOS)',
852: 'OEM Latin 2; Central European (DOS)',
855: 'OEM Cyrillic (primarily Russian)',
857: 'OEM Turkish; Turkish (DOS)',
858: 'OEM Multilingual Latin 1 + Euro symbol',
860: 'OEM Portuguese; Portuguese (DOS)',
861: 'OEM Icelandic; Icelandic (DOS)',
862: 'OEM Hebrew; Hebrew (DOS)',
863: 'OEM French Canadian; French Canadian (DOS)',
864: 'OEM Arabic; Arabic (864)',
865: 'OEM Nordic; Nordic (DOS)',
866: 'OEM Russian; Cyrillic (DOS)',
869: 'OEM Modern Greek; Greek, Modern (DOS)',
870: 'IBM EBCDIC Multilingual/ROECE (Latin 2); IBM EBCDIC Multilingual Latin 2',
874: 'ANSI/OEM Thai (ISO 8859-11); Thai (Windows)',
875: 'IBM EBCDIC Greek Modern',
932: 'ANSI/OEM Japanese; Japanese (Shift-JIS)',
936: 'ANSI/OEM Simplified Chinese (PRC, Singapore); Chinese Simplified (GB2312)',
949: 'ANSI/OEM Korean (Unified Hangul Code)',
950: 'ANSI/OEM Traditional Chinese (Taiwan; Hong Kong SAR, PRC); Chinese Traditional (Big5)',
1026: 'IBM EBCDIC Turkish (Latin 5)',
1047: 'IBM EBCDIC Latin 1/Open System',
1140: 'IBM EBCDIC US-Canada (037 + Euro symbol); IBM EBCDIC (US-Canada-Euro)',
1141: 'IBM EBCDIC Germany (20273 + Euro symbol); IBM EBCDIC (Germany-Euro)',
1142: 'IBM EBCDIC Denmark-Norway (20277 + Euro symbol); IBM EBCDIC (Denmark-Norway-Euro)',
1143: 'IBM EBCDIC Finland-Sweden (20278 + Euro symbol); IBM EBCDIC (Finland-Sweden-Euro)',
1144: 'IBM EBCDIC Italy (20280 + Euro symbol); IBM EBCDIC (Italy-Euro)',
1145: 'IBM EBCDIC Latin America-Spain (20284 + Euro symbol); IBM EBCDIC (Spain-Euro)',
1146: 'IBM EBCDIC United Kingdom (20285 + Euro symbol); IBM EBCDIC (UK-Euro)',
1147: 'IBM EBCDIC France (20297 + Euro symbol); IBM EBCDIC (France-Euro)',
1148: 'IBM EBCDIC International (500 + Euro symbol); IBM EBCDIC (International-Euro)',
1149: 'IBM EBCDIC Icelandic (20871 + Euro symbol); IBM EBCDIC (Icelandic-Euro)',
1200: 'Unicode UTF-16, little endian byte order (BMP of ISO 10646); available only to managed applications',
1201: 'Unicode UTF-16, big endian byte order; available only to managed applications',
1250: 'ANSI Central European; Central European (Windows)',
1251: 'ANSI Cyrillic; Cyrillic (Windows)',
1252: 'ANSI Latin 1; Western European (Windows)',
1253: 'ANSI Greek; Greek (Windows)',
1254: 'ANSI Turkish; Turkish (Windows)',
1255: 'ANSI Hebrew; Hebrew (Windows)',
1256: 'ANSI Arabic; Arabic (Windows)',
1257: 'ANSI Baltic; Baltic (Windows)',
1258: 'ANSI/OEM Vietnamese; Vietnamese (Windows)',
1361: 'Korean (Johab)',
10000: 'MAC Roman; Western European (Mac)',
10001: 'Japanese (Mac)',
10002: 'MAC Traditional Chinese (Big5); Chinese Traditional (Mac)',
10003: 'Korean (Mac)',
10004: 'Arabic (Mac)',
10005: 'Hebrew (Mac)',
10006: 'Greek (Mac)',
10007: 'Cyrillic (Mac)',
10008: 'MAC Simplified Chinese (GB 2312); Chinese Simplified (Mac)',
10010: 'Romanian (Mac)',
10017: 'Ukrainian (Mac)',
10021: 'Thai (Mac)',
10029: 'MAC Latin 2; Central European (Mac)',
10079: 'Icelandic (Mac)',
10081: 'Turkish (Mac)',
10082: 'Croatian (Mac)',
12000: 'Unicode UTF-32, little endian byte order; available only to managed applications',
12001: 'Unicode UTF-32, big endian byte order; available only to managed applications',
20000: 'CNS Taiwan; Chinese Traditional (CNS)',
20001: 'TCA Taiwan',
20002: 'Eten Taiwan; Chinese Traditional (Eten)',
20003: 'IBM5550 Taiwan',
20004: 'TeleText Taiwan',
20005: 'Wang Taiwan',
20105: 'IA5 (IRV International Alphabet No. 5, 7-bit); Western European (IA5)',
20106: 'IA5 German (7-bit)',
20107: 'IA5 Swedish (7-bit)',
20108: 'IA5 Norwegian (7-bit)',
20127: 'US-ASCII (7-bit)',
20261: 'T.61',
20269: 'ISO 6937 Non-Spacing Accent',
20273: 'IBM EBCDIC Germany',
20277: 'IBM EBCDIC Denmark-Norway',
20278: 'IBM EBCDIC Finland-Sweden',
20280: 'IBM EBCDIC Italy',
20284: 'IBM EBCDIC Latin America-Spain',
20285: 'IBM EBCDIC United Kingdom',
20290: 'IBM EBCDIC Japanese Katakana Extended',
20297: 'IBM EBCDIC France',
20420: 'IBM EBCDIC Arabic',
20423: 'IBM EBCDIC Greek',
20424: 'IBM EBCDIC Hebrew',
20833: 'IBM EBCDIC Korean Extended',
20838: 'IBM EBCDIC Thai',
20866: 'Russian (KOI8-R); Cyrillic (KOI8-R)',
20871: 'IBM EBCDIC Icelandic',
20880: 'IBM EBCDIC Cyrillic Russian',
20905: 'IBM EBCDIC Turkish',
20924: 'IBM EBCDIC Latin 1/Open System (1047 + Euro symbol)',
20932: 'Japanese (JIS 0208-1990 and 0212-1990)',
20936: 'Simplified Chinese (GB2312); Chinese Simplified (GB2312-80)',
20949: 'Korean Wansung',
21025: 'IBM EBCDIC Cyrillic Serbian-Bulgarian',
21027: '(deprecated)',
21866: 'Ukrainian (KOI8-U); Cyrillic (KOI8-U)',
28591: 'ISO 8859-1 Latin 1; Western European (ISO)',
28592: 'ISO 8859-2 Central European; Central European (ISO)',
28593: 'ISO 8859-3 Latin 3',
28594: 'ISO 8859-4 Baltic',
28595: 'ISO 8859-5 Cyrillic',
28596: 'ISO 8859-6 Arabic',
28597: 'ISO 8859-7 Greek',
28598: 'ISO 8859-8 Hebrew; Hebrew (ISO-Visual)',
28599: 'ISO 8859-9 Turkish',
28603: 'ISO 8859-13 Estonian',
28605: 'ISO 8859-15 Latin 9',
29001: 'Europa 3',
38598: 'ISO 8859-8 Hebrew; Hebrew (ISO-Logical)',
50220: 'ISO 2022 Japanese with no halfwidth Katakana; Japanese (JIS)',
50221: 'ISO 2022 Japanese with halfwidth Katakana; Japanese (JIS-Allow 1 byte Kana)',
50222: 'ISO 2022 Japanese JIS X 0201-1989; Japanese (JIS-Allow 1 byte Kana - SO/SI)',
50225: 'ISO 2022 Korean',
50227: 'ISO 2022 Simplified Chinese; Chinese Simplified (ISO 2022)',
50229: 'ISO 2022 Traditional Chinese',
50930: 'EBCDIC Japanese (Katakana) Extended',
50931: 'EBCDIC US-Canada and Japanese',
50933: 'EBCDIC Korean Extended and Korean',
50935: 'EBCDIC Simplified Chinese Extended and Simplified Chinese',
50936: 'EBCDIC Simplified Chinese',
50937: 'EBCDIC US-Canada and Traditional Chinese',
50939: 'EBCDIC Japanese (Latin) Extended and Japanese',
51932: 'EUC Japanese',
51936: 'EUC Simplified Chinese; Chinese Simplified (EUC)',
51949: 'EUC Korean',
51950: 'EUC Traditional Chinese',
52936: 'HZ-GB2312 Simplified Chinese; Chinese Simplified (HZ)',
54936: 'Windows XP and later: GB18030 Simplified Chinese (4 byte); Chinese Simplified (GB18030)',
57002: 'ISCII Devanagari',
57003: 'ISCII Bengali',
57004: 'ISCII Tamil',
57005: 'ISCII Telugu',
57006: 'ISCII Assamese',
57007: 'ISCII Oriya',
57008: 'ISCII Kannada',
57009: 'ISCII Malayalam',
57010: 'ISCII Gujarati',
57011: 'ISCII Punjabi',
65000: 'Unicode (UTF-7)',
65001: 'Unicode (UTF-8)'
}
def ExtractEncryptedCallback(data):
length = struct.unpack('>I', data[:4])[0]
ciphertext = data[4:4 + length]
if len(ciphertext) != length:
return None
else:
return ciphertext
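# Example (hypothetical data): ExtractEncryptedCallback(struct.pack('>I', 5) + b'ABCDE')
# returns b'ABCDE'; if fewer bytes follow than the announced length, it returns None.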
def ProcessBinaryFile(filename, content, cutexpression, flag, oOutput, oLogfile, options, oParserFlag):
if content == None:
try:
oBinaryFile = cBinaryFile(filename, C2BIP3(options.password), options.noextraction, options.literalfilenames)
except:
oLogfile.LineError('Opening file %s %s' % (filename, repr(sys.exc_info()[1])))
return
oLogfile.Line('Success', 'Opening file %s' % filename)
try:
data = oBinaryFile.read()
except:
oLogfile.LineError('Reading file %s %s' % (filename, repr(sys.exc_info()[1])))
return
data = CutData(data, cutexpression)[0]
oBinaryFile.close()
else:
data = content
(flagoptions, flagargs) = oParserFlag.parse_args(flag.split(' '))
try:
# ----- Put your data processing code here -----
        # Guard with short-circuit: oBinaryFile only exists when the data was read from a file (not JSON input)
        oOutput.Line('File: %s%s' % (filename, IFF(content == None and oBinaryFile.extracted, ' (extracted)', '')))
payload = None
if options.task != '':
payload = binascii.a2b_hex(options.task)
elif options.callback != '':
encryptedCallbackData = ExtractEncryptedCallback(binascii.a2b_hex(options.callback))
if encryptedCallbackData == None:
oOutput.Line("This doesn't look like callback data (or it is incomplete)")
return
payload = encryptedCallbackData
if payload == None:
for position in FindAll(data, b'\x00\x00\xBE\xEF\x00'):
oOutput.Line('Position: 0x%08x' % position)
oStruct = cStruct(data[position:position+1000])
beef = oStruct.Unpack('>I')[0]
oOutput.Line('Header: %08x' % beef)
datasize = oStruct.Unpack('>I')[0]
oOutput.Line('Datasize: %08x' % datasize)
oStruct.Truncate(datasize)
rawkey = oStruct.GetBytes(16)
oOutput.Line('Raw key: %s' % binascii.b2a_hex(rawkey).decode())
sha256hex = hashlib.sha256(rawkey).hexdigest()
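                # The 32-byte SHA256 digest of the raw key is split in two:
                # first 16 bytes (32 hex characters) = AES key, last 16 bytes = HMAC key.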
aeskey = sha256hex[:32]
hmackey = sha256hex[32:]
oOutput.Line(' aeskey: %s' % aeskey)
oOutput.Line(' hmackey: %s' % hmackey)
charset, charset_oem = oStruct.Unpack('<HH')
oOutput.Line('charset: %04x %s' % (charset, dCodepages.get(charset, '')))
oOutput.Line('charset_oem: %04x %s' % (charset_oem, dCodepages.get(charset_oem, '')))
peek = oStruct.GetBytes(peek=True)
if not re.match(b'[0-9]+\t[0-9]+\t[0-9]', peek):
bid, pid, port, flags = oStruct.Unpack('>IIHB')
oOutput.Line('bid: %04x %d' % (bid, bid))
oOutput.Line('pid: %04x %d' % (pid, pid))
oOutput.Line('port: %d' % port)
oOutput.Line('flags: %02x' % flags)
remainder = oStruct.GetBytes()
for field in remainder.split(b'\t'):
oOutput.Line('Field: %s' % field)
oOutput.Line('')
oOutput.Line('AES key:')
for position in FindAll(data, binascii.a2b_hex(aeskey)):
oOutput.Line(' Position: 0x%08x' % position)
oOutput.Line('')
oOutput.Line('HMAC key:')
for position in FindAll(data, binascii.a2b_hex(hmackey)):
oOutput.Line(' Position: 0x%08x' % position)
oOutput.Line('')
else:
oOutput.Line('Searching for AES and HMAC keys')
encryptedCallbackData = payload
encryptedData = encryptedCallbackData[:-16]
hmacSignatureMessage = encryptedCallbackData[-16:]
aeskey = None
hmackey = None
hmacaeskey = None
fullsearch = options.fullsearch
searchPositions = FindAll(data, b'sha256\x00')
if searchPositions == []:
fullsearch = True
if fullsearch:
searchPositions = [0]
starttime = time.time()
progressCounter = 0
for searchPosition in searchPositions:
searchRange = len(data)
if searchPosition != 0:
oOutput.Line('Searching after sha256\\x00 string (0x%x)' % searchPosition)
searchRange = 0x500000
for iter in range(searchRange):
if fullsearch and int((time.time() - starttime) / 60) > progressCounter:
progressCounter += 1
oOutput.Line('Progress: %d%% in %d seconds' % (float(iter) / float(searchRange) * 100.0, int(time.time() - starttime)))
iter = iter + searchPosition
key = data[iter:iter + 16]
if len(key) < 16:
break
if key == b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00':
continue
                    hmacSignatureCalculated = hmac.new(key, encryptedData, hashlib.sha256).digest()[:16]
                    if hmacSignatureMessage == hmacSignatureCalculated:
oOutput.Line('HMAC key position: 0x%08x' % iter)
oOutput.Line('HMAC Key: %s' % binascii.b2a_hex(key).decode())
hmackey = key
cypher = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CBC, CS_FIXED_IV)
decryptedData = cypher.decrypt(encryptedData)
callbackid = struct.unpack('>I', decryptedData[8:12])[0]
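                        # Plausibility check: the decrypted data is considered a real task/callback
                        # when the 32-bit big-endian type/ID field at offset 8 is small (< 256).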
if callbackid < 256:
oOutput.Line('AES key position: 0x%08x' % iter)
oOutput.Line('AES Key: %s' % binascii.b2a_hex(key).decode())
aeskey = key
if hmackey != None and aeskey != None and hmacaeskey == None:
hmacaeskey = '%s:%s' % (binascii.b2a_hex(hmackey).decode(), binascii.b2a_hex(aeskey).decode())
oOutput.Line('SHA256 raw key: %s' % hmacaeskey)
oOutput.Line('Searching for raw key')
if hmackey != None and aeskey != None:
for iter in range(searchRange):
iter = iter + searchPosition
key = data[iter:iter + 16]
if len(key) < 16:
break
if key == b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00':
continue
sha256 = hashlib.sha256(key).digest()
if aeskey == sha256[:16] and hmackey == sha256[16:]:
oOutput.Line('Raw key position: 0x%08x' % iter)
oOutput.Line('Raw Key: %s' % binascii.b2a_hex(key).decode())
# ----------------------------------------------
except:
oLogfile.LineError('Processing file %s %s' % (filename, repr(sys.exc_info()[1])))
if not options.ignoreprocessingerrors:
raise
def ProcessBinaryFiles(filenames, oLogfile, options, oParserFlag):
oOutput = InstantiateCOutput(options)
index = 0
if options.jsoninput:
items = CheckJSON(sys.stdin.read())
if items == None:
return
for item in items:
oOutput.Filename(item['name'], index, len(items))
index += 1
ProcessBinaryFile(item['name'], item['content'], '', '', oOutput, oLogfile, options, oParserFlag)
else:
for filename, cutexpression, flag in filenames:
oOutput.Filename(filename, index, len(filenames))
index += 1
ProcessBinaryFile(filename, None, cutexpression, flag, oOutput, oLogfile, options, oParserFlag)
def Main():
moredesc = '''
Source code put in the public domain by Didier Stevens, no Copyright
Use at your own risk
https://DidierStevens.com'''
oParserFlag = optparse.OptionParser(usage='\nFlag arguments start with #f#:')
oParserFlag.add_option('-l', '--length', action='store_true', default=False, help='Print length of files')
oParser = optparse.OptionParser(usage='usage: %prog [options] [[@]file]\n' + __description__ + moredesc, version='%prog ' + __version__)
oParser.add_option('-m', '--man', action='store_true', default=False, help='Print manual')
oParser.add_option('-t', '--task', type=str, default='', help='Encrypted task data (hexadecimal)')
oParser.add_option('-c', '--callback', type=str, default='', help='Encrypted callback data (hexadecimal)')
oParser.add_option('-f', '--fullsearch', action='store_true', default=False, help='Search the complete memory dump (in combination with options -t and -c)')
oParser.add_option('-o', '--output', type=str, default='', help='Output to file (# supported)')
oParser.add_option('-p', '--password', default='infected', help='The ZIP password to be used (default infected)')
oParser.add_option('-n', '--noextraction', action='store_true', default=False, help='Do not extract from archive file')
oParser.add_option('-l', '--literalfilenames', action='store_true', default=False, help='Do not interpret filenames')
oParser.add_option('-r', '--recursedir', action='store_true', default=False, help='Recurse directories (wildcards and here files (@...) allowed)')
oParser.add_option('--checkfilenames', action='store_true', default=False, help='Perform check if files exist prior to file processing')
oParser.add_option('-j', '--jsoninput', action='store_true', default=False, help='Consume JSON from stdin')
oParser.add_option('--logfile', type=str, default='', help='Create logfile with given keyword')
oParser.add_option('--logcomment', type=str, default='', help='A string with comments to be included in the log file')
oParser.add_option('--ignoreprocessingerrors', action='store_true', default=False, help='Ignore errors during file processing')
(options, args) = oParser.parse_args()
if options.man:
oParser.print_help()
oParserFlag.print_help()
PrintManual()
return
if len(args) != 0 and options.jsoninput:
print('Error: option -j can not be used with files')
return
oLogfile = cLogfile(options.logfile, options.logcomment)
oExpandFilenameArguments = cExpandFilenameArguments(args, options.literalfilenames, options.recursedir, options.checkfilenames, '#c#', '#f#')
oLogfile.Line('FilesCount', str(len(oExpandFilenameArguments.Filenames())))
oLogfile.Line('Files', repr(oExpandFilenameArguments.Filenames()))
if oExpandFilenameArguments.warning:
PrintError('\nWarning:')
PrintError(oExpandFilenameArguments.message)
oLogfile.Line('Warning', repr(oExpandFilenameArguments.message))
ProcessBinaryFiles(oExpandFilenameArguments.Filenames(), oLogfile, options, oParserFlag)
if oLogfile.errors > 0:
PrintError('Number of errors: %d' % oLogfile.errors)
oLogfile.Close()
if __name__ == '__main__':
Main()
| [
"[email protected]"
] | |
e8be4f8a2eb4e59320a4740f22abbba70ac66c02 | 75c4f5e6f840a14fed3e5d3e57012abf6d0e77db | /Pacote Dawload/Projeto progamas Python/ex1007 Diferença.py | 5234571cda69706fd7630b149bc7a6be7226d9e4 | [
"MIT"
] | permissive | wagnersistemalima/Exercicios-Python-URI-Online-Judge-Problems---Contests | fc378abca0264ceb7fa5feebc57df17d1372953a | d839a344b899c08f4199ff1ae22dd6ee931df6a2 | refs/heads/master | 2022-07-18T03:52:44.941510 | 2020-05-20T01:01:38 | 2020-05-20T01:01:38 | 264,508,452 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | #Ex1007 Diferença 09/04/2020
valor_a = int(input())
valor_b = int(input())
valor_c = int(input())
valor_d = int(input())
diferenca = valor_a * valor_b - valor_c * valor_d
print('DIFERENCA = {}'.format(diferenca)) | [
"[email protected]"
] | |
69848d8d6b615309e741c0e5995c99da100e36b3 | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /checkov/kubernetes/checks/resource/k8s/KubeControllerManagerServiceAccountCredentials.py | 2000075d5c1086b60c6802f3d5acd29a8ffa723f | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 1,103 | py | from typing import Any, Dict
from checkov.common.models.enums import CheckResult
from checkov.kubernetes.checks.resource.base_container_check import BaseK8sContainerCheck
class KubeControllerManagerServiceAccountCredentials(BaseK8sContainerCheck):
def __init__(self) -> None:
id = "CKV_K8S_108"
name = "Ensure that the --use-service-account-credentials argument is set to true"
super().__init__(name=name, id=id)
def scan_container_conf(self, metadata: Dict[str, Any], conf: Dict[str, Any]) -> CheckResult:
self.evaluated_container_keys = ["command"]
if conf.get("command"):
if "kube-controller-manager" in conf["command"]:
for command in conf["command"]:
if command.startswith("--use-service-account-credentials"):
value = command.split("=")[1]
if value == "true":
return CheckResult.PASSED
return CheckResult.FAILED
return CheckResult.PASSED
check = KubeControllerManagerServiceAccountCredentials()
| [
"[email protected]"
] | |
b7495338c27e7909104e693f683854f1eacbb5ce | 7cb3e5e16fd93e6f8a1c07c211cee16dc248ef5d | /venv/lib/python3.6/site-packages/django/core/mail/backends/filebased.py | 1037f4b3d03f833ad4f62d0f4ab687ba30d55d27 | [] | no_license | JustynaJBroniszewska/Blog | d74a8cb19fa037b834f5218522ff1397eb60d370 | cfd8efbcce3e23c7ebeea82b2e732de63c663ac8 | refs/heads/master | 2022-11-03T22:01:07.165652 | 2020-06-05T14:25:01 | 2020-06-05T14:25:01 | 266,791,768 | 0 | 0 | null | 2020-06-05T14:25:02 | 2020-05-25T13:52:19 | Python | UTF-8 | Python | false | false | 2,802 | py | """Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import EmailBackend as ConsoleEmailBackend
class EmailBackend(ConsoleEmailBackend):
def __init__(self, *args, file_path=None, **kwargs):
self._fname = None
if file_path is not None:
self.file_path = file_path
else:
self.file_path = getattr(settings, "EMAIL_FILE_PATH", None)
# Make sure self.file_path is a string.
if not isinstance(self.file_path, str):
raise ImproperlyConfigured(
"Path for saving emails is invalid: %r" % self.file_path
)
self.file_path = os.path.abspath(self.file_path)
# Make sure that self.file_path is a directory if it exists.
if os.path.exists(self.file_path) and not os.path.isdir(self.file_path):
raise ImproperlyConfigured(
"Path for saving email messages exists, but is not a directory: %s"
% self.file_path
)
# Try to create it, if it not exists.
elif not os.path.exists(self.file_path):
try:
os.makedirs(self.file_path)
except OSError as err:
raise ImproperlyConfigured(
"Could not create directory for saving email messages: %s (%s)"
% (self.file_path, err)
)
# Make sure that self.file_path is writable.
if not os.access(self.file_path, os.W_OK):
raise ImproperlyConfigured(
"Could not write to directory: %s" % self.file_path
)
# Finally, call super().
# Since we're using the console-based backend as a base,
# force the stream to be None, so we don't default to stdout
kwargs["stream"] = None
super().__init__(*args, **kwargs)
def write_message(self, message):
self.stream.write(message.message().as_bytes() + b"\n")
self.stream.write(b"-" * 79)
self.stream.write(b"\n")
def _get_filename(self):
"""Return a unique file name."""
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.log" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname
def open(self):
if self.stream is None:
self.stream = open(self._get_filename(), "ab")
return True
return False
def close(self):
try:
if self.stream is not None:
self.stream.close()
finally:
self.stream = None
| [
"[email protected]"
] | |
10e27cc0f1cdf158c62d57f1fcd0b60c721571eb | b049a961f100444dde14599bab06a0a4224d869b | /sdk/python/pulumi_azure_native/aadiam/v20200701preview/__init__.py | efff6040c30972e572a699b04d5d0c8ba770037e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-azure-native | b390c88beef8381f9a71ab2bed5571e0dd848e65 | 4c499abe17ec6696ce28477dde1157372896364e | refs/heads/master | 2023-08-30T08:19:41.564780 | 2023-08-28T19:29:04 | 2023-08-28T19:29:04 | 172,386,632 | 107 | 29 | Apache-2.0 | 2023-09-14T13:17:00 | 2019-02-24T20:30:21 | Python | UTF-8 | Python | false | false | 324 | py | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from .azure_ad_metric import *
from .get_azure_ad_metric import *
from . import outputs
| [
"[email protected]"
] | |
74d55dec6154aa9dbeaa63142a728a59b4562950 | 64b7ce2f4d2d24d24f7e169a58fe69257078984c | /buffpy/models/__init__.py | 21ba4b6ff01041e8a5637d37b02f5e88bf9b8199 | [
"MIT"
] | permissive | holdenk/buffer-python | 055d444c385ea6e6ee285bb348b0c19ebda304c1 | 001a3bffda8736f32a8ffae3d26af9b79b2a3680 | refs/heads/master | 2020-03-29T20:50:58.408856 | 2018-09-26T19:25:51 | 2018-09-26T19:25:51 | 150,334,500 | 1 | 0 | MIT | 2018-09-25T22:02:00 | 2018-09-25T22:01:59 | null | UTF-8 | Python | false | false | 75 | py | from .link import Link
from .profile import Profile
from .user import User
| [
"[email protected]"
] | |
c63c1933946045abfeae7d4d5f4503aeccadd01d | 304afb86737a108609becf890696ccf54a437756 | /bolg/flow/apps.py | 515475afbaa9aed4134507f96d7bfa9db324ea60 | [] | no_license | linwenjunid/myprojects | ea3d6ffd40b0fb1d41f92064ee54f7de938ade23 | e250a4c500bf1986c0e6e2f90c7f864ccc5bfaa0 | refs/heads/master | 2020-04-17T06:46:35.236618 | 2019-04-23T08:22:17 | 2019-04-23T08:22:17 | 166,339,495 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from django.apps import AppConfig
class FlowConfig(AppConfig):
name = 'flow'
    verbose_name = '流程设计'  # "Process design"
| [
"[email protected]"
] | |
3925dc3b8bf5f9bfef206788ab3039aeb80b6a9c | 6bf4e54f8ae95582b73bb969ba44069c64e87651 | /kdhi/main_site/migrations/0016_rok_individual_name_true.py | 87b886ada5621d6df781553e0e2727c809ce4525 | [] | no_license | speedycowenator/kdhi_migration | 4bc983c4656a2a87cb056461bfb4219e38da1a85 | 422b2e3f142a30c81f428fb8eaa813e4a71d56fc | refs/heads/master | 2022-11-14T13:27:51.520697 | 2020-07-02T19:31:12 | 2020-07-02T19:31:12 | 246,138,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # Generated by Django 2.2.5 on 2020-03-16 03:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_site', '0015_auto_20200315_2328'),
]
operations = [
migrations.AddField(
model_name='rok_individual',
name='name_true',
field=models.CharField(default='BLANK', max_length=200),
preserve_default=False,
),
]
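# Hedged note: because preserve_default=False, the 'BLANK' default above only
# back-fills existing rows while this migration runs. The corresponding model
# field (a sketch; the exact model class lives in main_site.models) ends up as:
#
#     name_true = models.CharField(max_length=200)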
| [
"[email protected]"
] | |
6fff7a33ec4993a71006924c414459b7b912ac5e | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/goodweather/testcase/firstcases/testcase3_029.py | 1615709d896778e9e889f0338b549707f4674cfa | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,725 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.asdtm.goodweather',
'appActivity' : 'org.asdtm.goodweather.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.asdtm.goodweather/org.asdtm.goodweather.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str):
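    """Find an element by UiAutomator selector, polling for up to ~5s; as a
    last resort tap (50, 50) to dismiss any overlay and try once more."""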
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2):
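    """Try the primary selector briefly, then fall back to the secondary one,
    using the same poll-then-tap strategy as getElememt."""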
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper):
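    """Swipe between screen positions given as width/height fractions,
    retrying once if the first attempt raises WebDriverException."""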
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase029
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_refresh\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_detect_location\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"Cancel\")", "new UiSelector().className(\"android.widget.Button\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/fab\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_refresh\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_refresh\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_search_city\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/search_close_btn\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"NL\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.asdtm.goodweather:id/main_menu_search_city\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"3_029\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.asdtm.goodweather'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
| [
"[email protected]"
] | |
aa91781b96d7642e15834414e4aa606fa0aac250 | a372a816373d63ad626a9947077e137eac2e6daf | /test/leetcode/test_SnapshotArray.py | f0c14e8d86207ff54939ccda9f60c2ea37835f38 | [] | no_license | DmitryPukhov/pyquiz | 07d33854a0e04cf750b925d2c399dac8a1b35363 | 8ae84f276cd07ffdb9b742569a5e32809ecc6b29 | refs/heads/master | 2021-06-13T14:28:51.255385 | 2021-06-13T08:19:36 | 2021-06-13T08:19:36 | 199,842,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,469 | py | from unittest import TestCase
from pyquiz.leetcode.SnapshotArray import SnapshotArray
class TestSnapshotArray(TestCase):
def test_get_closest_snap_id__123_1(self):
sa = SnapshotArray(1)
self.assertEqual(1, sa.get_closest_snap_id([1, 2, 3], 1))
def test_get_closest_snap_id__123_2(self):
sa = SnapshotArray(1)
self.assertEqual(2, sa.get_closest_snap_id([1, 2, 3], 2))
def test_get_closest_snap_id__123_3(self):
sa = SnapshotArray(1)
self.assertEqual(3, sa.get_closest_snap_id([1, 2, 3], 3))
def test_get_closest_snap_id__1_1(self):
sa = SnapshotArray(1)
self.assertEqual(1, sa.get_closest_snap_id([1], 1))
def test_get_closest_snap_id__13_2(self):
sa = SnapshotArray(1)
self.assertEqual(1, sa.get_closest_snap_id([1, 3], 2))
def test_get_closest_snap_id__13_4(self):
sa = SnapshotArray(1)
self.assertEqual(3, sa.get_closest_snap_id([1, 3], 4))
def test_get_closest_snap_id__23_1(self):
sa = SnapshotArray(1)
self.assertEqual(None, sa.get_closest_snap_id([2, 3], 1))
def test_get_closest_snap_id__12_1(self):
sa = SnapshotArray(1)
self.assertEqual(1, sa.get_closest_snap_id([1, 2], 1))
def test_get_closest_snap_id__12_2(self):
sa = SnapshotArray(1)
self.assertEqual(2, sa.get_closest_snap_id([1, 2], 2))
def test_example1(self):
"""
Example 1:
Input: ["SnapshotArray","set","snap","set","get"]
[[3],[0,5],[],[0,6],[0,0]]
Output: [null,null,0,null,5]
Explanation:
SnapshotArray snapshotArr = new SnapshotArray(3); // set the length to be 3
snapshotArr.set(0,5); // Set array[0] = 5
snapshotArr.snap(); // Take a snapshot, return snap_id = 0
snapshotArr.set(0,6);
snapshotArr.get(0,0); // Get the value of array[0] with snap_id = 0, return 5
:return:
"""
# SnapshotArray snapshotArr = new SnapshotArray(3); // set the length to be 3
sa = SnapshotArray(3)
# Set array[0] = 5
sa.set(0, 5)
# Take a snapshot, return snap_id = 0
snapid = sa.snap()
self.assertEqual(0, snapid)
sa.set(0, 6)
# Get the value of array[0] with snap_id = 0, return 5
out = sa.get(0, 0)
self.assertEqual(5, out)
def test_case2(self):
# SnapshotArray snapshotArr = new SnapshotArray(3); // set the length to be 3
sa = SnapshotArray(3)
out = sa.get(2, 0)
self.assertEqual(0, out)
# Set array[0] = 5
sa.set(0, 5)
# Take a snapshot, return snap_id = 0
snapid = sa.snap()
self.assertEqual(0, snapid)
sa.set(0, 6)
snapid = sa.snap()
self.assertEqual(1, snapid)
out = sa.get(0, 0)
self.assertEqual(5, out)
out = sa.get(0, 1)
self.assertEqual(6, out)
def test_case3(self):
"""
["SnapshotArray","set","snap","snap","snap","get","snap","snap","get"]
[[1],[0,15],[],[],[],[0,2],[],[],[0,0]]
"""
sa = SnapshotArray(1)
sa.set(0, 15)
sa.snap()
sa.snap()
sa.snap()
out = sa.get(0, 1)
out = sa.get(0, 2)
self.assertEqual(15, out)
sa.snap()
sa.snap()
out = sa.get(0, 0)
self.assertEqual(out, 15)
def test_case4(self):
"""
["SnapshotArray","snap","get","get","set","get","set","get","set"]
[[2],[],[1,0],[0,0],[1,8],[1,0],[0,20],[0,0],[0,7]]
[null,0,0,0,null,8,null,20,null]
Expected
[null,0,0,0,null,0,null,0,null]
"""
sa = SnapshotArray(2)
self.assertEqual(0, sa.snap())
self.assertEqual(0, sa.get(1, 0))
self.assertEqual(0, sa.get(0, 0))
sa.set(1, 8)
self.assertEqual(0, sa.get(1, 0))
sa.set(0, 20)
self.assertEqual(0, sa.get(0, 0))
sa.set(0, 7)
def test_case5(self):
"""
["SnapshotArray","set","set","snap","get","set","snap","set","set","get","get"]
[[3],[1,18],[1,4],[],[0,0],[0,20],[],[0,2],[1,1],[1,1],[1,0]]
bad: [null,null,null,0,0,null,1,null,null,0,4]
Expected: [null,null,null,0,0,null,1,null,null,4,4]
"""
sa = SnapshotArray(3)
# ["SnapshotArray","set","set","snap","get", "set","snap","set","set","get","get"]
# [[3],[1,18],[1,4],[],[0,0], [0,20],[], [0,2],[1,1], [1,1],[1,0]]
sa.set(1, 18)
sa.set(1, 4)
self.assertEqual(0, sa.snap())
sa.set(0, 20)
sa.snap()
sa.set(0, 2)
sa.set(1, 1)
self.assertEqual(4, sa.get(1, 1))
self.assertEqual(4, sa.get(1, 0))
def test_case6(self):
"""
["SnapshotArray","set","snap","set","get","snap", "get","get","set","set", "snap","get","set","snap","snap","get","snap","get"]
[[3],[1,5],[],[1,6],[0,0],[], [0,0],[0,0],[0,11],[1,16], [],[0,1],[2,12],[],[],[0,4],[],[1,1]]
bad Output [null,null,0,null,0,1,0,0,null,null, 2,0,null,3,4,11,5,0]
Expected [null,null,0,null,0,1,0,0,null,null, 2,0,null,3,4,11,5,6]
"""
        sa = SnapshotArray(3)
        sa.set(1, 5)
        self.assertEqual(0, sa.snap())
        sa.set(1, 6)
        self.assertEqual(0, sa.get(0, 0))
        self.assertEqual(1, sa.snap())
        self.assertEqual(0, sa.get(0, 0))
        self.assertEqual(0, sa.get(0, 0))
        sa.set(0, 11)
        sa.set(1, 16)
        self.assertEqual(2, sa.snap())
        self.assertEqual(0, sa.get(0, 1))
        sa.set(2, 12)
        self.assertEqual(3, sa.snap())
        self.assertEqual(4, sa.snap())
        self.assertEqual(11, sa.get(0, 4))
        self.assertEqual(5, sa.snap())
        self.assertEqual(6, sa.get(1, 1))
def test_case7(self):
"""
["SnapshotArray","snap","snap","set","snap","get","set","get","snap","get"]
[[1],[],[],[0,4],[],[0,1],[0,12],[0,1],[],[0,3]]
Output
[null,0,1,null,2,0,null,0,3,0]
Expected
[null,0,1,null,2,0,null,0,3,12]
"""
        sa = SnapshotArray(1)
        self.assertEqual(0, sa.snap())
        self.assertEqual(1, sa.snap())
        sa.set(0, 4)
        self.assertEqual(2, sa.snap())
        self.assertEqual(0, sa.get(0, 1))
        sa.set(0, 12)
        self.assertEqual(0, sa.get(0, 1))
        self.assertEqual(3, sa.snap())
        self.assertEqual(12, sa.get(0, 3))
    def test_case8(self):
        # Smoke test: same op sequence as test_case7's trace, without assertions.
        sa = SnapshotArray(1)
        sa.snap()
        sa.snap()
        sa.set(0, 4)
        sa.snap()
        sa.get(0, 1)
        sa.get(0, 12)
        sa.set(0, 1)
        sa.snap()
        sa.get(0, 3)
#
#
# ["SnapshotArray","snap","snap","set","snap","get","set","get","snap","get"]
# [[1],[],[],[0,4],[],[0,1],[0,12],[0,1],[],[0,3]]
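# --- Hedged reference sketch (not the implementation under test) -------------
# A minimal SnapshotArray consistent with the expectations above, using one
# (snap_id, value) history list per slot and bisect for lookups. The real
# pyquiz.leetcode.SnapshotArray may be implemented differently.
import bisect

class _ReferenceSnapshotArray:
    def __init__(self, length):
        self.history = [[(-1, 0)] for _ in range(length)]  # seeded default 0
        self.snap_id = 0

    def set(self, index, val):
        self.history[index].append((self.snap_id, val))

    def snap(self):
        self.snap_id += 1
        return self.snap_id - 1

    def get(self, index, snap_id):
        # Rightmost history entry written at or before the requested snapshot.
        pos = bisect.bisect_right(self.history[index], (snap_id, float("inf"))) - 1
        return self.history[index][pos][1]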
| [
"[email protected]"
] | |
b0bc199d141c65c18eb061d68f55044ec9df1e0d | a606893da1e354c7c617d0c9247b23118be2813a | /二叉树/1530.py | c667d4d4f62c56d93cb9d7f26420def687e14e1a | [] | no_license | lindo-zy/leetcode | 4ce6cb9ded7eeea0a6953b6d8152b5a9657965da | f4277c11e620ddd748c2a2f3d9f5f05ee58e5716 | refs/heads/master | 2023-07-22T06:19:00.589026 | 2023-07-16T12:35:14 | 2023-07-16T12:35:14 | 229,958,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def countPairs(self, root: TreeNode) -> int:
res = []
def dfs(root, path, counter):
if root:
if counter <= 0:
res.append(path)
return
if not root.left and not root.right:
res.append(path)
return
else:
dfs(root.left, path + [root.val], counter - root.val)
dfs(root.right, path + [root.val], counter - root.val)
dfs(root, [], 4)
return res
if __name__ == '__main__':
s = Solution()
# root = [1, None, 2, None, 3, None, 4, None, 5,]
root = [1, 2, 3, 4, None, None, 5]
from gen_tree import generate_tree
tree = generate_tree(root)
print(s.countPairs(tree))
| [
"[email protected]"
] | |
38829a9e4b5166f3f94813441cc6e86222184a9a | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/densitymapbox/_lon.py | 01f0a45fe1ed12a0960dfc365a231700a0e093d3 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 393 | py | import _plotly_utils.basevalidators
class LonValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="lon", parent_name="densitymapbox", **kwargs):
super(LonValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| [
"[email protected]"
] | |
ca949b6bf095d25adb67f5f050bacca373a28d8f | a479a5773fd5607f96c3b84fed57733fe39c3dbb | /napalm_yang/models/openconfig/network_instances/network_instance/mpls/signaling_protocols/rsvp_te/sessions/session/record_route_objects/__init__.py | 14f5888b37abb8ac8011dc5f3e0f91acabbaf5dd | [
"Apache-2.0"
] | permissive | napalm-automation/napalm-yang | 839c711e9294745534f5fbbe115e0100b645dbca | 9148e015b086ebe311c07deb92e168ea36fd7771 | refs/heads/develop | 2021-01-11T07:17:20.226734 | 2019-05-15T08:43:03 | 2019-05-15T08:43:03 | 69,226,025 | 65 | 64 | Apache-2.0 | 2019-05-15T08:43:24 | 2016-09-26T07:48:42 | Python | UTF-8 | Python | false | false | 15,544 | py | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import record_route_object
class record_route_objects(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/signaling-protocols/rsvp-te/sessions/session/record-route-objects. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Enclosing container for MPLS RRO objects associated with the
traffic engineered tunnel.
"""
__slots__ = ("_path_helper", "_extmethods", "__record_route_object")
_yang_name = "record-route-objects"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__record_route_object = YANGDynClass(
base=YANGListType(
"index",
record_route_object.record_route_object,
yang_name="record-route-object",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="index",
extensions=None,
),
is_container="list",
yang_name="record-route-object",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"signaling-protocols",
"rsvp-te",
"sessions",
"session",
"record-route-objects",
]
def _get_record_route_object(self):
"""
Getter method for record_route_object, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/sessions/session/record_route_objects/record_route_object (list)
YANG Description: Read-only list of record route objects associated with the
traffic engineered tunnel. Each entry in the list
may contain a hop IP address, MPLS label allocated
at the hop, and the flags associated with the entry.
"""
return self.__record_route_object
def _set_record_route_object(self, v, load=False):
"""
Setter method for record_route_object, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/sessions/session/record_route_objects/record_route_object (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_record_route_object is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_record_route_object() directly.
YANG Description: Read-only list of record route objects associated with the
traffic engineered tunnel. Each entry in the list
may contain a hop IP address, MPLS label allocated
at the hop, and the flags associated with the entry.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"index",
record_route_object.record_route_object,
yang_name="record-route-object",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="index",
extensions=None,
),
is_container="list",
yang_name="record-route-object",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """record_route_object must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("index",record_route_object.record_route_object, yang_name="record-route-object", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index', extensions=None), is_container='list', yang_name="record-route-object", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__record_route_object = t
if hasattr(self, "_set"):
self._set()
def _unset_record_route_object(self):
self.__record_route_object = YANGDynClass(
base=YANGListType(
"index",
record_route_object.record_route_object,
yang_name="record-route-object",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="index",
extensions=None,
),
is_container="list",
yang_name="record-route-object",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
record_route_object = __builtin__.property(_get_record_route_object)
_pyangbind_elements = OrderedDict([("record_route_object", record_route_object)])
from . import record_route_object
class record_route_objects(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/signaling-protocols/rsvp-te/sessions/session/record-route-objects. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Enclosing container for MPLS RRO objects associated with the
traffic engineered tunnel.
"""
__slots__ = ("_path_helper", "_extmethods", "__record_route_object")
_yang_name = "record-route-objects"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__record_route_object = YANGDynClass(
base=YANGListType(
"index",
record_route_object.record_route_object,
yang_name="record-route-object",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="index",
extensions=None,
),
is_container="list",
yang_name="record-route-object",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"signaling-protocols",
"rsvp-te",
"sessions",
"session",
"record-route-objects",
]
def _get_record_route_object(self):
"""
Getter method for record_route_object, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/sessions/session/record_route_objects/record_route_object (list)
YANG Description: Read-only list of record route objects associated with the
traffic engineered tunnel. Each entry in the list
may contain a hop IP address, MPLS label allocated
at the hop, and the flags associated with the entry.
"""
return self.__record_route_object
def _set_record_route_object(self, v, load=False):
"""
Setter method for record_route_object, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te/sessions/session/record_route_objects/record_route_object (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_record_route_object is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_record_route_object() directly.
YANG Description: Read-only list of record route objects associated with the
traffic engineered tunnel. Each entry in the list
may contain a hop IP address, MPLS label allocated
at the hop, and the flags associated with the entry.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"index",
record_route_object.record_route_object,
yang_name="record-route-object",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="index",
extensions=None,
),
is_container="list",
yang_name="record-route-object",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """record_route_object must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("index",record_route_object.record_route_object, yang_name="record-route-object", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index', extensions=None), is_container='list', yang_name="record-route-object", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__record_route_object = t
if hasattr(self, "_set"):
self._set()
def _unset_record_route_object(self):
self.__record_route_object = YANGDynClass(
base=YANGListType(
"index",
record_route_object.record_route_object,
yang_name="record-route-object",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="index",
extensions=None,
),
is_container="list",
yang_name="record-route-object",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
record_route_object = __builtin__.property(_get_record_route_object)
_pyangbind_elements = OrderedDict([("record_route_object", record_route_object)])
| [
"[email protected]"
] | |
a0bbb14829985858e622f39ae0198a7bd9cbebb8 | a350e6471598e8518f639fcff50511c35a94bceb | /app_common/libs/mysql_orm_op.py | c4cb1031f3ef7e5a00bdd91ea79518f78b95c2e9 | [
"MIT"
] | permissive | WooodHead/bearing_project | 2e26602c326f703869e13bf84cecba95edff59fa | ca64b04dad7010620414e37b2c7923fd904a0f11 | refs/heads/master | 2022-04-20T16:03:35.179584 | 2020-04-15T18:23:24 | 2020-04-15T18:23:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,583 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: mysql_orm_op.py
@time: 2018-03-15 21:43
"""
from sqlalchemy.inspection import inspect
from sqlalchemy import distinct
class DbInstance(object):
def __init__(self, db_instance):
self.db_instance = db_instance
# todo get_rows order_by
# todo insert ignore
def get_row_by_id(self, model_class, pk_id):
"""
        Fetch a single row by primary key id
:param model_class:
:param pk_id:
:return: None/object
"""
return self.db_instance.session.query(model_class).get(pk_id)
def get_row(self, model_class, *args, **kwargs):
"""
        Fetch a single row matching the given filters
        Usage:
            # Method 1: positional filter expressions
            get_row(User, User.id > 1)
            # Method 2: keyword equality filters
test_condition = {
'name': "Larry"
}
get_row(User, **test_condition)
:param model_class:
:param args:
:param kwargs:
:return: None/object
"""
return self.db_instance.session \
.query(model_class) \
.filter(*args) \
.filter_by(**kwargs) \
.first()
def get_latest(self, model_class, *args, **kwargs):
"""
        Fetch the most recent row (highest primary key) matching the given filters
        Usage:
            # Method 1: positional filter expressions
            get_latest(User, User.id > 1)
            # Method 2: keyword equality filters
test_condition = {
'name': "Larry"
}
get_latest(User, **test_condition)
:param model_class:
:param args:
:param kwargs:
:return: None/object
"""
model_pk = inspect(model_class).primary_key[0]
return self.db_instance.session \
.query(model_class) \
.filter(*args) \
.filter_by(**kwargs) \
.order_by(model_pk.desc()) \
.first()
def get_rows(self, model_class, *args, **kwargs):
"""
        Fetch a list of rows matching the given filters
        Usage:
            # Method 1: positional filter expressions
            get_rows(User, User.id > 1)
            # Method 2: keyword equality filters
            test_condition = {
                'name': "Larry"
            }
            get_rows(User, **test_condition)
:param model_class:
:param args:
:param kwargs:
:return: None/list
"""
return self.db_instance.session \
.query(model_class) \
.filter(*args) \
.filter_by(**kwargs) \
.all()
def get_rows_by_ids(self, model_class, pk_ids):
"""
        Fetch a list of rows matching a collection of primary key ids
:param model_class:
:param pk_ids:
:return: list
"""
model_pk = inspect(model_class).primary_key[0]
return self.db_instance.session \
.query(model_class) \
.filter(model_pk.in_(pk_ids)) \
.all()
def get_limit_rows_by_last_id(self, model_class, last_pk_id, limit_num, *args, **kwargs):
"""
        Fetch the next batch of rows after the last primary key id
        order_by is applied to avoid out-of-order ids
        Typical use cases:
            1. Incremental/lazy loading
            2. Fast seeking to a known position
:param model_class:
:param last_pk_id:
:param limit_num:
:param args:
:param kwargs:
:return: list
"""
model_pk = inspect(model_class).primary_key[0]
return self.db_instance.session \
.query(model_class) \
.filter(model_pk > last_pk_id, *args) \
.filter_by(**kwargs) \
.order_by(model_pk) \
.limit(limit_num) \
.all()
def count(self, model_class, *args, **kwargs):
"""
        Count rows matching the given filters
        Usage:
            # Method 1: positional filter expressions
            count(User, User.id > 1)
            # Method 2: keyword equality filters
test_condition = {
'name': "Larry"
}
count(User, **test_condition)
:param model_class:
:param args:
:param kwargs:
:return: 0/Number(int)
"""
return self.db_instance.session \
.query(model_class) \
.filter(*args) \
.filter_by(**kwargs) \
.count()
def add(self, model_class, data):
"""
        Insert a new row
:param model_class:
:param data:
:return: None/Value of model_obj.PK
"""
model_obj = model_class(**data)
try:
self.db_instance.session.add(model_obj)
self.db_instance.session.commit()
return inspect(model_obj).identity[0]
except Exception as e:
self.db_instance.session.rollback()
raise e
def edit(self, model_class, pk_id, data):
"""
        Update a row by primary key id
:param model_class:
:param pk_id:
:param data:
:return: Number of affected rows (Example: 0/1)
"""
model_pk_name = inspect(model_class).primary_key[0].name
model_pk = getattr(model_class, model_pk_name)
try:
model_obj = self.db_instance.session.query(model_class).filter(model_pk == pk_id)
result = model_obj.update(data)
self.db_instance.session.commit()
return result
except Exception as e:
self.db_instance.session.rollback()
raise e
def merge(self, model_class, data):
"""
        Upsert a row (insert when missing, update when present)
        The data must include the primary key field
:param model_class:
:param data:
:return: Value of PK
"""
model_obj = model_class(**data)
try:
r = self.db_instance.session.merge(model_obj)
self.db_instance.session.commit()
return inspect(r).identity[0]
except Exception as e:
self.db_instance.session.rollback()
raise e
def increase(self, model_class, pk_id, field, num=1, **kwargs):
"""
        Atomically increment a numeric field by num
:param model_class:
:param pk_id:
:param field:
:param num:
:param kwargs:
:return: Number of affected rows (Example: 0/1)
"""
model_pk_name = inspect(model_class).primary_key[0].name
model_pk = getattr(model_class, model_pk_name)
try:
model_obj = self.db_instance.session.query(model_class).filter(model_pk == pk_id)
value = getattr(model_class, field) + num
data = {
field: value
}
            data.update(**kwargs)  # merge in any extra fields to update
result = model_obj.update(data)
self.db_instance.session.commit()
return result
except Exception as e:
self.db_instance.session.rollback()
raise e
def delete(self, model_class, pk_id):
"""
        Delete a row by primary key id
:param model_class:
:param pk_id:
:return: Number of affected rows (Example: 0/1)
"""
model_pk_name = inspect(model_class).primary_key[0].name
model_pk = getattr(model_class, model_pk_name)
try:
model_obj = self.db_instance.session.query(model_class).filter(model_pk == pk_id)
result = model_obj.delete()
self.db_instance.session.commit()
return result
except Exception as e:
self.db_instance.session.rollback()
raise e
def get_distinct_field(self, model_class, field, *args, **kwargs):
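        """
        Fetch the distinct values of one column, applying the same filter
        semantics as the other getters; returns a list of 1-tuples labelled
        `field`
        """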
try:
return self.db_instance.session \
.query(distinct(getattr(model_class, field)).label(field)) \
.filter(*args) \
.filter_by(**kwargs) \
.all()
except Exception as e:
self.db_instance.session.rollback()
raise e
@staticmethod
def get_pagination(model_class, page=1, per_page=10, *args, **kwargs):
"""
        Fetch a paginated list of rows
        Usage:
            items: the rows on the current page
            has_next: True if at least one more page follows this one
            has_prev: True if at least one page precedes this one
            next_num: page number of the next page
            prev_num: page number of the previous page
            iter_pages(): iterate over page numbers
            iter_pages(left_edge=2, left_current=2, right_current=5, right_edge=2) default arguments
:param model_class:
:param page:
:param per_page:
:param args:
:param kwargs:
:return: None/object
"""
return model_class.query \
.filter(*args) \
.filter_by(**kwargs) \
.order_by(model_class.create_time.desc()) \
.paginate(page, per_page, False)
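    # Hedged usage sketch (model and filter names are illustrative):
    #   page_obj = DbInstance.get_pagination(User, 2, 20, User.id > 0)
    #   rows = page_obj.items
    #   if page_obj.has_next:
    #       next_page = page_obj.next_num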
def delete_table(self, model_class):
"""
        Empty the table (keep its structure)
:param model_class:
:return:
"""
try:
model_obj = self.db_instance.session.query(model_class)
result = model_obj.delete()
self.db_instance.session.commit()
return result
except Exception as e:
self.db_instance.session.rollback()
raise e
def drop_table(self, model_class):
"""
        Drop the table (remove both its data and its structure)
:param model_class:
:return:
"""
return model_class.__table__.drop(bind=self.db_instance.engine)
def insert_rows(self, model_class, data_list):
"""
批量插入数据(遇到主键/唯一索引重复,忽略报错,继续执行下一条插入任务)
注意:
Warning: Duplicate entry
警告有可能会提示:
UnicodeEncodeError: 'ascii' codec can't encode characters in position 17-20: ordinal not in range(128)
处理:
import sys
reload(sys)
sys.setdefaultencoding('utf8')
sql 语句大小限制
show VARIABLES like '%max_allowed_packet%';
参考:http://dev.mysql.com/doc/refman/5.7/en/packet-too-large.html
:param model_class:
:param data_list:
:return:
"""
try:
result = self.db_instance.session.execute(model_class.__table__.insert().prefix_with('IGNORE'), data_list)
self.db_instance.session.commit()
return result.rowcount
except Exception as e:
self.db_instance.session.rollback()
raise e
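    # Hedged usage sketch: the second row below would be silently skipped by
    # INSERT IGNORE (model and column names are illustrative):
    #   inserted = db_instance.insert_rows(User, [{'id': 1, 'name': 'a'},
    #                                             {'id': 1, 'name': 'dupe'}])
    #   # inserted == 1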
def update_rows(self, model_class, data, *args, **kwargs):
"""
        Bulk-update rows matching the given filters
:param model_class:
:param data:
:param args:
:param kwargs:
:return:
"""
try:
model_obj = self.db_instance.session.query(model_class).filter(*args).filter_by(**kwargs)
result = model_obj.update(data, synchronize_session=False)
self.db_instance.session.commit()
return result
except Exception as e:
self.db_instance.session.rollback()
raise e
def update_rows_by_ids(self, model_class, pk_ids, data):
"""
        Bulk-update rows matching a collection of primary key ids
"""
model_pk = inspect(model_class).primary_key[0]
try:
model_obj = self.db_instance.session.query(model_class).filter(model_pk.in_(pk_ids))
result = model_obj.update(data, synchronize_session=False)
self.db_instance.session.commit()
return result
except Exception as e:
self.db_instance.session.rollback()
raise e
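# --- Hedged usage sketch (not part of this module) ----------------------------
# Assumes a Flask-SQLAlchemy style `db` object and a mapped `User` model; both
# names are illustrative:
#
#   db_instance = DbInstance(db)
#   new_id = db_instance.add(User, {'name': 'Larry'})
#   db_instance.edit(User, new_id, {'name': 'Moe'})
#   rows = db_instance.get_rows(User, User.id > 0)
#   db_instance.delete(User, new_id)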
| [
"[email protected]"
] | |
654f89d213946c60e1ac0472df7ebdbd69014fe6 | f337c975d2446cf4c2ac4fb23a3f2c540a41d23d | /testing/date/test_datetimezone_klass.py | 3a3c84938a57e2077131e8f1edda2e89e62f3d8d | [
"MIT"
] | permissive | shendel/hippyvm | 9fca2bb479da8273a3036350f3f4e06c2d44afc9 | 26cc6675612e4ddc4d1b425d2731d34c97355c45 | refs/heads/master | 2021-01-15T15:31:04.689510 | 2014-04-14T08:54:33 | 2014-04-14T08:54:33 | 18,753,589 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,295 | py | import pytest
from testing.test_interpreter import BaseTestInterpreter
class TestDateTimeZone(BaseTestInterpreter):
def test_constructor(self):
output = self.run('''
$tz = new DateTimeZone('Pacific/Nauru');
echo get_class($tz);
''')
assert self.space.str_w(output[0]) == 'DateTimeZone'
def test_get_name(self):
output = self.run('''
$tz = new DateTimeZone('Pacific/Nauru');
echo $tz->getName();
''')
assert self.space.str_w(output[0]) == 'Pacific/Nauru'
def test_get_offset(self):
output = self.run('''
$dateTimeZoneTaipei = new DateTimeZone("Asia/Taipei");
$dateTimeZoneJapan = new DateTimeZone("Asia/Tokyo");
$dateTimeTaipei = new DateTime("now", $dateTimeZoneTaipei);
$dateTimeJapan = new DateTime("now", $dateTimeZoneJapan);
echo $dateTimeZoneJapan->getOffset($dateTimeTaipei);
echo $dateTimeZoneJapan->getOffset($dateTimeJapan);
''')
assert self.space.int_w(output.pop(0)) == 32400
assert self.space.int_w(output.pop(0)) == 32400
def test_list_abbreviations(self):
output = self.run('''
$tz = new DateTimeZone('Pacific/Nauru');
echo count($tz->listAbbreviations());
''')
assert self.space.int_w(output[0]) == 373
def test_list_identifiers(self):
output = self.run('''
$tz = new DateTimeZone('Pacific/Nauru');
echo count($tz->listIdentifiers(128));
echo count($tz->listIdentifiers(DateTimeZone::EUROPE));
''')
assert self.space.int_w(output.pop(0)) == 56
assert self.space.int_w(output.pop(0)) == 56
def test_consts(self):
output = self.run('''
echo DateTimeZone::ASIA;
''')
assert self.space.int_w(output[0]) == 16
def test_listI_ientifiers_constants(self):
output = self.run('''
$tz = new DateTimeZone('Pacific/Nauru');
echo count($tz->listIdentifiers(DateTimeZone::PER_COUNTRY, 'PL'));
echo count($tz->listIdentifiers(DateTimeZone::PER_COUNTRY, 'RU'));
''')
assert self.space.int_w(output[0]) == 1
assert self.space.int_w(output[1]) == 18
def test_get_transition_1(self):
pytest.xfail("broken implementation")
output = self.run('''
$timezone = new DateTimeZone("Europe/London");
echo $timezone->getTransitions();
echo $timezone->getTransitions(2120015000);
echo $timezone->getTransitions(0);
echo $timezone->getTransitions(0, 2140045200);
echo $timezone->getTransitions(0, 2140045300);
echo $timezone->getTransitions(2140045200);
echo $timezone->getTransitions(2121901200);
''')
assert len(output.pop(0).as_pair_list(self.space)) == 243
assert len(output.pop(0).as_pair_list(self.space)) == 3
assert len(output.pop(0).as_pair_list(self.space)) == 135
assert len(output.pop(0).as_pair_list(self.space)) == 134
assert len(output.pop(0).as_pair_list(self.space)) == 135
assert len(output.pop(0).as_pair_list(self.space)) == 1
assert len(output.pop(0).as_pair_list(self.space)) == 2
def test_get_transition_2(self):
pytest.xfail("broken implementation")
output = self.run('''
$timezone = new DateTimeZone("Europe/Prague");
echo $timezone->getTransitions();
''')
first = output[0].as_list_w()[0].as_dict()
last = output[0].as_list_w()[-1].as_dict()
assert self.space.int_w(first['ts']) == -9223372036854775808
assert self.space.str_w(first['time']) == '-292277022657-01-27T08:29:52+0000'
assert self.space.int_w(first['offset']) == 7200
assert first['isdst'] == self.space.w_True
assert self.space.str_w(first['abbr']) == 'CEST'
assert self.space.int_w(last['ts']) == 2140045200
assert self.space.str_w(last['time']) == '2037-10-25T01:00:00+0000'
assert self.space.int_w(first['offset']) == 7200
assert last['isdst'] == self.space.w_False
assert self.space.str_w(last['abbr']) == 'CET'
| [
"[email protected]"
] | |
fbe1230ddb3094aab923835a4478b8aec663ec4a | 97eb35e37480f2b6cfb7c8055d467cec1e1c018e | /plugin.video.fen/resources/lib/modules/kodi_utils.py | 1979983a44cb25dd9255effa8ed61ffeca33048a | [] | no_license | CYBERxNUKE/xbmc-addon | 46163c286197e21edfdeb3e590ca4213eade0994 | eb4d38f11da7ea54d194359c7dcaeddfb3a40e52 | refs/heads/master | 2023-01-10T20:24:08.629399 | 2023-01-07T21:28:06 | 2023-01-07T21:28:06 | 55,860,463 | 7 | 2 | null | 2022-12-08T10:39:26 | 2016-04-09T18:27:34 | Python | UTF-8 | Python | false | false | 24,574 | py | # -*- coding: utf-8 -*-
import xbmc, xbmcgui, xbmcplugin, xbmcvfs
from xbmcaddon import Addon
import sys
import json
import requests
import _strptime
import sqlite3 as database
from random import choice, sample
from threading import Thread, activeCount
from urllib.parse import unquote, unquote_plus, urlencode, quote, parse_qsl, urlparse
from modules import icons
__addon__ = Addon(id='plugin.video.fen')
getLocalizedString = __addon__.getLocalizedString
player, xbmc_player, numeric_input, xbmc_monitor, translatePath = xbmc.Player(), xbmc.Player, 1, xbmc.Monitor, xbmcvfs.translatePath
ListItem, getSkinDir, log, getCurrentWindowId, Window = xbmcgui.ListItem, xbmc.getSkinDir, xbmc.log, xbmcgui.getCurrentWindowId, xbmcgui.Window
File, exists, copy, delete, rmdir, rename = xbmcvfs.File, xbmcvfs.exists, xbmcvfs.copy, xbmcvfs.delete, xbmcvfs.rmdir, xbmcvfs.rename
get_infolabel, get_visibility, execute_JSON, window_xml_dialog = xbmc.getInfoLabel, xbmc.getCondVisibility, xbmc.executeJSONRPC, xbmcgui.WindowXMLDialog
executebuiltin, xbmc_sleep, convertLanguage, getSupportedMedia, PlayList = xbmc.executebuiltin, xbmc.sleep, xbmc.convertLanguage, xbmc.getSupportedMedia, xbmc.PlayList
monitor, window, dialog, progressDialog, progressDialogBG = xbmc_monitor(), Window(10000), xbmcgui.Dialog(), xbmcgui.DialogProgress(), xbmcgui.DialogProgressBG()
endOfDirectory, addSortMethod, listdir, mkdir, mkdirs = xbmcplugin.endOfDirectory, xbmcplugin.addSortMethod, xbmcvfs.listdir, xbmcvfs.mkdir, xbmcvfs.mkdirs
addDirectoryItem, addDirectoryItems, setContent = xbmcplugin.addDirectoryItem, xbmcplugin.addDirectoryItems, xbmcplugin.setContent
window_xml_left_action, window_xml_right_action, window_xml_up_action, window_xml_down_action, window_xml_info_action = 1, 2, 3, 4, 11
window_xml_selection_actions, window_xml_closing_actions, window_xml_context_actions = (7, 100), (9, 10, 13, 92), (101, 108, 117)
img_url = 'https://i.imgur.com/%s.png'
empty_poster, item_jump, item_next = img_url % icons.box_office, img_url % icons.item_jump, img_url % icons.item_next
tmdb_default_api, fanarttv_default_api = 'b370b60447737762ca38457bd77579b3', 'fa836e1c874ba95ab08a14ee88e05565'
database_path_raw = 'special://profile/addon_data/plugin.video.fen/databases/%s'
current_dbs = ('navigator.db', 'watched.db', 'favourites.db', 'views.db', 'traktcache4.db', 'maincache.db', 'metacache.db', 'debridcache.db', 'providerscache2.db')
fen_settings_str, menu_cache_prop, highlight_prop, meta_filter_prop = 'fen_settings', 'fen.kodi_menu_cache', 'fen.highlight', 'fen.meta_filter'
view_type_prop, props_made_prop, pause_settings_prop, build_content_prop = 'fen.view_type_%s', 'fen.window_properties_made', 'fen.pause_settings', 'fen.build_content'
custom_context_prop, custom_info_prop, pause_services_prop = 'fen.custom_context_menu', 'fen.custom_info_dialog', 'fen.pause_services'
int_window_prop = 'fen.internal_results.%s'
databases_path = translatePath('special://profile/addon_data/plugin.video.fen/databases/')
navigator_db = translatePath(database_path_raw % current_dbs[0])
watched_db = translatePath(database_path_raw % current_dbs[1])
favorites_db = translatePath(database_path_raw % current_dbs[2])
views_db = translatePath(database_path_raw % current_dbs[3])
trakt_db = translatePath(database_path_raw % current_dbs[4])
maincache_db = translatePath(database_path_raw % current_dbs[5])
metacache_db = translatePath(database_path_raw % current_dbs[6])
debridcache_db = translatePath(database_path_raw % current_dbs[7])
external_db = translatePath(database_path_raw % current_dbs[8])
userdata_path = translatePath('special://profile/addon_data/plugin.video.fen/')
addon_settings = translatePath('special://home/addons/plugin.video.fen/resources/settings.xml')
user_settings = translatePath('special://profile/addon_data/plugin.video.fen/settings.xml')
addon_icon = translatePath('special://home/addons/plugin.video.fen/resources/media/fen_icon.png')
addon_fanart = translatePath('special://home/addons/plugin.video.fen/resources/media/fen_fanart.png')
addon_clearlogo = translatePath('special://home/addons/plugin.video.fen/resources/media/fen_clearlogo.png')
myvideos_db_paths = {19: '119', 20: '119'}
sort_method_dict = {'episodes': 24, 'files': 5, 'label': 2}
playlist_type_dict = {'music': 0, 'video': 1}
movie_dict_removals = ('fanart_added', 'cast', 'poster', 'rootname', 'imdb_id', 'tmdb_id', 'tvdb_id', 'all_trailers', 'fanart', 'banner', 'clearlogo', 'clearlogo2', 'clearart',
'landscape', 'discart', 'original_title', 'english_title', 'extra_info', 'alternative_titles', 'country_codes', 'fanarttv_fanart', 'fanarttv_poster',
'fanart2', 'poster2', 'keyart', 'images', 'custom_poster', 'custom_fanart', 'custom_clearlogo')
tvshow_dict_removals = ('fanart_added', 'cast', 'poster', 'rootname', 'imdb_id', 'tmdb_id', 'tvdb_id', 'all_trailers', 'discart', 'total_episodes', 'total_seasons', 'fanart',
'banner', 'clearlogo', 'clearlogo2', 'clearart', 'landscape', 'season_data', 'original_title', 'extra_info', 'alternative_titles', 'english_title',
'season_summary', 'country_codes', 'fanarttv_fanart', 'fanarttv_poster', 'total_aired_eps', 'fanart2', 'poster2', 'keyart', 'images', 'custom_poster',
'custom_fanart', 'custom_clearlogo')
episode_dict_removals = ('thumb', 'guest_stars')
video_extensions = ('m4v', '3g2', '3gp', 'nsv', 'tp', 'ts', 'ty', 'pls', 'rm', 'rmvb', 'mpd', 'ifo', 'mov', 'qt', 'divx', 'xvid', 'bivx', 'vob', 'nrg', 'img', 'iso', 'udf', 'pva',
'wmv', 'asf', 'asx', 'ogm', 'm2v', 'avi', 'bin', 'dat', 'mpg', 'mpeg', 'mp4', 'mkv', 'mk3d', 'avc', 'vp3', 'svq3', 'nuv', 'viv', 'dv', 'fli', 'flv', 'wpl',
'xspf', 'vdr', 'dvr-ms', 'xsp', 'mts', 'm2t', 'm2ts', 'evo', 'ogv', 'sdp', 'avs', 'rec', 'url', 'pxml', 'vc1', 'h264', 'rcv', 'rss', 'mpls', 'mpl', 'webm',
'bdmv', 'bdm', 'wtv', 'trp', 'f4v', 'pvr', 'disc')
image_extensions = ('jpg', 'jpeg', 'jpe', 'jif', 'jfif', 'jfi', 'bmp', 'dib', 'png', 'gif', 'webp', 'tiff', 'tif',
'psd', 'raw', 'arw', 'cr2', 'nrw', 'k25', 'jp2', 'j2k', 'jpf', 'jpx', 'jpm', 'mj2')
def get_icon(image_name):
return img_url % getattr(icons, image_name)
def local_string(string):
if isinstance(string, str):
try: string = int(string)
except: return string
return getLocalizedString(string)
def build_url(url_params):
return 'plugin://plugin.video.fen/?%s' % urlencode(url_params)
def remove_meta_keys(dict_item, dict_removals):
for k in dict_removals: dict_item.pop(k, None)
return dict_item
def add_dir(url_params, list_name, handle, iconImage='folder', fanartImage=None, isFolder=True):
fanart = fanartImage or addon_fanart
icon = get_icon(iconImage)
url = build_url(url_params)
listitem = make_listitem()
listitem.setLabel(list_name)
listitem.setArt({'icon': icon, 'poster': icon, 'thumb': icon, 'fanart': fanart, 'banner': icon, 'clearlogo': addon_clearlogo})
listitem.setInfo('video', {'plot': ' '})
add_item(handle, url, listitem, isFolder)
def make_listitem():
return ListItem(offscreen=True)
def add_item(handle, url, listitem, isFolder):
addDirectoryItem(handle, url, listitem, isFolder)
def add_items(handle, item_list):
addDirectoryItems(handle, item_list)
def set_content(handle, content):
setContent(handle, content)
def end_directory(handle, cacheToDisc=None):
    if cacheToDisc is None: cacheToDisc = get_property(menu_cache_prop) == 'true'
endOfDirectory(handle, cacheToDisc=cacheToDisc)
def set_view_mode(view_type, content='files'):
view_id = get_property(view_type_prop % view_type)
if not view_id:
try:
dbcon = database.connect(views_db, timeout=40.0, isolation_level=None)
dbcur = dbcon.cursor()
dbcur.execute("SELECT view_id FROM views WHERE view_type = ?", (str(view_type),))
view_id = dbcur.fetchone()[0]
except: return
try:
hold = 0
sleep(100)
while not container_content() == content:
hold += 1
if hold < 5000: sleep(1)
else: return
if view_id: execute_builtin('Container.SetViewMode(%s)' % view_id)
except: return
def build_content():
return get_property(build_content_prop) != 'false'
def append_path(_path):
sys.path.append(translatePath(_path))
def logger(heading, function):
log('###%s###: %s' % (heading, function), 1)
def get_property(prop):
return window.getProperty(prop)
def set_property(prop, value):
return window.setProperty(prop, value)
def clear_property(prop):
return window.clearProperty(prop)
def addon(addon_id='plugin.video.fen'):
return Addon(id=addon_id)
def addon_installed(addon_id):
return get_visibility('System.HasAddon(%s)' % addon_id)
def addon_enabled(addon_id):
return get_visibility('System.AddonIsEnabled(%s)' % addon_id)
def container_content():
return get_infolabel('Container.Content')
def set_sort_method(handle, method):
addSortMethod(handle, sort_method_dict[method])
def make_session(url='https://'):
session = requests.Session()
session.mount(url, requests.adapters.HTTPAdapter(pool_maxsize=100))
return session
def make_requests():
return requests
def make_playlist(playlist_type='video'):
return PlayList(playlist_type_dict[playlist_type])
def convert_language(lang):
return convertLanguage(lang, 1)
def supported_media():
return getSupportedMedia('video')
def path_exists(path):
return exists(path)
def open_file(_file, mode='r'):
return File(_file, mode)
def copy_file(source, destination):
return copy(source, destination)
def delete_file(_file):
delete(_file)
def delete_folder(_folder, force=False):
rmdir(_folder, force)
def rename_file(old, new):
rename(old, new)
def list_dirs(location):
return listdir(location)
def make_directory(path):
mkdir(path)
def make_directories(path):
mkdirs(path)
def translate_path(path):
return translatePath(path)
def sleep(time):
return xbmc_sleep(time)
def execute_builtin(command):
return executebuiltin(command)
def get_kodi_version():
return int(get_infolabel('System.BuildVersion')[0:2])
def current_skin():
return getSkinDir()
def get_window_id():
return getCurrentWindowId()
def current_window_id():
return Window(get_window_id())
def get_video_database_path():
return translate_path('special://profile/Database/MyVideos%s.db' % myvideos_db_paths[get_kodi_version()])
def show_busy_dialog():
return execute_builtin('ActivateWindow(busydialognocancel)')
def hide_busy_dialog():
execute_builtin('Dialog.Close(busydialognocancel)')
execute_builtin('Dialog.Close(busydialog)')
def close_dialog(dialog):
execute_builtin('Dialog.Close(%s,true)' % dialog)
def close_all_dialog():
execute_builtin('Dialog.Close(all,true)')
def run_addon(addon='plugin.video.fen'):
return execute_builtin('RunAddon(%s)' % addon)
def external_browse():
return 'fen' not in get_infolabel('Container.PluginName')
def kodi_refresh():
if external_browse(): execute_builtin('UpdateLibrary(video,special://skin/foo)')
else: container_refresh()
def run_plugin(params):
if isinstance(params, dict): params = build_url(params)
return execute_builtin('RunPlugin(%s)' % params)
def container_update(params):
if isinstance(params, dict): params = build_url(params)
return execute_builtin('Container.Update(%s)' % params)
def container_refresh():
return execute_builtin('Container.Refresh')
def disable_enable_addon(addon_name='plugin.video.fen'):
try:
execute_JSON(json.dumps({'jsonrpc': '2.0', 'id': 1, 'method': 'Addons.SetAddonEnabled', 'params': {'addonid': addon_name, 'enabled': False}}))
execute_JSON(json.dumps({'jsonrpc': '2.0', 'id': 1, 'method': 'Addons.SetAddonEnabled', 'params': {'addonid': addon_name, 'enabled': True}}))
except: pass
def update_local_addons():
execute_builtin('UpdateLocalAddons')
sleep(2500)
def make_global_list():
global global_list
global_list = []
def progress_dialog(heading=32036, icon=addon_icon):
from windows import create_window
if isinstance(heading, int): heading = local_string(heading)
progress_dialog = create_window(('windows.progress', 'Progress'), 'progress.xml', heading=heading, icon=icon)
Thread(target=progress_dialog.run).start()
return progress_dialog
def ok_dialog(heading=32036, text='', ok_label=32839, top_space=True):
from windows import open_window
if isinstance(heading, int): heading = local_string(heading)
if isinstance(text, int): text = local_string(text)
if isinstance(ok_label, int): ok_label = local_string(ok_label)
if not text: text = '[CR][CR]%s' % local_string(32760)
elif top_space: text = '[CR][CR]%s' % text
kwargs = {'heading': heading, 'text': text, 'ok_label': ok_label}
return open_window(('windows.select_ok', 'OK'), 'ok.xml', **kwargs)
def confirm_dialog(heading=32036, text='', ok_label=32839, cancel_label=32840, top_space=True, default_control=11):
from windows import open_window
if isinstance(heading, int): heading = local_string(heading)
if isinstance(text, int): text = local_string(text)
if isinstance(ok_label, int): ok_label = local_string(ok_label)
if isinstance(cancel_label, int): cancel_label = local_string(cancel_label)
if not text: text = '[CR][CR]%s' % local_string(32580)
elif top_space: text = '[CR][CR]%s' % text
kwargs = {'heading': heading, 'text': text, 'ok_label': ok_label, 'cancel_label': cancel_label, 'default_control': default_control}
return open_window(('windows.select_ok', 'Confirm'), 'confirm.xml', **kwargs)
def select_dialog(function_list, **kwargs):
from windows import open_window
window_xml = kwargs.get('window_xml', 'select.xml')
selection = open_window(('windows.select_ok', 'Select'), window_xml, **kwargs)
if selection in ([], None): return None
if kwargs.get('multi_choice', 'false') == 'true': return [function_list[i] for i in selection]
return function_list[selection]
def confirm_progress_media(meta, text='', enable_fullscreen=False, enable_buttons=False, true_button=32824, false_button=32828, focus_button=11, percent=0):
if enable_buttons:
from windows import open_window
if isinstance(text, int): text = local_string(text)
if isinstance(true_button, int): true_button = local_string(true_button)
if isinstance(false_button, int): false_button = local_string(false_button)
return open_window(('windows.confirm_progress_media', 'ConfirmProgressMedia'), 'confirm_progress_media.xml',
meta=meta, text=text, enable_buttons=enable_buttons, true_button=true_button, false_button=false_button, focus_button=focus_button, percent=percent)
else:
from windows import create_window
progress_dialog = create_window(('windows.confirm_progress_media', 'ConfirmProgressMedia'), 'confirm_progress_media.xml', meta=meta, enable_fullscreen=enable_fullscreen)
Thread(target=progress_dialog.run).start()
return progress_dialog
def show_text(heading, text=None, file=None, font_size='small', kodi_log=False):
from windows import open_window
if isinstance(heading, int): heading = local_string(heading)
if isinstance(text, int): text = local_string(text)
heading = heading.replace('[B]', '').replace('[/B]', '')
if file:
with open(file, encoding='utf-8') as r: text = r.readlines()
if kodi_log:
confirm = confirm_dialog(text=32855, ok_label=32824, cancel_label=32828)
        if confirm is None: return
if confirm: text = [i for i in text if any(x in i.lower() for x in ('exception', 'error'))]
text = ''.join(text)
return open_window(('windows.textviewer', 'TextViewer'), 'textviewer.xml', heading=heading, text=text, font_size=font_size)
def notification(line1, time=5000, icon=None, sound=False):
if isinstance(line1, int): line1 = local_string(line1)
icon = icon or addon_icon
dialog.notification(local_string(32036), line1, icon, time, sound)
def choose_view(view_type, content):
handle = int(sys.argv[1])
set_view_str = local_string(32547)
settings_icon = get_icon('settings')
listitem = make_listitem()
listitem.setLabel(set_view_str)
params_url = build_url({'mode': 'set_view', 'view_type': view_type})
listitem.setArt({'icon': settings_icon, 'poster': settings_icon, 'thumb': settings_icon, 'fanart': addon_fanart, 'banner': settings_icon, 'clearlogo': addon_clearlogo})
listitem.setInfo('video', {'plot': ' '})
add_item(handle, params_url, listitem, False)
set_content(handle, content)
end_directory(handle)
set_view_mode(view_type, content)
def set_temp_highlight(temp_highlight):
current_highlight = get_property(highlight_prop)
set_property(highlight_prop, temp_highlight)
return current_highlight
def restore_highlight(current_highlight):
set_property(highlight_prop, current_highlight)
def set_view(view_type):
view_id = str(current_window_id().getFocusId())
dbcon = database.connect(views_db, timeout=40.0, isolation_level=None)
dbcur = dbcon.cursor()
dbcur.execute('''PRAGMA synchronous = OFF''')
dbcur.execute('''PRAGMA journal_mode = OFF''')
dbcur.execute("INSERT OR REPLACE INTO views VALUES (?, ?)", (view_type, view_id))
set_view_property(view_type, view_id)
notification(get_infolabel('Container.Viewmode').upper(), time=500)
def set_view_property(view_type, view_id):
set_property(view_type_prop % view_type, view_id)
def set_view_properties():
dbcon = database.connect(views_db, timeout=40.0, isolation_level=None)
dbcur = dbcon.cursor()
dbcur.execute('''PRAGMA synchronous = OFF''')
dbcur.execute('''PRAGMA journal_mode = OFF''')
dbcur.execute("SELECT * FROM views")
view_ids = dbcur.fetchall()
for item in view_ids: set_view_property(item[0], item[1])
def timeIt(func):
# Thanks to 123Venom
import time
fnc_name = func.__name__
def wrap(*args, **kwargs):
started_at = time.time()
result = func(*args, **kwargs)
logger('%s.%s' % (__name__ , fnc_name), (time.time() - started_at))
return result
return wrap
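# Illustrative use of timeIt (the decorated function name below is made up);
# each call is timed and the elapsed seconds are written to the Kodi log via logger():
#   @timeIt
#   def build_menu(params): ...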
def volume_checker():
# 0% == -60db, 100% == 0db
try:
if get_setting('playback.volumecheck_enabled', 'false') == 'false' or get_visibility('Player.Muted'): return
from modules.utils import string_alphanum_to_num
max_volume = min(int(get_setting('playback.volumecheck_percent', '50')), 100)
if int(100 - (float(string_alphanum_to_num(get_infolabel('Player.Volume').split('.')[0]))/60)*100) > max_volume: execute_builtin('SetVolume(%d)' % max_volume)
except: pass
def focus_index(index, sleep_time=100):
sleep(sleep_time)
current_window = current_window_id()
focus_id = current_window.getFocusId()
try: current_window.getControl(focus_id).selectItem(index)
except: pass
def clear_settings_window_properties():
clear_property('fen_settings')
notification(32576, 2500)
def fetch_kodi_imagecache(image):
result = None
try:
dbcon = database.connect(translate_path('special://database/Textures13.db'), timeout=40.0)
dbcur = dbcon.cursor()
dbcur.execute("SELECT cachedurl FROM texture WHERE url = ?", (image,))
result = dbcur.fetchone()[0]
except: pass
return result
def get_all_icon_vars(include_values=False):
if include_values: return [(k, v) for k, v in vars(icons).items() if not k.startswith('__')]
else: return [k for k, v in vars(icons).items() if not k.startswith('__')]
def toggle_language_invoker():
import xml.etree.ElementTree as ET
close_all_dialog()
sleep(100)
addon_xml = translate_path('special://home/addons/plugin.video.fen/addon.xml')
current_addon_setting = get_setting('reuse_language_invoker', 'true')
new_value = 'false' if current_addon_setting == 'true' else 'true'
if not confirm_dialog(text=local_string(33018) % (current_addon_setting.upper(), new_value.upper()), top_space=False): return
if new_value == 'true' and not confirm_dialog(text=33019): return
tree = ET.parse(addon_xml)
root = tree.getroot()
for item in root.iter('reuselanguageinvoker'):
item.text = new_value
tree.write(addon_xml)
break
set_setting('reuse_language_invoker', new_value)
ok_dialog(text=32576)
execute_builtin('ActivateWindow(Home)')
update_local_addons()
disable_enable_addon()
def upload_logfile():
# Thanks 123Venom
if not confirm_dialog(text=32580): return
show_busy_dialog()
url = 'https://paste.kodi.tv/'
log_file = translate_path('special://logpath/kodi.log')
if not path_exists(log_file): return ok_dialog(text=33039)
try:
with open_file(log_file) as f: text = f.read()
UserAgent = 'Fen %s' % __addon__.getAddonInfo('version')
response = requests.post('%s%s' % (url, 'documents'), data=text.encode('utf-8', errors='ignore'), headers={'User-Agent': UserAgent}).json()
if 'key' in response: ok_dialog(text='%s%s' % (url, response['key']))
else: ok_dialog(text=33039)
except: ok_dialog(text=33039)
hide_busy_dialog()
def open_settings(query, addon='plugin.video.fen'):
hide_busy_dialog()
if query:
try:
button, control = 100, 80
menu, function = query.split('.')
execute_builtin('Addon.OpenSettings(%s)' % addon)
execute_builtin('SetFocus(%i)' % (int(menu) - button))
execute_builtin('SetFocus(%i)' % (int(function) - control))
except: execute_builtin('Addon.OpenSettings(%s)' % addon)
else: execute_builtin('Addon.OpenSettings(%s)' % addon)
def clean_settings(silent=False):
import xml.etree.ElementTree as ET
def _make_content(dict_object):
content = '<settings version="2">'
new_line = '\n '
for item in dict_object:
_id = item['id']
if _id in active_settings:
if 'default' in item and 'value' in item: content += '%s<setting id="%s" default="%s">%s</setting>' % (new_line, _id, item['default'], item['value'])
elif 'default' in item: content += '%s<setting id="%s" default="%s"></setting>' % (new_line, _id, item['default'])
elif 'value' in item: content += '%s<setting id="%s">%s</setting>' % (new_line, _id, item['value'])
				else: content += '%s<setting id="%s"></setting>' % (new_line, _id)
content += '\n</settings>'
return content
close_all_dialog()
active_settings, current_user_settings = [], []
active_append, current_append = active_settings.append, current_user_settings.append
root = ET.parse(addon_settings).getroot()
for i in root.findall('./category/setting'):
setting_id = i.get('id')
if setting_id: active_append(setting_id)
root = ET.parse(user_settings).getroot()
for i in root:
dict_item = {}
setting_id = i.get('id')
setting_default = i.get('default')
setting_value = i.text
dict_item['id'] = setting_id
if setting_value: dict_item['value'] = setting_value
if setting_default: dict_item['default'] = setting_default
current_append(dict_item)
new_content = _make_content(current_user_settings)
with open_file(user_settings, 'w') as f: f.write(new_content)
if not silent:
		removed = str(len([s for s in current_user_settings if s['id'] not in active_settings]))
notification('%s - Removed %s %s' % (local_string(32576), removed, 'Setting' if removed == '1' else 'Settings'), 2500)
def set_setting(setting_id, value):
addon().setSetting(setting_id, value)
def get_setting(setting_id, fallback=None):
try: settings_dict = json.loads(get_property(fen_settings_str))
except: settings_dict = make_settings_dict()
if settings_dict is None or setting_id not in settings_dict:
settings_dict = get_setting_fallback(setting_id)
make_settings_dict()
make_window_properties()
value = settings_dict.get(setting_id, '')
if value == '':
if fallback is None: return value
return fallback
return value
def get_setting_fallback(setting_id):
return {setting_id: addon().getSetting(setting_id)}
def make_settings_dict():
import xml.etree.ElementTree as ET
settings_dict = None
clear_property(fen_settings_str)
try:
if not path_exists(userdata_path): make_directories(userdata_path)
root = ET.parse(user_settings).getroot()
settings_dict = {}
dict_update = settings_dict.update
for item in root:
setting_id = item.get('id')
setting_value = item.text
if setting_value is None: setting_value = ''
dict_item = {setting_id: setting_value}
dict_update(dict_item)
set_property(fen_settings_str, json.dumps(settings_dict))
except Exception as e: logger('error in make_settings_dict', str(e))
return settings_dict
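# For reference, the user_settings file parsed above follows Kodi's standard
# settings.xml shape, e.g. (value illustrative):
#   <settings version="2">
#       <setting id="fen.highlight">whitesmoke</setting>
#   </settings>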
def make_window_properties(override=False):
if override: make_properties = True
else: make_properties = get_property(props_made_prop) != 'true'
if make_properties:
fen_addon = addon()
set_view_properties()
set_property(menu_cache_prop, fen_addon.getSetting('kodi_menu_cache'))
set_property(highlight_prop, fen_addon.getSetting('fen.highlight'))
set_property(meta_filter_prop, fen_addon.getSetting('meta_filter'))
set_property(custom_context_prop, fen_addon.getSetting('custom_context_menu'))
set_property(custom_info_prop, fen_addon.getSetting('custom_info_dialog'))
set_property(props_made_prop, 'true')
def pause_settings_change():
set_property(pause_settings_prop, 'true')
def unpause_settings_change():
clear_property(pause_settings_prop) | [
"[email protected]"
] | |
194a7792131d764b813a168334daf94c60f44ea9 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /055_modules/001_modules/examples/Python-from-Zero-to-Hero/07-Модули и пакеты/02-Modules and Packages/MainPackage/SubPackage/subscript.py | 8ffbacc63f9e112bc2a9d4c661425da1249ffb08 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 93 | py | def hello_subscript():
print('Hello subscript')
def hello_indeed():
print('Indeed Hello') | [
"[email protected]"
] | |
a68582856ced763fc141c32c0220df5f01620038 | b627da650f75bdcf7e0dc0ef5c4419cf53a1d690 | /src/zqh_devices/zqh_bootrom/zqh_bootrom_parameters.py | 6517029efbcb015d18f624aad5b278aefbb369d4 | [] | no_license | Jusan-zyh/zqh_riscv | 4aa8a4c51e19fb786ba0c2a120722f1382994a52 | bccde2f81b42ac258b92c21bb450ec6ff848387a | refs/heads/main | 2023-08-06T12:56:52.420302 | 2021-09-21T01:25:41 | 2021-09-21T01:25:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | import sys
import os
from phgl_imp import *
from zqh_tilelink.zqh_tilelink_node_module_parameters import zqh_tilelink_node_module_parameter
from zqh_tilelink.zqh_tilelink_parameters import zqh_tl_bundle_all_channel_parameter
class zqh_bootrom_parameter(zqh_tilelink_node_module_parameter):
def set_par(self):
super(zqh_bootrom_parameter, self).set_par()
self.par('bootrom_file', '../tests/zqh_riscv_sw/bootrom/bootrom.hex.fix')
def check_par(self):
super(zqh_bootrom_parameter, self).check_par()
def address(self):
return self.extern_slaves[0].address[0]
| [
"[email protected]"
] | |
2652aa5bc343df3fbf33ffc1f047e00a6c02bb47 | 55d100eb611c32015d78aee9f85fae62cb988603 | /docs/conf.py | ad9ac97b05ba93e346d14652256dfa907c631ddc | [
"BSD-3-Clause"
] | permissive | crazymerlyn/boltons | d2e37935739e55da88607eae24333b9782df46e7 | 2c300e71c4a493d72ba2444825af3ce5ea301738 | refs/heads/master | 2020-03-31T17:47:23.913244 | 2019-01-29T18:10:31 | 2019-01-29T18:10:31 | 152,434,305 | 0 | 0 | NOASSERTION | 2018-10-10T14:09:51 | 2018-10-10T14:09:51 | null | UTF-8 | Python | false | false | 9,797 | py | # -*- coding: utf-8 -*-
#
# boltons documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 21 00:34:18 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import sphinx
from pprint import pprint
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_PATH = os.path.abspath(CUR_PATH + '/../')
PACKAGE_PATH = os.path.abspath(CUR_PATH + '/../boltons/')
sys.path.insert(0, PROJECT_PATH)
sys.path.insert(0, PACKAGE_PATH)
pprint(os.environ)
def get_mod_stats():
# TODO: docstring percentage.
import pkgutil
from boltons.funcutils import get_module_callables
mod_count = 0
tot_type_count = 0
tot_func_count = 0
ignore = lambda attr_name: attr_name.startswith('_')
for _, mod_name, _ in pkgutil.iter_modules([PACKAGE_PATH]):
if not mod_name.endswith('utils'):
continue
mod = __import__(mod_name)
types, funcs = get_module_callables(mod, ignore=ignore)
if not len(types) and not len(funcs):
continue
mod_count += 1
tot_type_count += len(types)
tot_func_count += len(funcs)
ret = (mod_count, tot_type_count, tot_func_count)
print ('==== %s modules ==== %s types ==== %s funcs ====' % ret)
return ret
B_MOD_COUNT, B_TYPE_COUNT, B_FUNC_COUNT = get_mod_stats()
rst_epilog = """
.. |b_mod_count| replace:: {mod_count}
.. |b_type_count| replace:: {type_count}
.. |b_func_count| replace:: {func_count}
""".format(mod_count=B_MOD_COUNT,
type_count=B_TYPE_COUNT,
func_count=B_FUNC_COUNT)
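# The substitutions defined above can then be used from any .rst page in the
# docs, e.g. (sentence is illustrative):
#   boltons currently spans |b_mod_count| modules, |b_type_count| types, and
#   |b_func_count| functions.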
# -- General configuration ------------------------------------------------
autosummary_generate = True
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Read the Docs is version 1.2 as of writing
if sphinx.version_info[:2] < (1, 3):
extensions.append('sphinxcontrib.napoleon')
else:
extensions.append('sphinx.ext.napoleon')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'boltons'
copyright = u'2018, Mahmoud Hashemi'
author = u'Mahmoud Hashemi'
version = '18.0'
release = '18.0.1'
if os.name != 'nt':
today_fmt = '%B %d, %Y'
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/2.7', None)}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['_themes', sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# TEMP: see https://github.com/rtfd/readthedocs.org/issues/1692
# Add RTD Theme Path.
#if 'html_theme_path' in globals():
# html_theme_path.append('/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx')
#else:
# html_theme_path = ['_themes', '/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'boltonsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'boltons.tex', u'boltons Documentation',
u'Mahmoud Hashemi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'boltons', u'boltons Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'boltons', u'boltons Documentation',
author, 'boltons', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"[email protected]"
] | |
a1cef2729bb056ca6f889a0eb5e4cf66ed69ab4d | 39e91ca0b536166b0a1e8ffb21b75440aa00466e | /dbplot/__init__.py | f2ea98df08eb65965acf6140b7fac972806aaf78 | [
"Apache-2.0"
] | permissive | danielfrg/dbplot | bd26f5560945c08324e5346519a51581e31aa830 | fa0902c61f79e05dac9b71934b9a233658cabeba | refs/heads/master | 2020-04-17T20:12:21.897004 | 2019-02-04T21:33:30 | 2019-02-04T21:33:30 | 166,895,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | """
DBPLOT
"""
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
from dbplot.mpl import *
| [
"[email protected]"
] | |
87a7111cc408f1244c5c1e4ea5a047a52f48ac18 | 31b3ac7cc2f0cf43a4979e53d43002a9c5fb2038 | /detect pattern of length M repeated in array1.py | 1d723775c7a449082c18cd25734c9ebfaad3ab87 | [] | no_license | shreyansh-tyagi/leetcode-problem | ed31ada9608a1526efce6178b4fe3ee18da98902 | f8679a7b639f874a52cf9081b84e7c7abff1d100 | refs/heads/master | 2023-08-26T13:50:27.769753 | 2021-10-29T17:39:41 | 2021-10-29T17:39:41 | 378,711,844 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,943 | py | '''
Given an array of positive integers arr, find a pattern of length m that is repeated k or more times.
A pattern is a subarray (consecutive sub-sequence) that consists of one or more values, repeated multiple times consecutively without overlapping. A pattern is defined by its length and the number of repetitions.
Return true if there exists a pattern of length m that is repeated k or more times, otherwise return false.
Example 1:
Input: arr = [1,2,4,4,4,4], m = 1, k = 3
Output: true
Explanation: The pattern (4) of length 1 is repeated 4 consecutive times. Notice that pattern can be repeated k or more times but not less.
Example 2:
Input: arr = [1,2,1,2,1,1,1,3], m = 2, k = 2
Output: true
Explanation: The pattern (1,2) of length 2 is repeated 2 consecutive times. Another valid pattern (2,1) is also repeated 2 times.
Example 3:
Input: arr = [1,2,1,2,1,3], m = 2, k = 3
Output: false
Explanation: The pattern (1,2) is of length 2 but is repeated only 2 times. There is no pattern of length 2 that is repeated 3 or more times.
Example 4:
Input: arr = [1,2,3,1,2], m = 2, k = 2
Output: false
Explanation: Notice that the pattern (1,2) exists twice but not consecutively, so it doesn't count.
Example 5:
Input: arr = [2,2,2,2], m = 2, k = 3
Output: false
Explanation: The only pattern of length 2 is (2,2) however it's repeated only twice. Notice that we do not count overlapping repetitions.
Constraints:
2 <= arr.length <= 100
1 <= arr[i] <= 100
1 <= m <= 100
2 <= k <= 100
'''
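# Sanity sketch for the solution below, kept as comments because the LeetCode
# harness (not this file) supplies List and drives the class:
#   Solution().containsPattern([1,2,4,4,4,4], 1, 3)  -> True   (example 1)
#   Solution().containsPattern([1,2,1,2,1,3], 2, 3)  -> False  (example 3)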
class Solution:
    def containsPattern(self, arr: List[int], m: int, k: int) -> bool:
        # Slide a window of length m across arr: the pattern starting at i
        # repeats k or more times exactly when the m*k values from i are
        # that window tiled k times.
        n = len(arr)
        for i in range(n - m * k + 1):
            if arr[i:i + m] * k == arr[i:i + m * k]:
                return True
        return False | [
"[email protected]"
] | |
8bc0f6acc9da8bb2a8f1bffad8a631d9ae6ad984 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.4/django/contrib/localflavor/br/forms.py | 2a4375f5c6ff2f4124475b855e115f154d6a028f | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.4/django/contrib/localflavor/br/forms.py | [
"[email protected]"
] | |
e78dbe71cec3d1aa3f760ae589ea001a8b971dac | a1080c28573e1a59ec418ad3b0b0bf18e035dc41 | /Common Algos/Dynamic Programming/knapsack.py | d151e39b6b328cbf9cf0f327eb06c777b8b86c0f | [] | no_license | AumkarG/Algorithms-and-Data-Structures | 8c6fc21218897d2361fed1512dc6bb13eabd8842 | 03603ad579564ef213c58edd57cb8753cf8f86ba | refs/heads/master | 2023-03-04T09:48:25.167519 | 2021-02-14T17:16:18 | 2021-02-14T17:16:18 | 330,424,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | n=int(input())
m=int(input())
w=[int(i) for i in input().split()]
p=[int(i) for i in input().split()]
table=[]
for i in range(n+1):
table.append([0]*(m+1))
for k in range(w[0],m+1):
table[1][k]=p[0]
for i in range(1,n):
weight=w[i]
profit=p[i]
for j in range(1,m+1):
if j<weight:
table[i+1][j]=table[i][j]
else:
table[i+1][j]=max(table[i][j],table[i][j-weight]+profit)
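# Worked example (made-up input): n=3 items, capacity m=4, weights w=[4,5,1],
# profits p=[1,2,3]. Items 1 and 3 together exceed the capacity (4+1=5), so the
# best choice is item 3 alone and table[n][m] ends up as 3.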
for i in table:
print(i)
print(table[n][m]) | [
"aumkaar,[email protected]"
] | aumkaar,[email protected] |
507c104bb82cec4979e1d42788699aa47f9a1f71 | 014d996ab984699bf9fccc9f92b6925fca65fea9 | /lista/src/ex11.py | 8e49def459b8c2aaecc146889fb7e7891ed37f0e | [] | no_license | fernandooliveirapimenta/python | e04fb7dbb002761da64cdff3eac987e91e2cb43f | f174b0f35cd3918e5c17a96deab59e4ae7e191ab | refs/heads/master | 2022-10-10T09:02:39.382974 | 2020-10-27T18:00:48 | 2020-10-27T18:00:48 | 208,432,260 | 0 | 0 | null | 2022-09-20T22:15:58 | 2019-09-14T11:43:18 | Python | UTF-8 | Python | false | false | 283 | py | larg = float(input('Largura da parede: '))
alt = float(input('Wall height: '))
area = larg * alt
print('Your wall measures {}x{} and its area is {}m2'.format(larg, alt, area))
print('To paint this wall you will need {} liters of paint'.format(area / 2))
"[email protected]"
] | |
0bfff896c1fb061d612f9c67d83aeafadf8d1a8d | 7b91550bb272385d74f9868b8d18bbb5757c0c7c | /workertier/backends/cache/memcluster/dns.py | bf23625a603cbf2377caa3e46fedc669300ab671 | [
"Apache-2.0"
] | permissive | krallin/tutorial-workertier | c0becd4566bf951df60d5d28f569ecb684433a8b | ab579cbe9d68adee26209a8b6b092ef9895fda65 | refs/heads/master | 2023-08-31T10:45:06.326063 | 2013-09-03T19:15:48 | 2013-09-03T19:15:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | #coding:utf-8
import logging
import socket
from gevent import dns
from workertier.backends.cache.memcluster import BaseMemcachedClusterCache
logger = logging.getLogger(__name__)
class DNSMemcachedClusterCache(BaseMemcachedClusterCache):
def __init__(self, domain, port, timeout, refresh_signal):
super(DNSMemcachedClusterCache, self).__init__(port, timeout, refresh_signal)
self.domain = domain
def _get_servers_list(self):
ttl, ips = dns.resolve_ipv4(self.domain)
        return [socket.inet_ntoa(ip) for ip in sorted(ips)]
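# Illustrative behaviour (hostname and addresses are made up): if
# "cache.example.internal" resolves to 10.0.0.2 and 10.0.0.1, the method
# returns ['10.0.0.1', '10.0.0.2'] - packed addresses are sorted before
# being converted back to dotted-quad strings.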
| [
"[email protected]"
] | |
f75feeea7f36bed818bba69f8ee300c7f7442619 | f48e212274d87c561b0e0706fa84a955657afc0b | /msg_app/apps.py | 95ca63f658b25a3157bb87fb7cd2a83c682ec7d7 | [
"Unlicense"
] | permissive | Asyavaliqa/djing | 4adcd797f0d75f29e52e315d17a5ccfee41d63ef | 8cc0c670600254d288178acd47965f7b3db6856e | refs/heads/master | 2023-03-21T10:46:39.674804 | 2020-11-18T20:11:23 | 2020-11-18T20:11:23 | 571,789,203 | 1 | 0 | Unlicense | 2022-11-28T22:15:26 | 2022-11-28T22:15:25 | null | UTF-8 | Python | false | false | 88 | py | from django.apps import AppConfig
class MsgAppConfig(AppConfig):
name = 'msg_app'
| [
"[email protected]"
] | |
16358668e72c2d4470391eb9b565ac07a520392e | f891828ffe9c8501d276560c8c52d319f284056f | /285_bst_inorder_successor_m/main.py | 39f43c7a1696d08865e214ac086d1e2452a87734 | [] | no_license | chao-shi/lclc | 1b852ab61fef4072039c61f68e951ab2072708bf | 2722c0deafcd094ce64140a9a837b4027d29ed6f | refs/heads/master | 2021-06-14T22:07:54.120375 | 2019-09-02T23:13:59 | 2019-09-02T23:13:59 | 110,387,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inorderSuccessor(self, root, p):
"""
:type root: TreeNode
:type p: TreeNode
:rtype: TreeNode
"""
left_tree = []
q = root
while q != p:
if q.val < p.val:
q = q.right
else:
left_tree.append(q)
q = q.left
q = p.right
if q:
while q.left != None:
q = q.left
return q
elif left_tree:
return left_tree[-1]
else:
return None
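# Usage sketch (hand-built tree, purely illustrative):
#   root = TreeNode(2); root.left = TreeNode(1); root.right = TreeNode(3)
#   Solution().inorderSuccessor(root, root.left).val  -> 2  (successor of 1)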
| [
"[email protected]"
] | |
19afb8673972e5cc6b22b16a764caa23317a77aa | 7c688104f1fd816be603257116fb27040c620db3 | /RemoveDupSortedListTwo.py | 65ca336e74a6f2c02c62ee337edd742dcf97b492 | [] | no_license | xyzhangaa/ltsolution | 5e22c6a12e1ba9f6dea18be69d45c6ac14bc4fe5 | 9445ba22d5f0c12fd12b17e6791d1b387f1c4082 | refs/heads/master | 2020-05-21T11:37:58.781437 | 2015-10-14T14:45:04 | 2015-10-14T14:45:04 | 32,817,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | ###Given a sorted linked list, delete all nodes that have duplicate numbers,
###leaving only distinct numbers from the original list.
###For example,
###Given 1->2->3->3->4->4->5, return 1->2->5.
###Given 1->1->1->2->3, return 2->3.
#O(n), O(1)
class ListNode:
def __init__(self,x):
self.val = x
self.next = None
def RemoveDupSortedLinkedListTwo(self,head):
if head == None or head.next == None:
            return head
dummy = ListNode(0)
dummy.next = head
p = dummy
temp = dummy.next
while p.next:
while temp.next and temp.next.val == p.next.val:
temp = temp.next
if temp == p.next:
p = p.next
else:
p.next = temp.next
        return dummy.next
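# Usage sketch (illustrative): build 1->2->3->3->4->4->5 by hand, call
# RemoveDupSortedLinkedListTwo on it, and walk the returned list to get
# 1->2->5, matching the first example in the header comment.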
| [
"[email protected]"
] | |
198a751c7ef0698faac7959a9358d589b7b908f2 | 0a65d42f4f0e491cb2aada408401b94909f821c2 | /management/management_main/migrations/0001_initial.py | 1b30ce4bcc4d93d51f9cdb030eb6dbbf7f77a6db | [] | no_license | jmadlansacay/_Office | 3acde7655784e91c7dcecfc853d4f36cdfeef028 | 7f46449b9f7e8e892e2e0025ba493259197fa592 | refs/heads/main | 2023-07-28T10:23:54.680822 | 2021-09-11T02:28:07 | 2021-09-11T02:28:07 | 379,155,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,415 | py | # Generated by Django 2.2.5 on 2021-01-01 02:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('management_accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Employees',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('employee_idno', models.CharField(max_length=4)),
('last_name', models.CharField(blank=True, max_length=50, null=True)),
('first_name', models.CharField(blank=True, max_length=50, null=True)),
('middle_name', models.CharField(blank=True, max_length=50, null=True)),
('nickname', models.CharField(blank=True, max_length=50, null=True)),
('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='management_accounts.Projects')),
],
options={
'verbose_name': 'Employee List',
},
),
migrations.CreateModel(
name='ProjectHours',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.IntegerField()),
('jan_hrs', models.FloatField(default=0)),
('feb_hrs', models.FloatField(default=0)),
('mar_hrs', models.FloatField(default=0)),
('apr_hrs', models.FloatField(default=0)),
('may_hrs', models.FloatField(default=0)),
('jun_hrs', models.FloatField(default=0)),
('jul_hrs', models.FloatField(default=0)),
('aug_hrs', models.FloatField(default=0)),
('sep_hrs', models.FloatField(default=0)),
('oct_hrs', models.FloatField(default=0)),
('nov_hrs', models.FloatField(default=0)),
('dec_hrs', models.FloatField(default=0)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='management_accounts.Projects')),
],
options={
'verbose_name': 'No of Project Hour',
'unique_together': {('project', 'year')},
},
),
]
| [
"[email protected]"
] | |
f71dfa7c6489547a83673da08aed8c223511a034 | ca75f7099b93d8083d5b2e9c6db2e8821e63f83b | /z2/part3/updated_part2_batch/jm/parser_errors_2/101032623.py | d0b164169b518ca6bdd2b447b5d28c4a453d6b87 | [
"MIT"
] | permissive | kozakusek/ipp-2020-testy | 210ed201eaea3c86933266bd57ee284c9fbc1b96 | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | refs/heads/master | 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 | MIT | 2020-06-09T21:15:38 | 2020-05-08T10:10:47 | C | UTF-8 | Python | false | false | 1,794 | py | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 101032623
"""
"""
random actions, total chaos
"""
board = gamma_new(2, 3, 2, 3)
assert board is not None
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 2, 0, 0) == 1
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 0, 2) == 0
board800795293 = gamma_board(board)
assert board800795293 is not None
assert board800795293 == ("1.\n" ".1\n" "22\n")
del board800795293
board800795293 = None
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_golden_possible(board, 2) == 1
board868257597 = gamma_board(board)
assert board868257597 is not None
assert board868257597 == ("1.\n" ".1\n" "22\n")
del board868257597
board868257597 = None
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 1, 0, 0) == 0
board272709497 = gamma_board(board)
assert board272709497 is not None
assert board272709497 == ("1.\n" ".1\n" "22\n")
del board272709497
board272709497 = None
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_free_fields(board, 1) == 1
board769041695 = gamma_board(board)
assert board769041695 is not None
assert board769041695 == ("11\n" ".1\n" "22\n")
del board769041695
board769041695 = None
gamma_delete(board)
| [
"[email protected]"
] | |
0e157e83d1cba0ce1ad766f371ea6de09d9138db | 26a73e4df854313aa213882c5d3db16269a3254b | /hist/ks_tests.py | 8955ce6d77eb8499cb5ac19bb688c03dcfba6f23 | [] | no_license | howonlee/vr-timeseries-analysis | 69b18c35589d84e12ace02671a45d97443dbf333 | bec34c7088e897fb17c424cfcd0da687223e44df | refs/heads/master | 2020-04-11T10:59:04.375920 | 2015-06-11T00:13:26 | 2015-06-11T00:13:26 | 33,697,737 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import scipy.stats as sci_stats
import numpy as np
import matplotlib.pyplot as plt
with open("total_correlations") as correlations_file:
corrs = map(lambda x: float(x.strip()), list(correlations_file))
#corrs_sum = float(sum(corrs))
#norm_corrs = [corr / corrs_sum for corr in corrs]
with open("total_gammas") as gamma_file:
gammas = map(lambda x: float(x.strip()), list(gamma_file))
#gammas_sum = float(sum(gammas))
#norm_gammas = [gamma / gammas_sum for gamma in gammas]
with open("total_cmis") as cmi_file:
cmis = map(lambda x: float(x.strip()), list(cmi_file))
#cmis_sum = float(sum(cmis))
#norm_cmis = [cmi / cmis_sum for cmi in cmis]
print sci_stats.ks_2samp(corrs, cmis)
print sci_stats.ks_2samp(corrs, gammas)
print sci_stats.ks_2samp(cmis, gammas)
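# Each ks_2samp call prints a (KS statistic, p-value) pair; a small p-value
# is evidence that the two samples come from different distributions.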
| [
"[email protected]"
] | |
8000a18e2e630764b2ef77d3854c06b4ca2e8ac0 | d10190ccc03c89032cc44738d275010eb62b46f3 | /urlybirdlite/urls.py | 708427a25564d74d0ed4ec804ecc5e5601ef32a7 | [] | no_license | bekkblando/urlybirdlite-fixed | 659aa236110c088ff606c57ed23e5d0f1216ea09 | 84dec1c712210fd04628592a187154446e60ad43 | refs/heads/master | 2021-01-15T13:00:03.854463 | 2015-08-18T14:15:39 | 2015-08-18T14:15:39 | 38,802,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,939 | py | """urlybirdlite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.forms import UserCreationForm
from django.views.generic import CreateView
from urlshortner.views import home, CreateBookMark, BookmarkUpdate, BookmarkDelete, profile, wtd, wtupdate
urlpatterns = [
url('^register/', CreateView.as_view(
template_name='registration/create_user.html',
form_class=UserCreationForm,
success_url='/'), name="regis"),
url(r'^admin/', include(admin.site.urls)),
url(r'^login/$', 'django.contrib.auth.views.login',name="login"),
url(r'^logout', 'django.contrib.auth.views.logout', name="logout"),
url('^bookmark/', CreateBookMark.as_view(
template_name='createbookmark.html',
success_url='/profile/'), name="createbookmark"),
url(r'^wtd/', wtd, name="wtd"),
url(r'^uwtupdate/', wtupdate, name="wtupdate"),
url(r'^profile/', profile, name="profile"),
url('^delrate(?P<pk>\w+)', BookmarkDelete.as_view(
template_name='deletebookmark.html',
success_url='/profile/'), name="delbookmark"),
url('^update(?P<pk>\w+)', BookmarkUpdate.as_view(
template_name='bookmark_update.html',
success_url='/profile/'), name="updatebookmark"),
url(r'^', home, name="home"),
] | [
"[email protected]"
] | |
9e10a26d7e2bcaab2aa9a47e0250f1b7413dd2cc | 9bed711e5b7c4fbeb556c2cea918adcf1de94bdc | /app_reports/templatetags/app_reports_tags.py | 90752faa82b4f5f8ca96d36211a37cec1d85c665 | [] | no_license | imagilex/django_main_apps | bc6e900db4cab2a0603f14c844e769faf93be3a3 | fa52958a384fdb06121b17b64967a2e0c8e468cd | refs/heads/env_dev | 2022-11-24T09:05:17.523336 | 2021-03-03T04:29:44 | 2021-03-03T04:29:44 | 232,409,309 | 0 | 1 | null | 2022-11-22T05:47:32 | 2020-01-07T20:17:04 | Python | UTF-8 | Python | false | false | 2,160 | py | from django import template
from app_reports.models import Esfera
register = template.Library()
@register.inclusion_tag('app_reports/esfera/card.html')
def esfera_card(user, context):
"""
    Inclusion tag: {% esfera_card user context %}
"""
esferas = []
for esfera in Esfera.objects.all():
if esfera.accesible_by(user):
esferas.append(esfera)
return {'esferas': esferas, 'context': context}
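# Template usage sketch (context variable names are illustrative):
#   {% load app_reports_tags %}
#   {% esfera_card request.user some_context %}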
@register.filter
def esfera_accesible_by(esfera, user):
"""
    Simple tag: {% if esfera|esfera_accesible_by:user %}
    Returns True if the user has permission to access the esfera.
    Parameters
    ----------
    esfera : Esfera instance
    user : User instance
    Returns
    -------
    boolean
        True if the user can access the esfera, False otherwise
"""
return esfera.accesible_by(user)
@register.filter
def dimension_accesible_by(dimension, user):
"""
    Simple tag: {% if dimension|dimension_accesible_by:user %}
    Returns True if the user has permission to access the report
    dimension.
    Parameters
    ----------
    dimension : DimensionReporte instance
    user : User instance
    Returns
    -------
    boolean
        True if the user can access the report dimension,
        False otherwise
"""
return dimension.accesible_by(user)
@register.filter
def reporte_accesible_by(reporte, user):
"""
    Simple tag: {% if reporte|reporte_accesible_by:user %}
    Returns True if the user has permission to access the report.
    Parameters
    ----------
    reporte : Reporte instance
    user : User instance
    Returns
    -------
    boolean
        True if the user can access the report, False otherwise
"""
return reporte.accesible_by(user)
@register.inclusion_tag('app_reports/esfera/menu_opc.html')
def dimension_as_menu(esfera, dimension, user, nivel=0):
"""
Inclusion tag: {% dimension_as_menu esfera dimension user nivel %}
"""
nivel = int(nivel) + 1
return {
'esfera': esfera,
'dimension': dimension,
'user': user,
'nivel': nivel}
| [
"[email protected]"
] | |
400887601c2793e3af1e6dc4a7b58058d3bfc0b5 | d8546aa0fc53775300b0f9d86cb705b0e5157890 | /arguments3.py | 774bd879eab7653b57e5e9a9eaa21b5a18a1a8fd | [] | no_license | larago/click | ecf02e7462ada4c41e685480dde239ece2f1749d | 89bedb148398965a9046777b18d3e02e10c1630b | refs/heads/master | 2021-04-30T16:26:20.722104 | 2017-01-26T12:41:21 | 2017-01-26T12:41:21 | 80,103,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | #encoding=utf8
import click
@click.command()
@click.argument('src', nargs=-1)
@click.argument('dst', nargs=1)
def move(src, dst):
click.echo('move %s to %s' % (src, dst))
move()
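# Example invocation (file names are made up): src greedily collects every
# argument before the last one, dst takes the final argument, so
#   $ python arguments3.py a.txt b.txt target_dir
# echoes roughly: move ('a.txt', 'b.txt') to target_dir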
| [
"[email protected]"
] | |
e4a65033a44b01d85d2d3eaec791905f28567ada | a25223f5be818b549323e8232c31a606fcca275a | /work/lib_learn/fuzzy_learn.py | dbb281d2d370d084e8bde9994599c3168320dde1 | [] | no_license | Jsonming/workspace | 4ef1119606b3c138ff9594b3e0cf16de8077e28d | 2ac1b07d85deeb611600f5e64083c4eb0688fdb4 | refs/heads/master | 2020-06-13T04:23:15.492080 | 2020-03-31T10:34:30 | 2020-03-31T10:34:30 | 194,531,923 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/30 10:48
# @Author : yangmingming
# @Site :
# @File : fuzzy_learn.py
# @Software: PyCharm
| [
"[email protected]"
] | |
6753a86ca1666946b2512abe25045282bb3863fc | a110805b0e0cf26d1da8e6276ec6883ed4297752 | /SOLUCIONES/SOLUCIONES/intermedio I/ejemplos/fecha_hora_sistema.py | 5668b7246f9d8e1eddcdf6fb495eda480aa0f7b3 | [] | no_license | dayes/curso_Python | a1e77725bd8ab4c287589f15e36849817bcb39e8 | 352b0505a5e3d6f3310893b5c87d1eab31a2a66d | refs/heads/master | 2020-03-22T07:37:54.487944 | 2018-07-13T10:59:40 | 2018-07-13T10:59:40 | 139,713,481 | 0 | 0 | null | 2018-07-04T11:40:03 | 2018-07-04T11:34:55 | null | UTF-8 | Python | false | false | 273 | py | # fecha y hora del sistema:
import time
t = time.strftime("%H:%M:%S")
d = time.strftime("%d/%m/%y")
d2 = time.strftime("%d/%m/%Y")
dt = time.strftime("%d/%m/%Y %H:%M:%S")
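# strftime directives used above: %H hour (00-23), %M minute, %S second,
# %d day of month, %m month, %y two-digit year, %Y four-digit year.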
print ("time: ", t)
print ("date: ", d)
print ("date: ", d2)
print ("datetime: ", dt)
| [
"[email protected]"
] | |
c6b54f026d0192e33cc07008b2f0144c230049c8 | 0f79fd61dc47fcafe22f83151c4cf5f2f013a992 | /BOJ/1753.py | 9ad0359bef96a50500ff44c3a43a2cb8f213596e | [] | no_license | sangm1n/problem-solving | 670e119f28b0f0e293dbc98fc8a1aea74ea465ab | bc03f8ea9a6a4af5d58f8c45c41e9f6923f55c62 | refs/heads/master | 2023-04-22T17:56:21.967766 | 2021-05-05T12:34:01 | 2021-05-05T12:34:01 | 282,863,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | """
author : Lee Sang Min
github : https://github.com/sangm1n
e-mail : [email protected]
title : 최단경로
description : Shortest Path
"""
import heapq
def dijkstra(start, distance):
q = []
heapq.heappush(q, (0, start))
distance[start] = 0
while q:
weight, v = heapq.heappop(q)
if distance[v] >= weight:
for i, cost in graph[v]:
new_cost = weight + cost
if distance[i] > new_cost:
distance[i] = new_cost
heapq.heappush(q, (new_cost, i))
INF = int(1e9)
V, E = map(int, input().split())
start = int(input())
graph = [[] for _ in range(V + 1)]
for _ in range(E):
u, v, w = map(int, input().split())
graph[u].append((v, w))
distance = [INF] * (V + 1)
dijkstra(start, distance)
[print('INF' if dist == INF else dist) for dist in distance[1:]]
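# Illustrative run (input values made up, format as read above):
#   "3 2" (V E), "1" (start), then edges "1 2 5" and "2 3 2"
# prints 0, 5 and 7 for vertices 1..3.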
| [
"[email protected]"
] | |
8f49d60cc15b59556cdcb8cf63d51c39fdccb49e | bb680585552532aeaad4c297f68c6d87c29b70b3 | /checks/cs50/2017/x/greedy/checks.py | 0ad789000789772622adc3837c1603266050c84d | [] | no_license | mstrperson/check50 | 3eca1d2f67ca422d50313771953e903fd1d3636f | e615b8685c5aeb4c4015c86cd62cae9dc1c9bab5 | refs/heads/develop | 2021-01-01T06:14:33.906452 | 2017-07-18T20:07:02 | 2017-07-18T20:07:02 | 97,390,234 | 0 | 0 | null | 2017-07-18T18:54:13 | 2017-07-16T14:51:44 | Python | UTF-8 | Python | false | false | 2,133 | py | import os
import re
import sys
sys.path.append(os.getcwd())
from check50 import TestCase, Error, check
class Greedy(TestCase):
@check()
def exists(self):
"""greedy.c exists."""
super(Greedy, self).exists("greedy.c")
@check("exists")
def compiles(self):
"""greedy.c compiles."""
self.spawn("clang -o greedy greedy.c -lcs50").exit(0)
@check("compiles")
def test041(self):
"""input of 0.41 yields output of 4"""
self.spawn("./greedy").stdin("0.41").stdout("^4\n$", 4).exit(0)
@check("compiles")
def test001(self):
"""input of 0.01 yields output of 1"""
self.spawn("./greedy").stdin("0.01").stdout("^1\n$", 1).exit(0)
@check("compiles")
def test015(self):
"""input of 0.15 yields output of 2"""
self.spawn("./greedy").stdin("0.15").stdout("^2\n$", 2).exit(0)
@check("compiles")
def test160(self):
"""input of 1.6 yields output of 7"""
self.spawn("./greedy").stdin("1.6").stdout("^7\n$", 7).exit(0)
@check("compiles")
def test230(self):
"""input of 23 yields output of 92"""
self.spawn("./greedy").stdin("23").stdout("^92\n$", 92).exit(0)
@check("compiles")
def test420(self):
"""input of 4.2 yields output of 18"""
out = self.spawn("./greedy").stdin("4.2").stdout()
desired = "18"
if not re.compile("^18\n$").match(out):
if re.compile("^22\n$").match(out):
raise Error((out, desired), "Did you forget to round your input to the nearest cent?")
else:
raise Error((out, desired))
@check("compiles")
def test_reject_negative(self):
"""rejects a negative input like -.1"""
self.spawn("./greedy").stdin("-1").reject()
@check("compiles")
def test_reject_foo(self):
"""rejects a non-numeric input of "foo" """
self.spawn("./greedy").stdin("foo").reject()
@check("compiles")
def test_reject_empty(self):
"""rejects a non-numeric input of "" """
self.spawn("./greedy").stdin("").reject()
| [
"[email protected]"
] | |
56555304f5a4596878649f0fb0942c6ca5ceb0d2 | d14e79ff45ac22139df111c07fd130c2b37a1ad3 | /slide.py | 41a2425b464aae7f4b25d60152fdcd42e5a85a8d | [] | no_license | msarch/slide | 90127c19ed1c3fc940bec521e29fec04cf531ec4 | ec8d34124e43e1a5cde4290de5560e90a3d3f669 | refs/heads/master | 2021-01-19T18:41:13.309224 | 2017-04-28T19:41:23 | 2017-04-28T19:41:23 | 88,375,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,862 | py | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
# simple pyglet animation
# http://www.github.com/msarch/slide
import math
import pyglet
from pyglet.gl import *
DEG2RAD = 2* math.pi/360
OMEGA = 360.0 * 0.5 # angular velocity (rev/s) : 1/2 rev/s
ORIGIN = [1280/2,800/2,0] # x,y of screen center, rotation = 0
alpha = 0.0 # initial angle
vis = 1 # visibility switch
#---------------------------------- SKETCH ------------------------------------
class Sketch(pyglet.graphics.Group): # subclass with position/rotation ability
'''
'sketches' are regular pyglet graphics.Groups whom 'set_state' and
'unset_state' methods are used to add move and rotate functionnalities.
Adding a shape to a group (batch.add) returns the matching vertex list,
color and vertex position are accessible through .colors and .vertices
'''
def __init__(self,pos=ORIGIN):
super(Sketch, self).__init__()
self.pos=pos
def set_state(self):
glPushMatrix()
glTranslatef(self.pos[0], self.pos[1], 0)
glRotatef(self.pos[2], 0, 0, 1) # rot. in degrees; x,y,z of rot. axis
def unset_state(self):
glPopMatrix()
# vertex_list modifier function -----------------------------------------------
def translate(vtx,pos): # modifying a list of vertices at once to new pos
return(reduce(tuple.__add__, zip([x+pos[0] for x in vtx[0::2]],
[y+pos[1] for y in vtx[1::2]])))
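# Worked example: translate((0,0, 1,0), (10,20)) -> (10, 20, 11, 20); the x
# coords vtx[0::2] and y coords vtx[1::2] are shifted by pos, then re-zipped
# into a flat tuple.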
#--------------------------------- PYGLET STUFF -------------------------------
batch = pyglet.graphics.Batch() # holds all graphics
canvas = pyglet.window.Window(fullscreen=True)
canvas.set_mouse_visible(False)
glEnable(GL_LINE_SMOOTH)
glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)
glEnable(GL_BLEND) # transparency
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # transparency
black =( 0, 0, 0, 255)
glClearColor(*black) # background color
@canvas.event
def on_key_press(symbol, modifiers):
global vis
if symbol == pyglet.window.key.I:
vis=not(vis) # visibility switch
toggle(vis)
else: pyglet.app.exit()
def draw(): # called explicitly from update(); 'draw' is not a pyglet event name, so no @canvas.event decorator
canvas.clear()
batch.draw() # ALL graphics are added to this single batch!
def update(dt): # updates an uniform circular motion then calls custom actions
global alpha
alpha+= dt * OMEGA % 360 # stay within [0,360°]
updates(dt)
draw()
def toggle(vis):
for e in [vr, hr]: e.colors = (200,200,200,255*vis)*5
dot.colors = (255,0,0,255*vis)*5
#-------------------------------- SCENE STUFF ---------------------------------
still = Sketch() # is a still sketch, 'default'
wheel = Sketch() # is revolving, 'default'
hslide = Sketch() # sliding horizontally
vslide = Sketch() # slides verticaly
# dot -------------------------------------------------------------------------
# dot, transparency toggled when key 'I' pressed
dot=batch.add(5, pyglet.gl.GL_LINE_STRIP, wheel,'v2i/static', 'c4B/static')
dot.colors = (255,0,0,255*vis)*5 # vertex list color data, rgba format
dot.vertices = translate([-3, 0, 3, 0, 0, 0, 0, 3, 0, -3], (400-10,0))
# recs ------------------------------------------------------------------------
def rec(w=100, h=100, color=(255,255,255,255), pos=ORIGIN, sk=still):
rec=batch.add(6, pyglet.gl.GL_TRIANGLES, sk, 'v2f/static', 'c4B/static')
rec.colors = color*6
rec.vertices = translate((0,0,0,h,w,h,w,h,w,0,0,0), pos)
return(rec) # batch.add() returns a vertex_list
gu = int(800/85) # overall drawing V size is 85 gu and just fits into screen
len, wth, thk = 33 * gu, 11 * gu, 6 * gu # proportions of the kapla block
white = (255, 255, 255, 255)
# four + 1 horizontal rects
r1 = rec(w=len, h=thk, color=white, pos=(wth/2 + thk, wth/2))
r2 = rec(w=len, h=thk, color=white, pos=(wth/2 + thk, -wth/2-thk))
r3 = rec(w=len, h=thk, color=white, pos=(-len-thk-wth/2, wth/2))
r4 = rec(w=len, h=thk, color=white, pos=(-len-thk-wth/2, -wth/2-thk))
s1 = rec(w=len, h=wth, color=white, pos=(-len/2, -wth/2, 0, 0), sk=hslide)
# four vertical rects
r5 = rec(w=thk, h=len, color=white, pos=(wth/2, wth/2+thk))
r6 = rec(w=thk, h=len, color=white, pos=(-wth/2-thk, wth/2+thk))
r7 = rec(w=thk, h=len, color=white, pos=(wth/2, -len-thk-wth/2))
r8 = rec(w=thk, h=len, color=white, pos=(-wth/2 - thk, -len-thk-wth/2))
s2 = rec(w=wth, h=len, color=white, pos=(-wth/2, -len/2, 0.1, 0), sk=vslide)
# large rec, transparency toggled when key 'I' pressed
vr=batch.add(5, pyglet.gl.GL_LINE_STRIP, vslide, 'v2f/static', 'c4B/static')
vr.colors = (200,200,200,255*vis)*5 # vis = true/false visibility switch
vr.vertices = (-640,-len/2,640,-len/2,640,len/2,-640,len/2,-640, -len/2)
# large rec, transparency toggled when key 'I' pressed
hr=batch.add(5, pyglet.gl.GL_LINE_STRIP, hslide, 'v2f/static', 'c4B/static')
hr.colors = (200,200,200,255*vis)*5 # vis = true/false visibility switch
hr.vertices = (-len/2,-400,len/2,-400,len/2,400,-len/2,400,-len/2, -400)
# updates ---------------------------------------------------------------------
from itertools import cycle
previous_hdir, previous_vdir = 1, 1
BOW = pyglet.media.load('bow.wav', streaming=False)
BOW1 = pyglet.media.load('bow1.wav', streaming=False)
# kapla_colors
redk =(255, 69, 0, 255) # red kapla
bluk =( 0, 0, 140, 255) # blue kapla
grnk =( 0, 99, 0, 255) # green kapla
yelk =(255, 214, 0, 255) # yellow kapla
target_h = cycle((r2,r1,r3,r4,s1)) # color change toggled by hslide movmnt
target_v = cycle((r5,r6,r8,r7,s2)) # color change toggled by vslide movmnt
h_color = cycle((redk, grnk, bluk, yelk)) # color choice for target_h
v_color = cycle((yelk, bluk, grnk, redk)) # color choice for target_v
def updates(dt):
global previous_hdir, previous_vdir
# wheel is rotating
wheel.pos = [wheel.pos[0],wheel.pos[1], alpha]
# hslide is oscillating
# if direction changes, target_h colors cycle, sound played
cosa = math.cos(alpha*DEG2RAD)
previous_h_pos = hslide.pos[0]
hslide.pos = [640+cosa*(640-len/2), hslide.pos[1], 0]
new_hdir = cmp( previous_h_pos, hslide.pos[0])
if new_hdir + previous_hdir == 0:
BOW.play()
target_h.next().colors = h_color.next()*6
previous_hdir=new_hdir
# vslide is oscillating
# if direction changes, target_v colors cycle, sound played
sina = math.sin(alpha*DEG2RAD)
previous_vslide_pos1 = vslide.pos[1]
vslide.pos = [vslide.pos[0], 400+sina*(400-len/2), 0]
new_vdir = cmp( previous_vslide_pos1, vslide.pos[1])
if new_vdir + previous_vdir == 0:
BOW1.play()
target_v.next().colors = v_color.next()*6
previous_vdir=new_vdir
#---------------------------------- MAIN --------------------------------------
if __name__ == "__main__":
pyglet.clock.schedule_interval(update, 1.0/60)
pyglet.app.run()
| [
"[email protected]"
] | |
72b25c0a76850bdcae1bfe7848dc4a64f1b3ddc7 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_96/762.py | 2083c366cc527bd043cb724883b8d032aaa22ed4 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | import math
def dfs(N,S,P,t):
# print t
q=[]
q.append((S,-1,0))
maxV=-1
while(len(q)!=0):
#print q
c=q.pop()
#print c
if c[1]==N-1:
if c[0]==0:
maxV=max(maxV,c[2])
continue
shifts=[-2,-1,0,1,2]
cTop=t[c[1]+1]
#print cTop
p1s=cTop/3
pshifts=[-1,0,1]
for a in pshifts:
p1=p1s+a
if p1<0 or p1>10:
continue
for x in shifts:
p2=p1s+x
p3=cTop-p1-p2
if p2<0 or p2>10 or p3<0 or p3>10:
continue
delta=0
if p1>=P or p2>=P or p3>=P :
delta=1
if math.fabs(p3-p1)<=1 and math.fabs(p3-p2)<=1 and math.fabs(p1-p2)<=1:
# print 't',p1,p2,p3
q.append((c[0],c[1]+1,c[2]+delta))
elif math.fabs(p3-p1)<=2 and math.fabs(p3-p2)<=2 and math.fabs(p1-p2)<=2 and c[0]>0:
# print 't',p1,p2,p3
q.append((c[0]-1,c[1]+1,c[2]+delta))
# elif math.fabs(p3-p1)<=2 and math.fabs(p3-p2)==2and c[0]>0:
# q.append((c[0]-1,c[1]+1,c[2]+delta))
return maxV
f = open('B-small-attempt0.in', 'r')
T=int(f.readline())
totalOut=''
for i in range(1,T+1):
S= f.readline()
if (('\n') in S):
S=S[:-1]
lst=S.split(' ')
N=int(lst[0])
S=int(lst[1])
P=int(lst[2])
lst=lst[3:]
t=map(int,lst)
totalOut+= 'Case #'+str(i)+': '+str(dfs(N,S,P,t))+'\n'
totalOut=totalOut[:-1]
#print totalOut
outD= open ('B-small-attempt0.out','w')
outD.write(totalOut) | [
"[email protected]"
] | |
bec411eed48f5799289ce5fcb8295f2fd1cc9286 | b05a07bbb0c509211d67e2afe79707f823fb8789 | /train_distillation.py | 71fb749c78bcfbc126972075c4c3b2ee7dcdc027 | [] | no_license | MLDL/SKD | 9138a5c7ee8276fe33efd34a3780767211172698 | 90790ee20bd183662fe01ee05e28100d1b56d50c | refs/heads/master | 2022-10-15T20:36:12.669840 | 2020-06-18T00:45:33 | 2020-06-18T00:45:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,802 | py | """
the general training framework
"""
from __future__ import print_function
import os
import argparse
import socket
import time
import sys
from tqdm import tqdm
import mkl
import torch
import torch.optim as optim
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
from models import model_pool
from models.util import create_model, get_teacher_name
from distill.util import Embed
from distill.criterion import DistillKL, NCELoss, Attention, HintLoss
from dataset.mini_imagenet import ImageNet, MetaImageNet
from dataset.tiered_imagenet import TieredImageNet, MetaTieredImageNet
from dataset.cifar import CIFAR100, MetaCIFAR100
from dataset.transform_cfg import transforms_options, transforms_list
from util import adjust_learning_rate, accuracy, AverageMeter
from eval.meta_eval import meta_test, meta_test_tune
from eval.cls_eval import validate
from models.resnet import resnet12
import numpy as np
from util import Logger
import wandb
from dataloader import get_dataloaders
import copy
def get_freer_gpu():
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
return np.argmax(memory_available)
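# Illustrative: each matching line of `tmp` looks roughly like
# "    Free : 11178 MiB", so x.split()[2] picks out the free-memory number
# per GPU (the exact nvidia-smi layout can vary by driver version).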
os.environ["CUDA_VISIBLE_DEVICES"]=str(get_freer_gpu())
# os.environ['OPENBLAS_NUM_THREADS'] = '4'
mkl.set_num_threads(2)
class Wrapper(nn.Module):
def __init__(self, model, args):
super(Wrapper, self).__init__()
self.model = model
self.feat = torch.nn.Sequential(*list(self.model.children())[:-2])
self.last = torch.nn.Linear(list(self.model.children())[-2].in_features, 64)
def forward(self, images):
feat = self.feat(images)
feat = feat.view(images.size(0), -1)
out = self.last(feat)
return feat, out
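# Illustrative shapes: for an input batch of shape (B, 3, H, W), forward()
# returns feat of shape (B, F) (flattened backbone features) and out of shape
# (B, 64), 64 being the base-class count hard-coded in self.last above.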
def parse_option():
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('--eval_freq', type=int, default=10, help='meta-eval frequency')
parser.add_argument('--print_freq', type=int, default=100, help='print frequency')
parser.add_argument('--tb_freq', type=int, default=500, help='tb frequency')
parser.add_argument('--save_freq', type=int, default=10, help='save frequency')
parser.add_argument('--batch_size', type=int, default=64, help='batch_size')
parser.add_argument('--num_workers', type=int, default=8, help='num of workers to use')
parser.add_argument('--epochs', type=int, default=100, help='number of training epochs')
# optimization
parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default='60,80', help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
# dataset and model
parser.add_argument('--model_s', type=str, default='resnet12', choices=model_pool)
parser.add_argument('--model_t', type=str, default='resnet12', choices=model_pool)
parser.add_argument('--dataset', type=str, default='miniImageNet', choices=['miniImageNet', 'tieredImageNet',
'CIFAR-FS', 'FC100'])
parser.add_argument('--simclr', type=bool, default=False, help='use simple contrastive learning representation')
parser.add_argument('--ssl', type=bool, default=True, help='use self supervised learning')
parser.add_argument('--tags', type=str, default="gen1, ssl", help='add tags for the experiment')
parser.add_argument('--transform', type=str, default='A', choices=transforms_list)
# path to teacher model
parser.add_argument('--path_t', type=str, default="", help='teacher model snapshot')
# distillation
parser.add_argument('--distill', type=str, default='kd', choices=['kd', 'contrast', 'hint', 'attention'])
parser.add_argument('--trial', type=str, default='1', help='trial id')
parser.add_argument('-r', '--gamma', type=float, default=1, help='weight for classification')
parser.add_argument('-a', '--alpha', type=float, default=0, help='weight balance for KD')
parser.add_argument('-b', '--beta', type=float, default=0, help='weight balance for other losses')
# KL distillation
parser.add_argument('--kd_T', type=float, default=2, help='temperature for KD distillation')
# NCE distillation
parser.add_argument('--feat_dim', default=128, type=int, help='feature dimension')
parser.add_argument('--nce_k', default=16384, type=int, help='number of negative samples for NCE')
parser.add_argument('--nce_t', default=0.07, type=float, help='temperature parameter for softmax')
parser.add_argument('--nce_m', default=0.5, type=float, help='momentum for non-parametric updates')
# cosine annealing
parser.add_argument('--cosine', action='store_true', help='using cosine annealing')
# specify folder
parser.add_argument('--model_path', type=str, default='save/', help='path to save model')
parser.add_argument('--tb_path', type=str, default='tb/', help='path to tensorboard')
parser.add_argument('--data_root', type=str, default='/raid/data/IncrementLearn/imagenet/Datasets/MiniImagenet/', help='path to data root')
# setting for meta-learning
parser.add_argument('--n_test_runs', type=int, default=600, metavar='N',
help='Number of test runs')
parser.add_argument('--n_ways', type=int, default=5, metavar='N',
help='Number of classes for doing each classification run')
parser.add_argument('--n_shots', type=int, default=1, metavar='N',
help='Number of shots in test')
parser.add_argument('--n_queries', type=int, default=15, metavar='N',
help='Number of query in test')
parser.add_argument('--n_aug_support_samples', default=5, type=int,
help='The number of augmented samples for each meta test sample')
parser.add_argument('--test_batch_size', type=int, default=1, metavar='test_batch_size',
help='Size of test batch)')
opt = parser.parse_args()
if opt.dataset == 'CIFAR-FS' or opt.dataset == 'FC100':
opt.transform = 'D'
if 'trainval' in opt.path_t:
opt.use_trainval = True
else:
opt.use_trainval = False
if opt.use_trainval:
opt.trial = opt.trial + '_trainval'
# set the path according to the environment
if not opt.model_path:
opt.model_path = './models_distilled'
if not opt.tb_path:
opt.tb_path = './tensorboard'
if not opt.data_root:
opt.data_root = './data/{}'.format(opt.dataset)
else:
opt.data_root = '{}/{}'.format(opt.data_root, opt.dataset)
opt.data_aug = True
tags = opt.tags.split(',')
opt.tags = list([])
for it in tags:
opt.tags.append(it)
iterations = opt.lr_decay_epochs.split(',')
opt.lr_decay_epochs = list([])
for it in iterations:
opt.lr_decay_epochs.append(int(it))
opt.model_name = 'S:{}_T:{}_{}_{}_r:{}_a:{}_b:{}_trans_{}'.format(opt.model_s, opt.model_t, opt.dataset,
opt.distill, opt.gamma, opt.alpha, opt.beta,
opt.transform)
if opt.cosine:
opt.model_name = '{}_cosine'.format(opt.model_name)
opt.model_name = '{}_{}'.format(opt.model_name, opt.trial)
opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)
if not os.path.isdir(opt.tb_folder):
os.makedirs(opt.tb_folder)
opt.save_folder = os.path.join(opt.model_path, opt.model_name)
if not os.path.isdir(opt.save_folder):
os.makedirs(opt.save_folder)
#extras
opt.fresh_start = True
return opt
def load_teacher(model_path, model_name, n_cls, dataset='miniImageNet'):
"""load the teacher model"""
print('==> loading teacher model')
print(model_name)
model = create_model(model_name, n_cls, dataset)
model.load_state_dict(torch.load(model_path)['model'])
print('==> done')
return model
def main():
best_acc = 0
opt = parse_option()
wandb.init(project=opt.model_path.split("/")[-1], tags=opt.tags)
wandb.config.update(opt)
wandb.save('*.py')
wandb.run.save()
# dataloader
train_loader, val_loader, meta_testloader, meta_valloader, n_cls = get_dataloaders(opt)
# model
    # teacher model(s): opt.path_t may be a comma-separated list of snapshots
    model_t = []
    if ',' in opt.path_t:
        for path in opt.path_t.split(','):
            model_t.append(load_teacher(path, opt.model_t, n_cls, opt.dataset))
    else:
        model_t.append(load_teacher(opt.path_t, opt.model_t, n_cls, opt.dataset))
# model_s = create_model(opt.model_s, n_cls, opt.dataset, dropout=0.4)
# model_s = Wrapper(model_, opt)
model_s = copy.deepcopy(model_t[0])
criterion_cls = nn.CrossEntropyLoss()
criterion_div = DistillKL(opt.kd_T)
criterion_kd = DistillKL(opt.kd_T)
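    # DistillKL, as implemented in this codebase family (CRD/RFS-style repos),
    # computes Hinton-style soft-label distillation:
    #   loss = T^2 * KL( log_softmax(student / T) || softmax(teacher / T) )
    # with T = opt.kd_T; the T^2 factor keeps gradient magnitudes comparable
    # across temperatures.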
optimizer = optim.SGD(model_s.parameters(),
lr=opt.learning_rate,
momentum=opt.momentum,
weight_decay=opt.weight_decay)
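    # NOTE: `scheduler` is referenced in the training loop when --cosine is set
    # but was never created. A minimal sketch, assuming a single annealing cycle
    # over all epochs (the choice of T_max is a hypothetical default):
    if opt.cosine:
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.epochs)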
if torch.cuda.is_available():
for m in model_t:
m.cuda()
model_s.cuda()
criterion_cls = criterion_cls.cuda()
criterion_div = criterion_div.cuda()
criterion_kd = criterion_kd.cuda()
cudnn.benchmark = True
meta_test_acc = 0
meta_test_std = 0
# routine: supervised model distillation
for epoch in range(1, opt.epochs + 1):
if opt.cosine:
scheduler.step()
else:
adjust_learning_rate(epoch, opt, optimizer)
print("==> training...")
time1 = time.time()
train_acc, train_loss = train(epoch, train_loader, model_s, model_t , criterion_cls, criterion_div, criterion_kd, optimizer, opt)
time2 = time.time()
print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))
val_acc = 0
val_loss = 0
meta_val_acc = 0
meta_val_std = 0
# val_acc, val_acc_top5, val_loss = validate(val_loader, model_s, criterion_cls, opt)
# #evaluate
# start = time.time()
# meta_val_acc, meta_val_std = meta_test(model_s, meta_valloader)
# test_time = time.time() - start
# print('Meta Val Acc: {:.4f}, Meta Val std: {:.4f}, Time: {:.1f}'.format(meta_val_acc, meta_val_std, test_time))
#evaluate
start = time.time()
meta_test_acc, meta_test_std = meta_test(model_s, meta_testloader, use_logit=False)
test_time = time.time() - start
print('Meta Test Acc: {:.4f}, Meta Test std: {:.4f}, Time: {:.1f}'.format(meta_test_acc, meta_test_std, test_time))
# regular saving
if epoch % opt.save_freq == 0 or epoch==opt.epochs:
print('==> Saving...')
state = {
'epoch': epoch,
'model': model_s.state_dict(),
}
save_file = os.path.join(opt.save_folder, 'model_'+str(wandb.run.name)+'.pth')
torch.save(state, save_file)
#wandb saving
torch.save(state, os.path.join(wandb.run.dir, "model.pth"))
wandb.log({'epoch': epoch,
'Train Acc': train_acc,
'Train Loss':train_loss,
'Val Acc': val_acc,
'Val Loss':val_loss,
'Meta Test Acc': meta_test_acc,
'Meta Test std': meta_test_std,
'Meta Val Acc': meta_val_acc,
'Meta Val std': meta_val_std
})
#final report
generate_final_report(model_s, opt, wandb)
    # remove the output.log file so it is not synced by wandb
    output_log_file = os.path.join(wandb.run.dir, "output.log")
    if os.path.isfile(output_log_file):
        os.remove(output_log_file)
    else:
        print("Error: %s file not found" % output_log_file)
def train(epoch, train_loader, model_s, model_t , criterion_cls, criterion_div, criterion_kd, optimizer, opt):
"""One epoch training"""
model_s.train()
for m in model_t:
m.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
with tqdm(train_loader, total=len(train_loader)) as pbar:
for idx, data in enumerate(pbar):
inputs, targets, _ = data
data_time.update(time.time() - end)
inputs = inputs.float()
if torch.cuda.is_available():
inputs = inputs.cuda()
targets = targets.cuda()
batch_size = inputs.size()[0]
x = inputs
            x_90 = x.transpose(2, 3).flip(2)
            x_180 = x.flip(2).flip(3)
            x_270 = x.flip(2).transpose(2, 3)
            # inputs_aug = torch.cat((x_90, x_180, x_270),0)
            # sampled_inputs = inputs_aug[torch.randperm(3*batch_size)[:batch_size]]
            # NOTE: the original cat referenced x_180/x_270 while their
            # definitions were commented out (a NameError) and listed the
            # rotations out of order; restored so the slices below line up
            # with the d_90/d_180/d_270 names.
            inputs_all = torch.cat((x, x_90, x_180, x_270), 0)
# ===================forward=====================
with torch.no_grad():
(_,_,_,_, feat_t), (logit_t, rot_t) = model_t[0](inputs_all[:batch_size], rot=True)
(_,_,_,_, feat_s_all), (logit_s_all, rot_s_all) = model_s(inputs_all[:4*batch_size], rot=True)
loss_div = criterion_div(logit_s_all[:batch_size], logit_t[:batch_size])
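            # Rotation-consistency term: L2 distance between the logits of the
            # original view and its 90-degree rotation (the 180/270 terms are
            # currently disabled; the /3 in the total loss is a leftover from
            # when all three were summed).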
d_90 = logit_s_all[batch_size:2*batch_size] - logit_s_all[:batch_size]
loss_a = torch.mean(torch.sqrt(torch.sum((d_90)**2, dim=1)))
# d_180 = logit_s_all[2*batch_size:3*batch_size] - logit_s_all[:batch_size]
# loss_a += torch.mean(torch.sqrt(torch.sum((d_180)**2, dim=1)))
# d_270 = logit_s_all[3*batch_size:4*batch_size] - logit_s_all[:batch_size]
# loss_a += torch.mean(torch.sqrt(torch.sum((d_270)**2, dim=1)))
            if torch.isnan(loss_a).any():
break
else:
loss = loss_div + opt.gamma*loss_a / 3
acc1, acc5 = accuracy(logit_s_all[:batch_size], targets, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(acc1[0], inputs.size(0))
top5.update(acc5[0], inputs.size(0))
# ===================backward=====================
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ===================meters=====================
batch_time.update(time.time() - end)
end = time.time()
pbar.set_postfix({"Acc@1":'{0:.2f}'.format(top1.avg.cpu().numpy()),
"Acc@5":'{0:.2f}'.format(top5.avg.cpu().numpy(),2),
"Loss" :'{0:.2f}'.format(losses.avg,2),
})
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, losses.avg
def generate_final_report(model, opt, wandb):
opt.n_shots = 1
train_loader, val_loader, meta_testloader, meta_valloader, _ = get_dataloaders(opt)
#validate
meta_val_acc, meta_val_std = meta_test(model, meta_valloader, use_logit=True)
meta_val_acc_feat, meta_val_std_feat = meta_test(model, meta_valloader, use_logit=False)
#evaluate
meta_test_acc, meta_test_std = meta_test(model, meta_testloader, use_logit=True)
meta_test_acc_feat, meta_test_std_feat = meta_test(model, meta_testloader, use_logit=False)
print('Meta Val Acc : {:.4f}, Meta Val std: {:.4f}'.format(meta_val_acc, meta_val_std))
print('Meta Val Acc (feat): {:.4f}, Meta Val std (feat): {:.4f}'.format(meta_val_acc_feat, meta_val_std_feat))
print('Meta Test Acc: {:.4f}, Meta Test std: {:.4f}'.format(meta_test_acc, meta_test_std))
print('Meta Test Acc (feat): {:.4f}, Meta Test std (feat): {:.4f}'.format(meta_test_acc_feat, meta_test_std_feat))
wandb.log({'Final Meta Test Acc @1': meta_test_acc,
'Final Meta Test std @1': meta_test_std,
'Final Meta Test Acc (feat) @1': meta_test_acc_feat,
'Final Meta Test std (feat) @1': meta_test_std_feat,
'Final Meta Val Acc @1': meta_val_acc,
'Final Meta Val std @1': meta_val_std,
'Final Meta Val Acc (feat) @1': meta_val_acc_feat,
'Final Meta Val std (feat) @1': meta_val_std_feat
})
opt.n_shots = 5
train_loader, val_loader, meta_testloader, meta_valloader, _ = get_dataloaders(opt)
#validate
meta_val_acc, meta_val_std = meta_test(model, meta_valloader, use_logit=True)
meta_val_acc_feat, meta_val_std_feat = meta_test(model, meta_valloader, use_logit=False)
#evaluate
meta_test_acc, meta_test_std = meta_test(model, meta_testloader, use_logit=True)
meta_test_acc_feat, meta_test_std_feat = meta_test(model, meta_testloader, use_logit=False)
print('Meta Val Acc : {:.4f}, Meta Val std: {:.4f}'.format(meta_val_acc, meta_val_std))
print('Meta Val Acc (feat): {:.4f}, Meta Val std (feat): {:.4f}'.format(meta_val_acc_feat, meta_val_std_feat))
print('Meta Test Acc: {:.4f}, Meta Test std: {:.4f}'.format(meta_test_acc, meta_test_std))
print('Meta Test Acc (feat): {:.4f}, Meta Test std (feat): {:.4f}'.format(meta_test_acc_feat, meta_test_std_feat))
wandb.log({'Final Meta Test Acc @5': meta_test_acc,
'Final Meta Test std @5': meta_test_std,
'Final Meta Test Acc (feat) @5': meta_test_acc_feat,
'Final Meta Test std (feat) @5': meta_test_std_feat,
'Final Meta Val Acc @5': meta_val_acc,
'Final Meta Val std @5': meta_val_std,
'Final Meta Val Acc (feat) @5': meta_val_acc_feat,
'Final Meta Val std (feat) @5': meta_val_std_feat
})
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1a7ef9178fd407f25a697c82abad53c9092aff20 | e64c3c051f6b70aa0bab5be3508448578b1e0b15 | /core/orienteer/models/attitude/__init__.py | f338c8ab793aec114ca2cc88b491c2a28c974f30 | [] | no_license | davenquinn/Orienteer | c85c6cd16145ef545279c38bdb466856c02abd9d | 4f77575c12cf7a04ce70e3045630079ab8ebc5e0 | refs/heads/master | 2022-07-10T20:39:36.414259 | 2022-06-15T18:56:38 | 2022-06-15T18:56:38 | 172,312,115 | 0 | 0 | null | 2022-06-15T18:56:39 | 2019-02-24T08:09:26 | CoffeeScript | UTF-8 | Python | false | false | 8,078 | py | from __future__ import division
import numpy as N
from shapely.geometry import mapping
from sqlalchemy import func, select, CheckConstraint
from sqlalchemy.ext.associationproxy import association_proxy
import logging as log
from geoalchemy2.types import Geometry
from geoalchemy2.shape import from_shape, to_shape
from attitude.orientation import Orientation
from attitude.coordinates import centered
from attitude.error.axes import sampling_axes, noise_axes, angular_errors
from sqlalchemy.dialects.postgresql import array, ARRAY
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy import (
Column,
String,
Text,
Integer,
DateTime,
ForeignKey,
Boolean,
Float,
)
from .tag import Tag, attitude_tag
from ..feature import DatasetFeature, SRID
from ...database import db
from ..base import BaseModel
class Attitude(BaseModel):
__tablename__ = "attitude"
__mapper_args__ = dict(polymorphic_on="type", polymorphic_identity="single")
id = Column(Integer, primary_key=True)
type = Column(String)
feature_id = Column(
Integer,
ForeignKey("dataset_feature.id", ondelete="CASCADE", onupdate="CASCADE"),
)
feature = relationship(DatasetFeature)
strike = Column(Float)
dip = Column(Float)
rake = Column(Float)
correlation_coefficient = Column(Float)
principal_axes = Column(ARRAY(Float, dimensions=2, zero_indexes=True))
singular_values = Column(ARRAY(Float, zero_indexes=True))
hyperbolic_axes = Column(ARRAY(Float, zero_indexes=True))
n_samples = Column(Integer)
max_angular_error = Column(Float)
min_angular_error = Column(Float)
geometry = association_proxy("feature", "geometry")
center = Column(Geometry("POINTZ", srid=SRID))
valid = Column(Boolean)
member_of = Column(Integer, ForeignKey("attitude.id"))
group = relationship("AttitudeGroup", back_populates="measurements", remote_side=id)
_tags = relationship("Tag", secondary=attitude_tag, backref="features")
tags = association_proxy("_tags", "name")
__table_args__ = (
# Check that we don't define group membership and feature
# if isn't a group.
CheckConstraint("feature_id IS NOT NULL = (type = 'single')"),
# Groups should not be members of other groups
CheckConstraint(
"type IN ('group','collection') = (member_of IS NULL AND feature_id IS NULL)"
),
)
@property
def aligned_array(self):
"""
Array aligned with the principal components
of the orientation measurement.
"""
return N.array(self.feature.axis_aligned)
def error_ellipse(self):
from .plot import error_ellipse
return error_ellipse(self)
def plot_aligned(self):
from attitude.display import plot_aligned
return plot_aligned(self.pca())
@property
def array(self):
return self.feature.array
@property
def centered_array(self):
return centered(self.array)
def regress(self):
return self.pca
def pca(self):
"""
Initialize a principal components
analysis against the attitude.
"""
try:
return self.__pca
except AttributeError:
a = self.centered_array
ax = N.array(self.principal_axes) * N.array(self.singular_values)
self.__pca = Orientation(a, axes=ax)
return self.__pca
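    # (self.__pca is memoized; `principal_axes` scaled by `singular_values`
    # rebuilds the fitted axes, apparently letting Orientation skip refitting.)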
def __repr__(self):
def val(obj, s):
try:
return s.format(obj)
except ValueError:
return "unmeasured"
except TypeError:
return "unmeasured"
s = "{cls} {id}: strike {s}, dip {d}".format(
cls=self.__class__.__name__,
id=self.id,
s=val(self.strike, "{0:.1f}"),
d=val(self.dip, "{0:.1f}"),
)
return s
def serialize(self):
return dict(
type="Feature",
id=self.id,
tags=list(self.tags),
geometry=mapping(to_shape(self.feature.geometry)),
properties=dict(
r=self.correlation_coefficient,
center=mapping(to_shape(self.center)),
strike=self.strike,
dip=self.dip,
rake=self.rake,
n_samples=self.n_samples,
hyperbolic_axes=self.hyperbolic_axes,
axes=self.principal_axes,
),
)
def calculate(self):
self.center = func.ST_SetSRID(func.ST_MakePoint(*self.array.mean(axis=0)), SRID)
try:
pca = Orientation(self.centered_array)
except IndexError:
# If there aren't enough coordinates
return
except ValueError:
return
self.principal_axes = pca.axes.tolist()
self.singular_values = pca.singular_values.tolist()
# Really this is hyperbolic axis lengths
# should change API to reflect this distinction
self.hyperbolic_axes = sampling_axes(pca).tolist()
self.n_samples = pca.n
self.strike, self.dip, self.rake = pca.strike_dip_rake()
if self.dip == 90:
self.valid = False
a = angular_errors(self.hyperbolic_axes)
self.min_angular_error = 2 * N.degrees(a[0])
self.max_angular_error = 2 * N.degrees(a[1])
# Analogous to correlation coefficient for PCA
# but not exactly the same
self.correlation_coefficient = pca.explained_variance
def extract(self, *args, **kwargs):
self.feature.extract(*args, **kwargs)
def __str__(self):
return "Attitude {}".format(self.id)
class AttitudeGroup(Attitude):
__mapper_args__ = dict(polymorphic_on="type", polymorphic_identity="group")
same_plane = Column(Boolean, nullable=False, default=False, server_default="0")
measurements = relationship(Attitude)
def __init__(self, attitudes, **kwargs):
self.type = "group"
self.feature_id = None
self.member_of = None
self.measurements = attitudes
Attitude.__init__(self, **kwargs)
self.calculate()
def __str__(self):
return "Group {}".format(self.id)
# Add a property for geometry that creates a union
# of all component data
def __build_geometry(self):
"""
Un-executed query to find geometry from component
parts
"""
        union = func.ST_Union(DatasetFeature.geometry)
        return (
            # `srid.world` was undefined here; use the module-level SRID constant
            select([func.ST_SetSrid(union, SRID)])
            .select_from(DatasetFeature.__table__.join(Attitude))
            .where(Attitude.member_of == self.id)
            .group_by(Attitude.member_of)
        )
@hybrid_property
def geometry(self):
return db.session.execute(self.__build_geometry()).scalar()
    @geometry.expression
    def geometry(cls):
        # the bare `__build_geometry(cls)` call was a NameError (name mangling);
        # route the lookup through the class instead
        return cls.__build_geometry(cls)
@property
def centered_array(self):
if self.same_plane:
a = "array"
else:
a = "centered_array"
arrays = [getattr(m, a) for m in self.measurements]
if len(arrays) == 0:
return N.array([])
arr = N.concatenate(arrays)
if self.same_plane:
return centered(arr)
else:
return arr
@property
def array(self):
return N.concatenate([m.array for m in self.measurements])
def serialize(self):
return dict(
type="GroupedAttitude",
id=self.id,
strike=self.strike,
dip=self.dip,
tags=list(self.tags),
same_plane=self.same_plane,
r=self.correlation_coefficient,
n_samples=self.n_samples,
hyperbolic_axes=self.hyperbolic_axes,
axes=self.principal_axes,
measurements=[m.id for m in self.measurements],
)
| [
"[email protected]"
] | |
cb05c2490be60fcbc72afce15389303ae36f68d2 | 5f0f0865b7e4e2aa1867a88c138df56936c0b23b | /blocks/tests/test_pylearn2.py | 6317698fe0dd1326830e4961c5d482d72d3399e1 | [
"MIT"
] | permissive | jych/blocks | 2c709dcf042f4259981adcb54d9e3a48dac0c87f | 995cb7b67545b272877ecf9e90285cc71c9e6091 | refs/heads/master | 2021-01-09T06:51:34.967301 | 2014-11-27T04:12:40 | 2014-11-27T04:12:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | import logging
import numpy
import pylearn2
from pylearn2.space import VectorSpace
from pylearn2.testing.datasets import random_dense_design_matrix
from pylearn2.train import Train
from pylearn2.training_algorithms.sgd import SGD
from blocks.bricks import Sigmoid, MLP
from blocks.cost import SquaredError
from blocks.initialization import IsotropicGaussian, Constant
from blocks.pylearn2 import BlocksModel, BlocksCost
def test_pylearn2_training():
# Construct the model
mlp = MLP(activations=[Sigmoid(), Sigmoid()], dims=[784, 100, 784],
weights_init=IsotropicGaussian(), biases_init=Constant(0.01))
mlp.initialize()
cost = SquaredError()
block_cost = BlocksCost(cost)
block_model = BlocksModel(mlp, (VectorSpace(dim=784), 'features'))
# Load the data
rng = numpy.random.RandomState(14)
train_dataset = random_dense_design_matrix(rng, 1024, 784, 10)
valid_dataset = random_dense_design_matrix(rng, 1024, 784, 10)
# Silence Pylearn2's logger
logger = logging.getLogger(pylearn2.__name__)
logger.setLevel(logging.ERROR)
# Training algorithm
sgd = SGD(learning_rate=0.01, cost=block_cost, batch_size=128,
monitoring_dataset=valid_dataset)
train = Train(train_dataset, block_model, algorithm=sgd)
train.main_loop(time_budget=3)
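if __name__ == '__main__':
    # convenience entry point for running this test directly
    # (not part of the original suite)
    test_pylearn2_training()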
| [
"[email protected]"
] | |
7c4bb7211655df9f643a3c3968ccecb8f9c5c2bd | 700b0528e949d7eacb6846ee7579e912b854fd51 | /TrustPot/settings.py | f56c0223ba3afd1780dabd7e621d0a5a53a18360 | [] | no_license | nucleoosystem/TrustPot | 340f33a46757fa6dbd98eae6be248f14ea8ca099 | 5e72bffdcd0f4232e98638387c794e7aaf684c9c | refs/heads/master | 2020-06-18T17:47:40.154449 | 2015-02-18T03:27:43 | 2015-02-18T03:27:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,268 | py | """
Django settings for TrustPot project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9ni!$$2aw4)il(+6xb2xzmboivmza(1(hugs!$^h(r!7bg(5$c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'reversion',
'translation'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS += (
"django.core.context_processors.request",
)
ROOT_URLCONF = 'TrustPot.urls'
WSGI_APPLICATION = 'TrustPot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = 'static/'
STATIC_URL = '/static/'
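# NOTE: for `collectstatic` in production, STATIC_ROOT is normally an absolute
# path, e.g. os.path.join(BASE_DIR, 'static'); the relative 'static/' above
# resolves against the process working directory.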
| [
"[email protected]"
] | |
b561709b47b0de856671dd1a8fa6d77dd686e849 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5690574640250880_0/Python/EnTerr/MinesweeperMaster.py | 889a6773a64d627ce18e10efa7cc9d0accbd413b | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,313 | py | #
# Google Code Jam 2014
# Round 0: C. Minesweeper Master
# submission by EnTerr
#
'''
Input
The first line of the input gives the number of test cases, T. T lines follow.
Each line contains three space-separated integers: R, C, and M (Rows, Columns, Mines).
Output
For each test case, output a line containing "Case #x:", where x is the test case number.
On the following R lines, output the board configuration with C characters per line,
using '.' to represent an empty cell, '*' to represent a cell that contains a mine,
and 'c' to represent the clicked cell. If there is no possible configuration,
then instead of the grid, output a line with "Impossible" instead.
If there are multiple possible configurations, output any one of them.
Limits
0 <= M < R * C.
Small dataset
1 <= T <= 230.
1 <= R, C <= 5.
Large dataset
1 <= T <= 140.
1 <= R, C <= 50.
Sample
---Input
5
5 5 23
3 1 1
2 2 1
4 7 3
10 10 82
---Output
Case #1:
Impossible
Case #2:
c
.
*
Case #3:
Impossible
Case #4:
......*
.c....*
.......
..*....
Case #5:
**********
**********
**********
****....**
***.....**
***.c...**
***....***
**********
**********
**********
'''
import sys
from time import clock
f = open(sys.argv[1])
def input(): return f.readline().strip()
from itertools import product, combinations
def genBoards(R, C, M):
#extra empty/boundary row added at the end (also reached as the one before [0])
#each row has extra empty/boundary element at the end
for mines in combinations( product(range(R), range(C)), M):
board = [ ['.'] * C + [''] for _ in range(R) ]
for row, col in mines:
board[row][col] = '*'
yield board + [[''] * (C+1)]
pass
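# genBoards enumerates every C(R*C, M) mine placement, so oneClickSolution is
# pure brute force; that is fine for the small-dataset limits (R, C <= 5) but
# hopeless for the large dataset.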
def oneClickSolution(R, C, M):
for bd in genBoards(R, C, M):
#count number of mines
minTile = 10
for r in range(R):
for c in range(C):
if bd[r][c] == '.':
n = sum(bd[r+i][c+j]=='*' for i in (-1,0,1) for j in (-1,0,1))
                    bd[r][c] = str(n)
if n <= minTile:
minTile = n
minR, minC = r, c
if minTile < 10:
            # flood-fill from the minimum-count cell: does it open all safe cells?
queue = [ (minR, minC) ]
nOpen = 0
while queue:
r,c = queue.pop()
if bd[r][c] == '0':
for i in -1,0,1:
for j in -1,0,1:
if i or j: # we don't add the one we popped back
queue.append( (r+i, c+j) )
if bd[r][c] not in '.*':
bd[r][c] = '.'
nOpen += 1
if M + nOpen == R*C:
bd[minR][minC] = 'c'
return '\n'.join( ''.join(row[:-1]) for row in bd[:-1] )
return 'Impossible'
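# Quick sanity check (hypothetical interactive use; any valid board is accepted,
# so the mine placement need not match the sample output literally):
#   >>> print oneClickSolution(3, 1, 1)
#   *
#   .
#   c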
clk = clock()
for caseNo in xrange(1, int(input())+1):
R, C, M = map(int, input().split())
print >>sys.stderr, caseNo, R, C, M #, oneClickSolution(R, C, M)<>'Impossible'
print 'Case #%d:' % caseNo
print oneClickSolution(R, C, M)
print >>sys.stderr, 'time= %.1f seconds' % (clock()-clk )
| [
"[email protected]"
] | |
ef52eee784648cbe24760db5f85f64e78d493acf | 625f2f86f2b2e07cb35204d9b3232427bf462a09 | /data/HIRun2016PA/PASingleMuon_PARun2016C_PromptReco_v1_ZMMfilter_PBP/runForestAOD_pPb_DATA_80X_ZMMfilter.py | d609a491d97a0958a6cd6cdf1e3a5a0e685a95d7 | [] | no_license | ttrk/production | abb84c423a076fd9966276b7ed4350936c755e0b | f8a64c9c38de215802799365f0f7a99e1ee78276 | refs/heads/master | 2023-02-08T23:48:56.355141 | 2023-01-26T08:46:22 | 2023-01-26T08:46:22 | 52,877,406 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,606 | py | ### HiForest Configuration
# Collisions: pPb
# Type: Data
# Input: AOD
import FWCore.ParameterSet.Config as cms
process = cms.Process('HiForest')
process.options = cms.untracked.PSet()
#####################################################################################
# HiForest labelling info
#####################################################################################
process.load("HeavyIonsAnalysis.JetAnalysis.HiForest_cff")
process.HiForest.inputLines = cms.vstring("HiForest V3",)
import subprocess
version = subprocess.Popen(["(cd $CMSSW_BASE/src && git describe --tags)"], stdout=subprocess.PIPE, shell=True).stdout.read()
if version == '':
version = 'no git info'
process.HiForest.HiForestVersion = cms.string(version)
#####################################################################################
# Input source
#####################################################################################
process.source = cms.Source("PoolSource",
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
fileNames = cms.untracked.vstring(
# 'file:/afs/cern.ch/user/k/kjung/run2Validation/HLTchecks/CMSSW_8_0_22/src/pPb_5TeVEpos_RECO.root'
"root://cms-xrd-global.cern.ch//store/hidata/PARun2016C/PASingleMuon/AOD/PromptReco-v1/000/285/517/00000/04D2735E-28B0-E611-9AE6-02163E014386.root"
)
)
# Number of events we want to process, -1 = all events
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(5000))
#####################################################################################
# Load Global Tag, Geometry, etc.
#####################################################################################
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.Geometry.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load("CondCore.DBCommon.CondDBCommon_cfi")
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '80X_mcRun2_asymptotic_2016_miniAODv2_v1', '')
process.HiForest.GlobalTagLabel = process.GlobalTag.globaltag
# Customization
from HeavyIonsAnalysis.Configuration.CommonFunctions_cff import overrideJEC_pPb8TeV
process = overrideJEC_pPb8TeV(process)
process.GlobalTag.toGet.extend([
cms.PSet(record = cms.string("HeavyIonRcd"),
#tag = cms.string("CentralityTable_HFtowersPlusTrunc200_EPOS8TeV_v80x01_mc"),
tag = cms.string("CentralityTable_HFtowersPlusTrunc200_EPOS5TeV_v80x01_mc"),
connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS"),
label = cms.untracked.string("HFtowersPlusTruncEpos")
),
cms.PSet(record = cms.string("L1TGlobalPrescalesVetosRcd"),
tag = cms.string("L1TGlobalPrescalesVetos_Stage2v0_hlt"),
connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS")
)
])
#####################################################################################
# Define tree output
#####################################################################################
process.TFileService = cms.Service("TFileService",
fileName=cms.string("HiForestAOD_ZMMfilter.root"))
#####################################################################################
# Additional Reconstruction and Analysis: Main Body
#####################################################################################
####################################################################################
#############################
# Jets
#############################
process.load("HeavyIonsAnalysis.JetAnalysis.FullJetSequence_DataPPb")
#####################################################################################
############################
# Event Analysis
############################
## temporary centrality bin
process.load("RecoHI.HiCentralityAlgos.CentralityBin_cfi")
process.centralityBin.Centrality = cms.InputTag("pACentrality")
process.centralityBin.centralityVariable = cms.string("HFtowersPlusTrunc")
#process.centralityBin.nonDefaultGlauberModel = cms.string("Hydjet_Drum")
process.centralityBin.nonDefaultGlauberModel = cms.string("Epos")
process.load('HeavyIonsAnalysis.EventAnalysis.hltanalysis_cff')
process.load('HeavyIonsAnalysis.EventAnalysis.hltobject_pPb_cfi')
process.load('HeavyIonsAnalysis.EventAnalysis.hievtanalyzer_data_cfi') #use data version to avoid PbPb MC
process.hiEvtAnalyzer.Vertex = cms.InputTag("offlinePrimaryVertices")
process.hiEvtAnalyzer.doCentrality = cms.bool(True)
process.hiEvtAnalyzer.CentralitySrc = cms.InputTag("pACentrality")
process.hiEvtAnalyzer.CentralityBinSrc = cms.InputTag("centralityBin","HFtowersPlusTrunc")
process.hiEvtAnalyzer.doEvtPlane = cms.bool(False)
process.hiEvtAnalyzer.doMC = cms.bool(True) #general MC info
process.hiEvtAnalyzer.doHiMC = cms.bool(False) #HI specific MC info
process.load('HeavyIonsAnalysis.EventAnalysis.runanalyzer_cff')
process.load("HeavyIonsAnalysis.JetAnalysis.pfcandAnalyzer_pp_cfi")
process.pfcandAnalyzer.pfPtMin = 0
process.pfcandAnalyzer.pfCandidateLabel = cms.InputTag("particleFlow")
process.pfcandAnalyzer.doVS = cms.untracked.bool(False)
process.pfcandAnalyzer.doUEraw_ = cms.untracked.bool(False)
#####################################################################################
#########################
# Track Analyzer
#########################
process.load('HeavyIonsAnalysis.JetAnalysis.ExtraTrackReco_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.TrkAnalyzers_cff')
# Use this instead for track corrections
## process.load('HeavyIonsAnalysis.JetAnalysis.TrkAnalyzers_Corr_cff')
#####################################################################################
#####################
# photons
######################
process.load('HeavyIonsAnalysis.PhotonAnalysis.ggHiNtuplizer_cfi')
process.ggHiNtuplizer.gsfElectronLabel = cms.InputTag("gedGsfElectrons")
process.ggHiNtuplizer.recoPhotonHiIsolationMap = cms.InputTag('photonIsolationHIProducerpp')
process.ggHiNtuplizer.VtxLabel = cms.InputTag("offlinePrimaryVertices")
process.ggHiNtuplizer.particleFlowCollection = cms.InputTag("particleFlow")
process.ggHiNtuplizer.doVsIso = cms.bool(False)
process.ggHiNtuplizer.doElectronVID = cms.bool(True)
process.ggHiNtuplizerGED = process.ggHiNtuplizer.clone(recoPhotonSrc = cms.InputTag('gedPhotons'),
recoPhotonHiIsolationMap = cms.InputTag('photonIsolationHIProducerppGED'))
####################################################################################
#####################
# Electron ID
#####################
from PhysicsTools.SelectorUtils.tools.vid_id_tools import *
# turn on VID producer, indicate data format to be processed
# DataFormat.AOD or DataFormat.MiniAOD
dataFormat = DataFormat.AOD
switchOnVIDElectronIdProducer(process, dataFormat)
# define which IDs we want to produce. Check here https://twiki.cern.ch/twiki/bin/viewauth/CMS/CutBasedElectronIdentificationRun2#Recipe_for_regular_users_for_7_4
my_id_modules = ['RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Spring15_25ns_V1_cff']
#add them to the VID producer
for idmod in my_id_modules:
setupAllVIDIdsInModule(process,idmod,setupVIDElectronSelection)
#####################################################################################
#####################
# Rechit analyzer
#####################
process.load('HeavyIonsAnalysis.JetAnalysis.rechitanalyzer_pp_cfi')
process.rechitanalyzer.doVS = cms.untracked.bool(False)
process.rechitanalyzer.doEcal = cms.untracked.bool(False)
process.rechitanalyzer.doHcal = cms.untracked.bool(False)
process.rechitanalyzer.doHF = cms.untracked.bool(False)
process.rechitanalyzer.JetSrc = cms.untracked.InputTag("ak4CaloJets")
process.pfTowers.JetSrc = cms.untracked.InputTag("ak4CaloJets")
#####################
# New rho analyzer
#####################
process.load('HeavyIonsAnalysis.JetAnalysis.hiFJRhoAnalyzer_cff')
#####################
# Muon Analyzer
#####################
process.load('HeavyIonsAnalysis.MuonAnalysis.hltMuTree_cfi')
process.hltMuTree.vertices = cms.InputTag("offlinePrimaryVertices")
#########################
# Main analysis list
#########################
process.ana_step = cms.Path(process.hltanalysis *
process.hltobject *
process.centralityBin *
process.hiEvtAnalyzer *
process.jetSequences +
process.egmGsfElectronIDSequence + #Should be added in the path for VID module
process.ggHiNtuplizer +
process.ggHiNtuplizerGED +
process.hiFJRhoAnalyzer +
process.pfcandAnalyzer +
process.hltMuTree +
process.HiForest +
process.trackSequencesPP +
process.runAnalyzer +
process.rechitanalyzer
)
#####################################################################################
#########################
# Event Selection
#########################
process.load('HeavyIonsAnalysis.JetAnalysis.EventSelection_cff')
process.pHBHENoiseFilterResultProducer = cms.Path( process.HBHENoiseFilterResultProducer )
process.HBHENoiseFilterResult = cms.Path(process.fHBHENoiseFilterResult)
process.HBHENoiseFilterResultRun1 = cms.Path(process.fHBHENoiseFilterResultRun1)
process.HBHENoiseFilterResultRun2Loose = cms.Path(process.fHBHENoiseFilterResultRun2Loose)
process.HBHENoiseFilterResultRun2Tight = cms.Path(process.fHBHENoiseFilterResultRun2Tight)
process.HBHEIsoNoiseFilterResult = cms.Path(process.fHBHEIsoNoiseFilterResult)
process.PAprimaryVertexFilter = cms.EDFilter("VertexSelector",
src = cms.InputTag("offlinePrimaryVertices"),
cut = cms.string("!isFake && abs(z) <= 25 && position.Rho <= 2 && tracksSize >= 2"),
filter = cms.bool(True), # otherwise it won't filter the events
)
process.NoScraping = cms.EDFilter("FilterOutScraping",
applyfilter = cms.untracked.bool(True),
debugOn = cms.untracked.bool(False),
numtrack = cms.untracked.uint32(10),
thresh = cms.untracked.double(0.25)
)
process.pPAprimaryVertexFilter = cms.Path(process.PAprimaryVertexFilter)
process.pBeamScrapingFilter=cms.Path(process.NoScraping)
process.load("HeavyIonsAnalysis.VertexAnalysis.PAPileUpVertexFilter_cff")
process.pVertexFilterCutG = cms.Path(process.pileupVertexFilterCutG)
process.pVertexFilterCutGloose = cms.Path(process.pileupVertexFilterCutGloose)
process.pVertexFilterCutGtight = cms.Path(process.pileupVertexFilterCutGtight)
process.pVertexFilterCutGplus = cms.Path(process.pileupVertexFilterCutGplus)
process.pVertexFilterCutE = cms.Path(process.pileupVertexFilterCutE)
process.pVertexFilterCutEandG = cms.Path(process.pileupVertexFilterCutEandG)
process.pAna = cms.EndPath(process.skimanalysis)
# Customization
# KT : add HLT filtering
import HLTrigger.HLTfilters.hltHighLevel_cfi
process.hltfilter = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()
process.hltfilter.HLTPaths = ["HLT_*PAL*Mu*"]
# KT : add filtering based on object
process.muonSelector = cms.EDFilter("MuonSelector",
src = cms.InputTag("muons"),
cut = cms.string("(isTrackerMuon && isGlobalMuon) && pt > 5."),
filter = cms.bool(True)
)
process.muonCountFilter = cms.EDFilter("MuonCountFilter",
src = cms.InputTag("muonSelector"),
minNumber = cms.uint32(2)
)
process.dimuonCand = cms.EDProducer("CandViewShallowCloneCombiner",
checkCharge = cms.bool(True),
cut = cms.string(' 20 < mass < 140'),
decay = cms.string("muonSelector@+ muonSelector@-")
)
process.dimuonCandFilter = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("dimuonCand"),
minNumber = cms.uint32(1)
)
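# The selector/count/candidate chain above keeps events with at least one
# opposite-charge pair of global+tracker muons (pT > 5 GeV) whose invariant
# mass falls in a loose Z window, 20 < m(mumu) < 140 GeV.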
process.filterSequence = cms.Sequence(
process.hltfilter *
process.muonSelector *
process.muonCountFilter *
process.dimuonCand *
process.dimuonCandFilter
)
# process.filterstep = cms.Path(process.filterSequence)
process.superFilterPath = cms.Path(process.filterSequence)
process.skimanalysis.superFilters = cms.vstring("superFilterPath")
## filter all paths with the production filter sequence
for path in process.paths:
getattr(process,path)._seq = process.filterSequence * getattr(process,path)._seq
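# To test locally (assuming a CMSSW_8_0_X area with these packages checked out):
#   cmsRun runForestAOD_pPb_DATA_80X_ZMMfilter.py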
| [
"[email protected]"
] |