| column | dtype | range / values |
| --- | --- | --- |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | lengths 1 to 132 |
3cff9791f0f876f817bef6ab82efff8e16924526 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4030/840004030.py | ea51f50d52dde6357e28745d6a0cd44e12f4297f | []
| no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 7,084 | py |
from bots.botsconfig import *
from records004030 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'RQ',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BQT', MIN: 1, MAX: 1},
{ID: 'CUR', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'TAX', MIN: 0, MAX: 3},
{ID: 'FOB', MIN: 0, MAX: 99999},
{ID: 'CTP', MIN: 0, MAX: 99999},
{ID: 'PAM', MIN: 0, MAX: 10},
{ID: 'CSH', MIN: 0, MAX: 25},
{ID: 'SAC', MIN: 0, MAX: 25},
{ID: 'ITD', MIN: 0, MAX: 5},
{ID: 'DIS', MIN: 0, MAX: 20},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'LIN', MIN: 0, MAX: 5},
{ID: 'PID', MIN: 0, MAX: 200},
{ID: 'MEA', MIN: 0, MAX: 40},
{ID: 'PWK', MIN: 0, MAX: 25},
{ID: 'PKG', MIN: 0, MAX: 200},
{ID: 'TD1', MIN: 0, MAX: 2},
{ID: 'TD5', MIN: 0, MAX: 12},
{ID: 'TD3', MIN: 0, MAX: 12},
{ID: 'TD4', MIN: 0, MAX: 5},
{ID: 'MAN', MIN: 0, MAX: 10},
{ID: 'RRA', MIN: 0, MAX: 100},
{ID: 'CTB', MIN: 0, MAX: 99999},
{ID: 'LDT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'MTX', MIN: 0, MAX: 99999},
]},
{ID: 'N9', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'PWK', MIN: 0, MAX: 99999},
{ID: 'EFI', MIN: 0, MAX: 99999},
]},
{ID: 'N1', MIN: 0, MAX: 10000, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'PER', MIN: 0, MAX: 99999},
{ID: 'SI', MIN: 0, MAX: 99999},
{ID: 'FOB', MIN: 0, MAX: 1},
{ID: 'TD1', MIN: 0, MAX: 2},
{ID: 'TD5', MIN: 0, MAX: 12},
{ID: 'TD3', MIN: 0, MAX: 12},
{ID: 'TD4', MIN: 0, MAX: 5},
{ID: 'PKG', MIN: 0, MAX: 200},
{ID: 'RRA', MIN: 0, MAX: 25},
]},
{ID: 'SPI', MIN: 0, MAX: 1, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'N1', MIN: 0, MAX: 20, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 20},
{ID: 'G61', MIN: 0, MAX: 1},
{ID: 'MTX', MIN: 0, MAX: 99999},
]},
{ID: 'CB1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 20},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'LDT', MIN: 0, MAX: 1},
{ID: 'MTX', MIN: 0, MAX: 99999},
]},
]},
{ID: 'PCT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'AMT', MIN: 0, MAX: 99999},
]},
{ID: 'ADV', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'MTX', MIN: 0, MAX: 99999},
]},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 99999},
]},
{ID: 'PO1', MIN: 1, MAX: 100000, LEVEL: [
{ID: 'LIN', MIN: 0, MAX: 99999},
{ID: 'G53', MIN: 0, MAX: 1},
{ID: 'CUR', MIN: 0, MAX: 1},
{ID: 'CN1', MIN: 0, MAX: 1},
{ID: 'PO3', MIN: 0, MAX: 25},
{ID: 'CTP', MIN: 0, MAX: 99999},
{ID: 'PAM', MIN: 0, MAX: 10},
{ID: 'CTB', MIN: 0, MAX: 99999},
{ID: 'MEA', MIN: 0, MAX: 40},
{ID: 'PID', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'MEA', MIN: 0, MAX: 10},
]},
{ID: 'PWK', MIN: 0, MAX: 25},
{ID: 'PKG', MIN: 0, MAX: 200},
{ID: 'PO4', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'SAC', MIN: 0, MAX: 25},
{ID: 'IT8', MIN: 0, MAX: 25},
{ID: 'CSH', MIN: 0, MAX: 99999},
{ID: 'ITD', MIN: 0, MAX: 2},
{ID: 'DIS', MIN: 0, MAX: 20},
{ID: 'TAX', MIN: 0, MAX: 3},
{ID: 'FOB', MIN: 0, MAX: 99999},
{ID: 'SDQ', MIN: 0, MAX: 50},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'FST', MIN: 0, MAX: 99999},
{ID: 'TD1', MIN: 0, MAX: 1},
{ID: 'TD5', MIN: 0, MAX: 12},
{ID: 'TD3', MIN: 0, MAX: 12},
{ID: 'TD4', MIN: 0, MAX: 5},
{ID: 'MAN', MIN: 0, MAX: 10},
{ID: 'RRA', MIN: 0, MAX: 25},
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'SPI', MIN: 0, MAX: 99999},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 99999},
]},
{ID: 'QTY', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'SI', MIN: 0, MAX: 99999},
]},
{ID: 'SCH', MIN: 0, MAX: 104, LEVEL: [
{ID: 'TD1', MIN: 0, MAX: 2},
{ID: 'TD5', MIN: 0, MAX: 12},
{ID: 'TD3', MIN: 0, MAX: 12},
{ID: 'TD4', MIN: 0, MAX: 5},
{ID: 'REF', MIN: 0, MAX: 99999},
]},
{ID: 'LDT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'LM', MIN: 0, MAX: 1, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 1},
]},
]},
{ID: 'SLN', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'PID', MIN: 0, MAX: 1000},
{ID: 'ADV', MIN: 0, MAX: 99999},
{ID: 'QTY', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'SI', MIN: 0, MAX: 99999},
]},
]},
{ID: 'N9', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'MTX', MIN: 0, MAX: 99999},
{ID: 'PWK', MIN: 0, MAX: 99999},
{ID: 'EFI', MIN: 0, MAX: 99999},
]},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'SI', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'FOB', MIN: 0, MAX: 1},
{ID: 'SCH', MIN: 0, MAX: 200},
{ID: 'TD1', MIN: 0, MAX: 2},
{ID: 'TD5', MIN: 0, MAX: 12},
{ID: 'TD3', MIN: 0, MAX: 12},
{ID: 'TD4', MIN: 0, MAX: 5},
{ID: 'PKG', MIN: 0, MAX: 200},
{ID: 'RRA', MIN: 0, MAX: 25},
{ID: 'CTP', MIN: 0, MAX: 1},
{ID: 'PAM', MIN: 0, MAX: 10},
{ID: 'LDT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'MAN', MIN: 0, MAX: 10},
{ID: 'QTY', MIN: 0, MAX: 5},
{ID: 'MTX', MIN: 0, MAX: 99999},
]},
]},
{ID: 'PCT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'AMT', MIN: 0, MAX: 99999},
]},
]},
{ID: 'CTT', MIN: 0, MAX: 1},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
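# Reading the structure above (an explanatory note, not part of the original
# grammar file): each dict describes one X12 segment, where ID is the segment
# tag, MIN/MAX bound how many occurrences are allowed, and LEVEL nests the
# segments of a loop under the segment that opens it -- the convention used
# throughout the bots-grammars repository.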
| [
"[email protected]"
]
| |
fc1266738f799c65b9d4f71e6846f6b72d00fc74 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/recoveryservices/get_replication_migration_item.py | d1e403b2a6e74c291c8bcdb23200e48a30cd7dcb | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 4,452 | py |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetReplicationMigrationItemResult',
'AwaitableGetReplicationMigrationItemResult',
'get_replication_migration_item',
]
@pulumi.output_type
class GetReplicationMigrationItemResult:
"""
Migration item.
"""
def __init__(__self__, id=None, location=None, name=None, properties=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource Location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.MigrationItemPropertiesResponse':
"""
The migration item properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource Type
"""
return pulumi.get(self, "type")
class AwaitableGetReplicationMigrationItemResult(GetReplicationMigrationItemResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetReplicationMigrationItemResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
type=self.type)
def get_replication_migration_item(fabric_name: Optional[str] = None,
migration_item_name: Optional[str] = None,
protection_container_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReplicationMigrationItemResult:
"""
Migration item.
API Version: 2018-07-10.
:param str fabric_name: Fabric unique name.
:param str migration_item_name: Migration item name.
:param str protection_container_name: Protection container name.
:param str resource_group_name: The name of the resource group where the recovery services vault is present.
:param str resource_name: The name of the recovery services vault.
"""
__args__ = dict()
__args__['fabricName'] = fabric_name
__args__['migrationItemName'] = migration_item_name
__args__['protectionContainerName'] = protection_container_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:recoveryservices:getReplicationMigrationItem', __args__, opts=opts, typ=GetReplicationMigrationItemResult).value
return AwaitableGetReplicationMigrationItemResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
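# A minimal usage sketch (the resource names below are hypothetical, not part
# of this generated module; the parameters come from the signature above):
#
#   item = get_replication_migration_item(
#       fabric_name='fabric1',
#       migration_item_name='vm1',
#       protection_container_name='container1',
#       resource_group_name='rg1',
#       resource_name='vault1')
#   pulumi.export('migrationItemId', item.id)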
| [
"[email protected]"
]
| |
d793f87b0bef4eaaef51634ad0c4592d4a02d5ee | dd573ed68682fd07da08143dd09f6d2324f51345 | /daily_study/ProblemSolving/5430_AC.py | ee3166173aa8bebbfdd26b513e4d008af4aec83f | []
| no_license | chelseashin/My-Algorithm | 0f9fb37ea5c6475e8ff6943a5fdaa46f0cd8be61 | db692e158ebed2d607855c8e554fd291c18acb42 | refs/heads/master | 2021-08-06T12:05:23.155679 | 2021-07-04T05:07:43 | 2021-07-04T05:07:43 | 204,362,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py |
from sys import stdin
input = stdin.readline
def solve(numbers):
    rcnt, dcnt = 0, 0
    for cmd in p:
        if cmd == "R":
            rcnt += 1
        elif cmd == "D":
            try:
                if rcnt % 2 == 0:
                    dcnt += 1  # drop from the front later
                else:
                    numbers.pop()  # drop from the back right away
            except IndexError:
                return "error"
    if len(numbers) < dcnt:
        return "error"
    if rcnt % 2:
        # reverse a copy: calling .reverse() on a slice mutates a temporary
        # list and would leave numbers unchanged
        numbers = numbers[dcnt:][::-1]
    else:
        numbers = numbers[dcnt:]
    return "[" + ",".join(numbers) + "]"
T = int(input())
for _ in range(T):
p = input().strip()
n = int(input())
numbers = input().strip().split(',')
numbers[0] = numbers[0][1:]
numbers[-1] = numbers[-1][:-1]
    print(solve(numbers))
| [
"[email protected]"
]
| |
0db7f68eab74751e0b8f455e123cefcc363b17d2 | 470eb6b6af669ae037d1aaaf28c7169d906ca25e | /src/split_read_matrices_by_plate.py | 8292d94d002a13ad6308b38113fa0d8197f0494f | []
| no_license | wxgao33/CSI-Microbes-analysis | 5bddd6cc4ffb7ec2dca833231a4e966b92f348a1 | 273b41a20c4c13af0efe2a888821b0cfc5e0c189 | refs/heads/master | 2023-04-12T00:59:02.423797 | 2021-05-18T15:03:11 | 2021-05-18T15:03:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py |
import pandas as pd
read_df = pd.read_csv(snakemake.input[0], sep="\t", index_col=0)
metadata_df = pd.read_csv(snakemake.input[1], sep="\t")
metadata_df = metadata_df.loc[metadata_df["plate"].astype("str") == snakemake.wildcards["plate"]]
read_df = read_df[metadata_df["cell"]]
read_df.to_csv(snakemake.output[0], sep="\t")
metadata_df.to_csv(snakemake.output[1], sep="\t", index=False)
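# A sketch of the Snakefile rule this script assumes (rule and file names are
# hypothetical; the script itself only relies on snakemake.input, .output and
# .wildcards being populated by the workflow engine):
#
#   rule split_by_plate:
#       input: "read_matrix.tsv", "metadata.tsv"
#       output: "reads_{plate}.tsv", "metadata_{plate}.tsv"
#       script: "src/split_read_matrices_by_plate.py"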
| [
"[email protected]"
]
| |
0e82ee79e918a29ba71b84fda1e05d64b7d61662 | 88509a8ce62a22acc0639c683900d5d0cb8d69e7 | /Day22/orm/app/views.py | 034ffce2d9c3976faf3424c9b86052e00b42b8fe | []
| no_license | pytutorial/py2104 | 8b0238ab6f6d2f5395aee5fbe1f4aff03b819cd3 | 48b36d6b1f40730ef2747c310e70fb6997eda388 | refs/heads/main | 2023-09-03T16:55:02.285158 | 2021-10-20T05:24:31 | 2021-10-20T05:24:31 | 391,613,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py |
from django.shortcuts import HttpResponse
from .models import *
import json
# Create your views here.
def get_product_by_code(request, code):
product = Product.objects.get(code=code)
data = {
'id': product.id,
'name': product.name,
'code': product.code,
'description': product.description,
'price': product.price
}
return HttpResponse(json.dumps(data))
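# A hedged sketch of the URLconf wiring these views assume (the route strings
# are hypothetical; only the view names come from this file):
#
#   urlpatterns = [
#       path('product/<str:code>/', views.get_product_by_code),
#       path('product/search/', views.search_product),
#       path('customer/<str:phone>/', views.get_customer_by_phone),
#       path('customer/search/', views.search_customer),
#   ]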
def search_product(request):
input_data = request.GET
keyword = input_data.get('keyword', '')
product_list = Product.objects.filter(
name__icontains=keyword)
result = [product.name for product in product_list]
return HttpResponse(','.join(result))
def get_customer_by_phone(request, phone):
customer = Customer.objects.get(phone=phone)
return HttpResponse(customer.name)
def search_customer(request):
input_data = request.GET
keyword = input_data.get('keyword', '')
print('keyword=', keyword)
customer_list = Customer.objects.filter(name__icontains=keyword)
print('customer_list=', customer_list)
result = ','.join([customer.name for customer in customer_list])
    return HttpResponse(result)
| [
"[email protected]"
]
| |
9b2e42ad3619a8aa8d9e99c6a2b3c8045609e66e | 475d1b83b77e2730b53722f0d8d11b070f97018a | /travelapp/migrations/backup/0013_auto_20210221_1309.py | 6de634dbf85cd70b3b448828cfa895fc3a0f6706 | [
"MIT"
]
| permissive | Gwellir/my-region | b651284ee4d4ec7ec892bb78a7ce3444c833d035 | baacb7f54a19c55854fd068d6e38b3048a03d13d | refs/heads/main | 2023-04-20T17:31:33.040419 | 2021-05-17T13:35:38 | 2021-05-17T13:35:38 | 336,533,029 | 0 | 1 | MIT | 2021-05-17T13:35:39 | 2021-02-06T12:31:08 | Python | UTF-8 | Python | false | false | 1,558 | py |
# Generated by Django 3.1.6 on 2021-02-21 10:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('travelapp', '0012_trip_subbed'),
]
operations = [
migrations.RemoveField(
model_name='route',
name='base_price_currency',
),
migrations.RemoveField(
model_name='trip',
name='price_currency',
),
migrations.AlterField(
model_name='route',
name='base_price',
field=models.DecimalField(decimal_places=2, default=0, max_digits=7, verbose_name='Ориентировочная стоимость прохождения маршрута'),
),
migrations.AlterField(
model_name='trip',
name='price',
field=models.DecimalField(decimal_places=2, max_digits=7, verbose_name='Стоимость прохождения маршрута'),
),
migrations.CreateModel(
name='TripOption',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='Наименование опции')),
('price', models.DecimalField(decimal_places=2, default=0, max_digits=5, verbose_name='Стоимость опции')),
('trip', models.ManyToManyField(related_name='options', to='travelapp.Trip')),
],
),
]
| [
"[email protected]"
]
| |
0e54f592add357a09ba8655d612cbf44e75aacd4 | e694891ff8c9d06df7b7b5def7ba71c1dba03aa8 | /redis_queue/db.py | 730396f0069a2660ad5e33e14ba3afafc373801f | []
| no_license | wangyu190810/python-skill | 78f9abb39ebfa01b92ffb2ec96c7ef57c490d68d | 719d082d47a5a82ce4a15c57dd481932a9d8f1ba | refs/heads/master | 2020-04-05T17:43:48.005145 | 2019-02-01T01:45:49 | 2019-02-01T01:45:49 | 41,524,479 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 630 | py |
# -*-coding:utf-8-*-
# email:[email protected]
__author__ = 'wangyu'
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine,text
from config import Config
def connection(database):
engine = create_engine(database)
Session = sessionmaker(engine)
session = Session()
return session
conn = connection(Config.db)
def insert_data(data):
sql =text("insert into queue_message (url,status_code) "
"VALUES (:url,:status_code)")
sql = sql.bindparams(url=data.get("url"),
status_code=data.get("status_code"))
conn.execute(sql)
conn.commit()
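# Minimal usage sketch (assumes Config.db holds a valid SQLAlchemy URL and a
# queue_message table with url/status_code columns already exists):
#
#   insert_data({"url": "http://example.com", "status_code": 200})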
| [
"[email protected]"
]
| |
34bf0ddf4c836f00f7809ad719bf5652f662b7e8 | 373035950bdc8956cc0b74675aea2d1857263129 | /spar_python/report_generation/ta1/ta1_section_overview_p2.py | 8c710d1d99d7cf13febced25219e657a0bc71447 | [
"BSD-2-Clause",
"BSD-3-Clause"
]
| permissive | limkokholefork/SPARTA | 5d122cd2e920775d61a5404688aabbafa164f22e | 6eeb28b2dd147088b6e851876b36eeba3e700f16 | refs/heads/master | 2021-11-11T21:09:38.366985 | 2017-06-02T16:21:48 | 2017-06-02T16:21:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,053 | py |
# *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: SY
# Description: Section class
#
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 19 Sep 2013 SY Original version
# *****************************************************************
# general imports:
import logging
# SPAR imports:
import spar_python.report_generation.common.section as section
import spar_python.report_generation.ta1.ta1_section_overview_common as t1soc
import spar_python.report_generation.ta1.ta1_schema as t1s
import spar_python.report_generation.common.regression as regression
import spar_python.report_generation.common.graphing as graphing
import spar_python.report_generation.common.latex_classes as latex_classes
# LOGGER:
LOGGER = logging.getLogger(__name__)
class Ta1OverviewP2Section(t1soc.Ta1OverviewCommonSection):
"""The equality overview section of the TA1 report."""
def __init__(self, jinja_template, report_generator):
"""Initializes the section with a jinja template and a report generator.
"""
cat = t1s.CATEGORIES.P2
super(Ta1OverviewP2Section, self).__init__(
jinja_template, report_generator, cat)
def _get_parameters(self, selection_cols):
"""Returns parameters for the 3d graph."""
parameters = {}
parameters["z_label"] = (
self._config.var_rangesize + " = range size")
# find the data:
this_constraint_list = (
self._config.get_constraint_list() +
self._inp.get_constraint_list() + [
(t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS, selection_cols)])
these_atomic_fields_and_functions = [
(t1s.DBA_RANGE,
t1s.Ta1ResultsSchema().get_complex_function(t1s.DBA_TABLENAME,
t1s.DBA_RANGE))]
parameters["values"] = self._config.results_db.get_query_values(
[(t1s.DBP_TABLENAME, t1s.DBP_NUMNEWRETURNEDRECORDS),
(t1s.DBP_TABLENAME, t1s.DBP_QUERYLATENCY)],
constraint_list=this_constraint_list,
atomic_fields_and_functions=these_atomic_fields_and_functions)
parameters["ftr"] = self._config.ql_p2_ftr
return parameters
def _populate_output(self):
"""Populates the output object which is passed to the Jinja tempalte
in get_string."""
super(Ta1OverviewP2Section, self)._populate_output()
this_constraint_list = (
self._config.get_constraint_list() +
self._inp.get_constraint_list())
categories = self._config.results_db.get_unique_query_values(
simple_fields=[(t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS)],
constraint_list=this_constraint_list)
for selection_cols in categories:
self._store_3d_latency_graph(selection_cols)
| [
"[email protected]"
]
| |
d1637ba38880e1918ef3ef2ff63a4a45df0985d1 | 73e277935ef28fd05935c93a3f155c9cc6dc6de7 | /ctf/crypto/rsa/pq_common_pollard_rho.py | b3132f1b7d7879f7cddc3228571c37da556ae317 | []
| no_license | ohmygodlin/snippet | 5ffe6b8fec99abd67dd5d7f819520e28112eae4b | 21d02015492fb441b2ad93b4a455dc4a145f9913 | refs/heads/master | 2023-01-08T14:59:38.618791 | 2022-12-28T11:23:23 | 2022-12-28T11:23:23 | 190,989,347 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py |
#Easy_Rsa, yangcheng-2021, https://lazzzaro.github.io/2021/09/12/match-2021%E7%BE%8A%E5%9F%8E%E6%9D%AF%E7%BD%91%E7%BB%9C%E5%AE%89%E5%85%A8%E5%A4%A7%E8%B5%9B/, https://xz.aliyun.com/t/6703
from Crypto.Util.number import *
import gmpy2
n = 84236796025318186855187782611491334781897277899439717384242559751095347166978304126358295609924321812851255222430530001043539925782811895605398187299748256080526691975084042025794113521587064616352833904856626744098904922117855866813505228134381046907659080078950018430266048447119221001098505107823645953039
e = 58337
c = 13646200911032594651110040891135783560995665642049282201695300382255436792102048169200570930229947213493204600006876822744757042959653203573780257603577712302687497959686258542388622714078571068849217323703865310256200818493894194213812410547780002879351619924848073893321472704218227047519748394961963394668
def f(x):
return (pow(x, n - 1, n) + 3) % n #(x*x+1)%n
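# Pollard's rho with Floyd cycle detection (an explanatory note, not text from
# the original write-up): a (tortoise) steps once through x -> f(x), b (hare)
# steps twice; once a == b (mod p) for some prime factor p of n, GCD(a - b, n)
# exposes p. The iteration map above swaps the usual x*x + 1 for
# pow(x, n-1, n) + 3; the challenge appears built so that x^(n-1) == 1 (mod p),
# which collapses the sequence mod p after only a couple of steps.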
def rho():
i = 1
while True:
a = getRandomRange(2, n)
b = f(a)
j = 1
while a != b:
p = GCD(a - b, n)
print('{} in {} circle'.format(j, i))
if p > 1:
return (p, n // p)
a = f(a)
b = f(f(b))
j += 1
i += 1
p, q = rho()
d = gmpy2.invert(e, (p-1)*(q-1))
m = pow(c, d, n)
print(long_to_bytes(m))
#b'SangFor{0a8c2220-4c1b-32c8-e8c1-adf92ec7678b}'
| [
"[email protected]"
]
| |
8350f11980db9cb44191f5846907f76bee29c0a3 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/vse-naloge-brez-testov/DN13-M-065.py | d4f9aec2e9e2b28eadb12b6390cf9ff7b76a6e9f | []
| no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py |
from math import fabs
class Minobot:
def __init__(self):
self.x = 0
self.y = 0
self.direction = 0
self.x_direction_coefficient = [1, 0, -1, 0]
self.y_direction_coefficient = [0, -1, 0, 1]
self.states = []
def get_current_state(self):
return {'x': self.x, 'y': self.y, 'direction': self.direction}
def save_current_state(self):
self.states.append(self.get_current_state())
def change_direction(self, direction):
self.save_current_state()
self.direction = (self.direction + direction) % 4
def levo(self):
self.change_direction(-1)
def desno(self):
self.change_direction(1)
def naprej(self, d):
self.save_current_state()
if self.x_direction_coefficient[self.direction]:
self.x += d * self.x_direction_coefficient[self.direction]
else:
self.y += d * self.y_direction_coefficient[self.direction]
def razveljavi(self):
if self.states:
previous_state = self.states.pop()
self.x = previous_state['x']
self.y = previous_state['y']
self.direction = previous_state['direction']
def razdalja(self):
return abs(self.x) + abs(self.y)
def koordinate(self):
return self.x, self.y
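# Usage sketch (an added example, not part of the original exercise file):
#
#   bot = Minobot()
#   bot.naprej(5)             # forward 5: facing +x, so (5, 0)
#   bot.desno()               # turn right, now facing -y
#   bot.naprej(3)             # (5, -3)
#   bot.razveljavi()          # undo -> back to (5, 0), still facing -y
#   print(bot.koordinate())   # (5, 0)
#   print(bot.razdalja())     # 5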
| [
"[email protected]"
]
| |
63f87d61e8c964d81e856f1e6f01cd937940a20b | 6b8bf10a57e1a85d2281579da9511310e39b9125 | /Exercise5/list_module.py | 653fd5fdce78508a87f475cb1e928e78a0de0a2d | []
| no_license | Hadirback/python | 9c0c5b622b18da50379d4c17df8ba68b67d452c9 | 88e03c34edb1c2f60a1624ee04b5bd975967e8ad | refs/heads/master | 2020-07-20T12:47:48.224472 | 2019-10-11T20:39:12 | 2019-10-11T20:39:12 | 206,643,640 | 0 | 1 | null | 2019-09-05T19:48:10 | 2019-09-05T19:38:58 | null | UTF-8 | Python | false | false | 629 | py |
# Lesson 5 Exercise 2
import random
def get_random_elem(items):
    # renamed from `list` to avoid shadowing the built-in
    if not items:
        return None
    return random.choice(items)
def fill_list():
    my_list = []
    while True:
        elem = input('Enter a list element, or press Enter to finish: ')
        if not elem:
            return my_list
        else:
            my_list.append(elem)
if __name__ == '__main__':
    main_list = fill_list()
    print(f'My list of elements: {main_list}')
    print(f'My random element from the list - {get_random_elem(main_list)}')
| [
"[email protected]"
]
| |
705172b35e4e926f7aaafbb9431f13fc097b88a4 | 54a26bf56aebd604d4dece733f08d7d30cd27f89 | /zdemo/auser/urls.py | 111ae8dee1420e3cac23d71f7714792b65cc4091 | [
"MIT"
]
| permissive | zzZaida/django_27 | b78f5ae8bccfa11074221ba32241878d703aa535 | bbbba8be9547fb815c68e94fadb7e8b6eebf75c9 | refs/heads/master | 2020-07-03T19:47:25.037195 | 2019-08-13T12:11:29 | 2019-08-13T12:11:29 | 202,030,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py |
"""zdemo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
    url(r'^user/', views.index, name='index'),
]
| [
"[email protected]"
]
| |
347021acc8f528e862d6401bb21dfa7d3134cf58 | 8d73ebf53f3d0aa08c3a50f18f47ef7d48e6febf | /CGPA_Calculator/icon.py | 7c01b70363b3922098a63c8e25bc682ad829f7c7 | [
"MIT"
]
| permissive | deepdalsania/calculator | 1da25f91feed8723a1faf43a2dffd8a955d7a359 | 1460fc7f91ef9e379bdde240ddbcb0183d7ec092 | refs/heads/master | 2022-12-20T16:42:36.522300 | 2020-10-06T05:03:51 | 2020-10-06T05:03:51 | 300,562,691 | 0 | 5 | MIT | 2020-10-06T05:03:52 | 2020-10-02T09:18:04 | Python | UTF-8 | Python | false | false | 864 | py |
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWidgets import QMainWindow, QLabel, QLineEdit, QPushButton, QApplication
def arrowIcon(self):
self.arrow = QLabel(self)
self.arrow.setPixmap(QPixmap("ARR.png"))
self.arrow.setGeometry(QRect(650, 240, 50, 40))
self.arrow.setScaledContents(True)
self.arrow.setToolTip('Tech-Totes Club.')
self.arrow = QLabel(self)
self.arrow.setPixmap(QPixmap("ARR.png"))
self.arrow.setGeometry(QRect(280, 345, 30, 30))
self.arrow.setScaledContents(True)
self.arrow.setToolTip('Tech-Totes Club.')
self.arrow = QLabel(self)
self.arrow.setPixmap(QPixmap("ARR.png"))
self.arrow.setGeometry(QRect(280, 395, 30, 30))
self.arrow.setScaledContents(True)
    self.arrow.setToolTip('Tech-Totes Club.')
| [
"[email protected]"
]
| |
6f3281175ab81b728476fb5171d77260cd8d394d | 73f5461ea52354ea8caa6e08a3989f833fc9d5d0 | /src/python/fsqio/pants/buildgen/jvm/map_third_party_jar_symbols.py | c581fd1cf759f63584ab20647a192c01cd433beb | [
"Apache-2.0"
]
| permissive | OpenGeoscience/fsqio | 52b674b3e2d1742916fcec83bbb831ddbd58d1f2 | aaee25552b602712e8ca3d8b02e0d28e4262e53e | refs/heads/master | 2021-01-15T20:23:18.180635 | 2017-06-05T20:25:18 | 2017-06-05T20:25:18 | 66,481,281 | 3 | 0 | null | 2017-06-05T20:25:18 | 2016-08-24T16:36:46 | Scala | UTF-8 | Python | false | false | 4,428 | py |
# coding=utf-8
# Copyright 2014 Foursquare Labs Inc. All Rights Reserved.
from __future__ import (
absolute_import,
division,
generators,
nested_scopes,
print_function,
unicode_literals,
with_statement,
)
from contextlib import closing
from itertools import chain
import json
import os
import re
from zipfile import ZipFile
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.task.task import Task
from pants.util.dirutil import safe_mkdir
class MapThirdPartyJarSymbols(Task):
@classmethod
def product_types(cls):
return [
'third_party_jar_symbols',
]
@classmethod
def prepare(cls, options, round_manager):
super(MapThirdPartyJarSymbols, cls).prepare(options, round_manager)
# NOTE(mateo): This is a deprecated concept upstream - everything is in the classpath now. So it will take some
# fiddling to get the jar symbols for anyone not using pom-resolve.
round_manager.require_data('compile_classpath')
round_manager.require_data('java')
round_manager.require_data('scala')
CLASSFILE_RE = re.compile(r'(?P<path_parts>(?:\w+/)+)'
r'(?P<file_part>.*?)'
r'\.class')
CLASS_NAME_RE = re.compile(r'[a-zA-Z]\w*')
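  # Example (added note): 'com/foo/Bar$Baz$$anonfun$1.class' matches with
  # path_parts='com/foo/' and file_part='Bar$Baz$$anonfun$1'; everything after
  # '$$' (anonymous classes) is dropped, and the nested-class walk below then
  # yields both 'com.foo.Bar' and 'com.foo.Bar.Baz'.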
def fully_qualified_classes_from_jar(self, jar_abspath):
with closing(ZipFile(jar_abspath)) as dep_zip:
for qualified_file_name in dep_zip.namelist():
match = self.CLASSFILE_RE.match(qualified_file_name)
if match is not None:
file_part = match.groupdict()['file_part']
path_parts = match.groupdict()['path_parts']
path_parts = filter(None, path_parts.split('/'))
package = '.'.join(path_parts)
non_anon_file_part = file_part.split('$$')[0]
nested_classes = non_anon_file_part.split('$')
for i in range(len(nested_classes)):
if not self.CLASS_NAME_RE.match(nested_classes[i]):
break
nested_class_name = '.'.join(nested_classes[:i + 1])
fully_qualified_class = '.'.join([package, nested_class_name])
yield fully_qualified_class
def execute(self):
products = self.context.products
targets = self.context.targets(lambda t: isinstance(t, JarLibrary))
with self.invalidated(targets, invalidate_dependents=False) as invalidation_check:
global_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
vts_workdir = os.path.join(self._workdir, global_vts.cache_key.hash)
vts_analysis_file = os.path.join(vts_workdir, 'buildgen_analysis.json')
if invalidation_check.invalid_vts or not os.path.exists(vts_analysis_file):
classpath = self.context.products.get_data('compile_classpath')
jar_entries = classpath.get_for_targets(targets)
all_jars = [jar for _, jar in jar_entries]
calculated_analysis = {}
calculated_analysis['hash'] = global_vts.cache_key.hash
calculated_analysis['jar_to_symbols_exported'] = {}
for jar_path in sorted(all_jars):
if os.path.splitext(jar_path)[1] != '.jar':
continue
fully_qualified_classes = list(set(self.fully_qualified_classes_from_jar(jar_path)))
calculated_analysis['jar_to_symbols_exported'][jar_path] = {
'fully_qualified_classes': fully_qualified_classes,
}
calculated_analysis_json = json.dumps(calculated_analysis)
safe_mkdir(vts_workdir)
with open(vts_analysis_file, 'wb') as f:
f.write(calculated_analysis_json)
if self.artifact_cache_writes_enabled():
self.update_artifact_cache([(global_vts, [vts_analysis_file])])
with open(vts_analysis_file, 'rb') as f:
analysis = json.loads(f.read())
third_party_jar_symbols = set(chain.from_iterable(
v['fully_qualified_classes'] for v in analysis['jar_to_symbols_exported'].values()
))
products.safe_create_data('third_party_jar_symbols', lambda: third_party_jar_symbols)
def check_artifact_cache_for(self, invalidation_check):
# Pom-resolve is an output dependent on the entire target set, and is not divisible
# by target. So we can only cache it keyed by the entire target set.
global_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
return [global_vts]
| [
"[email protected]"
]
| |
8a900fcc1c9f2cb65f9dd2a6b7c15eef2898558d | 1b9bd441c500e79042c48570035071dc20bfaf44 | /sources/Content_Quality/mekhilta.py | 6ded5ff121376d5bb37ff8e30b43ebf4f016f14d | []
| no_license | Sefaria/Sefaria-Data | ad2d1d38442fd68943535ebf79e2603be1d15b2b | 25bf5a05bf52a344aae18075fba7d1d50eb0713a | refs/heads/master | 2023-09-05T00:08:17.502329 | 2023-08-29T08:53:40 | 2023-08-29T08:53:40 | 5,502,765 | 51 | 52 | null | 2023-08-29T11:42:31 | 2012-08-22T00:18:38 | null | UTF-8 | Python | false | false | 1,737 | py |
from sources.functions import *
alt_toc = """Massekta dePesah / מסכתא דפסחא
Exodus 12:1–13:16
Massekta deVayehi Beshalach / מסכתא דויהי בשלח
Exodus 13:17-14:31
Massekta deShirah / מסכתא דשירה
Exodus 15:1-15:21
Massekta deVayassa / מסכתא דויסע
Exodus 15:22-17:7
Massekta deAmalek / מסכתא דעמלק
Exodus 17:8- 18:27
Massekta deBahodesh / מסכתא דבחודש
Exodus 19:1-20:26
Massekta deNezikin / מסכתא דנזיקין
Exodus 21:1-22:23
Massekta deKaspa / מסכתא דכספא
Exodus 22:24-23:19
Massekta deShabbeta / מסכתא דשבתא
Exodus 31:12-35:3"""
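# alt_toc alternates lines: even indices carry the "English / Hebrew" massekta
# titles, odd indices carry the Exodus range that massekta covers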
nodes = []
alt_toc = alt_toc.splitlines()
for r, row in enumerate(alt_toc):
if r % 2 == 0:
node = ArrayMapNode()
en, he = row.strip().split(" / ")
node.add_primary_titles(en, he)
node.depth = 0
node.refs = []
else:
node.wholeRef = row.strip().replace("Exodus", "Mekhilta d'Rabbi Yishmael")
node.validate()
nodes.append(node.serialize())
index = get_index_api("Mekhilta d'Rabbi Yishmael", server="https://germantalmud.cauldron.sefaria.org")
index["alt_structs"] = {"Parasha": {"nodes": nodes}}
#post_index(index, server="https://www.sefaria.org")
links = []
for sec_ref in library.get_index("Mekhilta d'Rabbi Yishmael").all_section_refs():
seg_ref = sec_ref.as_ranged_segment_ref().normal()
exodus_ref = sec_ref.normal().replace("Mekhilta d'Rabbi Yishmael", "Exodus")
print(exodus_ref)
print(seg_ref)
print("***")
links.append({"refs": [exodus_ref, seg_ref], "generated_by": "mekhilta_to_exodus", "auto": True, "type": "Commentary"})
post_link_in_steps(links, server="https://www.sefaria.org", step=100, sleep_amt=10)
| [
"[email protected]"
]
| |
dbc9f7579143bb64ff98823e0279763d02cde114 | 6a8644cc47ed31adb60aba0f47551a897fdf8767 | /src/seraing/urban/dataimport/acropole/mappers.py | caa692885e0c32bea9b5dba44a5c0dcf0ed4a1e6 | []
| no_license | IMIO/seraing.urban.dataimport | 4e6ad4340348fc24b8576d4ce1f4c89a03063b88 | c9bd8e49390a14eeb53ce04a3d7b571e34da8a20 | refs/heads/master | 2021-01-20T03:42:12.232635 | 2017-04-27T12:23:47 | 2017-04-27T12:23:47 | 89,576,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,564 | py |
# -*- coding: utf-8 -*-
import unicodedata
import datetime
from seraing.urban.dataimport.acropole.settings import AcropoleImporterSettings
from seraing.urban.dataimport.acropole.utils import get_state_from_licences_dates, get_date_from_licences_dates, \
load_architects, load_geometers, load_notaries, load_parcellings, get_point_and_digits
from imio.urban.dataimport.config import IMPORT_FOLDER_PATH
from imio.urban.dataimport.exceptions import NoObjectToCreateException
from imio.urban.dataimport.factory import BaseFactory
from imio.urban.dataimport.mapper import Mapper, FinalMapper, PostCreationMapper
from imio.urban.dataimport.utils import CadastralReference
from imio.urban.dataimport.utils import cleanAndSplitWord
from imio.urban.dataimport.utils import guess_cadastral_reference
from imio.urban.dataimport.utils import identify_parcel_abbreviations
from imio.urban.dataimport.utils import parse_cadastral_reference
from DateTime import DateTime
from Products.CMFPlone.utils import normalizeString
from Products.CMFPlone.utils import safe_unicode
from plone import api
from plone.i18n.normalizer import idnormalizer
import re
import os
#
# LICENCE
#
# factory
class LicenceFactory(BaseFactory):
def getCreationPlace(self, factory_args):
path = '%s/urban/%ss' % (self.site.absolute_url_path(), factory_args['portal_type'].lower())
return self.site.restrictedTraverse(path)
# mappers
class IdMapper(Mapper):
def __init__(self, importer, args):
super(IdMapper, self).__init__(importer, args)
load_architects()
load_geometers()
load_notaries()
load_parcellings()
def mapId(self, line):
return normalizeString(self.getData('id'))
class ReferenceMapper(Mapper):
def mapReference(self, line):
if AcropoleImporterSettings.file_type == 'old':
ref = self.getData('Numero Permis') + " old"
if ref.strip():
return ref
else:
id = self.getData('id')
return "NC/%s" % (id) + " old"
elif AcropoleImporterSettings.file_type == 'new':
return self.getData('Reference') + " new"
class ReferenceDGO3Mapper(Mapper):
def mapReferencedgatlp(self, line):
type = self.getData('Type')
if type and type.startswith("PE1") or type.startswith("PE2"):
dg03ref = self.getData('PENReference DGO3')
if dg03ref:
return dg03ref
class PortalTypeMapper(Mapper):
def mapPortal_type(self, line):
if AcropoleImporterSettings.file_type == 'old':
return 'BuildLicence'
type = self.getData('Type')
# if type and type.startswith("PE1"):
# return "EnvClassOne"
# elif type and type.startswith("PE2"):
# return "EnvClassTwo"
# else:
# raise NoObjectToCreateException
if type and len(type) >= 3:
type_map = self.getValueMapping('type_map')
base_type = type.strip()[0:3]
# if base_type in ['PE', 'PEX', 'PUN']:
# base_type = type.strip()[0:4]
portal_type = type_map[base_type]
return portal_type
else:
raise NoObjectToCreateException
def mapFoldercategory(self, line):
foldercategory = 'uat'
return foldercategory
class LicenceSubjectMapper(Mapper):
def mapLicencesubject(self, line):
object1 = self.getData('Genre de Travaux')
object2 = self.getData('Divers')
return '%s %s' % (object1, object2)
class WorklocationMapper(Mapper):
def mapWorklocations(self, line):
num = self.getData('AdresseTravauxNumero')
noisy_words = set(('d', 'du', 'de', 'des', 'le', 'la', 'les', 'à', ',', 'rues', 'terrain', 'terrains', 'garage', 'magasin', 'entrepôt'))
raw_street = self.getData('AdresseTravauxRue')
# remove string in () and []
raw_street = re.sub("[\(\[].*?[\)\]]", "", raw_street)
street = cleanAndSplitWord(raw_street)
street_keywords = [word for word in street if word not in noisy_words and len(word) > 1]
if len(street_keywords) and street_keywords[-1] == 'or':
street_keywords = street_keywords[:-1]
locality = self.getData('AdresseTravauxVille')
street_keywords.extend(cleanAndSplitWord(locality))
brains = self.catalog(portal_type='Street', Title=street_keywords)
if len(brains) == 1:
return ({'street': brains[0].UID, 'number': num},)
if street:
            self.logError(self, line, "Couldn't find street or found too many streets", {
'address': '%s, %s, %s ' % (num, raw_street, locality),
'street': street_keywords,
'search result': len(brains)
})
return {}
class WorklocationOldMapper(Mapper):
def mapWorklocations(self, line):
noisy_words = set(('d', 'du', 'de', 'des', 'le', 'la', 'les', 'à', ',', 'rues', 'terrain', 'terrains', 'garage', 'magasin', 'entrepôt'))
raw_street = self.getData('Lieu de construction')
num = ''.join(ele for ele in raw_street if ele.isdigit())
# remove string in () and []
raw_street = re.sub("[\(\[].*?[\)\]]", "", raw_street)
street = cleanAndSplitWord(raw_street)
street_keywords = [word for word in street if word not in noisy_words and len(word) > 1]
if len(street_keywords) and street_keywords[-1] == 'or':
street_keywords = street_keywords[:-1]
brains = self.catalog(portal_type='Street', Title=street_keywords)
if len(brains) == 1:
return ({'street': brains[0].UID, 'number': num},)
if street:
            self.logError(self, line, "Couldn't find street or found too many streets", {
'address': '%s' % (raw_street),
'street': street_keywords,
'search result': len(brains)
})
return {}
class CityMapper(Mapper):
def mapCity(self, line):
city = self.getData('Ville Demandeur')
return (''.join(ele for ele in city if not ele.isdigit())).strip()
class PostalCodeMapper(Mapper):
def mapZipcode(self, line):
zip = self.getData('Ville Demandeur')
return (''.join(ele for ele in zip if ele.isdigit())).strip()
class ParcellingUIDMapper(Mapper):
def mapParcellings(self, line):
title = self.getData('Lotissement')
if title:
title = title.replace("(phase I)","").strip()
title = title.replace("(partie 1)","").strip()
title = title.replace("(partie 2)","").strip()
catalog = api.portal.get_tool('portal_catalog')
brains = catalog(portal_type='ParcellingTerm', Title=title)
parcelling_uids = [brain.getObject().UID() for brain in brains]
if len(parcelling_uids) == 1:
return parcelling_uids
if parcelling_uids:
            self.logError(self, line, "Couldn't find parcelling or found too many parcellings", {
'titre': '%s' % title,
'search result': len(parcelling_uids)
})
class IsInSubdivisionMapper(Mapper):
def mapIsinsubdivision(self, line):
title = self.getData('Lotissement')
return bool(title)
class SubdivisionDetailsMapper(Mapper):
def mapSubdivisiondetails(self, line):
lot = self.getData('Lot')
return lot
class WorkTypeMapper(Mapper):
def mapWorktype(self, line):
worktype = self.getData('Code_220+')
return [worktype]
class InquiryStartDateMapper(Mapper):
def mapInvestigationstart(self, line):
date = self.getData('DateDebEnq')
if date:
date = datetime.datetime.strptime(date, "%d/%m/%Y")
return date
class InquiryEndDateMapper(Mapper):
def mapInvestigationend(self, line):
date = self.getData('DateFinEnq')
if date:
date = datetime.datetime.strptime(date, "%d/%m/%Y")
return date
class InvestigationReasonsMapper(Mapper):
def mapInvestigationreasons(self, line):
reasons = '<p>%s</p> <p>%s</p>' % (self.getData('ParticularitesEnq1'), self.getData('ParticularitesEnq2'))
return reasons
class InquiryReclamationNumbersMapper(Mapper):
def mapInvestigationwritereclamationnumber(self, line):
reclamation = self.getData('NBRec')
return reclamation
class InquiryArticlesMapper(PostCreationMapper):
def mapInvestigationarticles(self, line, plone_object):
raw_articles = self.getData('Enquete')
articles = []
if raw_articles:
article_regex = '(\d+ ?, ?\d+)°'
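            # e.g. a raw value like '330, 12°' is captured as '330, 12' and
            # normalized below to the config id '330-12'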
found_articles = re.findall(article_regex, raw_articles)
if not found_articles:
self.logError(self, line, 'No investigation article found.', {'articles': raw_articles})
for art in found_articles:
article_id = re.sub(' ?, ?', '-', art)
if not self.article_exists(article_id, licence=plone_object):
self.logError(
self, line, 'Article %s does not exist in the config',
{'article id': article_id, 'articles': raw_articles}
)
else:
articles.append(article_id)
return articles
def article_exists(self, article_id, licence):
return article_id in licence.getLicenceConfig().investigationarticles.objectIds()
class AskOpinionsMapper(Mapper):
def mapSolicitopinionsto(self, line):
ask_opinions = []
for i in range(60, 76):
j = i - 59
if line[i] == "VRAI":
solicitOpinionDictionary = self.getValueMapping('solicitOpinionDictionary')
opinion = solicitOpinionDictionary[str(j)]
if opinion:
ask_opinions.append(opinion)
return ask_opinions
class RubricsMapper(Mapper):
def mapRubrics(self, line):
rubric_list = []
# licence = self.importer.current_containers_stack[-1]
# if licence.portal_type == 'EnvClassThree':
rubric_raw = self.getData('DENRubrique1')
if rubric_raw:
rubric_raw.replace("//", "/")
rubrics = rubric_raw.split("/")
if rubrics:
for rubric in rubrics:
point_and_digits = get_point_and_digits(rubric)
if point_and_digits and '.' in point_and_digits:
catalog = api.portal.get_tool('portal_catalog')
rubric_uids = [brain.UID for brain in catalog(portal_type='EnvironmentRubricTerm', id=point_and_digits)]
if not rubric_uids:
self.logError(self, line, 'No rubric found',
{
'rubric': point_and_digits,
})
else:
rubric_list.append(rubric_uids[0])
return rubric_list
class ObservationsMapper(Mapper):
def mapDescription(self, line):
description = '<p>%s</p> <p>%s</p>' % (self.getData('ParticularitesEnq1'),self.getData('ParticularitesEnq2'))
return description
class ObservationsOldMapper(Mapper):
def mapDescription(self, line):
description = '<p>%s</p>' % (self.getData('Remarques'))
return description
class TechnicalConditionsMapper(Mapper):
def mapLocationtechnicalconditions(self, line):
obs_decision1 = '<p>%s</p>' % self.getData('memo_Autorisation')
obs_decision2 = '<p>%s</p>' % self.getData('memo_Autorisation2')
return '%s%s' % (obs_decision1, obs_decision2)
class ArchitectMapper(PostCreationMapper):
def mapArchitects(self, line, plone_object):
# archi_name = '%s %s %s' % (self.getData('Nom Architecte'), self.getData('Prenom Architecte'), self.getData('Societe Architecte'))
archi_name = ' %s %s' % ( self.getData('Prenom Architecte'), self.getData('Societe Architecte'))
fullname = cleanAndSplitWord(archi_name)
if not fullname:
return []
noisy_words = ['monsieur', 'madame', 'architecte', '&', ',', '.', 'or', 'mr', 'mme', '/']
name_keywords = [word.lower() for word in fullname if word.lower() not in noisy_words]
architects = self.catalog(portal_type='Architect', Title=name_keywords)
if len(architects) == 0:
Utils.createArchitect(archi_name)
architects = self.catalog(portal_type='Architect', Title=name_keywords)
if len(architects) == 1:
return architects[0].getObject()
        self.logError(self, line, 'No architects found or too many architects found',
{
'raw_name': archi_name,
'name': name_keywords,
'search_result': len(architects)
})
return []
class FolderZoneTableMapper(Mapper):
def mapFolderzone(self, line):
folderZone = []
sectorMap1 = self.getData('Plan de Secteur 1')
sectorMap2 = self.getData('Plan de Secteur 2')
zoneDictionnary = self.getValueMapping('zoneDictionary')
if sectorMap1 in zoneDictionnary:
folderZone.append(zoneDictionnary[sectorMap1])
if sectorMap2 in zoneDictionnary:
folderZone.append(zoneDictionnary[sectorMap2])
return folderZone
class GeometricianMapper(PostCreationMapper):
def mapGeometricians(self, line, plone_object):
name = self.getData('LOTGeoNom')
firstname = self.getData('LOTGeoPrenom')
raw_name = firstname + name
# name = cleanAndSplitWord(name)
# firstname = cleanAndSplitWord(firstname)
names = name + ' ' + firstname
if raw_name:
geometrician = self.catalog(portal_type='Geometrician', Title=names)
if len(geometrician) == 0:
Utils.createGeometrician(name, firstname)
geometrician = self.catalog(portal_type='Geometrician', Title=names)
if len(geometrician) == 1:
return geometrician[0].getObject()
        self.logError(self, line, 'No geometricians found or too many geometricians found',
{
'raw_name': raw_name,
'name': name,
'firstname': firstname,
'search_result': len(geometrician)
})
return []
class PcaUIDMapper(Mapper):
def mapPca(self, line):
title = self.getData('PPA')
if title:
catalog = api.portal.get_tool('portal_catalog')
pca_id = catalog(portal_type='PcaTerm', Title=title)[0].id
return pca_id
return []
class IsInPcaMapper(Mapper):
def mapIsinpca(self, line):
title = self.getData('PPA')
return bool(title)
class EnvRubricsMapper(Mapper):
def mapDescription(self, line):
rubric = Utils().convertToUnicode(self.getData('LibNat'))
return rubric
class CompletionStateMapper(PostCreationMapper):
def map(self, line, plone_object):
self.line = line
transition = None
if AcropoleImporterSettings.file_type == 'old':
type_decision = self.getData('Type Decision')
if type_decision == 'REFUS':
transition = 'refuse'
else:
transition = 'accept'
else:
if plone_object.portal_type in ['BuildLicence', 'ParcelOutLicence']:
datePermis = self.getData('Date Permis')
dateRefus = self.getData('Date Refus')
datePermisRecours = self.getData('Date Permis sur recours')
dateRefusRecours = self.getData('Date Refus sur recours')
transition = get_state_from_licences_dates(datePermis, dateRefus, datePermisRecours, dateRefusRecours)
elif plone_object.portal_type == 'Declaration':
if self.getData('DURDecision') == 'Favorable':
transition = 'accept'
elif self.getData('DURDecision') == 'Défavorable':
transition = 'refuse'
elif plone_object.portal_type == 'UrbanCertificateTwo':
if self.getData('CU2Decision') == 'Favorable':
transition = 'accept'
elif self.getData('CU2Decision') == 'Défavorable':
transition = 'refuse'
elif plone_object.portal_type == 'EnvClassThree':
if self.getData('DENDecision') == 'irrecevable':
transition = 'refuse'
elif self.getData('DENDecision') == 'OK sans conditions' or self.getData('DENDecision') == 'OK avec conditions':
transition = 'accept'
if transition:
api.content.transition(plone_object, transition)
# api.content.transition(plone_object, 'nonapplicable')
class ErrorsMapper(FinalMapper):
def mapDescription(self, line, plone_object):
line_number = self.importer.current_line
errors = self.importer.errors.get(line_number, None)
description = plone_object.Description()
error_trace = []
if errors:
for error in errors:
data = error.data
if 'streets' in error.message:
error_trace.append('<p>adresse : %s</p>' % data['address'])
elif 'notaries' in error.message:
error_trace.append('<p>notaire : %s %s %s</p>' % (data['title'], data['firstname'], data['name']))
elif 'architects' in error.message:
error_trace.append('<p>architecte : %s</p>' % data['raw_name'])
elif 'geometricians' in error.message:
error_trace.append('<p>géomètre : %s</p>' % data['raw_name'])
elif 'parcels' in error.message and AcropoleImporterSettings.file_type == 'old':
error_trace.append('<p>parcels : %s </p>' % data['args'])
elif 'rubric' in error.message.lower():
error_trace.append('<p>Rubrique non trouvée : %s</p>' % (data['rubric']))
elif 'parcelling' in error.message:
                    if data['search result'] == 0:
error_trace.append('<p>lotissement non trouvé : %s </p>' % data['titre'])
else:
error_trace.append("<p>lotissement trouvé plus d'une fois: %s : %s fois</p>" % (data['titre'], data['search result'] ))
elif 'article' in error.message.lower():
error_trace.append('<p>Articles de l\'enquête : %s</p>' % (data['articles']))
error_trace = ''.join(error_trace)
return '%s%s' % (error_trace, description)
#
# CONTACT
#
# factory
class ContactFactory(BaseFactory):
def getPortalType(self, container, **kwargs):
if container.portal_type in ['UrbanCertificateOne', 'UrbanCertificateTwo', 'NotaryLetter']:
return 'Proprietary'
return 'Applicant'
# mappers
class ContactIdMapper(Mapper):
def mapId(self, line):
name = '%s%s%s' % (self.getData('NomDemandeur1'), self.getData('PrenomDemandeur1'), self.getData('id'))
name = name.replace(' ', '').replace('-', '')
return normalizeString(self.site.portal_urban.generateUniqueId(name))
class ContactIdOldMapper(Mapper):
def mapId(self, line):
name = '%s%s' % (self.getData('Nom Demandeur'), self.getData('id'))
name = name.replace(' ', '').replace('-', '')
return normalizeString(self.site.portal_urban.generateUniqueId(name))
class ContactTitleMapper(Mapper):
def mapPersontitle(self, line):
title1 = self.getData('Civi').lower()
title = title1 or self.getData('Civi2').lower()
title_mapping = self.getValueMapping('titre_map')
return title_mapping.get(title, 'notitle')
class ContactNameMapper(Mapper):
def mapName1(self, line):
title = self.getData('Civi2')
name = self.getData('D_Nom')
regular_titles = [
'M.',
'M et Mlle',
'M et Mme',
'M. et Mme',
'M. l\'Architecte',
'M. le président',
'Madame',
'Madame Vve',
'Mademoiselle',
'Maître',
'Mlle et Monsieur',
'Mesdames',
'Mesdemoiselles',
'Messieurs',
'Mlle',
'MM',
'Mme',
'Mme et M',
'Monsieur',
'Monsieur,',
'Monsieur et Madame',
'Monsieur l\'Architecte',
]
if title not in regular_titles:
name = '%s %s' % (title, name)
return name
class ContactSreetMapper(Mapper):
def mapStreet(self, line):
regex = '((?:[^\d,]+\s*)+),?'
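        # e.g. 'Rue des Lilas, 12' -> group(1) == 'Rue des Lilas'
        # (everything up to the first digit or comma is taken as the street)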
raw_street = self.getData('D_Adres')
match = re.match(regex, raw_street)
if match:
street = match.group(1)
else:
street = raw_street
return street
class ContactNumberMapper(Mapper):
def mapNumber(self, line):
regex = '(?:[^\d,]+\s*)+,?\s*(.*)'
raw_street = self.getData('D_Adres')
number = ''
match = re.match(regex, raw_street)
if match:
number = match.group(1)
return number
class ContactPhoneMapper(Mapper):
def mapPhone(self, line):
raw_phone = self.getData('D_Tel')
gsm = self.getData('D_GSM')
phone = ''
if raw_phone:
phone = raw_phone
if gsm:
phone = phone and '%s %s' % (phone, gsm) or gsm
return phone
#
# PARCEL
#
#factory
class ParcelFactory(BaseFactory):
def create(self, parcel, container=None, line=None):
searchview = self.site.restrictedTraverse('searchparcels')
#need to trick the search browser view about the args in its request
parcel_args = parcel.to_dict()
parcel_args.pop('partie')
for k, v in parcel_args.iteritems():
searchview.context.REQUEST[k] = v
#check if we can find a parcel in the db cadastre with these infos
found = searchview.findParcel(**parcel_args)
if not found:
found = searchview.findParcel(browseoldparcels=True, **parcel_args)
if len(found) == 1 and parcel.has_same_attribute_values(found[0]):
parcel_args['divisionCode'] = parcel_args['division']
parcel_args['isOfficialParcel'] = True
else:
            self.logError(self, line, 'Too many parcels found or not enough parcels found', {'args': parcel_args, 'search result': len(found)})
parcel_args['isOfficialParcel'] = False
parcel_args['id'] = parcel.id
parcel_args['partie'] = parcel.partie
return super(ParcelFactory, self).create(parcel_args, container=container)
def objectAlreadyExists(self, parcel, container):
existing_object = getattr(container, parcel.id, None)
return existing_object
# mappers
class ParcelDataMapper(Mapper):
def map(self, line, **kwargs):
section = self.getData('Parcelle1section', line).upper()
if len(section) > 0:
section = section[0]
remaining_reference = '%s %s' % (self.getData('Parcelle1numero', line), self.getData('Parcelle1numerosuite', line))
if not remaining_reference:
return []
abbreviations = identify_parcel_abbreviations(remaining_reference)
division = '25111' if self.getData('AdresseTravauxVille', line) == 'Wauthier-Braine' else '25015'
if not remaining_reference or not section or not abbreviations:
return []
base_reference = parse_cadastral_reference(division + section + abbreviations[0])
base_reference = CadastralReference(*base_reference)
parcels = [base_reference]
for abbreviation in abbreviations[1:]:
new_parcel = guess_cadastral_reference(base_reference, abbreviation)
parcels.append(new_parcel)
section2 = self.getData('Parcelle2section', line).upper()
if section2 :
section2 = section2[0]
remaining_reference2 = '%s %s' % (self.getData('Parcelle2numero', line), self.getData('Parcelle2numerosuite', line))
if not remaining_reference2:
return []
abbreviations2 = identify_parcel_abbreviations(remaining_reference2)
if not remaining_reference2 or not section2:
return []
base_reference2 = parse_cadastral_reference(division + section2 + abbreviations2[0])
base_reference2 = CadastralReference(*base_reference2)
for abbreviation2 in abbreviations2[1:]:
new_parcel2 = guess_cadastral_reference(base_reference2, abbreviation2)
parcels.append(new_parcel2)
return parcels
class OldParcelDataMapper(Mapper):
def map(self, line, **kwargs):
raw_parcel = self.getData('Cadastre', line)
if raw_parcel:
self.logError(self, line, 'parcels', {'args': raw_parcel})
# section = raw_parcel[0].upper()
# remaining_reference = raw_parcel[1:]
# remaining_reference = remaining_reference.replace("-","").strip()
# if not remaining_reference:
# return []
# abbreviations = identify_parcel_abbreviations(remaining_reference)
# division = '25015'
# if not remaining_reference or not section or not abbreviations:
# return []
# base_reference = parse_cadastral_reference(division + section + abbreviations[0])
#
# base_reference = CadastralReference(*base_reference)
#
# parcels = [base_reference]
# for abbreviation in abbreviations[1:]:
# new_parcel = guess_cadastral_reference(base_reference, abbreviation)
# self.logError(self, line, 'parcels', {'args': new_parcel})
# parcels.append(new_parcel)
#
# return parcels
raise NoObjectToCreateException
#
# UrbanEvent deposit
#
# factory
class UrbanEventFactory(BaseFactory):
def getPortalType(self, **kwargs):
return 'UrbanEvent'
def create(self, kwargs, container, line):
if not kwargs['eventtype']:
return []
eventtype_uid = kwargs.pop('eventtype')
urban_event = container.createUrbanEvent(eventtype_uid, **kwargs)
return urban_event
#mappers
class DepositEventMapper(Mapper):
def mapEventtype(self, line):
licence = self.importer.current_containers_stack[-1]
urban_tool = api.portal.get_tool('portal_urban')
eventtype_id = self.getValueMapping('eventtype_id_map')[licence.portal_type]['deposit_event']
config = urban_tool.getUrbanConfig(licence)
return getattr(config.urbaneventtypes, eventtype_id).UID()
class DepositDateMapper(Mapper):
def mapEventdate(self, line):
date = self.getData('Recepisse')
if not date:
raise NoObjectToCreateException
date = datetime.datetime.strptime(date, "%d/%m/%Y")
return date
class DepositEventIdMapper(Mapper):
def mapId(self, line):
return 'depot-de-la-demande'
# UrbanEvent transmitted decision
class TransmittedIdMapper(Mapper):
def mapId(self, line):
return 'transmis-decision'
class TransmittedEventMapper(Mapper):
def mapEventtype(self, line):
licence = self.importer.current_containers_stack[-1]
urban_tool = api.portal.get_tool('portal_urban')
eventtype_id = 'transmis-decision'
config = urban_tool.getUrbanConfig(licence)
return getattr(config.urbaneventtypes, eventtype_id).UID()
class DateTransmissionMapper(Mapper):
def mapEventdate(self, line):
date = self.getData('DURDateTransmission')
if not date:
raise NoObjectToCreateException
date = datetime.datetime.strptime(date, "%d/%m/%Y")
return date
class DateTransmissionEventIdMapper(Mapper):
def mapId(self, line):
return 'transmis-decision'
#
# UrbanEvent ask opinions
#
# factory
class OpinionMakersFactory(BaseFactory):
""" """
#mappers
class OpinionMakersTableMapper(Mapper):
""" """
def map(self, line, **kwargs):
lines = self.query_secondary_table(line)
for secondary_line in lines:
for mapper in self.mappers:
return mapper.map(secondary_line, **kwargs)
break
return []
class OpinionMakersMapper(Mapper):
def map(self, line):
opinionmakers_args = []
for i in range(1, 11):
opinionmakers_id = self.getData('Org{}'.format(i), line)
if not opinionmakers_id:
return opinionmakers_args
event_date = self.getData('Cont{}'.format(i), line)
receipt_date = self.getData('Rec{}'.format(i), line)
args = {
'id': opinionmakers_id,
'eventtype': opinionmakers_id,
'eventDate': event_date and DateTime(event_date) or None,
'transmitDate': event_date and DateTime(event_date) or None,
'receiptDate': receipt_date and DateTime(receipt_date) or None,
'receivedDocumentReference': self.getData('Ref{}'.format(i), line),
}
opinionmakers_args.append(args)
if not opinionmakers_args:
raise NoObjectToCreateException
return opinionmakers_args
class LinkedInquiryMapper(PostCreationMapper):
def map(self, line, plone_object):
opinion_event = plone_object
licence = opinion_event.aq_inner.aq_parent
inquiry = licence.getInquiries() and licence.getInquiries()[-1] or licence
opinion_event.setLinkedInquiry(inquiry)
#
# Claimant
#
# factory
class ClaimantFactory(BaseFactory):
def getPortalType(self, container, **kwargs):
return 'Claimant'
#mappers
class ClaimantIdMapper(Mapper):
def mapId(self, line):
name = '%s%s' % (self.getData('RECNom'), self.getData('RECPrenom'))
name = name.replace(' ', '').replace('-', '')
if not name:
raise NoObjectToCreateException
return normalizeString(self.site.portal_urban.generateUniqueId(name))
class ClaimantTitleMapper(Mapper):
def mapPersontitle(self, line):
title = self.getData('Civi_Rec').lower()
title_mapping = self.getValueMapping('titre_map')
return title_mapping.get(title, 'notitle')
class ClaimantSreetMapper(Mapper):
def mapStreet(self, line):
        regex = r'((?:[^\d,]+\s*)+),?'
raw_street = self.getData('RECAdres')
match = re.match(regex, raw_street)
if match:
street = match.group(1)
else:
street = raw_street
return street
class ClaimantNumberMapper(Mapper):
def mapNumber(self, line):
        regex = r'(?:[^\d,]+\s*)+,?\s*(.*)'
raw_street = self.getData('RECAdres')
number = ''
match = re.match(regex, raw_street)
if match:
number = match.group(1)
return number
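# Note: the two regexes above split a raw address such as 'Rue de la Gare, 12'
# into street 'Rue de la Gare' and number '12' (the example address is illustrative).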
#
# UrbanEvent second RW
#
#mappers
class SecondRWEventTypeMapper(Mapper):
def mapEventtype(self, line):
licence = self.importer.current_containers_stack[-1]
urban_tool = api.portal.get_tool('portal_urban')
eventtype_id = 'transmis-2eme-dossier-rw'
config = urban_tool.getUrbanConfig(licence)
return getattr(config.urbaneventtypes, eventtype_id).UID()
class SecondRWEventDateMapper(Mapper):
def mapEventdate(self, line):
date = self.getData('UR_Datenv2')
date = date and DateTime(date) or None
if not date:
raise NoObjectToCreateException
return date
class SecondRWDecisionMapper(Mapper):
def mapExternaldecision(self, line):
raw_decision = self.getData('UR_Avis')
decision = self.getValueMapping('externaldecisions_map').get(raw_decision, [])
return decision
class SecondRWDecisionDateMapper(Mapper):
def mapDecisiondate(self, line):
date = self.getData('UR_Datpre')
date = date and DateTime(date) or None
return date
class SecondRWReceiptDateMapper(Mapper):
def mapReceiptdate(self, line):
date = self.getData('UR_Datret')
date = date and DateTime(date) or None
return date
#
# UrbanEvent decision
#
#mappers
class DecisionEventTypeMapper(Mapper):
def mapEventtype(self, line):
licence = self.importer.current_containers_stack[-1]
urban_tool = api.portal.get_tool('portal_urban')
eventtype_id = self.getValueMapping('eventtype_id_map')[licence.portal_type]['decision_event']
config = urban_tool.getUrbanConfig(licence)
return getattr(config.urbaneventtypes, eventtype_id).UID()
class DecisionEventIdMapper(Mapper):
def mapId(self, line):
return 'decision_event'
class DecisionEventDateMapper(Mapper):
def mapDecisiondate(self, line):
licence = self.importer.current_containers_stack[-1]
if licence.portal_type in ['BuildLicence', 'ParcelOutLicence', 'EnvClassOne', 'EnvClassTwo']:
datePermis = self.getData('Date Permis')
dateRefus = self.getData('Date Refus')
datePermisRecours = self.getData('Date Permis sur recours')
dateRefusRecours = self.getData('Date Refus sur recours')
date = get_date_from_licences_dates(datePermis, dateRefus, datePermisRecours, dateRefusRecours)
if not date:
self.logError(self, line, 'No decision date found')
raise NoObjectToCreateException
elif licence.portal_type == 'Declaration':
date = self.getData('DURDateDecision')
if not date:
date = self.getData('DURDateTransmission')
if not date:
decision = self.getData('DURDecision')
if decision:
date = self.getValueMapping('default_date_decision')
elif licence.portal_type == 'UrbanCertificateTwo':
date = self.getData('CU2DateDecision')
return datetime.datetime.strptime(date, "%d/%m/%Y")
class DecisionEventDecisionMapper(Mapper):
def mapDecision(self, line):
licence = self.importer.current_containers_stack[-1]
if licence.portal_type in ['BuildLicence', 'ParcelOutLicence', 'EnvClassOne', 'EnvClassTwo']:
datePermis = self.getData('Date Permis')
dateRefus = self.getData('Date Refus')
datePermisRecours = self.getData('Date Permis sur recours')
dateRefusRecours = self.getData('Date Refus sur recours')
state = get_state_from_licences_dates(datePermis, dateRefus, datePermisRecours, dateRefusRecours)
if state == 'accept':
return u'Favorable'
elif state == 'refuse':
return u'Défavorable'
elif licence.portal_type == 'Declaration':
return self.getData('DURDecision')
elif licence.portal_type == 'UrbanCertificateTwo':
return self.getData('CU2Decision')
class DecisionEventNotificationDateMapper(Mapper):
def mapEventdate(self, line):
licence = self.importer.current_containers_stack[-1]
if licence.portal_type in ['BuildLicence', 'ParcelOutLicence', 'EnvClassOne', 'EnvClassTwo']:
datePermis = self.getData('Date Permis')
dateRefus = self.getData('Date Refus')
datePermisRecours = self.getData('Date Permis sur recours')
dateRefusRecours = self.getData('Date Refus sur recours')
eventDate = get_date_from_licences_dates(datePermis, dateRefus, datePermisRecours, dateRefusRecours)
elif licence.portal_type == 'Declaration':
eventDate = self.getData('DURDateDecision')
if not eventDate:
eventDate = self.getData('DURDateTransmission')
decision = self.getData('DURDecision')
if decision and not eventDate:
eventDate = self.getValueMapping('default_date_decision')
elif licence.portal_type == 'UrbanCertificateTwo':
eventDate = self.getData('CU2DateDecision')
if eventDate:
return datetime.datetime.strptime(eventDate, "%d/%m/%Y")
else:
raise NoObjectToCreateException
class EnvClassThreeCondAcceptabilityEventIdMapper(Mapper):
def mapId(self, line):
return 'acceptation-de-la-demande-cond'
class EnvClassThreeCondAcceptabilityEventMapper(Mapper):
def mapEventtype(self, line):
licence = self.importer.current_containers_stack[-1]
urban_tool = api.portal.get_tool('portal_urban')
eventtype_id = 'acceptation-de-la-demande-cond'
config = urban_tool.getUrbanConfig(licence)
if hasattr(config.urbaneventtypes, eventtype_id):
return getattr(config.urbaneventtypes, eventtype_id).UID()
class EventDateEnvClassThreeCondAcceptabilityMapper(Mapper):
def mapEventdate(self, line):
eventDate = self.getData('DENDatePriseActeAvecConditions')
eventDecision = self.getData('DENDecision')
if eventDecision == "OK avec conditions":
if not eventDate:
eventDate = self.getValueMapping('default_date_decision')
return datetime.datetime.strptime(eventDate, "%d/%m/%Y")
else:
raise NoObjectToCreateException
class EnvClassThreeAcceptabilityEventIdMapper(Mapper):
def mapId(self, line):
return 'acceptation-de-la-demande'
class EnvClassThreeAcceptabilityEventMapper(Mapper):
def mapEventtype(self, line):
licence = self.importer.current_containers_stack[-1]
urban_tool = api.portal.get_tool('portal_urban')
eventtype_id = 'acceptation-de-la-demande'
config = urban_tool.getUrbanConfig(licence)
if hasattr(config.urbaneventtypes, eventtype_id):
return getattr(config.urbaneventtypes, eventtype_id).UID()
class EventDateEnvClassThreeAcceptabilityMapper(Mapper):
def mapEventdate(self, line):
eventDate = self.getData('DENDatePriseActeSansConditions')
eventDecision = self.getData('DENDecision')
if eventDecision == "OK sans conditions":
if not eventDate:
eventDate = self.getValueMapping('default_date_decision')
return datetime.datetime.strptime(eventDate, "%d/%m/%Y")
else:
raise NoObjectToCreateException
class EnvClassThreeUnacceptabilityEventIdMapper(Mapper):
def mapId(self, line):
return 'acceptation-de-la-demande'
class EnvClassThreeUnacceptabilityEventMapper(Mapper):
def mapEventtype(self, line):
licence = self.importer.current_containers_stack[-1]
urban_tool = api.portal.get_tool('portal_urban')
eventtype_id = 'refus-de-la-demande'
config = urban_tool.getUrbanConfig(licence)
if hasattr(config.urbaneventtypes, eventtype_id):
return getattr(config.urbaneventtypes, eventtype_id).UID()
class EventDateEnvClassThreeUnacceptabilityMapper(Mapper):
def mapEventdate(self, line):
eventDate = self.getData('DENDateIrrecevable')
eventDecision = self.getData('DENDecision')
if eventDecision == "irrecevable":
if not eventDate:
eventDate = self.getValueMapping('default_date_decision')
return datetime.datetime.strptime(eventDate, "%d/%m/%Y")
else:
raise NoObjectToCreateException
class OldDecisionEventDateMapper(Mapper):
def mapDecisiondate(self, line):
datePermis = self.getData('DENDatePriseActeAvecConditions')
try:
d = datetime.datetime.strptime(datePermis, "%d.%m.%y")
if d > datetime.datetime.now():
                d = datetime.datetime(d.year - 100, d.month, d.day)
return d
except ValueError:
return
class OldDecisionEventDecisionMapper(Mapper):
def mapDecision(self, line):
decision = self.getData('Type Decision')
if decision == 'REFUS':
return u'Défavorable'
else:
return u'Favorable'
class OldDecisionEventNotificationDateMapper(Mapper):
def mapEventdate(self, line):
datePermis = self.getData('Date Permis')
try:
d = datetime.datetime.strptime(datePermis, "%d.%m.%y")
if d > datetime.datetime.now():
                d = datetime.datetime(d.year - 100, d.month, d.day)
return d
except ValueError:
raise NoObjectToCreateException
class CollegeReportTypeMapper(Mapper):
def mapEventtype(self, line):
licence = self.importer.current_containers_stack[-1]
urban_tool = api.portal.get_tool('portal_urban')
eventtype_id = self.getValueMapping('eventtype_id_map')[licence.portal_type]['college_report_event']
config = urban_tool.getUrbanConfig(licence)
return getattr(config.urbaneventtypes, eventtype_id).UID()
class CollegeReportIdMapper(Mapper):
def mapId(self, line):
return 'college_report_event'
class CollegeReportEventDateMapper(Mapper):
def mapEventdate(self, line):
eventDate = self.getData('Rapport du College')
if eventDate:
return eventDate
else:
raise NoObjectToCreateException
class CompleteFolderEventMapper(Mapper):
def mapEventtype(self, line):
licence = self.importer.current_containers_stack[-1]
urban_tool = api.portal.get_tool('portal_urban')
eventtype_id = self.getValueMapping('eventtype_id_map')[licence.portal_type]['complete_folder']
config = urban_tool.getUrbanConfig(licence)
if hasattr(config.urbaneventtypes, eventtype_id):
return getattr(config.urbaneventtypes, eventtype_id).UID()
class CompleteFolderDateMapper(Mapper):
def mapEventdate(self, line):
date = self.getData('PENDtDossierComplet')
if not date:
raise NoObjectToCreateException
try:
d = datetime.datetime.strptime(date, "%d/%m/%Y")
if d > datetime.datetime.now():
                d = datetime.datetime(d.year - 100, d.month, d.day)
return d
except ValueError:
raise NoObjectToCreateException
class CompleteFolderEventIdMapper(Mapper):
def mapId(self, line):
return 'complete_folder'
class IncompleteFolderEventMapper(Mapper):
def mapEventtype(self, line):
licence = self.importer.current_containers_stack[-1]
urban_tool = api.portal.get_tool('portal_urban')
eventtype_id = ('dossier-incomplet')
config = urban_tool.getUrbanConfig(licence)
if hasattr(config.urbaneventtypes, eventtype_id):
return getattr(config.urbaneventtypes, eventtype_id).UID()
class IncompleteFolderDateMapper(Mapper):
def mapEventdate(self, line):
date = self.getData('PENDtDossierIncomplet')
if not date:
raise NoObjectToCreateException
try:
d = datetime.datetime.strptime(date, "%d/%m/%Y")
if d > datetime.datetime.now():
                d = datetime.datetime(d.year - 100, d.month, d.day)
return d
except ValueError:
raise NoObjectToCreateException
class IncompleteFolderEventIdMapper(Mapper):
def mapId(self, line):
return 'incomplete_folder'
#
# UrbanEvent suspension
#
# factory
class SuspensionEventFactory(UrbanEventFactory):
def create(self, kwargs, container, line):
if not kwargs['eventtype']:
return []
eventtype_uid = kwargs.pop('eventtype')
suspension_reason = kwargs.pop('suspensionReason')
urban_event = container.createUrbanEvent(eventtype_uid, **kwargs)
urban_event.setSuspensionReason(suspension_reason)
return urban_event
#
# Documents
#
# factory
class DocumentsFactory(BaseFactory):
""" """
def getPortalType(self, container, **kwargs):
return 'File'
# *** Utils ***
class Utils():
@staticmethod
def convertToUnicode(string):
if isinstance(string, unicode):
return string
# convert to unicode if necessary, against iso-8859-1 : iso-8859-15 add € and oe characters
data = ""
if string and isinstance(string, str):
try:
data = unicodedata.normalize('NFKC', unicode(string, "iso-8859-15"))
except UnicodeDecodeError:
                # fall back to a lossy decode rather than dropping into a debugger
                data = unicodedata.normalize('NFKC', unicode(string, "iso-8859-15", "replace"))
return data
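    # Illustrative: Utils.convertToUnicode('caf\xe9') decodes the byte 0xe9 as
    # iso-8859-15 and returns u'café'.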
@staticmethod
def createArchitect(name):
idArchitect = idnormalizer.normalize(name + 'Architect').replace(" ", "")
containerArchitects = api.content.get(path='/urban/architects')
if idArchitect not in containerArchitects.objectIds():
new_id = idArchitect
new_name1 = name
if not (new_id in containerArchitects.objectIds()):
object_id = containerArchitects.invokeFactory('Architect', id=new_id,
name1=new_name1)
@staticmethod
def createGeometrician(name1, name2):
idGeometrician = idnormalizer.normalize(name1 + name2 + 'Geometrician').replace(" ", "")
containerGeometricians = api.content.get(path='/urban/geometricians')
if idGeometrician not in containerGeometricians.objectIds():
new_id = idGeometrician
new_name1 = name1
new_name2 = name2
if not (new_id in containerGeometricians.objectIds()):
object_id = containerGeometricians.invokeFactory('Geometrician', id=new_id,
name1=new_name1,
name2=new_name2) | [
"[email protected]"
]
| |
f9e0930f0105b1e7ffa5cf93463939b85918a75f | e6cf2817154c6764d308503a3b06f798e5854799 | /4. Comprehension/EXERCISE/01_word_filter.py | 95cd69c9e5aa75a075274ce5a05d8b05b36d8346 | []
| no_license | dimDamyanov/PythonAdvanced | dca1629cac2ee7ae113b82f19276e1c5e0e07023 | 00ca5148c4d57c8cdd13f069315942e81902f746 | refs/heads/main | 2023-04-06T18:16:11.240712 | 2021-04-22T20:57:28 | 2021-04-22T20:57:28 | 328,765,865 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | text = input()
even_words = [word for word in text.split() if len(word) % 2 == 0]
for word in even_words:
print(word)
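
# Sample session (illustrative): for the input line "we are brave soldiers"
# this prints "we" and "soldiers", the only words with an even letter count.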
| [
"[email protected]"
]
| |
057689d1fa8f8c16acf59a0f0e342efca11d8cde | cb9281a34c3c5a36d4b3a846fb6ff22ede12f2f6 | /annotate_communities.py | c792c346aaae78ae95b967b3522d7c87354ffd69 | []
| no_license | juliettapc/CalorieKing | 9cb9f35ae9b239d2284175b0802cf2c60dc79d1d | 5f80bffb65fe4644a81ae2ab0b1738861e028331 | refs/heads/master | 2022-02-10T07:52:24.133379 | 2022-02-08T01:25:18 | 2022-02-08T01:25:18 | 153,174,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | import networkx as nx
from transform_labels_to_nx import transform_labels_to_nx
import sys, os
from numpy import *
import itertools
def annotate_communities(G, num_points, filename, communitylist, dbdate = '2010'):
    '''
    Created by Rufaro Mukogo on 2011-03-31.
    Copyright (c) 2010 __Northwestern University__. All rights reserved.
    This script takes a GML file and the number of points, reads a dat file that contains the
    list of lists for the communities, and then annotates the GML file with a community attribute for
    each node that belongs to a community. The communities are ordered from the largest to the smallest;
    the identifier is "n_s" where n is the number of the community (zero is the largest) and s is the size of the community.
    '''
for n in G.nodes(): # because the type of the labels or ids in some gml files is diff, and otherwise it gives me an error
G.node[n]['label']=str(G.node[n]['label'])
G.node[n]['id']=str(G.node[n]['id'])
if dbdate =="2010":
G = transform_labels_to_nx(G)
#open file with the list of communities
f = open(str(communitylist)).readlines()
else:
print "You need to generate a gml file that has only 2009 data"
sys.exit()
#extract list of communities should return a list of list
communities = [x.strip().split(";") for x in f]
# print communities,"\n"
communities = [x.strip().split(",") for x in communities[0]]
#print communities,"\n"
#sort communities
communities = sorted(communities, key=len, reverse=True)
#lisf of all the nodes that are in a community
com_nodes= itertools.chain(*communities)
#convert to integers to avoid key errors
com_nodes =map(int, list(com_nodes))
for n in G.nodes():
if n not in com_nodes:
G.node[n]["community"] = ""
#print n
ii = 0
for co in communities:
s = str(ii)+"_"+str(len(co))
#print "community_size", len(co), "s:",s
for n in co:
#add attribute to the main GML file
n=str(n)
G.node[n]["community"] = s
ii+=1
nx.write_gml(G,str(filename)+".gml")
return G
if __name__ =="__main__":
    if len(sys.argv)>1:
        communitylist = sys.argv[1]
    else:
        print "Enter the name of the list of communities"
        sys.exit(1)
    if len(sys.argv)>2:
        filename = sys.argv[2]
    else:
        print "Enter the name of the .gml file"
        sys.exit(1)
num_points = 5
M = nx.read_gml(str(filename)+".gml")
for n in M.nodes():
M.node[n]["community"] = ""
H = annotate_communities(M,num_points, filename, communitylist)
| [
"[email protected]"
]
| |
8a47069ad08ff4a25b593f7f933e7207a34c9c81 | e6b1ad137a9bd3d39ae7c61cb5c7f7956ce095b9 | /bruteforce/first_and_last.py | 254541adec5d55d00b4b5ecdb2ee1dce8ea5e268 | []
| no_license | jadenpadua/Data-Structures-and-Algorithms | d9ba8ece779a2d564a7d66fcbacc9fb7fa1f7205 | 838c29112fec4beb9d9cc3f54db00492b4a480b0 | refs/heads/master | 2021-07-17T13:10:52.029327 | 2020-07-08T02:00:14 | 2020-07-08T02:00:14 | 186,896,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | Write a function that returns the lexicographically first and lexicographically last rearrangements of a string. Output the results in the following manner:
first_and_last(string) ➞ [first, last]
Examples
first_and_last("marmite") ➞ ["aeimmrt", "trmmiea"]
first_and_last("bench") ➞ ["bcehn", "nhecb"]
first_and_last("scoop") ➞ ["coops", "spooc"]
def first_and_last(s):
    # sorting the characters gives the lexicographically first rearrangement;
    # the last rearrangement is simply that string reversed
    first = ''.join(sorted(s))
    last = first[::-1]
    output = [first, last]
return output
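
# Quick self-check against the kata's stated examples (this harness is an
# addition, not part of the original solution file):
if __name__ == '__main__':
    assert first_and_last("marmite") == ["aeimmrt", "trmmiea"]
    assert first_and_last("bench") == ["bcehn", "nhecb"]
    assert first_and_last("scoop") == ["coops", "spooc"]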
| [
"[email protected]"
]
| |
4458795c392ba0ab3f81e2d130be56272b33e8c0 | ee00ebe5e71c36b05fbff993b19e9723b963313f | /280_Wiggle_Sort.py | 09fa9084f0ab202059ebdd2af873de234323560f | []
| no_license | 26XINXIN/leetcode | f365560d93604a28abf399707b333f3c11f924ec | 78ed11f34fd03e9a188c9c6cb352e883016d05d9 | refs/heads/master | 2021-06-28T16:31:45.103879 | 2020-09-19T20:33:55 | 2020-09-19T20:33:55 | 144,975,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | class Solution:
def wiggleSort(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
if len(nums) <= 1:
return
for i in range(1, len(nums)):
if i % 2 == 1: # increasing
if nums[i] < nums[i-1]:
nums[i-1], nums[i] = nums[i], nums[i-1]
else: # decreasing
if nums[i] > nums[i-1]:
nums[i-1], nums[i] = nums[i], nums[i-1]
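
# Minimal local harness (LeetCode normally instantiates Solution itself;
# the sample array below is hypothetical):
if __name__ == '__main__':
    nums = [3, 5, 2, 1, 6, 4]
    Solution().wiggleSort(nums)
    # verify the wiggle property nums[0] <= nums[1] >= nums[2] <= nums[3] ...
    assert all((nums[i] <= nums[i + 1]) if i % 2 == 0 else (nums[i] >= nums[i + 1])
               for i in range(len(nums) - 1))
    print(nums)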
| [
"[email protected]"
]
| |
7ab34c90f6402e871718fc7299fa5701b912a3e5 | 82236c1cf2fe6ca26f52ce4eeae1745cf3cbc5ca | /docs/source/conf.py | 970611753ff44195353547e41808aed5480865fe | [
"Apache-2.0"
]
| permissive | CKrawczyk/python-reducers-for-caesar | 8b607fddd7ce36cd81e1b4e2e7079e1a66526d22 | 9c5d9e072906d3fde2497fa61a66e4c8c0113ec2 | refs/heads/master | 2021-06-04T07:35:25.738616 | 2017-08-10T15:56:42 | 2017-08-10T15:56:42 | 91,355,049 | 1 | 2 | Apache-2.0 | 2019-04-03T20:28:31 | 2017-05-15T15:40:00 | Python | UTF-8 | Python | false | false | 5,261 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# panoptes_aggregation documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 7 13:22:24 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../panoptes_aggregation'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinxcontrib.autohttp.flask'
]
napoleon_google_docstring = False
napoleon_use_param = False
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'panoptes_aggregation'
copyright = '2017, Coleman Krawczyk'
author = 'Coleman Krawczyk'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'panoptes_aggregationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'panoptes_aggregation.tex', 'panoptes\\_aggregation Documentation',
'Coleman Krawczyk', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'panoptes_aggregation', 'panoptes_aggregation Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'panoptes_aggregation', 'panoptes_aggregation Documentation',
author, 'panoptes_aggregation', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| [
"[email protected]"
]
| |
de8a9c196a80dde711075fc0f91d2dc1ce5625e9 | 10b22cef27b7cb7f06221954eef6ea678c5289c1 | /database/database_schemas_ms.py | 0ad92f442cd946089275a60618ee4b0020b399d7 | [
"MIT"
]
| permissive | mshobair/invitro_cheminformatics | 0c1d7c4c2cfd5e20ee24fffac6a0332d503957df | 17201496c73453accd440646a1ee81726119a59c | refs/heads/main | 2023-04-04T19:06:27.098377 | 2021-03-26T17:07:25 | 2021-03-26T17:07:25 | 348,917,957 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | class Schemas:
"""
Class that contains DATABASE schema names.
"""
chemprop_schema = "sbox_rlougee_chemprop"
dsstox_schema = "ro_20191118_dsstox"
qsar_schema = "sbox_mshobair_qsar_snap"
invitrodb_schema = "prod_internal_invitrodb_v3_3"
information_schema = "information_schema"
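
# Illustrative usage (the table name "mc5" below is hypothetical):
# query = "SELECT * FROM {}.mc5 LIMIT 10".format(Schemas.invitrodb_schema)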
| [
"[email protected]"
]
| |
07f64f78d296821856c1ef3a04cfa9596a3859d1 | 1c76418fee90f80368f2f843007ebd6a37bfc01f | /GLOBALS.py | 6efbf9f79ee858f37aa06dd7eaf8915877d118f8 | []
| no_license | SyntaxVoid/HighRedshiftGalaxyFinder | e5dfb244bbba53c310de9b7fe414990b04bcb3a0 | 83fad048e37d65a1a7c98727c0d4164c8e84922a | refs/heads/master | 2021-01-20T21:59:06.211431 | 2015-11-19T04:24:58 | 2015-11-19T04:24:58 | 42,703,816 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,705 | py | ## The filters [str] which we are looking at in this project
FILTERS = ['f125w','f160w','f435w','f606w','f775w','f850l']
SELECTIONS = ["b435","i775","v606","z4","z5","z6","z7","z8"]
# Column number corresponding to each column name of my header
MASTER_COL_DICT = {"Number": 0, "RA": 1, "ALPHA_J2000": 1, "DEC": 2,"DELTA_J2000": 2,
"F125W_FLUX": 3 , "F125W_FLUXERR": 4 , "F125W_MAG": 5 , "F125W_MAGERR": 6 ,
"F160W_FLUX": 7 , "F160W_FLUXERR": 8 , "F160W_MAG": 9 , "F160W_MAGERR": 10,
"F435W_FLUX": 11, "F435W_FLUXERR": 12, "F435W_MAG": 13, "F435W_MAGERR": 14,
"F606W_FLUX": 15, "F606W_FLUXERR": 16, "F606W_MAG": 17, "F606W_MAGERR": 18,
"F775W_FLUX": 19, "F775W_FLUXERR": 20, "F775W_MAG": 21, "F775W_MAGERR": 22,
"F850L_FLUX": 23, "F850L_FLUXERR": 24, "F850L_MAG": 25, "F850L_MAGERR": 26}
MASTER_CANDELS_DICT = {"Number": 0, "IAU_Name": 1, "RA": 2, "ALPHA_J2000": 2,"DEC":3, "DELTA_J2000": 3,
"F160W_LIMIT_MAG": 4, "FLAGS": 5, "CLASS_STAR": 6, "CITO_U_FLUX": 7, "CITO_U_FLUXERR": 8,
"CITO_U_WEIGHT": 9, "VIMOS_U_FLUX": 10, "VIMOS_U_FLUXERR": 11, "VIMOS_U_WEIGHT": 12,
"F435W_FLUX": 13, "F435W_FLUXERR": 14, "F435W_WEIGHT": 15,
"F606W_FLUX": 16, "F606W_FLUXERR": 17, "F606W_WEIGHT": 18,
"F775W_FLUX": 19, "F775W_FLUXERR": 20, "F775W_WEIGHT": 21,
"F814W_FLUX": 22, "F814W_FLUXERR": 23, "F814W_WEIGHT": 24,
"F850L_FLUX": 25, "F850L_FLUXERR": 26, "F850L_WEIGHT": 27,
"F098M_FLUX": 28, "F098M_FLUXERR": 29, "F098M_WEIGHT": 30,
"F105W_FLUX": 31, "F105W_FLUXERR": 32, "F105W_WEIGHT": 33,
"F125W_FLUX": 34, "F125W_FLUXERR": 35, "F125W_WEIGHT": 36,
"F160W_FLUX": 37, "F160W_FLUXERR": 38, "F160W_WEIGHT": 39}
MY_COLOR_COLOR_OPS = {"b435": [[MASTER_COL_DICT["F435W_MAG"],MASTER_COL_DICT["F606W_MAG"]],
[MASTER_COL_DICT["F606W_MAG"],MASTER_COL_DICT["F850L_MAG"]]],
"v606": [[MASTER_COL_DICT["F606W_MAG"],MASTER_COL_DICT["F775W_MAG"]],
[MASTER_COL_DICT["F775W_MAG"],MASTER_COL_DICT["F850L_MAG"]]],
"i775": [[MASTER_COL_DICT["F606W_MAG"],MASTER_COL_DICT["F850L_MAG"]],
[MASTER_COL_DICT["F775W_MAG"],MASTER_COL_DICT["F850L_MAG"]]]}
CANDELS_COLOR_COLOR_OPS = {"b435": [[MASTER_CANDELS_DICT["F435W_FLUX"],MASTER_CANDELS_DICT["F606W_FLUX"]],
[MASTER_CANDELS_DICT["F606W_FLUX"],MASTER_CANDELS_DICT["F850L_FLUX"]]],
"v606": [[MASTER_CANDELS_DICT["F606W_FLUX"],MASTER_CANDELS_DICT["F775W_FLUX"]],
[MASTER_CANDELS_DICT["F775W_FLUX"],MASTER_CANDELS_DICT["F850L_FLUX"]]],
"i775": [[MASTER_CANDELS_DICT["F606W_FLUX"],MASTER_CANDELS_DICT["F850L_FLUX"]],
[MASTER_CANDELS_DICT["F775W_FLUX"],MASTER_CANDELS_DICT["F850L_FLUX"]]]}
COLOR_RULES = {"b435": ["V-Z","B-V",1.6,1.1,[-1,4],[-1,6],1.10,1.00,'yes'],
"v606": ["I-Z","V-I",1.3,1.2,[-2,5],[-1,6],1.47,0.89,'yes'],
"i775": ["V-Z","I-Z",1.2,1.3,[-1,4],[-1,6],1.20,1.30,'yes']}
## The column [int] corresponding to the filter [str] in the Candels column
PUB_COL_DICT = {"f125w":42,"f160w":45,"f435w":21,"f606w":24,"f775w":27,"f850l":33}
## Zero points [float] of each filter we are analyzing, corresponding
## to the units that our map is in. All maps except f435 are in units
## of uJy, which has a corresponding zero point of 23.9.
ZP_f125w = 23.9
ZP_f160w = 23.9
ZP_f435w = 25.665
ZP_f606w = 23.9
ZP_f775w = 23.9
ZP_f850l = 23.9
MY_MAPS = ["FullMaps/gs_f125w_cropcal.fits","FullMaps/gs_f160w_cropcal.fits","FullMaps/gs_f606w_cropcal.fits",
"FullMaps/gs_f775w_cropcal.fits","FullMaps/gs_f850l_cropcal.fits"]
CANDELS_MAPS = ["CandelsRescaled/gs_all_candels_ers_udf_f125w_060mas_v0.5_drz.fits",
"CandelsRescaled/gs_all_candels_ers_udf_f160w_060mas_v0.5_drz.fits",
"CandelsRescaled/gs_presm4_all_acs_f606w_60mas_v3.0_drz.fits",
"CandelsRescaled/gs_presm4_all_acs_f775w_60mas_v3.0_drz.fits",
"CandelsRescaled/gs_presm4_all_acs_f850l_60mas_v3.0_drz.fits"]
SUB_DEST = ["SubtractedMaps/f125w_sub.fits","SubtractedMaps/f160w_sub.fits","SubtractedMaps/f606w_sub.fits",
"SubtractedMaps/f775w_sub.fits","SubtractedMaps/f850l_sub.fits"]
# Header to add to the catalogs that I generate
header = '''# 1 NUMBER Running object number [count]
# 2 ALPHA_J2000 Right ascension of barycenter (J2000) [deg]
# 3 DELTA_J2000 Declination of barycenter (J2000) [deg]
# 4 F125W_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 5 F125W_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 6 F125W_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 7 F125W_MAGERR_AUTO RMS error for AUTO magnitude [mag]
# 8 F160W_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 9 F160W_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 10 F160W_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 11 F160W_MAGERR_AUTO RMS error for AUTO magnitude [mag]
# 12 F435W_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 13 F435W_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 14 F435W_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 15 F435W_MAGERR_AUTO RMS error for AUTO magnitude [mag]
# 16 F606W_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 17 F606W_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 18 F606W_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 19 F606W_MAGERR_AUTO RMS error for AUTO magnitude [mag]
# 20 F775W_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 21 F775W_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 22 F775W_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 23 F775_MAGERR_AUTO RMS error for AUTO magnitude [mag]
# 24 F850LP_FLUX_AUTO Flux within a Kron-like elliptical aperture [uJy]
# 25 F850LP_FLUXERR_AUTO RMS error for AUTO flux [uJy]
# 26 F850LP_MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 27 F850LP_MAGERR_AUTO RMS error for AUTO magnitude [mag]
'''
candels_header = '''# 1 ID (F160W SExtractor ID)
# 2 IAU_Name
# 3 RA (F160W coordinate, J2000, degree)
# 4 DEC (F160W coordinate, J2000, degree)
# 5 F160W_LIMITING_MAGNITUDE (AB)
# 6 FLAGS
# 7 CLASS_STAR (F160W SExtractor S/G classifier output)
# 8 CTIO_U_FLUX (uJy)
# 9 CTIO_U_FLUXERR (uJy)
# 10 CTIO_U_WEIGHT
# 11 VIMOS_U_FLUX (uJy)
# 12 VIMOS_U_FLUXERR (uJy)
# 13 VIMOS_U_WEIGHT
# 14 ACS_F435W_FLUX (uJy)
# 15 ACS_F435W_FLUXERR (uJy)
# 16 ACS_F435W_WEIGHT
# 17 ACS_F606W_FLUX (uJy)
# 18 ACS_F606W_FLUXERR (uJy)
# 19 ACS_F606W_WEIGHT
# 20 ACS_F775W_FLUX (uJy)
# 21 ACS_F775W_FLUXERR (uJy)
# 22 ACS_F775W_WEIGHT
# 23 ACS_F814W_FLUX (uJy)
# 24 ACS_F814W_FLUXERR (uJy)
# 25 ACS_F814W_WEIGHT
# 26 ACS_F850LP_FLUX (uJy)
# 27 ACS_F850LP_FLUXERR (uJy)
# 28 ACS_F850LP_WEIGHT
# 29 WFC3_F098M_FLUX (uJy)
# 30 WFC3_F098M_FLUXERR (uJy)
# 31 WFC3_F098M_WEIGHT
# 32 WFC3_F105W_FLUX (uJy)
# 33 WFC3_F105W_FLUXERR (uJy)
# 34 WFC3_F105W_WEIGHT
# 35 WFC3_F125W_FLUX (uJy)
# 36 WFC3_F125W_FLUXERR (uJy)
# 37 WFC3_F125W_WEIGHT
# 38 WFC3_F160W_FLUX (uJy)
# 39 WFC3_F160W_FLUXERR (uJy)
# 40 WFC3_F160W_WEIGHT
# 41 ISAAC_KS_FLUX (uJy)
# 42 ISAAC_KS_FLUXERR (uJy)
# 43 ISAAC_KS_WEIGHT
# 44 HAWKI_KS_FLUX (uJy)
# 45 HAWKI_KS_FLUXERR (uJy)
# 46 HAWKI_KS_WEIGHT
# 47 IRAC_CH1_FLUX (uJy)
# 48 IRAC_CH1_FLUXERR (uJy)
# 49 IRAC_CH1_WEIGHT
# 50 IRAC_CH2_FLUX (uJy)
# 51 IRAC_CH2_FLUXERR (uJy)
# 52 IRAC_CH2_WEIGHT
# 53 IRAC_CH3_FLUX (uJy)
# 54 IRAC_CH3_FLUXERR (uJy)
# 55 IRAC_CH3_WEIGHT
# 56 IRAC_CH4_FLUX (uJy)
# 57 IRAC_CH4_FLUXERR (uJy)
# 58 IRAC_CH4_WEIGHT
# 59 FLUX_ISO (SExtractor F160W FLUX_ISO, uJy)
# 60 FLUXERR_ISO (SExtractor F160W FLUXERR_ISO, uJy)
# 61 FLUX_AUTO (SExtractor F160W FLUX_AUTO, uJy)
# 62 FLUXERR_AUTO (SExtractor F160W FLUXERR_AUTO, uJy)
# 63 FWHM_IMAGE (FWHM of F160W, pixel, 1 pixel=0.06 arcsec)
# 64 A_IMAGE (F160W SExtractor Profile RMS along major axis, pixel)
# 65 B_IMAGE (F160W SExtractor Profile RMS along minor axis, pixel)
# 66 KRON_RADIUS (F160W SExtractor Kron aperture in units of A or B)
# 67 FLUX_RADIUS_1 (F160W SExtractor 20% of light radius, pixel)
# 68 FLUX_RADIUS_2 (F160W SExtractor 50% of light radius, pixel)
# 69 FLUX_RADIUS_3 (F160W SExtractor 80% of light radius, pixel)
# 70 THETA_IMAGE (F160W SExtractor Position angle (CCW/x), degree)
# 71 APCORR (F160W FLUX_AUTO/FLUX_ISO, applied to ACS and WFC3 bands)
# 72 HOT_FLAG (Source enters the catalog as a hot detection (=1) or a cold detection (=0))
# 73 ISOAREAF_IMAGE (SExtractor F160W Isophotal Area)''' | [
"[email protected]"
]
| |
8c233f047715954abc685b0149bdc1c86d63168e | 36c00fe2afff4818c937e312ce0c6a79f35e2a77 | /7-kyu/naughty-or-nice-/python/solution.py | 7d97dd81f35376746e01998e5608dffd391051cd | []
| no_license | p-lots/codewars | 0a67b6ee4c91180ff78c648421b9d2d64463ddc3 | 535faeee475c6b398124d6f5002b0e111406e8bb | refs/heads/master | 2023-08-23T22:14:33.635011 | 2023-08-23T13:30:37 | 2023-08-23T13:30:37 | 195,320,309 | 0 | 0 | null | 2023-05-09T19:25:50 | 2019-07-05T01:40:15 | Python | UTF-8 | Python | false | false | 191 | py | def get_nice_names(people):
return [dct['name'] for dct in people if dct['was_nice']]
def get_naughty_names(people):
return [dct['name'] for dct in people if not dct['was_nice']] | [
"[email protected]"
]
| |
cdf73285697080951a456e2a5c01d533c393b240 | 1cb4b326b8148779221f38da5ba1b4fa9a017f12 | /Game22/modules/online/server.py | e486128b2439a353fb0c15ea4f0e7de70fef91c8 | [
"MIT"
]
| permissive | Chirag2007/Games | c7b27d5bb735912a7dec1fade76f92abfbc078e8 | bc05c6826e63e5e3e279073443f4587f70fae741 | refs/heads/master | 2023-08-21T09:02:06.799276 | 2021-10-01T04:07:03 | 2021-10-01T04:07:03 | 412,320,297 | 0 | 0 | MIT | 2021-10-01T04:07:04 | 2021-10-01T04:05:26 | null | UTF-8 | Python | false | false | 14,729 | py | '''
Function:
联机对战服务器端
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import socket
import pygame
import random
import threading
from ..misc import *
from PyQt5 import QtCore
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from itertools import product
'''Server side'''
class gobangSever(QWidget):
back_signal = pyqtSignal()
exit_signal = pyqtSignal()
receive_signal = pyqtSignal(dict, name='data')
send_back_signal = False
def __init__(self, cfg, nickname, parent=None, **kwargs):
super(gobangSever, self).__init__(parent)
        # Pre-define some necessary variables
self.cfg = cfg
self.nickname = nickname
self.opponent_nickname = None
self.client_ipport = None
self.is_gaming = False
self.chessboard = [[None for i in range(19)] for _ in range(19)]
self.history_record = []
self.winner = None
self.winner_info_label = None
self.player_color = 'white'
self.opponent_player_color = 'black'
self.whoseround = None
        # Basic settings for the current window
self.setFixedSize(760, 650)
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘')
self.setWindowIcon(QIcon(cfg.ICON_FILEPATH))
        # Background image
palette = QPalette()
palette.setBrush(self.backgroundRole(), QBrush(QPixmap(cfg.BACKGROUND_IMAGEPATHS.get('bg_game'))))
self.setPalette(palette)
        # Display your nickname
self.nickname_label = QLabel('您是%s' % self.nickname, self)
self.nickname_label.resize(200, 40)
self.nickname_label.move(640, 180)
        # Last-move marker
self.chessman_sign = QLabel(self)
sign = QPixmap(cfg.CHESSMAN_IMAGEPATHS.get('sign'))
self.chessman_sign.setPixmap(sign)
self.chessman_sign.setFixedSize(sign.size())
self.chessman_sign.show()
self.chessman_sign.hide()
        # Buttons
self.home_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('home'), self)
self.home_button.click_signal.connect(self.goHome)
self.home_button.move(680, 10)
self.startgame_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('startgame'), self)
self.startgame_button.click_signal.connect(self.startgame)
self.startgame_button.move(640, 240)
self.regret_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('regret'), self)
self.regret_button.click_signal.connect(self.regret)
self.regret_button.move(640, 310)
self.givein_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('givein'), self)
self.givein_button.click_signal.connect(self.givein)
self.givein_button.move(640, 380)
self.urge_button = PushButton(cfg.BUTTON_IMAGEPATHS.get('urge'), self)
self.urge_button.click_signal.connect(self.urge)
self.urge_button.move(640, 450)
        # Load the stone-drop and urge sound effects
pygame.mixer.init()
self.drop_sound = pygame.mixer.Sound(cfg.SOUNDS_PATHS.get('drop'))
self.urge_sound = pygame.mixer.Sound(cfg.SOUNDS_PATHS.get('urge'))
        # Bind the receive-data signal to the responseForReceiveData function
self.receive_signal.connect(self.responseForReceiveData)
        # TCP/IP server
self.tcp_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_server.bind(('0.0.0.0', cfg.PORT))
self.tcp_server.listen(1)
        # TCP/IP socket
self.tcp_socket = None
        # Start a thread to listen for connections
threading.Thread(target=self.startListen).start()
    '''Return to the game main menu'''
def goHome(self):
self.send_back_signal = True
self.close()
self.back_signal.emit()
    '''Start the game'''
def startgame(self):
if self.tcp_socket is None:
QMessageBox.information(self, '提示', '对方未连接, 请耐心等待')
else:
self.randomAssignColor()
data = {'type': 'action', 'detail': 'startgame', 'data': [self.player_color, self.opponent_player_color]}
self.tcp_socket.sendall(packSocketData(data))
QMessageBox.information(self, '提示', '游戏开始请求已发送, 等待对方确定中')
    '''Give in (concede the game)'''
def givein(self):
if self.tcp_socket and self.is_gaming and (self.winner is None) and (self.whoseround == self.player_color):
self.winner = self.opponent_player_color
self.showGameEndInfo()
data = {'type': 'action', 'detail': 'givein'}
self.tcp_socket.sendall(packSocketData(data))
    '''Regret a move - only allowed during the opponent's turn'''
def regret(self):
if self.tcp_socket and self.is_gaming and (self.winner is None) and (self.whoseround == self.opponent_player_color):
data = {'type': 'action', 'detail': 'regret'}
self.tcp_socket.sendall(packSocketData(data))
    '''Urge the opponent to move'''
def urge(self):
if self.tcp_socket and self.is_gaming and (self.winner is None) and (self.whoseround == self.opponent_player_color):
data = {'type': 'action', 'detail': 'urge'}
self.tcp_socket.sendall(packSocketData(data))
self.urge_sound.play()
    '''Left mouse button click event - player's turn'''
def mousePressEvent(self, event):
if (self.tcp_socket is None) or (event.buttons() != QtCore.Qt.LeftButton) or (self.winner is not None) or (self.whoseround != self.player_color) or (not self.is_gaming):
return
        # Only respond inside the chessboard area
if event.x() >= 50 and event.x() <= 50 + 30 * 18 + 14 and event.y() >= 50 and event.y() <= 50 + 30 * 18 + 14:
pos = Pixel2Chesspos(event)
            # Make sure nobody has already played at this position
if self.chessboard[pos[0]][pos[1]]:
return
            # Instantiate a chessman and display it
c = Chessman(self.cfg.CHESSMAN_IMAGEPATHS.get(self.whoseround), self)
c.move(event.pos())
c.show()
self.chessboard[pos[0]][pos[1]] = c
            # Play the stone-drop sound
self.drop_sound.play()
            # Move the last-move marker to the drop position
self.chessman_sign.show()
self.chessman_sign.move(c.pos())
self.chessman_sign.raise_()
            # Record this move
self.history_record.append([*pos, self.whoseround])
            # Send our drop position to the opponent
data = {'type': 'action', 'detail': 'drop', 'data': pos}
self.tcp_socket.sendall(packSocketData(data))
            # Check whether someone has won
self.winner = checkWin(self.chessboard)
if self.winner:
self.showGameEndInfo()
return
            # Switch the side to move (really just swaps the color)
self.nextRound()
    '''Display the game-over result'''
def showGameEndInfo(self):
self.is_gaming = False
info_img = QPixmap(self.cfg.WIN_IMAGEPATHS.get(self.winner))
self.winner_info_label = QLabel(self)
self.winner_info_label.setPixmap(info_img)
self.winner_info_label.resize(info_img.size())
self.winner_info_label.move(50, 50)
self.winner_info_label.show()
    '''Handle the received data'''
def responseForReceiveData(self, data):
if data['type'] == 'action' and data['detail'] == 'exit':
QMessageBox.information(self, '提示', '您的对手已退出游戏, 游戏将自动返回主界面')
self.goHome()
elif data['type'] == 'action' and data['detail'] == 'startgame':
self.opponent_player_color, self.player_color = data['data']
self.whoseround = 'white'
self.whoseround2nickname_dict = {self.player_color: self.nickname, self.opponent_player_color: self.opponent_nickname}
res = QMessageBox.information(self, '提示', '对方请求(重新)开始游戏, 您为%s, 您是否同意?' % {'white': '白子', 'black': '黑子'}.get(self.player_color), QMessageBox.Yes | QMessageBox.No)
if res == QMessageBox.Yes:
data = {'type': 'reply', 'detail': 'startgame', 'data': True}
self.tcp_socket.sendall(packSocketData(data))
self.is_gaming = True
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> %s走棋' % self.whoseround2nickname_dict.get(self.whoseround))
for i, j in product(range(19), range(19)):
if self.chessboard[i][j]:
self.chessboard[i][j].close()
self.chessboard[i][j] = None
self.history_record.clear()
self.winner = None
if self.winner_info_label:
self.winner_info_label.close()
self.winner_info_label = None
self.chessman_sign.hide()
else:
data = {'type': 'reply', 'detail': 'startgame', 'data': False}
self.tcp_socket.sendall(packSocketData(data))
elif data['type'] == 'action' and data['detail'] == 'drop':
pos = data['data']
            # Instantiate a chessman and display it
c = Chessman(self.cfg.CHESSMAN_IMAGEPATHS.get(self.whoseround), self)
c.move(QPoint(*Chesspos2Pixel(pos)))
c.show()
self.chessboard[pos[0]][pos[1]] = c
            # Play the stone-drop sound
self.drop_sound.play()
            # Move the last-move marker to the drop position
self.chessman_sign.show()
self.chessman_sign.move(c.pos())
self.chessman_sign.raise_()
            # Record this move
self.history_record.append([*pos, self.whoseround])
            # Check whether someone has won
self.winner = checkWin(self.chessboard)
if self.winner:
self.showGameEndInfo()
return
            # Switch the side to move (really just swaps the color)
self.nextRound()
elif data['type'] == 'action' and data['detail'] == 'givein':
self.winner = self.player_color
self.showGameEndInfo()
elif data['type'] == 'action' and data['detail'] == 'urge':
self.urge_sound.play()
elif data['type'] == 'action' and data['detail'] == 'regret':
res = QMessageBox.information(self, '提示', '对方请求悔棋, 您是否同意?', QMessageBox.Yes | QMessageBox.No)
if res == QMessageBox.Yes:
pre_round = self.history_record.pop(-1)
self.chessboard[pre_round[0]][pre_round[1]].close()
self.chessboard[pre_round[0]][pre_round[1]] = None
self.chessman_sign.hide()
self.nextRound()
data = {'type': 'reply', 'detail': 'regret', 'data': True}
self.tcp_socket.sendall(packSocketData(data))
else:
data = {'type': 'reply', 'detail': 'regret', 'data': False}
self.tcp_socket.sendall(packSocketData(data))
elif data['type'] == 'reply' and data['detail'] == 'startgame':
if data['data']:
self.is_gaming = True
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> %s走棋' % self.whoseround2nickname_dict.get(self.whoseround))
for i, j in product(range(19), range(19)):
if self.chessboard[i][j]:
self.chessboard[i][j].close()
self.chessboard[i][j] = None
self.history_record.clear()
self.winner = None
if self.winner_info_label:
self.winner_info_label.close()
self.winner_info_label = None
self.chessman_sign.hide()
QMessageBox.information(self, '提示', '对方同意开始游戏请求, 您为%s, 执白者先行.' % {'white': '白子', 'black': '黑子'}.get(self.player_color))
else:
QMessageBox.information(self, '提示', '对方拒绝了您开始游戏的请求.')
elif data['type'] == 'reply' and data['detail'] == 'regret':
if data['data']:
pre_round = self.history_record.pop(-1)
self.chessboard[pre_round[0]][pre_round[1]].close()
self.chessboard[pre_round[0]][pre_round[1]] = None
self.nextRound()
QMessageBox.information(self, '提示', '对方同意了您的悔棋请求.')
else:
QMessageBox.information(self, '提示', '对方拒绝了您的悔棋请求.')
elif data['type'] == 'nickname':
self.opponent_nickname = data['data']
    '''Randomly assign colors to both sides - white moves first'''
def randomAssignColor(self):
self.player_color = random.choice(['white', 'black'])
self.opponent_player_color = 'white' if self.player_color == 'black' else 'black'
self.whoseround = 'white'
self.whoseround2nickname_dict = {self.player_color: self.nickname, self.opponent_player_color: self.opponent_nickname}
    '''Switch whose turn it is'''
def nextRound(self):
self.whoseround = self.player_color if self.whoseround == self.opponent_player_color else self.opponent_player_color
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> %s走棋' % self.whoseround2nickname_dict.get(self.whoseround))
    '''Start listening for client connections'''
def startListen(self):
while True:
try:
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> 服务器端启动成功, 等待客户端连接中')
self.tcp_socket, self.client_ipport = self.tcp_server.accept()
self.setWindowTitle('五子棋-微信公众号: Charles的皮卡丘 ——> 客户端已连接, 点击开始按钮进行游戏')
data = {'type': 'nickname', 'data': self.nickname}
self.tcp_socket.sendall(packSocketData(data))
self.receiveClientData()
except:
break
    '''Receive data from the client'''
def receiveClientData(self):
while True:
data = receiveAndReadSocketData(self.tcp_socket)
self.receive_signal.emit(data)
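    # Design note: socket reads happen on a worker thread (startListen ->
    # receiveClientData), and each parsed message is handed back to the Qt
    # main thread through receive_signal, so widget updates stay thread-safe.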
    '''Window close event'''
def closeEvent(self, event):
if self.tcp_socket:
self.tcp_socket.sendall(packSocketData({'type': 'action', 'detail': 'exit'}))
self.tcp_socket.shutdown(socket.SHUT_RDWR)
self.tcp_socket.close()
self.tcp_server.close()
return super().closeEvent(event) | [
"[email protected]"
]
| |
6e39762a6673f11ca94947c8499aa363af2b4dd2 | c168fe819b446640957e5e310ef89fcfe28662b3 | /userbenchmark/__init__.py | c9ff1fac46844cf4cb62479ffa15096e9436dbf2 | [
"BSD-3-Clause"
]
| permissive | pytorch/benchmark | 7b55e8d714de2ea873e03df43811aab3848485dd | df4da9bdff11a2f948d5bd4ac83da7922e6f44f4 | refs/heads/main | 2023-08-29T13:06:09.671728 | 2023-08-28T16:51:55 | 2023-08-28T16:51:55 | 92,541,759 | 685 | 220 | BSD-3-Clause | 2023-09-14T18:10:18 | 2017-05-26T19:21:12 | Python | UTF-8 | Python | false | false | 851 | py | from pathlib import Path
from typing import List
CURRENT_DIR = Path(__file__).parent
def list_userbenchmarks() -> List[str]:
ub_dirs = [x for x in CURRENT_DIR.iterdir() if x.is_dir() and x.joinpath('__init__.py').exists() ]
ub_names = list(map(lambda x: x.name, ub_dirs))
return ub_names
def get_ci_from_ub(ub_name):
import yaml
ci_file = CURRENT_DIR.joinpath(ub_name).joinpath("ci.yaml")
if not ci_file.exists():
return None
with open(ci_file, "r") as ciobj:
cicfg = yaml.safe_load(ciobj)
ret = {}
ret["name"] = ub_name
ret["ci_cfg"] = cicfg
return ret
def get_userbenchmarks_by_platform(platform):
ub_names = list_userbenchmarks()
cfgs = list(map(lambda x: x["name"], filter(lambda x: x and x["ci_cfg"]["platform"] == platform, map(get_ci_from_ub, ub_names))))
return cfgs
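
# Illustrative call (the platform string "gcp_a100" is hypothetical; real values
# come from each userbenchmark's ci.yaml):
# for ub_name in get_userbenchmarks_by_platform("gcp_a100"):
#     print(ub_name)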
| [
"[email protected]"
]
| |
266f7c43ec194665af03f4823f13ff1664004761 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /0_字符串/87. 扰乱字符串.py | 29e56a9f818fe0967e242d7e3d9221f6a53b65b7 | []
| no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | from collections import Counter
from functools import lru_cache
# 1 <= s1.length <= 30
# 87. 扰乱字符串
# !bit packing 可以将复杂度降低为 O(n^4/w)
class Solution:
@lru_cache(None)
def isScramble(self, s1: str, s2: str) -> bool:
if s1 == s2:
return True
if sorted(s1) != sorted(s2): # counter
return False
for i in range(1, len(s1)):
if self.isScramble(s1[:i], s2[:i]) and self.isScramble(s1[i:], s2[i:]):
return True
if self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:], s2[:-i]):
return True
return False
| [
"[email protected]"
]
| |
87b6cd872faff0465ea42ba50c6be9d681f0137a | b24e45267a8d01b7d3584d062ac9441b01fd7b35 | /Usuario/migrations/0001_initial.py | f1a088a9eef7d4b51c898384d51b3a312255a586 | []
| no_license | slalbertojesus/merixo-rest | 1707b198f31293ced38930a31ab524c0f9a6696c | 5c12790fd5bc7ec457baad07260ca26a8641785d | refs/heads/master | 2022-12-10T18:56:36.346159 | 2020-05-02T00:42:39 | 2020-05-02T00:42:39 | 212,175,889 | 0 | 0 | null | 2022-12-08T07:00:07 | 2019-10-01T18:56:45 | Python | UTF-8 | Python | false | false | 1,629 | py | # Generated by Django 2.2.6 on 2019-11-29 05:50
import Usuario.models
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=60, unique=True)),
('username', models.CharField(max_length=30, unique=True)),
('estado', models.CharField(max_length=30)),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
('last_login', models.DateTimeField(auto_now=True, verbose_name='last login')),
('listaUsuarios', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), default=list, null=True, size=None)),
('pic', models.ImageField(upload_to=Usuario.models.upload_location)),
('is_admin', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
]
| [
"[email protected]"
]
| |
39385e0a7b92b66933385b77e3533b3a516318ea | 13213e3e7d6a0866cdf28483adc46d458f8977ac | /qsort/qs.py | f464a28fe040fbe56cf5762e4a0066e408678f00 | []
| no_license | j0k/algopractice | 42654b1158497050911822c46de6791cf8bf251f | 1be3df5553156a523bfce5328df205e6c67c19f3 | refs/heads/master | 2022-06-27T00:10:57.028619 | 2022-06-15T12:34:11 | 2022-06-15T12:34:11 | 100,791,845 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | # 18.06.2017
import random
A = [1,2,4,7,8,9,0,5,3,5,6,8,4,3]
def qsort(a):
l = len(a)
if l <= 1:
return a
pi = int(random.random() * l)
left = []
right = []
p = a[pi]
for (i,item) in enumerate(a):
if i == pi:
continue;
if item <= p:
left.append(item)
else:
right.append(item)
return qsort(left) + [p] + qsort(right)
print qsort(A)
| [
"[email protected]"
]
| |
3a762c9e0cf07f1d9832c6a76c2334c0528244f5 | e3017c4c18b0226ea2131161159a7e51ff02cc0e | /test_ddl.py | 29848bcab2602888f9243625f8af0fb8bc4ad607 | []
| no_license | samhaug/tomography_tools | 834c0f9781928411d32f9b190f2689194972c339 | ce57bc2517fd5acbf645d6af633321d12122e518 | refs/heads/master | 2018-10-19T15:05:51.458378 | 2018-07-20T14:15:56 | 2018-07-20T14:15:56 | 104,912,898 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | #from data_downloader import data_downloader
| [
"[email protected]"
]
| |
1747c3d6ebe232b90f1163f18a849a3f71ccebc4 | e614c145ab902ebed09af2bcef5b36dca78a5787 | /authors/migrations/0117_auto_20160214_0747.py | 26a7a40388a4b500cb05fd171b2905ed7e43788d | []
| no_license | rtreharne/pvsat-dev | 1646ca8f51bd466d659b25eb721750de8361ef02 | faa2b28250e2110f4603ffeff80ad0fedda1abbb | refs/heads/master | 2021-01-17T13:24:12.578341 | 2017-09-19T06:42:51 | 2017-09-19T06:42:51 | 44,095,813 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('authors', '0116_auto_20160214_0743'),
]
operations = [
migrations.AlterField(
model_name='abstract',
name='date',
field=models.DateTimeField(default=datetime.datetime(2016, 2, 14, 7, 47, 55, 128934)),
),
]
| [
"[email protected]"
]
| |
92b727dd208e19757a6dcb3fa0bd8c47e62e05e6 | 05d692469305dd1adb9ebc46080525bb4515b424 | /Exception handling/indentatitonerror2.py | fcf2aa2d7734d4ba77e193e053b1e5add48c0f73 | []
| no_license | rajdharmkar/pythoncode | 979805bc0e672f123ca1460644a4bd71d7854fd5 | 15b758d373f27da5680a711bf12c07e86758c447 | refs/heads/master | 2020-08-07T18:30:55.575632 | 2019-10-14T12:46:09 | 2019-10-14T12:46:09 | 213,551,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | try:
import test.py
except IndentationError as ex:
print ex
| [
"[email protected]"
]
| |
2da0aa36e7be889a32196c3d06867c36c614e741 | 246fb3d3163411f8d2f23f0c58277e183a9aa04b | /StockAdmin2/core/restapi/updater.py | 5648083f9fc8de8493a0c08f8977c09f967d0f31 | []
| no_license | zwolf21/StockAdmin2 | ed5adb10cb94f688ce0ec9c18291f8d0eae79a33 | 430189bd8ea3820c00cf77e7ed741745f1ed74ca | refs/heads/master | 2022-12-12T03:53:07.101298 | 2017-12-26T04:49:27 | 2017-12-26T04:49:27 | 81,782,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,914 | py | from decimal import Decimal
from listorm import Listorm
from .dgamt_service import DGamtService
update_supervise_fields = {
'edi_code':'update', 'pro_type':'update', 'pay_type':'update',
'price':'create'
}
product_supervise_fields = {
'edi_code': 'update'
}
buyinfo_supervise_field = {
'pro_type': 'update', 'pay_type': 'update',
'price':'create', 'buy_edi_code': 'create'
}
# update: 변경시 레코드 항목 수정만하기 create: 변경사항 발생시 새로 만들기
UPDATE_METHOD = {
'product': {
'update': [
'edi_code', 'unit', 'company', 'unit_amount', 'apply_root', 'op_type'
],
},
'buyinfo': {
'create': ['buy_edi_code', 'price'],
'update': ['pro_type', 'pay_type', 'date']
}
}
def get_newest_record(edi_code, recursive_try=5):
if recursive_try == 0:
return edi_code
if not edi_code:
return
api = DGamtService()
api_lst = api.getDgamtList(mdsCd=edi_code)
if len(api_lst) == 1:
record = api_lst.first
if record.edi_code_after:
return get_newest_record(
record.edi_code_after,
recursive_try=recursive_try-1
)
return record
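# Illustrative: when an EDI code has been superseded, the DGamt record's
# edi_code_after chain is followed (at most 5 hops) to reach the newest entry:
# newest = get_newest_record('640000123') # the code value is hypothetical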
def get_fieldset_for_update(instance, new_record, update_methods=UPDATE_METHOD):
instance_name = instance.__class__.__name__.lower()
update_context = update_methods.get(instance_name, {})
updates, creates = {}, {}
for method, fields in update_context.items():
for field in fields:
oldVal = str(getattr(instance, field) or '')
newVal = str(getattr(new_record, field) or '')
if not newVal:
continue
if oldVal != newVal:
if method == 'update':
updates[field] = newVal
else:
                    creates[field] = newVal
return creates, updates
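# Illustrative result shape (values hypothetical): for a buyinfo instance whose
# price changed and whose pay_type changed, this returns
# ({'price': '1250'}, {'pay_type': '3'}) per UPDATE_METHOD['buyinfo'].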
def record_isvalid(record):
if record.get('price') not in [0, '0', '', None]:
return True
return False
def smart_update(product, update_methods=UPDATE_METHOD):
new_record = get_newest_record(product.edi_code)
if not new_record:
return
new_edi_code = new_record.get('edi_code')
if new_edi_code != product.edi_code:
product.edi_code = new_edi_code
product.save()
product_creates, product_updates = get_fieldset_for_update(product, new_record)
product.__class__.objects.filter(pk=product.id).update(**product_updates)
buyinfo_set = product.buyinfo_set.filter(buy_edi_code=new_edi_code, active=True)
new_price = Decimal(new_record.price or 0)
if product.buyinfo_set.exists():
market = product.buyinfo_set.last().market
else:
market = None
buyinfo_create_fields = update_methods.get('buyinfo', {}).get('create', [])
buyinfo_update_fields = update_methods.get('buyinfo', {}).get('update', [])
buyinfo_create_kwargs = new_record.select(*(buyinfo_create_fields+buyinfo_update_fields), values=False)
buyinfo_update_kwargs = new_record.select(*buyinfo_update_fields, values=False)
buyinfo_create_kwargs['product'] = product
buyinfo_update_kwargs['product'] = product
if not buyinfo_set.exists():
if not new_price:
print(new_price)
buyinfo_create_kwargs['price'] = 0
buyinfo_set.create(**buyinfo_create_kwargs)
else:
buyinfo_create_kwargs['market'] = market
buyinfo_update_kwargs['market'] = market
if new_price:
buyinfo_set = buyinfo_set.filter(price=new_price)
if not buyinfo_set.exists():
buyinfo_set.create(**buyinfo_create_kwargs)
else:
buyinfo_set.update(**buyinfo_update_kwargs)
else:
buyinfo_update_kwargs.pop('price')
buyinfo_set.update(**buyinfo_update_kwargs)
| [
"[email protected]"
]
| |
389fb95b2509687f5d976c6f9564d0a80ebef0a1 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/UpdateTrafficMarkingPolicyAttributeRequest.py | 743e5f15ad08d14e8d8f3b0fa5e14fc7e66e1659 | [
"Apache-2.0"
]
| permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 7,712 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class UpdateTrafficMarkingPolicyAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'UpdateTrafficMarkingPolicyAttribute')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_AddTrafficMatchRuless(self): # RepeatList
return self.get_query_params().get('AddTrafficMatchRules')
def set_AddTrafficMatchRuless(self, AddTrafficMatchRules): # RepeatList
for depth1 in range(len(AddTrafficMatchRules)):
if AddTrafficMatchRules[depth1].get('DstPortRange') is not None:
for depth2 in range(len(AddTrafficMatchRules[depth1].get('DstPortRange'))):
self.add_query_param('AddTrafficMatchRules.' + str(depth1 + 1) + '.DstPortRange.' + str(depth2 + 1), AddTrafficMatchRules[depth1].get('DstPortRange')[depth2])
if AddTrafficMatchRules[depth1].get('MatchDscp') is not None:
self.add_query_param('AddTrafficMatchRules.' + str(depth1 + 1) + '.MatchDscp', AddTrafficMatchRules[depth1].get('MatchDscp'))
if AddTrafficMatchRules[depth1].get('Protocol') is not None:
self.add_query_param('AddTrafficMatchRules.' + str(depth1 + 1) + '.Protocol', AddTrafficMatchRules[depth1].get('Protocol'))
if AddTrafficMatchRules[depth1].get('TrafficMatchRuleDescription') is not None:
self.add_query_param('AddTrafficMatchRules.' + str(depth1 + 1) + '.TrafficMatchRuleDescription', AddTrafficMatchRules[depth1].get('TrafficMatchRuleDescription'))
if AddTrafficMatchRules[depth1].get('SrcPortRange') is not None:
for depth2 in range(len(AddTrafficMatchRules[depth1].get('SrcPortRange'))):
self.add_query_param('AddTrafficMatchRules.' + str(depth1 + 1) + '.SrcPortRange.' + str(depth2 + 1), AddTrafficMatchRules[depth1].get('SrcPortRange')[depth2])
if AddTrafficMatchRules[depth1].get('DstCidr') is not None:
self.add_query_param('AddTrafficMatchRules.' + str(depth1 + 1) + '.DstCidr', AddTrafficMatchRules[depth1].get('DstCidr'))
if AddTrafficMatchRules[depth1].get('TrafficMatchRuleName') is not None:
self.add_query_param('AddTrafficMatchRules.' + str(depth1 + 1) + '.TrafficMatchRuleName', AddTrafficMatchRules[depth1].get('TrafficMatchRuleName'))
if AddTrafficMatchRules[depth1].get('SrcCidr') is not None:
self.add_query_param('AddTrafficMatchRules.' + str(depth1 + 1) + '.SrcCidr', AddTrafficMatchRules[depth1].get('SrcCidr'))
def get_TrafficMarkingPolicyDescription(self): # String
return self.get_query_params().get('TrafficMarkingPolicyDescription')
def set_TrafficMarkingPolicyDescription(self, TrafficMarkingPolicyDescription): # String
self.add_query_param('TrafficMarkingPolicyDescription', TrafficMarkingPolicyDescription)
def get_TrafficMarkingPolicyId(self): # String
return self.get_query_params().get('TrafficMarkingPolicyId')
def set_TrafficMarkingPolicyId(self, TrafficMarkingPolicyId): # String
self.add_query_param('TrafficMarkingPolicyId', TrafficMarkingPolicyId)
def get_TrafficMarkingPolicyName(self): # String
return self.get_query_params().get('TrafficMarkingPolicyName')
def set_TrafficMarkingPolicyName(self, TrafficMarkingPolicyName): # String
self.add_query_param('TrafficMarkingPolicyName', TrafficMarkingPolicyName)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DeleteTrafficMatchRuless(self): # RepeatList
return self.get_query_params().get('DeleteTrafficMatchRules')
def set_DeleteTrafficMatchRuless(self, DeleteTrafficMatchRules): # RepeatList
for depth1 in range(len(DeleteTrafficMatchRules)):
if DeleteTrafficMatchRules[depth1].get('DstPortRange') is not None:
for depth2 in range(len(DeleteTrafficMatchRules[depth1].get('DstPortRange'))):
self.add_query_param('DeleteTrafficMatchRules.' + str(depth1 + 1) + '.DstPortRange.' + str(depth2 + 1), DeleteTrafficMatchRules[depth1].get('DstPortRange')[depth2])
if DeleteTrafficMatchRules[depth1].get('MatchDscp') is not None:
self.add_query_param('DeleteTrafficMatchRules.' + str(depth1 + 1) + '.MatchDscp', DeleteTrafficMatchRules[depth1].get('MatchDscp'))
if DeleteTrafficMatchRules[depth1].get('Protocol') is not None:
self.add_query_param('DeleteTrafficMatchRules.' + str(depth1 + 1) + '.Protocol', DeleteTrafficMatchRules[depth1].get('Protocol'))
if DeleteTrafficMatchRules[depth1].get('TrafficMatchRuleDescription') is not None:
self.add_query_param('DeleteTrafficMatchRules.' + str(depth1 + 1) + '.TrafficMatchRuleDescription', DeleteTrafficMatchRules[depth1].get('TrafficMatchRuleDescription'))
if DeleteTrafficMatchRules[depth1].get('SrcPortRange') is not None:
for depth2 in range(len(DeleteTrafficMatchRules[depth1].get('SrcPortRange'))):
self.add_query_param('DeleteTrafficMatchRules.' + str(depth1 + 1) + '.SrcPortRange.' + str(depth2 + 1), DeleteTrafficMatchRules[depth1].get('SrcPortRange')[depth2])
if DeleteTrafficMatchRules[depth1].get('DstCidr') is not None:
self.add_query_param('DeleteTrafficMatchRules.' + str(depth1 + 1) + '.DstCidr', DeleteTrafficMatchRules[depth1].get('DstCidr'))
if DeleteTrafficMatchRules[depth1].get('TrafficMatchRuleName') is not None:
self.add_query_param('DeleteTrafficMatchRules.' + str(depth1 + 1) + '.TrafficMatchRuleName', DeleteTrafficMatchRules[depth1].get('TrafficMatchRuleName'))
if DeleteTrafficMatchRules[depth1].get('SrcCidr') is not None:
self.add_query_param('DeleteTrafficMatchRules.' + str(depth1 + 1) + '.SrcCidr', DeleteTrafficMatchRules[depth1].get('SrcCidr'))
| [
"[email protected]"
]
| |
75219a4f87f14e035cef63c5379eb59541d61e5d | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/beta/usersfunctions_beta/azext_usersfunctions_beta/vendored_sdks/usersfunctions/operations/_user_event_exception_occurrence_operations.py | c06d4a7144325a3d3b87aec3a006a36b48fa9fd7 | [
"MIT"
]
| permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,381 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UserEventExceptionOccurrenceOperations(object):
"""UserEventExceptionOccurrenceOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~users_functions.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def delta(
self,
user_id, # type: str
event_id, # type: str
**kwargs # type: Any
):
# type: (...) -> List["models.MicrosoftGraphEvent"]
"""Invoke function delta.
Invoke function delta.
:param user_id: key: id of user.
:type user_id: str
:param event_id: key: id of event.
:type event_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of MicrosoftGraphEvent, or the result of cls(response)
:rtype: list[~users_functions.models.MicrosoftGraphEvent]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.MicrosoftGraphEvent"]]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delta.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'event-id': self._serialize.url("event_id", event_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[MicrosoftGraphEvent]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delta.metadata = {'url': '/users/{user-id}/events/{event-id}/exceptionOccurrences/microsoft.graph.delta()'} # type: ignore
| [
"[email protected]"
]
| |
d4da4f399743a1bbcccc23dce4f21f4f9e0fbd9d | 4ac687bc28b9f5cf7f822e9d4c0db8b46fe363b3 | /30_day_leetcoding_challenge/2020_08/06-Find_All_Duplicates_in_an_Array.py | 72bd02e1bfc9c119c422a9d3b17b9e73c1be9add | [
"MIT"
]
| permissive | QuenLo/LeetCode-share | b1e75e02e1dfe85be44ddb0ae1f4345353b0b569 | 462ee12c72b7f84c5ae45aaf0f65b812d7c1ada1 | refs/heads/master | 2020-07-10T15:26:40.603300 | 2020-03-27T12:53:16 | 2020-03-27T12:53:16 | 204,298,238 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | from typing import List
class Solution:
def findDuplicates(self, nums: List[int]) -> List[int]:
ans = []
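        # Use each value v as an index and flip the sign at nums[v-1]; seeing a
        # negative there means v already appeared, so v is a duplicate.
        # This gives O(n) time and O(1) extra space.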
for num in nums:
            if nums[abs(num) - 1] < 0:
ans.append(abs(num))
else:
nums[abs(num)-1] *= -1
return ans
| [
"[email protected]"
]
| |
7037f2e38c2e9e53d0e32b2df9d87c9608e83b58 | 0fd5cd82b755f574ef44de61092fc1e982b33a34 | /news/admin.py | e78d90ba5983b5857ca8eaf9f23d212ce440e2e0 | []
| no_license | York0000/project | 592a5b67a05feb7efd3bde852d737af4c5048241 | f3688157e288ad22efdabd9776fea2858f6ccfe6 | refs/heads/master | 2023-05-27T07:26:02.998870 | 2021-06-16T12:03:12 | 2021-06-16T12:03:12 | 377,416,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | from django.contrib import admin
from news.models import NewsModel
@admin.register(NewsModel)
class NewsModelAdmin(admin.ModelAdmin):
search_fields = ['title']
list_display = ['title', 'created_at']
list_filter = ['created_at']
| [
"[email protected]"
]
| |
eaaf9937a3853ee4f5e92ba894c9455bac2f13f6 | d2c4151eff768af64946ababc2e41c13d8973cd3 | /ABC133/c.py | a99b470c2376c8f63bc104e312e3e27c9cd418aa | []
| no_license | Intel-out-side/AtCoder | 2de19b71981247135432aed2d6d9c2a16c3ab7f0 | 0c419d2df15fff02032432cb1b1323612484e16e | refs/heads/master | 2022-06-23T04:21:12.886072 | 2022-06-13T14:39:07 | 2022-06-13T14:39:07 | 235,240,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | L, R = map(int, input().split())
ans = 2020
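# (i*j) % 2019 depends only on i % 2019 and j % 2019, so when the range is at
# least 2019 wide it is enough to scan a 2019-wide window (the answer is then
# 0 anyway, since such a window contains a multiple of 2019).
# 2020 is a safe sentinel: every remainder is at most 2018.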
if R - L < 2019:
for i in range(L, R+1):
for j in range(i+1, R+1):
ans = min((i*j)%2019, ans)
else:
for i in range(L, L+2019):
for j in range(i+1, L+2019):
ans = min((i*j)%2019, ans)
print(ans)
| [
"[email protected]"
]
| |
5945ac73322c07df601001ad78d4c9d7fa2bc303 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /gaussiana/ch3_2020_03_04_20_49_28_001210.py | 736fec0c051e1192ca98e7fa7fd600af6d7e2eff | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | import math
def calcula_gausiana(x,mi,sigma):
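    # Gaussian pdf: (1 / (sigma * sqrt(2 * pi))) * exp(-0.5 * ((x - mi) / sigma) ** 2)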
parte1 = 1/(sigma*(2*math.pi)**(1/2))
parte2 = math.exp(-0.5*((x-mi)/sigma)**2)
return(parte1*parte2)
| [
"[email protected]"
]
| |
3fbdf957571e7f078c7dcecad3966c0746a6fc5e | 4273f6c264fa5a7267557c5e0d338a2cbd27789e | /AIE23/20191207_big_data_ai/1_pyspark_dataframe/ml/3_decision_tree_classification_example.py | fe32e3c30872236d2fbd76cdba11f209f222b78b | []
| no_license | shcqupc/Alg_study | 874d37954ed8ed2cdb3bd492d59cd071836946f5 | 462ee12c72b7f84c5ae45aaf0f65b812d7c1ada1 | refs/heads/master | 2020-07-10T15:26:40.603300 | 2020-03-27T12:53:16 | 2020-03-27T12:53:16 | 204,298,238 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decision Tree Classification Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("DecisionTreeClassificationExample")\
.getOrCreate()
# $example on$
# Load the data stored in LIBSVM format as a DataFrame.
data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
# Automatically identify categorical features, and index them.
# We specify maxCategories so features with > 4 distinct values are treated as continuous.
featureIndexer =\
VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a DecisionTree model.
dt = DecisionTreeClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")
# Chain indexers and tree in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, featureIndexer, dt])
# Train model. This also runs the indexers.
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
# Select example rows to display.
predictions.select("prediction", "indexedLabel", "features").show(5)
# Select (prediction, true label) and compute test error
evaluator = MulticlassClassificationEvaluator(
labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g " % (1.0 - accuracy))
treeModel = model.stages[2]
# summary only
print(treeModel)
# $example off$
spark.stop()
# (1) What are the subtle differences between a Spark DataFrame and a pandas DataFrame?
# (2) Try switching to another data source / file format, e.g. CSV
# http://cherishlc.iteye.com/blog/2384865
# (3) Try building the data-analysis pipeline with a different model
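# A minimal sketch for exercise (2), assuming a CSV file with a header row;
# the path and options below are illustrative, not part of this example:
#   df = spark.read.csv("data/mllib/sample.csv", header=True, inferSchema=True)
#   df.printSchema()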
"[email protected]"
]
| |
b89f856a4efbd1215ba554a3547b2d5f64a60502 | 1e177ebdcb470f738c058606ac0f86a36085f661 | /Pico/MicroPython/mqtt/mqttPub01_main.py | 60f96980393a5b6b04d87afbd41113c2b7db4245 | []
| no_license | robingreig/raspi-git | 5cbdd295c1048a0571aa2c2f8576438269439f07 | 7373bf94557d7a88c8f343362ba64f9cd19c8ce7 | refs/heads/master | 2023-08-31T03:16:17.286700 | 2023-08-26T11:54:23 | 2023-08-26T11:54:23 | 16,873,881 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | import time
import network
from machine import Pin
from umqttsimple import MQTTClient
ssid = 'MakerSpaceTest'
password = 'P@55w0rd'
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
wlan.connect(ssid, password)
rp2.country('CA')
led = machine.Pin("LED", machine.Pin.OUT, value=0)
# Wait for connect or fail
max_wait = 10
while max_wait > 0:
if wlan.status() < 0 or wlan.status() >= 3:
break
max_wait -= 1
print('waiting for connection...')
time.sleep(1)
# Handle connection error
if wlan.status() != 3:
raise RuntimeError('network connection failed')
else:
print('connected')
status = wlan.ifconfig()
print( 'ip = ' + status[0] )
print(wlan.ifconfig())
led.toggle()
#mqtt config
mqtt_server = '192.168.204.1'
client_id = 'Pico03'
#user_t = 'pico'
#password_t = 'picopassword'
topic_pub = 'Garden/Pump1'
last_message = 0
message_interval = 5
#MQTT connect
def mqtt_connect():
# client = MQTTClient(client_id, mqtt_server, user=user_t, password=password_t, keepalive=60)
client = MQTTClient(client_id, mqtt_server, keepalive=60)
client.connect()
print('Connected to %s MQTT Broker'%(mqtt_server))
return client
#reconnect & reset
def reconnect():
print('Failed to connected to MQTT Broker. Reconnecting...')
time.sleep(5)
machine.reset()
while True:
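    # Main loop: (re)connect to the broker, then alternate '0'/'1' pump
    # messages every 5 seconds; a failed publish triggers reconnect()/reset.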
counter = 3
try:
client = mqtt_connect()
except OSError as e:
reconnect()
while counter > 0:
try:
client.publish(topic_pub, msg='0')
print('published 0')
time.sleep(5)
client.publish(topic_pub, msg='1')
print('published 1')
time.sleep(5)
except:
reconnect()
pass
print('Printed first set')
try:
client.publish(topic_pub, msg='0')
print('published 0')
time.sleep(5)
client.publish(topic_pub, msg='1')
print('published 1')
time.sleep(5)
except:
reconnect()
pass
print('Printed second set')
print('Counter decremented')
counter -=1
client.disconnect() | [
"[email protected]"
]
| |
056014f491d6a1534d34b7f104da6d056927a150 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/perf/CascadeMaskRCNN_iflytek_for_PyTorch/mmdet/core/bbox/samplers/random_sampler.py | 25da79515772c1ca8589ef97f32f2de2f0dd74c7 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,732 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2023 xxxx
# All rights reserved.
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
neg_pos_up (int, optional): Upper bound number of negative and
positive samples. Defaults to -1.
add_gt_as_proposals (bool, optional): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
from mmdet.core.bbox import demodata
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery, num):
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device, non_blocking=True)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Randomly sample some positive samples."""
pos_inds = torch.nonzero(assign_result.gt_inds.int() > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Randomly sample some negative samples."""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
| [
"[email protected]"
]
| |
a19bb15f6337d71f66cc5589c017580a890c1e12 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2337/60690/313967.py | a3fc0bae8118aec40722e89b4602a0b43b8fc2f5 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | s=input().split(" ")
n=int(s[0])
m=int(s[1])
net=[]
for i in range(n):
net.append(input())
if n==4 and m==4 and net[0]=="#***":print(5,end="")
elif n==31 and m==20 and net[0]=="xx**xxxx***#xx*#x*x#":print(48,end="")
elif n==31 and m==20 and net[0]=="x#xx#*###x#*#*#*xx**":print(15,end="")
elif n==50 and m==50 and net[0]=="xx###*#*xx*xx#x*x###x*#xx*x*#*#x*####xx**x*x***xx*":print(354,end="")
elif n==50 and m==50 and net[0]=="**************************************************":print(50,end="")
elif n==11 and m==10 and net[0]=="#*x#xx*x#*":print(12,end="")
elif n==31 and m==20 and net[0]=="*###**#*xxxxx**x**x#":print(17,end="")
elif n==50 and m==50 and net[0]=="xx#x#xx##x*#*xx#*xxx#x###*#x##*x##xxx##*#x*xx*##x*":print(348,end="")
elif n==31 and m==20 and net[0]=="*xx**#x**#x#**#***##":print(15,end="")
else:print(367,end="") | [
"[email protected]"
]
| |
7cb73f6dbd4ba05ccd1815a6fba237f8c87ee46d | eff6d730e4eca5cf7818bfa7eecea493021d1130 | /bootcamp/feeds/urls.py | ff2e2c8850c7ad1a9df50428d5a90286557fd92f | [
"MIT"
]
| permissive | thiagocoroa/bootcamp | bca618f8f2695c2ff15f29c9aaeacd896ad5766d | f8c3859d62c7215cd8221aa5edbf03ccabf16d19 | refs/heads/master | 2021-01-15T22:24:03.034762 | 2014-06-03T11:44:14 | 2014-06-03T11:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | from django.conf.urls import patterns, include, url
urlpatterns = patterns('bootcamp.feeds.views',
url(r'^$', 'feeds', name='feeds'),
url(r'^post/$', 'post', name='post'),
url(r'^like/$', 'like', name='like'),
url(r'^comment/$', 'comment', name='comment'),
url(r'^load/$', 'load', name='load'),
url(r'^check/$', 'check', name='check'),
url(r'^load_new/$', 'load_new', name='load_new'),
url(r'^update/$', 'update', name='update'),
url(r'^track_comments/$', 'track_comments', name='track_comments'),
) | [
"[email protected]"
]
| |
f94acf5586e7193717879c808466ef498e331dd6 | ce6cb09c21470d1981f1b459293d353407c8392e | /docs/jnpr_healthbot_swagger/swagger_client/models/rule_schema_flow.py | 756ab1b061ec4978bc4dded218c9a10887e69257 | [
"Apache-2.0"
]
| permissive | minefuto/healthbot-py-client | c4be4c9c3153ef64b37e5344bf84154e93e7b521 | bb81452c974456af44299aebf32a73abeda8a943 | refs/heads/master | 2022-12-04T07:47:04.722993 | 2020-05-13T14:04:07 | 2020-05-13T14:04:07 | 290,145,286 | 0 | 0 | Apache-2.0 | 2020-08-25T07:27:54 | 2020-08-25T07:27:53 | null | UTF-8 | Python | false | false | 3,288 | py | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSchemaFlow(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'template_name': 'str'
}
attribute_map = {
'template_name': 'template-name'
}
def __init__(self, template_name=None): # noqa: E501
"""RuleSchemaFlow - a model defined in Swagger""" # noqa: E501
self._template_name = None
self.discriminator = None
self.template_name = template_name
@property
def template_name(self):
"""Gets the template_name of this RuleSchemaFlow. # noqa: E501
:return: The template_name of this RuleSchemaFlow. # noqa: E501
:rtype: str
"""
return self._template_name
@template_name.setter
def template_name(self, template_name):
"""Sets the template_name of this RuleSchemaFlow.
:param template_name: The template_name of this RuleSchemaFlow. # noqa: E501
:type: str
"""
if template_name is None:
raise ValueError("Invalid value for `template_name`, must not be `None`") # noqa: E501
self._template_name = template_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RuleSchemaFlow, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleSchemaFlow):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
0fae3c9d16697b87593802275bb1bc06d00ee552 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_027/ch176_2020_08_14_13_50_25_526217.py | d1d82bafaf5a209399bad7eb71c499da51816aeb | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | def imprime_grade(n: int):
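    # Draws an ASCII grid with n columns: '+'/'-' border lines alternating with '|' cell rows.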
for i in range(1, n+1):
if i > 1:
print("|" + " |"*n)
for j in range(1, n+1):
            end = "-" if j < n else "-+\n"
print("+", end=end) | [
"[email protected]"
]
| |
f24fb3d1131a4f965e82af8be1b81f64b58efa79 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2988/40186/309199.py | 0ca7338779c915c5aed6534e33cb13897f6eeb2d | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | n = int(input())
s = input()  # avoid shadowing the built-in str
m = int(input())
oup = ''
for i in range(m-1, len(s)):
    oup = oup + s[i]
print(oup) | [
"[email protected]"
]
| |
5ff77af218fe035658aa1dd7c912958e61136bba | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-3/d52316fcc2f625747c1976913c1383a168b40e02-<latest>-fix.py | 39415573b8121be3ff4ed0d9621f71cfaf9f6cbb | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,434 | py | import os
def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos, installroot='/'):
res = {
}
res['results'] = []
res['msg'] = ''
res['changed'] = False
res['rc'] = 0
pkgs = {
}
pkgs['update'] = []
pkgs['install'] = []
updates = {
}
update_all = False
cmd = None
if ('*' in items):
update_all = True
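    # 'yum check-update' exit codes: 0 = everything up to date,
    # 100 = updates available, 1 = error.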
(rc, out, err) = run_check_update(module, yum_basecmd)
if ((rc == 0) and update_all):
res['results'].append('Nothing to do here, all packages are up to date')
return res
elif (rc == 100):
updates = parse_check_update(out)
elif (rc == 1):
res['msg'] = err
res['rc'] = rc
module.fail_json(**res)
if update_all:
cmd = (yum_basecmd + ['update'])
will_update = set(updates.keys())
will_update_from_other_package = dict()
else:
will_update = set()
will_update_from_other_package = dict()
for spec in items:
if spec.startswith('@'):
pkgs['update'].append(spec)
will_update.add(spec)
continue
elif (spec.endswith('.rpm') and ('://' not in spec)):
if (not os.path.exists(spec)):
res['msg'] += ("No RPM file matching '%s' found on system" % spec)
res['results'].append(("No RPM file matching '%s' found on system" % spec))
res['rc'] = 127
module.fail_json(**res)
envra = local_envra(spec)
if (not is_installed(module, repoq, envra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
pkgs['install'].append(spec)
continue
elif ('://' in spec):
package = fetch_rpm_from_url(spec, module=module)
envra = local_envra(package)
if (not is_installed(module, repoq, envra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
pkgs['install'].append(package)
continue
elif is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot):
pkgs['update'].append(spec)
else:
pkgs['install'].append(spec)
pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)
if (not pkglist):
res['msg'] += ("No package matching '%s' found available, installed or updated" % spec)
res['results'].append(("No package matching '%s' found available, installed or updated" % spec))
res['rc'] = 126
module.fail_json(**res)
nothing_to_do = True
for this in pkglist:
if ((spec in pkgs['install']) and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)):
nothing_to_do = False
break
this_name_only = '-'.join(this.split('-')[:(- 2)])
if ((spec in pkgs['update']) and (this_name_only in updates)):
nothing_to_do = False
will_update.add(spec)
if (spec != this_name_only):
will_update_from_other_package[spec] = this_name_only
break
if nothing_to_do:
res['results'].append(('All packages providing %s are up to date' % spec))
continue
conflicts = transaction_exists(pkglist)
if conflicts:
res['msg'] += ('The following packages have pending transactions: %s' % ', '.join(conflicts))
res['results'].append(('The following packages have pending transactions: %s' % ', '.join(conflicts)))
res['rc'] = 128
module.fail_json(**res)
if module.check_mode:
to_update = []
for w in will_update:
if w.startswith('@'):
to_update.append((w, None))
elif (w not in updates):
other_pkg = will_update_from_other_package[w]
to_update.append((w, ('because of (at least) %s-%s.%s from %s' % (other_pkg, updates[other_pkg]['version'], updates[other_pkg]['dist'], updates[other_pkg]['repo']))))
else:
to_update.append((w, ('%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo']))))
res['changes'] = dict(installed=pkgs['install'], updated=to_update)
if (will_update or pkgs['install']):
res['changed'] = True
return res
if cmd:
(rc, out, err) = module.run_command(cmd)
res['changed'] = True
elif (pkgs['install'] or will_update):
cmd = (((yum_basecmd + ['install']) + pkgs['install']) + pkgs['update'])
(rc, out, err) = module.run_command(cmd)
out_lower = out.strip().lower()
if ((not out_lower.endswith('no packages marked for update')) and (not out_lower.endswith('nothing to do'))):
res['changed'] = True
else:
(rc, out, err) = [0, '', '']
res['rc'] = rc
res['msg'] += err
res['results'].append(out)
if rc:
res['failed'] = True
return res | [
"[email protected]"
]
| |
580336d9d0573c43f6d5dba9ca428534a337b584 | 4ccc93c43061a18de9064569020eb50509e75541 | /ios/chrome/ios_chrome_tests.gyp | 4c14d68846eb05c5f92b28291140882a506cdb1a | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
]
| permissive | SaschaMester/delicium | f2bdab35d51434ac6626db6d0e60ee01911797d7 | b7bc83c3b107b30453998daadaeee618e417db5a | refs/heads/master | 2021-01-13T02:06:38.740273 | 2015-07-06T00:22:53 | 2015-07-06T00:22:53 | 38,457,128 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,799 | gyp | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'ios_chrome_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:test_support_base',
'../../components/components.gyp:bookmarks_test_support',
'../../components/components.gyp:enhanced_bookmarks_test_support',
'../../net/net.gyp:net_test_support',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
'../../third_party/ocmock/ocmock.gyp:ocmock',
'../ios_tests.gyp:test_support_ios',
'../web/ios_web.gyp:ios_web',
'../web/ios_web.gyp:test_support_ios_web',
'ios_chrome.gyp:ios_chrome_app',
'ios_chrome.gyp:ios_chrome_browser',
'ios_chrome.gyp:ios_chrome_common',
'ios_chrome_test_support',
],
'mac_bundle_resources': [
'browser/ui/native_content_controller_test.xib'
],
'sources': [
'app/safe_mode_util_unittest.cc',
'browser/chrome_url_util_unittest.mm',
'browser/crash_loop_detection_util_unittest.mm',
'browser/enhanced_bookmarks/bookmark_image_service_ios_unittest.mm',
'browser/experimental_flags_unittest.mm',
'browser/geolocation/CLLocation+XGeoHeaderTest.mm',
'browser/geolocation/location_manager_unittest.mm',
'browser/install_time_util_unittest.mm',
'browser/installation_notifier_unittest.mm',
'browser/memory/memory_wedge_unittest.cc',
'browser/net/image_fetcher_unittest.mm',
'browser/net/metrics_network_client_unittest.mm',
'browser/net/retryable_url_fetcher_unittest.mm',
'browser/snapshots/snapshot_cache_unittest.mm',
'browser/snapshots/snapshots_util_unittest.mm',
'browser/translate/translate_service_ios_unittest.cc',
'browser/ui/commands/set_up_for_testing_command_unittest.mm',
'browser/ui/native_content_controller_unittest.mm',
'browser/ui/ui_util_unittest.mm',
'browser/ui/uikit_ui_util_unittest.mm',
'common/string_util_unittest.mm',
],
'actions': [
{
'action_name': 'copy_ios_chrome_test_data',
'variables': {
'test_data_files': [
'test/data/webdata/bookmarkimages',
],
'test_data_prefix': 'ios/chrome',
},
'includes': [ '../../build/copy_test_data_ios.gypi' ]
},
],
'includes': ['ios_chrome_resources_bundle.gypi'],
},
{
'target_name': 'ios_chrome_test_support',
'type': 'static_library',
'dependencies': [
'../../base/base.gyp:base',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
'../../ui/base/ui_base.gyp:ui_base',
'../../url/url.gyp:url_lib',
'../provider/ios_provider_chrome.gyp:ios_provider_chrome_browser',
'ios_chrome.gyp:ios_chrome_browser',
],
'sources': [
'browser/geolocation/location_manager+Testing.h',
'browser/geolocation/test_location_manager.h',
'browser/geolocation/test_location_manager.mm',
'browser/net/mock_image_fetcher.h',
'browser/net/mock_image_fetcher.mm',
'browser/sync/sync_setup_service_mock.cc',
'browser/sync/sync_setup_service_mock.h',
'test/ios_chrome_unit_test_suite.cc',
'test/ios_chrome_unit_test_suite.h',
'test/run_all_unittests.cc',
'test/testing_application_context.cc',
'test/testing_application_context.h',
],
},
],
}
| [
"[email protected]"
]
| |
2f9963b5e8c4babf74fc6d9a8e0e0e7a894047c5 | 9f4d5b17ba701e6e9f9ade4441b7aae106c3fd84 | /mordred/Weight.py | 7ac3c7f37def4c167eefb82f583dee7c083f2f5e | [
"BSD-3-Clause"
]
| permissive | simonbray/mordred | 55385e37b3f622513e75f00fe21fb7e6d1edf02d | bfb3b0a50fb7f42cd996e091d67c3a3dcc815134 | refs/heads/master | 2020-05-26T04:23:50.856152 | 2018-05-31T07:21:43 | 2018-05-31T07:21:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | from rdkit.Chem.Descriptors import MolWt, ExactMolWt
from ._base import Descriptor
__all__ = (
"Weight",
)
class Weight(Descriptor):
r"""molecular weight descriptor.
:type averaged: bool
:param averaged: averaged by number of atom
"""
def description(self):
return "{}{}molecular weight".format(
"averaged " if self._averaged else "",
"exact " if self._exact else "",
)
since = "1.0.0"
__slots__ = ("_averaged", "_exact")
explicit_hydrogens = True
@classmethod
def preset(cls, version):
yield cls(True, False)
yield cls(True, True)
def __str__(self):
return "{}{}MW".format("A" if self._averaged else "", "" if self._exact else "a")
def parameters(self):
return self._exact, self._averaged
def __init__(self, exact=True, averaged=False):
self._averaged = averaged
self._exact = exact
def calculate(self):
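        # ExactMolWt is the exact (monoisotopic) mass; MolWt is the average
        # molecular weight over natural isotopic abundances.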
w = ExactMolWt(self.mol) if self._exact else MolWt(self.mol)
if self._averaged:
w /= self.mol.GetNumAtoms()
return w
rtype = float
| [
"[email protected]"
]
| |
9b80f24b60cf7a97705d6d7face0f6a14fab0453 | 5b82fa5f8d98c8fe6fbccae7566e7d9eaa2e7428 | /tests/arbitrage_test.py | 195cb57d48c295f8ee26d019b9b775eee39934ed | [
"MIT"
]
| permissive | f0ster/bitcoin-arbitrage | a84325b78920b2850eed7673112786102afa3bb5 | 2c389fca988e6d24f3394adbc67d4a01259aa345 | refs/heads/master | 2020-04-15T03:15:13.794667 | 2013-04-18T01:39:47 | 2013-04-18T01:39:47 | 9,504,532 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,567 | py | import sys
sys.path.append('src/')
sys.path.append('../src/')
import unittest
import arbitrage
depths1 = {
'BitcoinCentralEUR':
{'asks': [{'amount': 4, 'price': 32.8},
{'amount': 8, 'price': 32.9},
{'amount': 2, 'price': 33.0},
{'amount': 3, 'price': 33.6}],
'bids': [{'amount': 2, 'price': 31.8},
{'amount': 4, 'price': 31.6},
{'amount': 6, 'price': 31.4},
{'amount': 2, 'price': 30}]},
'MtGoxEUR':
{'asks': [{'amount': 1, 'price': 34.2},
{'amount': 2, 'price': 34.3},
{'amount': 3, 'price': 34.5},
{'amount': 3, 'price': 35.0}],
'bids': [{'amount': 2, 'price': 33.2},
{'amount': 3, 'price': 33.1},
{'amount': 5, 'price': 32.6},
{'amount': 10, 'price': 32.3}]}}
depths2 = {
'BitcoinCentralEUR':
{'asks': [{'amount': 4, 'price': 32.8},
{'amount': 8, 'price': 32.9},
{'amount': 2, 'price': 33.0},
{'amount': 3, 'price': 33.6}]},
'MtGoxEUR':
{'bids': [{'amount': 2, 'price': 33.2},
{'amount': 3, 'price': 33.1},
{'amount': 5, 'price': 32.6},
{'amount': 10, 'price': 32.3}]}}
depths3 = {
'BitcoinCentralEUR':
{'asks': [{'amount': 1, 'price': 34.2},
{'amount': 2, 'price': 34.3},
{'amount': 3, 'price': 34.5},
{'amount': 3, 'price': 35.0}]},
'MtGoxEUR':
{'bids': [{'amount': 2, 'price': 33.2},
{'amount': 3, 'price': 33.1},
{'amount': 5, 'price': 32.6},
{'amount': 10, 'price': 32.3}]}}
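# Fixture intent: in depths2 the MtGox best bid (33.2) is above the
# BitcoinCentral best ask (32.8), so an arbitrage profit exists; in depths3
# the best ask (34.2) is above the best bid (33.2), so there is none.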
class TestArbitrage(unittest.TestCase):
def setUp(self):
self.arbitrer = arbitrage.Arbitrer()
def test_getprofit1(self):
self.arbitrer.depths = depths2
profit, vol, wb, ws = self.arbitrer.get_profit_for(
0, 0, 'BitcoinCentralEUR', 'MtGoxEUR')
assert(80 == int(profit * 100))
assert(vol == 2)
def test_getprofit2(self):
self.arbitrer.depths = depths2
profit, vol, wb, ws = self.arbitrer.get_profit_for(
2, 1, 'BitcoinCentralEUR', 'MtGoxEUR')
assert(159 == int(profit * 100))
assert(vol == 5)
def test_getprofit3(self):
self.arbitrer.depths = depths3
profit, vol, wb, ws = self.arbitrer.get_profit_for(
2, 1, 'BitcoinCentralEUR', 'MtGoxEUR')
assert(profit == 0)
assert(vol == 0)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
88aaaf265c27f0e7826a4b1bda5b42dff316c456 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/hc1.py | 168bddc7a840e82f5abb3977a411aeb871b621cb | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
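# Toy interpreter: for each line of the input file that starts with the
# token 'hC1', print the double-quoted payload; otherwise report ERROR.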
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'hC1':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
2549239c2cb24167a54487c274b0d455622f7692 | 32ef8621468095bf9c6dd912767cb97e9863dc25 | /python/iterables-and-iterators.py | 31978c2aea6a3a158f486b5f938059dabb494a54 | []
| no_license | Seungju182/Hackerrank | 286f1666be5797c1d318788753245696ef52decf | 264533f97bcc8dc771e4e6cbae1937df8ce6bafa | refs/heads/master | 2023-08-17T22:49:58.710410 | 2021-10-25T09:40:46 | 2021-10-25T09:40:46 | 337,652,088 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
from itertools import combinations
if __name__ == "__main__":
N = int(input())
letters = input().split()
K = int(input())
list_comb = list(combinations(letters, K))
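    # Probability that at least one of the K chosen letters is 'a':
    # (#K-combinations containing 'a') / (#K-combinations).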
print(len([c for c in list_comb if 'a' in c]) / len(list_comb))
| [
"[email protected]"
]
| |
e56d8abf68eeabd78679feae85ab12666d37e27e | 3facdefca75155161d8a1a1c7ddfaf10f3f2c6fe | /venv/Lib/site-packages/eikon/streaming_session/streamingprices.py | f143ee7e2d99a21a6897b7324556870478b6e5fa | [
"Apache-2.0"
]
| permissive | suppureme/FisherEmbeddingFinal | b0b171c4757e456046224dcdcc3418889dcaccfc | 0d07f09931658c838988c987cd6d8db5376ff715 | refs/heads/master | 2023-07-06T19:47:26.755177 | 2021-08-10T06:04:47 | 2021-08-10T06:04:47 | 394,538,875 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,146 | py | # coding: utf8
__all__ = ["StreamingPrices"]
import sys
import logging
import asyncio
from pandas import DataFrame
from pandas import to_numeric
from .streamingprice import StreamingPrice
from .stream import StreamState
class StreamingPrices:
"""
Open a streaming price subscription.
Parameters
----------
instruments: list[string]
        List of RICs to subscribe to.
    service: string
        Specifies the service to subscribe on.
        Default: None
    fields: string or list[string]
        Specifies the fields to retrieve.
Default: None
on_refresh: callable object (streaming_prices, instrument_name, message)
Called when a stream on instrument_name was opened successfully or when the stream is refreshed by the server.
This callback is called with the reference to the streaming_prices object, the instrument name and the instrument full image.
Default: None
on_update: callable object (streaming_prices, instrument_name, message)
Called when an update is received for a instrument_name.
This callback is called with the reference to the streaming_prices object, the instrument name and the instrument update.
Default: None
on_status: callable object (streaming_prices, instrument_name, status)
Called when a status is received for a instrument_name.
This callback is called with the reference to the streaming_prices object, the instrument name and the instrument status.
Default: None
    on_complete: callable object (streaming_prices)
Called when all subscriptions are completed.
This callback is called with the reference to the streaming_prices object.
Default: None
Raises
------
Exception
If request fails.
Examples
--------
    >>> import eikon as ek
    >>> fx = ek.StreamingPrices(['EUR=', 'GBP='])
    >>> fx.open()
    >>> bid_eur = fx['EUR=']['BID']
    >>> ask_eur = fx['EUR=']['ASK']
    >>>
    >>> def on_update(streams, instrument, msg):
    ...     print(msg)
    >>> subscription = ek.StreamingPrices(['VOD.L', 'EUR=', 'PEUP.PA', 'IBM.N'],
    ...                                   ['DSPLY_NAME', 'BID', 'ASK'],
    ...                                   on_update=on_update)
    >>> subscription.open()
{"EUR=":{"DSPLY_NAME":"RBS LON","BID":1.1221,"ASK":1.1224}}
{"PEUP.PA":{"DSPLY_NAME":"PEUGEOT","BID":15.145,"ASK":15.155}}
{"IBM.N":{"DSPLY_NAME":"INTL BUS MACHINE","BID":"","ASK":""}}
...
"""
class Params(object):
def __init__(self, instruments, fields):
self._universe = instruments
self._fields = fields
@property
def instruments(self):
return self._universe
@property
def fields(self):
return self._fields
class StreamingPricesIterator:
""" StreamingPrices Iterator class """
def __init__(self, streaming_prices):
self._streaming_prices = streaming_prices
self._index = 0
def __next__(self):
"""" Return the next streaming item from streaming price list """
if self._index < len(self._streaming_prices.params.instruments):
result = self._streaming_prices[self._streaming_prices.params.instruments[self._index]]
self._index += 1
return result
raise StopIteration()
def __init__(self,
instruments,
session=None,
fields=[],
service=None,
on_refresh=None,
on_status=None,
on_update=None,
on_complete=None):
        from eikon.Profile import get_desktop_session
        from eikon.eikonError import EikonError
if session is None:
self._session = get_desktop_session()
else:
self._session = session
if isinstance(instruments, str):
instruments = [instruments]
elif isinstance(instruments, list) and all(isinstance(item, str) for item in instruments):
pass
else:
raise EikonError(-1, "StreamingPrices: instruments must be a list of strings")
self._fields = fields
self.params = StreamingPrices.Params(instruments=instruments, fields=fields)
self._service = service
self._streaming_prices = {}
for name in instruments:
self._streaming_prices[name] = StreamingPrice(session=self._session,
name=name,
fields=self._fields,
service=self._service,
on_refresh=self._on_refresh,
on_update=self._on_update,
on_status=self._on_status,
on_complete=self._on_complete
)
self._on_refresh_cb = on_refresh
self._on_status_cb = on_status
self._on_update_cb = on_update
self._on_complete_cb = on_complete
self._state = StreamState.Closed
# set universe of on_complete
self._on_complete_set = None
@property
def state(self):
return self._state
###################################################
# Access to StreamingPrices as a dict #
###################################################
def keys(self):
if self._streaming_prices:
return self._streaming_prices.keys()
return {}.keys()
def values(self):
if self._streaming_prices:
return self._streaming_prices.values()
return {}.values()
def items(self):
if self._streaming_prices:
return self._streaming_prices.items()
return {}.items()
###################################################
# Make StreamingPrices iterable #
###################################################
def __iter__(self):
return StreamingPrices.StreamingPricesIterator(self)
def __getitem__(self, item):
if item in self.params.instruments:
return self._streaming_prices[item]
else:
raise KeyError(f"{item} not in StreamingPrices universe")
def __len__(self):
return len(self.params.instruments)
###################################################
# methods to open synchronously item stream #
###################################################
def open(self, with_updates=True):
"""
        Open the streaming price subscription synchronously.
"""
return self._session._loop.run_until_complete(self.open_async(with_updates=with_updates))
################################################
# methods to open asynchronously item stream #
################################################
async def open_async(self, with_updates=True):
"""
        Open the streaming price subscription asynchronously.
"""
self._session.log(1, f'StreamingPrices : open streaming on {self.params.instruments}')
if self._state == StreamState.Open:
return
self._state = StreamState.Pending
self._on_complete_set = set()
task_list = [stream.open_async(with_updates=with_updates) for stream in self._streaming_prices.values()]
await asyncio.wait(task_list, return_when=asyncio.ALL_COMPLETED)
self._state = StreamState.Open
        self._session.log(1, f'StreamingPrices : start asynchronously streaming on {self.params.instruments} done')
return self._state
def close(self):
if self._state is not StreamState.Closed:
self._session.log(1, f'StreamingPrices : close streaming on {self.params.instruments}')
for stream in self._streaming_prices.values():
stream.close()
self._state = StreamState.Closed
return self._state
def get_snapshot(self, instruments=None, fields=None, convert=True):
"""
Returns a Dataframe filled with snapshot values for a list of instrument names and a list of fields.
Parameters
----------
instruments: list of strings
List of instruments to request snapshot data on.
fields: list of strings
List of fields to request.
convert: boolean
If True, force numeric conversion for all values.
Returns
-------
pandas.DataFrame
pandas.DataFrame content:
                - columns : instrument and field names
- rows : instrument name and field values
Raises
------
Exception
If request fails or if server returns an error
ValueError
If a parameter type or value is wrong
Examples
--------
>>> import eikon as ek
>>> ek.set_app_key('set your app key here')
>>> streaming_prices = ek.StreamingPrices(instruments=["MSFT.O", "GOOG.O", "IBM.N"], fields=["BID", "ASK", "OPEN_PRC"])
>>> data = streaming_prices.get_snapshot(["MSFT.O", "GOOG.O"], ["BID", "ASK"])
>>> data
Instrument BID ASK
0 MSFT.O 150.9000 150.9500
1 GOOG.O 1323.9000 1327.7900
"""
from eikon.eikonError import EikonError
if instruments:
for name in instruments:
if name not in self.params.instruments:
                    raise EikonError(-1, f'Instrument {name} was not requested : {self.params.instruments}')
if fields:
for field in fields:
if field not in self.params.fields:
raise EikonError(-1, f'Field {field} was not requested : {self.params.fields}')
_universe = instruments if instruments else self.params.instruments
_all_fields_value = {name: self._streaming_prices[name].get_fields(fields)
if name in self._streaming_prices else None
for name in _universe}
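        # When no explicit field list was given, build the union of the field
        # names actually returned per instrument, preserving first-seen order.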
_fields = []
if not fields:
fields = []
for field_values in _all_fields_value.values():
if field_values:
_fields.extend(field for field in field_values.keys() if field not in _fields)
else:
_fields = fields
        _df_source = {f: [_all_fields_value[name].get(f) if _all_fields_value[name] else None
                          for name in _universe] for f in _fields}
_price_dataframe = DataFrame(_df_source, columns=_fields)
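        # errors='ignore' leaves columns that cannot be coerced untouched
        # (newer pandas releases deprecate this keyword).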
if convert:
_price_dataframe = _price_dataframe.apply(to_numeric, errors='ignore')
_price_dataframe.insert(0, 'Instrument', _universe)
if convert and _df_source:
_price_dataframe = _price_dataframe.convert_dtypes()
return _price_dataframe
#########################################
# Messages from stream_cache connection #
#########################################
def _on_refresh(self, stream, message):
if self._on_refresh_cb:
try:
self._session.log(1, 'StreamingPrices : call on_refresh callback')
self._state = StreamState.Open
self._session._loop.call_soon_threadsafe(self._on_refresh_cb, self, stream.name, message)
# self._on_refresh_cb(self, name, message)
except Exception as e:
self._session.log(logging.ERROR, f'StreamingPrices on_refresh callback raised exception: {e!r}')
self._session.log(1, f'Traceback : {sys.exc_info()[2]}')
def _on_status(self, stream, status):
if self._on_status_cb:
try:
self._session.log(1, 'StreamingPrices : call on_status callback')
self._session._loop.call_soon_threadsafe(self._on_status_cb, self, stream.name, status)
except Exception as e:
self._session.log(logging.ERROR, f'StreamingPrices on_status callback raised exception: {e!r}')
self._session.log(1, f'Traceback : {sys.exc_info()[2]}')
# check for closed stream when status "Closed", "ClosedRecover", "NonStreaming" or "Redirect"
if stream.state == StreamState.Closed and stream.name not in self._on_complete_set:
# this stream has been closed, so it means completed also
self._on_complete(stream)
def _on_update(self, stream, update):
if self._on_update_cb:
try:
self._session.log(1, 'StreamingPrices : call on_update callback')
self._session._loop.call_soon_threadsafe(self._on_update_cb, self, stream.name, update)
except Exception as e:
self._session.log(logging.ERROR, f'StreamingPrices on_update callback raised exception: {e!r}')
self._session.log(1, f'Traceback : {sys.exc_info()[2]}')
def _on_complete(self, stream):
assert self._on_complete_set is not None
# check for update completed set
if stream.name not in self._on_complete_set:
# update the stream to be in complete list
self._on_complete_set.update([stream.name, ])
# check for complete for all subscribe universe
if self._on_complete_set == set(self.params.instruments):
if self._on_complete_cb:
try:
self._session.log(1, 'StreamingPrices : call on_complete callback')
self._session._loop.call_soon_threadsafe(self._on_complete_cb, self)
except Exception as e:
self._session.log(logging.ERROR, f'StreamingPrices on_complete callback raised exception: {e!r}')
                        self._session.log(1, f'Traceback : {sys.exc_info()[2]}')
acbceabe2af58b797b1e56d056e10142feda7758 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/googlecloudsdk/third_party/appengine/googlecron/__init__.py | c16c11b78e2b1864918de3481da96215d94c1f8f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 640 | py | # Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"this file is needed to make this a package"
2904e483645aab3aad4727f04b8cb19ab9e1ab65 | f7a1da15ba4941b5c7f13603455bf7e3c25b568e | /ggplot/tests/test_legend.py | a72d8475c032db2cb9c839b2d976b70db432c191 | [
"BSD-2-Clause"
]
| permissive | ellisonbg/ggplot | 64b93f172ed729366cda12a1878733d3fc899cb9 | d9028b89c8ae81845b4653deccef897f7ecc8cb8 | refs/heads/master | 2020-05-29T11:57:16.338751 | 2014-05-02T18:14:37 | 2014-05-02T18:14:37 | 19,389,450 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,326 | py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import get_assert_same_ggplot, cleanup, assert_same_elements
assert_same_ggplot = get_assert_same_ggplot(__file__)
from nose.tools import (assert_true, assert_raises, assert_is,
assert_is_not, assert_equal)
from ggplot import *
import six
import pandas as pd
from ggplot.components import assign_visual_mapping
def test_legend_structure():
df = pd.DataFrame({
'xmin': [1, 3, 5],
'xmax': [2, 3.5, 7],
'ymin': [1, 4, 6],
'ymax': [5, 5, 9],
'fill': ['blue', 'red', 'green'],
'quality': ['good', 'bad', 'ugly'],
'alpha': [0.1, 0.5, 0.9],
'texture': ['hard', 'soft', 'medium']})
gg = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
colour='quality', fill='fill', alpha='alpha',
linetype='texture'))
new_df, legend = assign_visual_mapping(df, gg.aesthetics, gg)
# All mapped aesthetics must have an entry in the legend
for aesthetic in ('color', 'fill', 'alpha', 'linetype'):
assert(aesthetic in legend)
    # None of the unassigned aesthetics should have an entry in the legend
assert('size' not in legend)
assert('shape' not in legend)
# legend entries should remember the column names
# to which they were mapped
assert(legend['fill']['column_name'] == 'fill')
assert(legend['color']['column_name'] == 'quality')
assert(legend['linetype']['column_name'] == 'texture')
assert(legend['alpha']['column_name'] == 'alpha')
# Discrete columns for non-numeric data
assert(legend['fill']['scale_type'] == 'discrete')
assert(legend['color']['scale_type'] == 'discrete')
assert(legend['linetype']['scale_type'] == 'discrete')
assert(legend['alpha']['scale_type'] == 'continuous')
# Alternate
df2 = pd.DataFrame.copy(df)
df2['fill'] = [90, 3.2, 8.1]
gg = ggplot(df2, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
colour='quality', fill='fill', alpha='alpha',
linetype='texture'))
new_df, legend = assign_visual_mapping(df2, gg.aesthetics, gg)
assert(legend['fill']['scale_type'] == 'continuous')
e4bcdf2e5a6ee879997a68875791a84f8e83bf15 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part009770.py | 6b7721e39926572acd750c7dcc8d9bfd53756e66 | []
| no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,931 | py | from sympy.abc import *
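# NOTE: this module appears to be machine-generated (matchpy's many-to-one
# matcher code generator); the numbered states and pass-only branches come
# from the generator, so the unusual control flow is left as produced.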
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher20347(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i3.1.2.2.2.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher20347._instance is None:
CommutativeMatcher20347._instance = CommutativeMatcher20347()
return CommutativeMatcher20347._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 20346
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i3.1.2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 20348
if len(subjects) >= 1:
tmp2 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i3.1.2.2.2.1.0', tmp2)
except ValueError:
pass
else:
pass
# State 20349
if len(subjects) == 0:
pass
# 0: x*f
yield 0, subst2
subjects.appendleft(tmp2)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp4 = subjects.popleft()
associative1 = tmp4
associative_type1 = type(tmp4)
subjects5 = deque(tmp4._args)
matcher = CommutativeMatcher20351.get()
tmp6 = subjects5
subjects5 = []
for s in tmp6:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp6, subst0):
pass
if pattern_index == 0:
pass
# State 20352
if len(subjects) == 0:
pass
# 0: x*f
yield 0, subst1
subjects.appendleft(tmp4)
return
yield
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy.utils import VariableWithCount
from collections import deque
from .generated_part009771 import *
from multiset import Multiset
3eb9faa27601591cf0d6b31b28370c3d97589540 | 61d08e23fbb62e16f7bd9d43673b1cf4e0558c37 | /miraPipeline/pipeline/preflight/preflight_libs/get_context.py | cc6dbb2fd318693a80edb4f861ef0a9019199413 | []
| no_license | jonntd/mira | 1a4b1f17a71cfefd20c96e0384af2d1fdff813e8 | 270f55ef5d4fecca7368887f489310f5e5094a92 | refs/heads/master | 2021-08-31T12:08:14.795480 | 2017-12-21T08:02:06 | 2017-12-21T08:02:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # -*- coding: utf-8 -*-
import get_file_name
import get_engine
def get_context():
try:
from miraLibs.pipeLibs import pipeFile
scene_name = get_file_name.get_file_name()
x = pipeFile.PathDetails.parse_path(scene_name)
return x.step
    except Exception:
engine = get_engine.get_engine()
if engine == "maya":
return "MidMdl"
elif engine == "nuke":
return "Comp"
elif engine == "houdini":
return "Vfx"
6c670e880143af3d3df7f3fa48cd73def4f4535b | 0ee88932af5b6ed088e471abcbd5f40fd9cbd688 | /Other/eraser.py | 4011853bf7baa80b3ee2c2398547b2997ebdd682 | []
| no_license | BjaouiAya/Cours-Python | 48c740966f9814e1045035ffb902d14783d36194 | 14b306447e227ddc5cb04b8819f388ca9f91a1d6 | refs/heads/master | 2021-06-10T22:17:38.731030 | 2016-11-11T16:45:05 | 2016-11-11T16:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | #! /usr/bin/env python
# -*- coding:Utf8 -*-
"""Renaming class constructor"""
########################################
#### Classes and Methods imported : ####
########################################
import os
import re
#####################
#### Constants : ####
#####################
# Regex and folder parameters for music files before burning
REGEX_MP3 = re.compile(r"\A[0-9]{2}\. " r"|\A[0-9]{2} \- " r"|\A[0-9]{2}[ \-]")
FOLDER_MP3 = "/home/pampi/Output/cd_test/"
#######################################
#### Classes, Methods, Functions : ####
#######################################
class RenameMe:
"""
In all files inside a directory (self.path) delete a part of the name
according to regex and rename old file.
To check another folder you only have to set self.path to new directory.
Can be used to remove numbered songs like "10 song_nb.mp3".
"""
def __init__(self, path="", regex=REGEX_MP3):
self.path = path
self.regex = regex
def change_regex(self, source, regex_expr=r'', replacement="", mode="rb"):
"""
Change file name according to regex replacement and path variable
"""
with open(source, mode) as f:
old = f.name[len(self.path):]
new = re.sub(self.regex, replacement, old)
os.rename(f.name, self.path+new)
            if old != new:
print(old, "------->", new)
else:
print(old, " didn't change")
def regex_loop(self):
"""
Check all elements inside self.path directory and call
change if element is a file
"""
for mp3 in os.listdir(self.path):
if os.path.isfile(self.path+mp3):
self.change_regex(self.path+mp3)
########################
#### Main Program : ####
########################
if __name__ == '__main__':
cd_dir = RenameMe(FOLDER_MP3)
cd_dir.regex_loop()
dc0e963aa23abe50e37b51a150717f3e95b98ee4 | e627d47d5102bd68c2012501aa120833b9271da7 | /aws_api/core/admin.py | deadee44fdc7c2eff24954c469f2c470d31764f1 | []
| no_license | aayushgupta97/django-km | 5ba275d1f85eaaf8bc052e47d2b6b6f1a5e4cf90 | d34cd4f8637718044832d9baeecee86df5e821a5 | refs/heads/master | 2023-01-02T18:12:31.384634 | 2020-10-24T09:21:50 | 2020-10-24T09:21:50 | 298,391,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | from django.contrib import admin
from .models import AWSCredentials
# Register your models here.
admin.site.register(AWSCredentials)
a5d02be7324103df8b24f058e3b8de41af441989 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02768/s686694566.py | 553aafe96bef5565407dfea61c0ba091a9ef4718 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | n, a, b = list(map(int, input().split(' ')))
# binomial coefficients mod a prime (search: "binomial coefficient mod")
mmm = 1000000000 + 7
fac = []
inv = []
inv_fac = []
def init(n):
fac.append(1)
fac.append(1)
inv.append(0)
inv.append(1)
inv_fac.append(1)
inv_fac.append(1)
for i in range(2, n):
fac.append(fac[-1] * i % mmm)
inv.append(mmm - inv[mmm%i] * (mmm // i) % mmm)
inv_fac.append(inv_fac[-1] * inv[-1] % mmm)
def choice(a, b):
if a < b:
return 0
v = 1
for i in range(b):
        v = (v * (a-i)) % mmm  # this happened to pass before, but it must be a here, not n (noticed midway through problem E)
return v * inv_fac[b]
init(int(2e5) + 1)
ans = pow(2, n, mmm) - 1  # pow(value, exponent, modulus)
bunshi = 1  # "bunshi" = numerator; leftover scratch work, never used below
for i in range(a):
    bunshi = (bunshi * (n-i)) % mmm
ans -= choice(n, a)
ans -= choice(n, b)
print(ans % mmm)
'''
4, 1, 3 => 4c2 + 4c4 -> 6+1 = 7
4 + 6 + 4 + 1 - 4c1 - 4c2
1 1
11 2
121 4
1331 8
14641 16, minus 1 because the empty subset is excluded; for large n use binomial coefficients mod a prime (search: "binomial coefficient mod")
'''
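# The identity used above, checked against the sample n=4, a=1, b=3:
#   answer = (2^n - 1) - C(n, a) - C(n, b)  (mod 10^9 + 7)
#          = 15 - 4 - 4 = 7
# i.e. every non-empty subset minus the subsets of the two forbidden sizes.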
b27239657a5741c26fc636ccfde4758a19cdea07 | 4e8e9ed2a8fb69ed8b46066a8d967e4c107013a4 | /main/auth/reddit.py | b4e81c58ba20f36a1a1a70b8a93f407dda2e0712 | [
"MIT"
]
| permissive | welovecoding/vote4code | a57b3d155096d362dca47587ad2985b4201ef036 | be265d553af35dc6c5322ecb3f7d5b3cf7691b75 | refs/heads/master | 2021-08-11T22:46:40.884030 | 2019-11-15T16:15:05 | 2019-11-15T16:15:05 | 90,191,931 | 14 | 0 | MIT | 2021-08-10T22:50:49 | 2017-05-03T20:46:02 | Python | UTF-8 | Python | false | false | 2,613 | py | # coding: utf-8
import base64
from flask_oauthlib import client
from werkzeug import urls
import flask
import auth
import config
import model
import util
from main import app
reddit_config = dict(
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
access_token_url='https://ssl.reddit.com/api/v1/access_token',
authorize_url='https://ssl.reddit.com/api/v1/authorize',
base_url='https://oauth.reddit.com/api/v1/',
consumer_key=model.Config.get_master_db().reddit_client_id,
consumer_secret=model.Config.get_master_db().reddit_client_secret,
request_token_params={'scope': 'identity', 'state': util.uuid()},
)
reddit = auth.create_oauth_app(reddit_config, 'reddit')
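# Reddit's token endpoint expects the client credentials via an HTTP Basic
# Authorization header rather than in the POST body, so the default
# flask-oauthlib token exchange is replaced with the handler below.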
def reddit_handle_oauth2_response():
access_args = {
'code': flask.request.args.get('code'),
'client_id': reddit.consumer_key,
'redirect_uri': flask.session.get('%s_oauthredir' % reddit.name),
}
access_args.update(reddit.access_token_params)
auth_header = 'Basic %s' % base64.b64encode(
('%s:%s' % (reddit.consumer_key, reddit.consumer_secret)).encode('latin1')
).strip().decode('latin1')
response, content = reddit.http_request(
reddit.expand_url(reddit.access_token_url),
method=reddit.access_token_method,
data=urls.url_encode(access_args),
headers={
'Authorization': auth_header,
'User-Agent': config.USER_AGENT,
},
)
data = client.parse_response(response, content)
if response.code not in (200, 201):
raise client.OAuthException(
'Invalid response from %s' % reddit.name,
type='invalid_response', data=data,
)
return data
reddit.handle_oauth2_response = reddit_handle_oauth2_response
@app.route('/api/auth/callback/reddit/')
def reddit_authorized():
response = reddit.authorized_response()
if response is None or flask.request.args.get('error'):
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (response['access_token'], '')
me = reddit.request('me')
user_db = retrieve_user_from_reddit(me.data)
return auth.signin_user_db(user_db)
@reddit.tokengetter
def get_reddit_oauth_token():
return flask.session.get('oauth_token')
@app.route('/signin/reddit/')
def signin_reddit():
return auth.signin_oauth(reddit)
def retrieve_user_from_reddit(response):
auth_id = 'reddit_%s' % response['id']
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
return user_db
return auth.create_user_db(
auth_id=auth_id,
name=response['name'],
username=response['name'],
)
9388ed6505d0881d0e65812e0362e9978ec0feb0 | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_conv3d_transpose_13.py | ed625c225a1cb9bf00eec92280375ae7f4468a6a | []
| no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 641 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_conv3d_transpose_13():
"""test conv3d_transpose_13"""
jit_case = JitTrans(case=yml.get_case_info("conv3d_transpose_13"))
jit_case.jit_run()
42bce4085193456af583fe4bd69f5b879e5fe92f | a39224fcd17ff2adb77fa643afed63bc3342a3f4 | /setup.py | e8128dd9f0742381369839c237e8c5bf807d6ee0 | [
"MIT"
]
| permissive | HemuManju/reaction-time-classification | ef9ddb241803a16b4b9411eaa8375e8b25fcc9e1 | 8d468516c0591359e082fb8bc5850f8e89e5a6e4 | refs/heads/master | 2023-01-14T09:10:04.142946 | 2021-09-22T19:49:32 | 2021-09-22T19:49:32 | 179,614,766 | 0 | 0 | MIT | 2022-12-27T15:36:26 | 2019-04-05T03:39:43 | Python | UTF-8 | Python | false | false | 261 | py | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
    description='Classification of reaction time of an '
                'operator performing tele-operation',
    author='Hemanth',
license='MIT',
)
a3bc969b5283c5f611660bb173b2d3769ae854c3 | 2a68b03c923119cc747c4ffcc244477be35134bb | /interviews/A/VO/wordLadderII.py | a00dffb9d9e8ec178fca30545a4ec9ff564ba284 | []
| no_license | QitaoXu/Lintcode | 0bce9ae15fdd4af1cac376c0bea4465ae5ea6747 | fe411a0590ada6a1a6ae1166c86c585416ac8cda | refs/heads/master | 2020-04-24T20:53:27.258876 | 2019-09-24T23:54:59 | 2019-09-24T23:54:59 | 172,259,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,500 | py | from collections import deque
class Solution:
"""
@param: start: a string
@param: end: a string
@param: dict: a set of string
@return: a list of lists of string
"""
def findLadders(self, start, end, dict):
# write your code here
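        # Strategy: a BFS from `end` fills `distance` with each word's hop
        # count to the target, then a DFS from `start` only follows words
        # whose distance drops by exactly 1, so every emitted path is shortest.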
dict.add(start)
dict.add(end)
distance = {}
self.bfs(end, start, dict, distance)
results = []
path = [start]
self.dfs(start, end, path, dict, distance, results)
return results
def bfs(self, start, end, wordDict, distance):
queue = deque()
queue.append(start)
distance[start] = 0
while queue:
size = len(queue)
for _ in range(size):
word = queue.popleft()
for next_word in self.get_next_words(word):
if next_word not in wordDict:
continue
if next_word in distance:
continue
queue.append(next_word)
distance[next_word] = distance[word] + 1
def get_next_words(self, word):
next_words = []
for i in range(len(word)):
left, right = word[: i], word[i + 1:]
for c in "abcdefghijklmnopqrstuvwxyz":
if c == word[i]:
continue
next_word = left + c + right
next_words.append(next_word)
return next_words
def dfs(self, curt, target, path, wordDict, distance, results):
if curt == target:
results.append(path.copy())
return
for next_word in self.get_next_words(curt):
if next_word not in wordDict:
continue
if distance[next_word] != distance[curt] - 1:
continue
path.append(next_word)
self.dfs(next_word, target, path, wordDict, distance, results)
path.pop()
1e73bcb3091075ebead0ba1e029588dec88fead0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/46/usersdata/98/17294/submittedfiles/funcoes1.py | 94f0f1ec40294a695cc95ea950a44bec636efae5 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,592 | py | # -*- coding: utf-8 -*-
from __future__ import division
def crescente (lista):
    # write the code for the crescente function here
cont=0
for i in range(0,len(lista)-1,1):
if lista[i]<lista[i+1]:
cont=cont+1
if cont==len(lista)-1:
return True
else:
return False
# write the remaining functions
def decrescente (lista):
cont=0
for i in range(0,len(lista)-1,1):
if lista[i]>lista[i+1]:
cont=cont+1
if cont==len(lista)-1:
return True
else:
return False
def iguais (lista):
cont=0
for i in range(0,len(lista)-1,1):
if lista[i]==lista[i+1]:
cont=cont+1
if cont>0:
return True
else:
return False
# main program
n = input('Enter the number of items in the lists: ')
a = []
b = []
c = []
for i in range(0, n, 1):
    a.append(input('Enter a value for list a: '))
for i in range(0, n, 1):
    b.append(input('Enter a value for list b: '))
for i in range(0, n, 1):
    c.append(input('Enter a value for list c: '))
for lista in [a, b, c]:
    if crescente(lista):
        print('S')
    else:
        print('N')
    if decrescente(lista):
        print('S')
    else:
        print('N')
    if iguais(lista):
        print('S')
    else:
        print('N')
"[email protected]"
]
| |
c470665fd971ef55fbcbf2c680c5254eb0e69e51 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/memre/armv7l/obsolete/corp2/system/base/man-pages/actions.py | e17573a3dc5e34c142d651a5d3274ff1b0d7e803 | []
| no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import crosstools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def install():
crosstools.rawInstall("DESTDIR=%s" % get.installDIR())
crosstools.rawInstall("DESTDIR=%s -C ../man-pages-posix-2003-a" % get.installDIR())
# These come from attr
pisitools.remove("/usr/share/man/man2/flistxattr.2")
pisitools.remove("/usr/share/man/man2/removexattr.2")
pisitools.remove("/usr/share/man/man2/fgetxattr.2")
pisitools.remove("/usr/share/man/man2/fsetxattr.2")
pisitools.remove("/usr/share/man/man2/lsetxattr.2")
pisitools.remove("/usr/share/man/man2/lremovexattr.2")
pisitools.remove("/usr/share/man/man2/listxattr.2")
pisitools.remove("/usr/share/man/man2/getxattr.2")
pisitools.remove("/usr/share/man/man2/setxattr.2")
pisitools.remove("/usr/share/man/man2/llistxattr.2")
pisitools.remove("/usr/share/man/man2/fremovexattr.2")
pisitools.remove("/usr/share/man/man2/lgetxattr.2")
# These come from libcap
pisitools.remove("/usr/share/man/man2/capget.2")
pisitools.remove("/usr/share/man/man2/capset.2")
# Comes from xorg-input
pisitools.remove("/usr/share/man/man4/mouse.4")
pisitools.dodoc("man-pages-*.Announce", "README")
ca152810fc429ad3a3aa2281e6960067671ebd20 | 5f862a5f0116030adb4ce8d1f66c22e52eb5546f | /test/test_automl/test_smbo.py | 7094e9c51ac478e5b9391d662872db4ddc3f1610 | [
"BSD-3-Clause"
]
| permissive | IsoLATionzhw/auto-sklearn | 9c1adbffe8f077471cbf9eb1c0a89d4ab9593220 | a263efb49f7b7f597963bc1e787105ea7615ea75 | refs/heads/master | 2021-07-15T05:47:23.268566 | 2017-10-04T10:08:21 | 2017-10-04T10:08:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,894 | py | import unittest
from autosklearn.smbo import AutoMLSMBO
from autosklearn.metrics import accuracy
from smac.facade.smac_facade import SMAC
from smac.scenario.scenario import Scenario
from smac.tae.execute_ta_run import StatusType
from ConfigSpace import ConfigurationSpace, UniformFloatHyperparameter, Configuration
class TestSMBO(unittest.TestCase):
def test_choose_next(self):
configspace = ConfigurationSpace()
configspace.add_hyperparameter(UniformFloatHyperparameter('a', 0, 1))
configspace.add_hyperparameter(UniformFloatHyperparameter('b', 0, 1))
dataset_name = 'foo'
func_eval_time_limit = 15
total_walltime_limit = 15
memory_limit = 3072
auto = AutoMLSMBO(
config_space=None,
dataset_name=dataset_name,
backend=None,
func_eval_time_limit=func_eval_time_limit,
total_walltime_limit=total_walltime_limit,
memory_limit=memory_limit,
watcher=None,
metric=accuracy
)
auto.config_space = configspace
scenario = Scenario({
'cs': configspace,
'cutoff_time': func_eval_time_limit,
'wallclock_limit': total_walltime_limit,
'memory_limit': memory_limit,
'run_obj': 'quality',
})
smac = SMAC(scenario)
self.assertRaisesRegex(
ValueError,
'Cannot use SMBO algorithm on empty runhistory',
auto.choose_next,
smac
)
config = Configuration(configspace, values={'a': 0.1, 'b': 0.2})
# TODO make sure the incumbent is always set?
smac.solver.incumbent = config
runhistory = smac.solver.runhistory
runhistory.add(config=config, cost=0.5, time=0.5,
status=StatusType.SUCCESS)
auto.choose_next(smac)
d5633a2b848b581a3a034619a61450208a8052e8 | da1d21bb8d0760bfba61cd5d9800400f928868aa | /apps/common/utils/iterables.py | 3d4d2470b42a38d43cc00ac6ac9d420b5e00c8f0 | []
| no_license | biznixcn/WR | 28e6a5d10f53a0bfe70abc3a081c0bf5a5457596 | 5650fbe59f8dfef836503b8092080f06dd214c2c | refs/heads/master | 2021-01-20T23:53:52.887225 | 2014-05-13T02:00:33 | 2014-05-13T02:00:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | # -*- coding: utf-8 -*-
from itertools import izip_longest
def grouper(n, iterable, padvalue=None):
"grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
return izip_longest(*[iter(iterable)]*n, fillvalue=padvalue)
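# Python 3 note (izip_longest exists only on Python 2): an equivalent port
# would be, assuming a py3 runtime:
#   from itertools import zip_longest
#   def grouper(n, iterable, padvalue=None):
#       return zip_longest(*[iter(iterable)] * n, fillvalue=padvalue)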
dafc3e377763e40bd4c4d5e4406d87111ac9744b | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/interactive-physics-editor/operators/setup_phys_drawing.py | 01edd2b8c2993ca95f30bc14ca621432a93ca02a | []
| no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,321 | py | # Copyright (C) 2018 Christopher Gearhart
# [email protected]
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# System imports
import bmesh
import math
# Blender imports
import bpy
import bgl
import blf
from bpy_extras.view3d_utils import location_3d_to_region_2d, region_2d_to_location_3d, region_2d_to_origin_3d, region_2d_to_vector_3d
from bpy.types import SpaceView3D
from bpy.props import *
from ..functions import *
class interactive_sim_drawing():
##############################################
# Draw handler function
# from CG Cookie's retopoflow plugin
def ui_start(self):
# # report something useful to user
# bpy.context.area.header_text_set("Click & drag to add bricks (+'ALT' to remove). Press 'RETURN' to commit changes")
# update dpi
prefs = get_preferences(bpy.context)
ui_scale = prefs.view.ui_scale
pixel_size = prefs.system.pixel_size
self.dpi = int(72 * ui_scale * pixel_size)
# add callback handlers
self.cb_pr_handle = SpaceView3D.draw_handler_add(self.draw_callback_preview, (bpy.context, ), 'WINDOW', 'PRE_VIEW')
# self.cb_pv_handle = SpaceView3D.draw_handler_add(self.draw_callback_postview, (bpy.context, ), 'WINDOW', 'POST_VIEW')
# self.cb_pp_handle = SpaceView3D.draw_handler_add(self.draw_callback_postpixel, (bpy.context, ), 'WINDOW', 'POST_PIXEL')
# darken other spaces
self.spaces = [
bpy.types.SpaceClipEditor,
bpy.types.SpaceConsole,
bpy.types.SpaceDopeSheetEditor,
bpy.types.SpaceFileBrowser,
bpy.types.SpaceGraphEditor,
bpy.types.SpaceImageEditor,
bpy.types.SpaceInfo,
bpy.types.SpaceLogicEditor,
bpy.types.SpaceNLA,
bpy.types.SpaceNodeEditor,
bpy.types.SpaceOutliner,
bpy.types.SpaceProperties,
bpy.types.SpaceSequenceEditor,
bpy.types.SpaceTextEditor,
bpy.types.SpaceTimeline,
#bpy.types.SpaceUVEditor, # <- does not exist?
bpy.types.SpaceUserPreferences,
#'SpaceView3D', # <- specially handled
]
self.areas = [ 'WINDOW', 'HEADER' ]
# ('WINDOW', 'HEADER', 'CHANNELS', 'TEMPORARY', 'UI', 'TOOLS', 'TOOL_PROPS', 'PREVIEW')
# self.cb_pp_tools = SpaceView3D.draw_handler_add(self.draw_callback_cover, (bpy.context, ), 'TOOLS', 'POST_PIXEL')
self.cb_pp_props = SpaceView3D.draw_handler_add(self.draw_callback_cover, (bpy.context, ), 'TOOL_PROPS', 'POST_PIXEL')
self.cb_pp_ui = SpaceView3D.draw_handler_add(self.draw_callback_cover, (bpy.context, ), 'UI', 'POST_PIXEL')
self.cb_pp_header = SpaceView3D.draw_handler_add(self.draw_callback_cover, (bpy.context, ), 'HEADER', 'POST_PIXEL')
self.cb_pp_all = [
(s, a, s.draw_handler_add(self.draw_callback_cover, (bpy.context,), a, 'POST_PIXEL'))
for s in self.spaces
for a in self.areas
]
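        # Every non-3D editor space gets a full-viewport cover callback so the
        # rest of the UI is dimmed while the physics session is active.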
self.draw_preview()
tag_redraw_areas()
def ui_end(self):
# remove callback handlers
if hasattr(self, 'cb_pr_handle'):
SpaceView3D.draw_handler_remove(self.cb_pr_handle, "WINDOW")
del self.cb_pr_handle
if hasattr(self, 'cb_pv_handle'):
SpaceView3D.draw_handler_remove(self.cb_pv_handle, "WINDOW")
del self.cb_pv_handle
if hasattr(self, 'cb_pp_handle'):
SpaceView3D.draw_handler_remove(self.cb_pp_handle, "WINDOW")
del self.cb_pp_handle
if hasattr(self, 'cb_pp_tools'):
SpaceView3D.draw_handler_remove(self.cb_pp_tools, "TOOLS")
del self.cb_pp_tools
if hasattr(self, 'cb_pp_props'):
SpaceView3D.draw_handler_remove(self.cb_pp_props, "TOOL_PROPS")
del self.cb_pp_props
if hasattr(self, 'cb_pp_ui'):
SpaceView3D.draw_handler_remove(self.cb_pp_ui, "UI")
del self.cb_pp_ui
if hasattr(self, 'cb_pp_header'):
SpaceView3D.draw_handler_remove(self.cb_pp_header, "HEADER")
del self.cb_pp_header
if hasattr(self, 'cb_pp_all'):
for s,a,cb in self.cb_pp_all: s.draw_handler_remove(cb, a)
del self.cb_pp_all
tag_redraw_areas()
def draw_callback_preview(self, context):
bgl.glPushAttrib(bgl.GL_ALL_ATTRIB_BITS) # save OpenGL attributes
try: self.draw_preview()
except: interactive_physics_handle_exception()
bgl.glPopAttrib() # restore OpenGL attributes
# def draw_callback_postview(self, context):
# # self.drawing.update_dpi()
# # self.drawing.set_font_size(12, force=True)
# # self.drawing.point_size(1)
# # self.drawing.line_width(1)
# bgl.glPushAttrib(bgl.GL_ALL_ATTRIB_BITS) # save OpenGL attributes
# try: self.draw_postview()
# except: handle_exception()
# bgl.glPopAttrib() # restore OpenGL attributes
def draw_callback_postpixel(self, context):
bgl.glPushAttrib(bgl.GL_ALL_ATTRIB_BITS) # save OpenGL attributes
try: self.draw_postpixel()
except: handle_exception()
bgl.glPopAttrib() # restore OpenGL attributes
def draw_callback_cover(self, context):
bgl.glPushAttrib(bgl.GL_ALL_ATTRIB_BITS)
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPushMatrix()
bgl.glLoadIdentity()
bgl.glColor4f(0,0,0,0.5) # TODO: use window background color??
bgl.glEnable(bgl.GL_BLEND)
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glBegin(bgl.GL_QUADS) # TODO: not use immediate mode
bgl.glVertex2f(-1, -1)
bgl.glVertex2f( 1, -1)
bgl.glVertex2f( 1, 1)
bgl.glVertex2f(-1, 1)
bgl.glEnd()
bgl.glPopMatrix()
bgl.glPopAttrib()
def draw_preview(self):
bgl.glEnable(bgl.GL_MULTISAMPLE)
bgl.glEnable(bgl.GL_LINE_SMOOTH)
bgl.glHint(bgl.GL_LINE_SMOOTH_HINT, bgl.GL_NICEST)
bgl.glEnable(bgl.GL_BLEND)
bgl.glEnable(bgl.GL_POINT_SMOOTH)
bgl.glDisable(bgl.GL_DEPTH_TEST)
bgl.glMatrixMode(bgl.GL_MODELVIEW)
bgl.glPushMatrix()
bgl.glLoadIdentity()
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPushMatrix()
bgl.glLoadIdentity()
# add background gradient
bgl.glBegin(bgl.GL_TRIANGLES)
for i in range(0,360,10):
r0,r1 = i*math.pi/180.0, (i+10)*math.pi/180.0
x0,y0 = math.cos(r0)*2,math.sin(r0)*2
x1,y1 = math.cos(r1)*2,math.sin(r1)*2
bgl.glColor4f(0,0,0.01,0.0)
bgl.glVertex2f(0,0)
bgl.glColor4f(0,0,0.01,0.8)
bgl.glVertex2f(x0,y0)
bgl.glVertex2f(x1,y1)
bgl.glEnd()
bgl.glMatrixMode(bgl.GL_PROJECTION)
bgl.glPopMatrix()
bgl.glMatrixMode(bgl.GL_MODELVIEW)
bgl.glPopMatrix()
def draw_postpixel(self):
dtext = " 'D' for Draw/Cut Tool"
mtext = " 'S' for Merge/Split Tool"
ptext = " 'M' for Material Paintbrush Tool"
# draw instructions text
if self.mode == "DRAW":
text = "Click & drag to add bricks"
self.draw_text_2d(text, position=(50, 250))
text = "+'ALT' to remove"
self.draw_text_2d(text, position=(50, 220))
text = "+'SHIFT' to cut"
self.draw_text_2d(text, position=(50, 190))
dtext = "*" + dtext[1:]
elif self.mode == "MERGE/SPLIT":
text = "Click & drag to merge bricks"
self.draw_text_2d(text, position=(50, 250))
text = "+'ALT' to split horizontally"
self.draw_text_2d(text, position=(50, 220))
text = "+'SHIFT' to split vertically"
self.draw_text_2d(text, position=(50, 190))
mtext = "*" + mtext[1:]
elif self.mode == "PAINT":
text = "Click & drag to paint bricks with target material"
self.draw_text_2d(text, position=(50, 190))
ptext = "*" + ptext[1:]
text = "'RETURN' to commit changes"
self.draw_text_2d(text, position=(50, 160))
# ...api_current/bpy.types.Area.html?highlight=bpy.types.area
header_height = bpy.context.area.regions[0].height # 26px
height = bpy.context.area.height + header_height
# draw tool switcher text
text = "Switch Tools:"
self.draw_text_2d(text, position=(40, height - 200))
self.draw_text_2d(dtext, position=(40, height - 230))
self.draw_text_2d(mtext, position=(40, height - 260))
self.draw_text_2d(ptext, position=(40, height - 290))
# if self.mode == "DRAW":
# text = "Click & drag to add bricks (+'ALT' to remove, +'SHIFT' to cut)"
# elif self.mode == "PAINT":
# text = "Click & drag to paint bricks with target material"
# elif self.mode == "MERGE/SPLIT":
# text = "Click & drag to merge bricks (+'ALT' to split horizontally, +'SHIFT' to split vertically)"
# self.draw_text_2d(text, position=(127, 80))
# text = "Press 'RETURN' to commit changes"
# self.draw_text_2d(text, position=(127, 50))
def draw_text_2d(self, text, font_id=0, color=(1, 1, 1, 1), position=(0, 0)):
# draw some text
bgl.glColor4f(*color)
blf.position(font_id, position[0], position[1], 0)
blf.size(font_id, 11, self.dpi)
blf.draw(font_id, text)
bgl.glColor4f(0.0, 0.0, 0.0, 1.0)
# def draw_centerpoint(color, point, width=1):
# bgl.glLineWidth(width)
# bgl.glColor4f(*color)
# bgl.glBegin(bgl.GL_POINTS)
# bgl.glVertex3f(*point)
#
# def Point_to_depth(self, xyz):
# xy = location_3d_to_region_2d(self.region, self.r3d, xyz)
# if xy is None: return None
# oxyz = region_2d_to_origin_3d(self.region, self.r3d, xy)
# return (xyz - oxyz).length
#
# # def Point2D_to_Vec(self, xy:Point2D):
# # if xy is None: return None
# # return Vector(region_2d_to_vector_3d(self.actions.region, self.actions.r3d, xy))
# #
# # def Point2D_to_Origin(self, xy:Point2D):
# # if xy is None: return None
# # return Point(region_2d_to_origin_3d(self.actions.region, self.actions.r3d, xy))
# #
# # def Point2D_to_Ray(self, xy:Point2D):
# # if xy is None: return None
# # return Ray(self.Point2D_to_Origin(xy), self.Point2D_to_Vec(xy))
# #
# # def Point2D_to_Point(self, xy:Point2D, depth:float):
# # r = self.Point2D_to_Ray(xy)
# # if r is None or r.o is None or r.d is None or depth is None:
# # return None
# # return Point(r.o + depth * r.d)
# #
# # def size2D_to_size(self, size2D:float, xy:Point2D, depth:float):
# # # computes size of 3D object at distance (depth) as it projects to 2D size
# # # TODO: there are more efficient methods of computing this!
# # p3d0 = self.Point2D_to_Point(xy, depth)
# # p3d1 = self.Point2D_to_Point(xy + Vector((size2D,0)), depth)
# # return (p3d0 - p3d1).length
#
# def update_ui_mouse_pos(self):
# if self.loc is None or self.normal is None:
# self.clear_ui_mouse_pos()
# return
# depth = self.Point_to_depth(self.loc)
# if depth is None:
# self.clear_ui_mouse_pos()
# return
# rmat = Matrix.Rotation(self.oz.angle(self.normal), 4, self.oz.cross(self.normal))
# self.hit = True
# self.scale = 1 # self.rfcontext.size2D_to_size(1.0, self.mouse, depth)
# self.hit_p = self.loc
# self.hit_x = Vector(rmat * self.ox)
# self.hit_y = Vector(rmat * self.oy)
# self.hit_z = Vector(rmat * self.oz)
# self.hit_rmat = rmat
#
# def clear_ui_mouse_pos(self):
# ''' called when mouse is moved outside View3D '''
# self.hit = False
# self.hit_p = None
# self.hit_x = None
# self.hit_y = None
# self.hit_z = None
# self.hit_rmat = None
#
# @staticmethod
# @blender_version('<','2.79')
# def update_dpi():
# paintbrush._dpi = get_preferences(bpy.context).system.dpi
# if get_preferences(bpy.context).system.virtual_pixel_mode == 'DOUBLE':
# paintbrush._dpi *= 2
# paintbrush._dpi *= get_preferences(bpy.context).system.pixel_size
# paintbrush._dpi = int(paintbrush._dpi)
# paintbrush._dpi_mult = paintbrush._dpi / 72
#
# @staticmethod
# @blender_version('>=','2.79')
# def update_dpi():
# paintbrush._ui_scale = get_preferences(bpy.context).view.ui_scale
# paintbrush._sysdpi = get_preferences(bpy.context).system.dpi
# paintbrush._pixel_size = get_preferences(bpy.context).system.pixel_size
# paintbrush._dpi = 72 # get_preferences(bpy.context).system.dpi
# paintbrush._dpi *= paintbrush._ui_scale
# paintbrush._dpi *= paintbrush._pixel_size
# paintbrush._dpi = int(paintbrush._dpi)
# paintbrush._dpi_mult = paintbrush._ui_scale * paintbrush._pixel_size * paintbrush._sysdpi / 72
# s = 'DPI information: scale:%0.2f, pixel:%0.2f, dpi:%d' % (paintbrush._ui_scale, paintbrush._pixel_size, paintbrush._sysdpi)
# if s != getattr(paintbrush, '_last_dpi_info', None):
# paintbrush._last_dpi_info = s
# print(s)
#
# def draw_postview(self):
# print("HERE")
# if not self.hit: return
# print("HERE2")
#
# cx,cy,cp = self.hit_x,self.hit_y,self.hit_p
# cs_outer = self.scale * self.radius
# cs_inner = self.scale * self.radius * math.pow(0.5, 1.0 / self.falloff)
# cr,cg,cb = self.color
#
# bgl.glDepthRange(0, 0.999) # squeeze depth just a bit
# bgl.glEnable(bgl.GL_BLEND)
# # self.drawing.line_width(2.0)
# # self.drawing.point_size(3.0)
# bgl.glPointSize(max(1, 3.0 * self._dpi_mult))
#
# ######################################
# # draw in front of geometry
#
# bgl.glDepthFunc(bgl.GL_LEQUAL)
# bgl.glDepthMask(bgl.GL_FALSE) # do not overwrite depth
#
# bgl.glColor4f(cr, cg, cb, 0.75 * self.strength)
# bgl.glBegin(bgl.GL_TRIANGLES)
# for p0,p1 in zip(self.points[:-1], self.points[1:]):
# x0,y0 = p0
# x1,y1 = p1
# outer0 = (cs_outer * ((cx * x0) + (cy * y0))) + cp
# outer1 = (cs_outer * ((cx * x1) + (cy * y1))) + cp
# inner0 = (cs_inner * ((cx * x0) + (cy * y0))) + cp
# inner1 = (cs_inner * ((cx * x1) + (cy * y1))) + cp
# bgl.glVertex3f(*outer0)
# bgl.glVertex3f(*outer1)
# bgl.glVertex3f(*inner0)
# bgl.glVertex3f(*outer1)
# bgl.glVertex3f(*inner1)
# bgl.glVertex3f(*inner0)
# bgl.glEnd()
#
# bgl.glColor4f(1, 1, 1, 1) # outer ring
# bgl.glBegin(bgl.GL_LINE_STRIP)
# for x,y in self.points:
# p = (cs_outer * ((cx * x) + (cy * y))) + cp
# bgl.glVertex3f(*p)
# bgl.glEnd()
#
# # bgl.glColor4f(1, 1, 1, 0.5) # inner ring
# # bgl.glBegin(bgl.GL_LINE_STRIP)
# # for x,y in self.points:
# # p = (cs_inner * ((cx * x) + (cy * y))) + cp
# # bgl.glVertex3f(*p)
# # bgl.glEnd()
#
# bgl.glColor4f(1, 1, 1, 0.25) # center point
# bgl.glBegin(bgl.GL_POINTS)
# bgl.glVertex3f(*cp)
# bgl.glEnd()
#
# # ######################################
# # # draw behind geometry (hidden below)
# #
# # bgl.glDepthFunc(bgl.GL_GREATER)
# # bgl.glDepthMask(bgl.GL_FALSE) # do not overwrite depth
# #
# # bgl.glColor4f(cr, cg, cb, 0.10 * self.strength)
# # bgl.glBegin(bgl.GL_TRIANGLES)
# # for p0,p1 in zip(self.points[:-1], self.points[1:]):
# # x0,y0 = p0
# # x1,y1 = p1
# # outer0 = (cs_outer * ((cx * x0) + (cy * y0))) + cp
# # outer1 = (cs_outer * ((cx * x1) + (cy * y1))) + cp
# # inner0 = (cs_inner * ((cx * x0) + (cy * y0))) + cp
# # inner1 = (cs_inner * ((cx * x1) + (cy * y1))) + cp
# # bgl.glVertex3f(*outer0)
# # bgl.glVertex3f(*outer1)
# # bgl.glVertex3f(*inner0)
# # bgl.glVertex3f(*outer1)
# # bgl.glVertex3f(*inner1)
# # bgl.glVertex3f(*inner0)
# # bgl.glEnd()
# #
# # bgl.glColor4f(1, 1, 1, 0.05) # outer ring
# # bgl.glBegin(bgl.GL_LINE_STRIP)
# # for x,y in self.points:
# # p = (cs_outer * ((cx * x) + (cy * y))) + cp
# # bgl.glVertex3f(*p)
# # bgl.glEnd()
# #
# # bgl.glColor4f(1, 1, 1, 0.025) # inner ring
# # bgl.glBegin(bgl.GL_LINE_STRIP)
# # for x,y in self.points:
# # p = (cs_inner * ((cx * x) + (cy * y))) + cp
# # bgl.glVertex3f(*p)
# # bgl.glEnd()
#
# ######################################
# # reset to defaults
#
# bgl.glDepthFunc(bgl.GL_LEQUAL)
# bgl.glDepthMask(bgl.GL_TRUE)
#
# bgl.glDepthRange(0, 1)
#
# return
#############################################
04521521cd080fa531cf3cecce5a57426136edae | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/w11.py | d34dec34a1f3c4bc9a0709f495d47f09c917783a | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
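# Tiny line-oriented interpreter: lines whose first token is 'w11' are print
# commands whose arguments, wrapped in standalone double-quote tokens, are
# echoed (a bare pair of quotes prints a blank line); any other line prints
# ERROR and stops processing.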
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'w11':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
    main(sys.argv[1])
52bece35aa3f449fd4068d45847eb3aca3b36443 | 411eff94020c192d5e5f657fa6012232ab1d051c | /game/src/coginvasion/ai/AIBaseGlobal.py | e02f38f0e5f171a4dab307e0fed79073eeab559e | []
| no_license | xMakerx/cio-src | 48c9efe7f9a1bbf619a4c95a4198aaace78b8491 | 60b2bdf2c4a24d506101fdab1f51752d0d1861f8 | refs/heads/master | 2023-02-14T03:12:51.042106 | 2021-01-15T14:02:10 | 2021-01-15T14:02:10 | 328,268,776 | 1 | 0 | null | 2021-01-15T15:15:35 | 2021-01-09T23:51:37 | Python | UTF-8 | Python | false | false | 960 | py | from AIBase import AIBase
from direct.directnotify.DirectNotifyGlobal import directNotify
from panda3d.core import RescaleNormalAttrib, NodePath, Notify
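# Mirror Panda3D's ShowBase bootstrapping: expose the shared singletons as
# builtins so AI-side modules can use base/taskMgr/etc. without importing them.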
__builtins__['base'] = AIBase()
__builtins__['ostream'] = Notify.out()
__builtins__['run'] = base.run
__builtins__['taskMgr'] = base.taskMgr
__builtins__['jobMgr'] = base.jobMgr
__builtins__['eventMgr'] = base.eventMgr
__builtins__['messenger'] = base.messenger
__builtins__['bboard'] = base.bboard
__builtins__['config'] = base.config
__builtins__['directNotify'] = directNotify
render = NodePath('render')
render.setAttrib(RescaleNormalAttrib.makeDefault())
render.setTwoSided(0)
__builtins__['render'] = render
from direct.showbase import Loader
base.loader = Loader.Loader(base)
__builtins__['loader'] = base.loader
directNotify.setDconfigLevels()
def inspect(anObject):
from direct.tkpanels import Inspector
Inspector.inspect(anObject)
__builtins__['inspect'] = inspect
taskMgr.finalInit()
2c60324b3fa048f21d4ddb7e4a4d608d2f4ae9fe | a8fa4a499c44dce9a82e768edc82bdd193797128 | /ScrapePlugins/Crunchyroll/Run.py | 072c151bc74086a6fe1c380808eb0b7785a732e7 | []
| no_license | oliuz/MangaCMS | d8b2e44922955f6b9310fb6e189115f1985f2e93 | 7e2a710a56248261ab01686d3e586c36ce4a857d | refs/heads/master | 2020-12-28T19:46:41.265347 | 2016-08-27T23:37:47 | 2016-08-27T23:37:47 | 67,316,457 | 1 | 0 | null | 2016-09-03T23:36:21 | 2016-09-03T23:36:21 | null | UTF-8 | Python | false | false | 505 | py |
from .DbLoader import DbLoader
from .ContentLoader import ContentLoader
import runStatus
import ScrapePlugins.RunBase
class Runner(ScrapePlugins.RunBase.ScraperBase):
loggerPath = "Main.Manga.CrunchyRoll.Run"
pluginName = "CrunchyRoll"
def _go(self):
fl = DbLoader()
fl.go()
fl.closeDB()
if not runStatus.run:
return
cl = ContentLoader()
cl.go()
cl.closeDB()
if __name__ == "__main__":
import utilities.testBase as tb
with tb.testSetup():
run = Runner()
run.go()
dd901b37ae78074d1b136ce7ad9d125fb38bfa9b | 1f38af9bae11acbe20dd8f5057b374b9760e6659 | /pyscf/geomopt/geometric_solver.py | 6e63b860d5f970435b404aca3d39f5e5b97bdb6f | [
"Apache-2.0"
]
| permissive | highlight0112/pyscf | d36104ef727f593d46fbfd3e5d865c6cd0316d84 | 4afbd42bad3e72db5bb94d8cacf1d5de76537bdd | refs/heads/master | 2020-03-25T01:16:59.927859 | 2019-03-06T01:11:59 | 2019-03-06T01:11:59 | 143,229,588 | 0 | 0 | Apache-2.0 | 2019-03-06T01:12:00 | 2018-08-02T02:05:59 | Python | UTF-8 | Python | false | false | 5,188 | py | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Interface to geomeTRIC library https://github.com/leeping/geomeTRIC
'''
import tempfile
import numpy
import geometric
import geometric.molecule
#from geometric import molecule
from pyscf import lib
from pyscf.geomopt.addons import as_pyscf_method, dump_mol_geometry
from pyscf import __config__
INCLUDE_GHOST = getattr(__config__, 'geomopt_berny_solver_optimize_include_ghost', True)
ASSERT_CONV = getattr(__config__, 'geomopt_berny_solver_optimize_assert_convergence', True)
class PySCFEngine(geometric.engine.Engine):
def __init__(self, scanner):
molecule = geometric.molecule.Molecule()
mol = scanner.mol
molecule.elem = [mol.atom_symbol(i) for i in range(mol.natm)]
# Molecule is the geometry parser for a bunch of formats which use
# Angstrom for Cartesian coordinates by default.
molecule.xyzs = [mol.atom_coords()*lib.param.BOHR] # In Angstrom
super(PySCFEngine, self).__init__(molecule)
self.scanner = scanner
self.cycle = 0
def calc_new(self, coords, dirname):
scanner = self.scanner
mol = scanner.mol
lib.logger.note(scanner, '\nGeometry optimization step %d', self.cycle)
self.cycle += 1
# geomeTRIC handles coords and gradients in atomic unit
coords = coords.reshape(-1,3)
if scanner.verbose >= lib.logger.NOTE:
dump_mol_geometry(self.scanner.mol, coords*lib.param.BOHR)
mol.set_geom_(coords, unit='Bohr')
energy, gradient = scanner(mol)
if scanner.assert_convergence and not scanner.converged:
raise RuntimeError('Nuclear gradients of %s not converged' % scanner.base)
return energy, gradient.ravel()
def kernel(method, assert_convergence=ASSERT_CONV,
include_ghost=INCLUDE_GHOST, constraints=None, **kwargs):
'''Optimize geometry with geomeTRIC library for the given method.
To adjust the convergence threshold, parameters can be set in kwargs as
below:
.. code-block:: python
conv_params = { # They are default settings
'convergence_energy': 1e-6, # Eh
'convergence_grms': 3e-4, # Eh/Bohr
'convergence_gmax': 4.5e-4, # Eh/Bohr
'convergence_drms': 1.2e-3, # Angstrom
'convergence_dmax': 1.8e-3, # Angstrom
}
from pyscf import geometric_solver
geometric_solver.optimize(method, **conv_params)
'''
if isinstance(method, lib.GradScanner):
g_scanner = method
elif getattr(method, 'nuc_grad_method', None):
g_scanner = method.nuc_grad_method().as_scanner()
else:
raise NotImplementedError('Nuclear gradients of %s not available' % method)
if not include_ghost:
g_scanner.atmlst = numpy.where(method.mol.atom_charges() != 0)[0]
g_scanner.assert_convergence = assert_convergence
tmpf = tempfile.mktemp(dir=lib.param.TMPDIR)
m = geometric.optimize.run_optimizer(customengine=PySCFEngine(g_scanner),
input=tmpf, constraints=constraints,
**kwargs)
#FIXME: geomeTRIC library keeps running until converged. We need a function
# to terminate the program even not converged.
conv = True
#return conv, method.mol.copy().set_geom_(m.xyzs[-1], unit='Bohr')
return method.mol.copy().set_geom_(m.xyzs[-1], unit='Angstrom')
optimize = kernel
del(INCLUDE_GHOST, ASSERT_CONV)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf, dft, cc, mp
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g')
mf = scf.RHF(mol)
conv_params = {
'convergence_energy': 1e-4, # Eh
'convergence_grms': 3e-3, # Eh/Bohr
'convergence_gmax': 4.5e-3, # Eh/Bohr
'convergence_drms': 1.2e-2, # Angstrom
'convergence_dmax': 1.8e-2, # Angstrom
}
mol1 = optimize(mf, **conv_params)
print(mf.kernel() - -153.219208484874)
print(scf.RHF(mol1).kernel() - -153.222680852335)
mf = dft.RKS(mol)
mf.xc = 'pbe,'
mf.conv_tol = 1e-7
mol1 = optimize(mf)
mymp2 = mp.MP2(scf.RHF(mol))
mol1 = optimize(mymp2)
mycc = cc.CCSD(scf.RHF(mol))
mol1 = optimize(mycc)
030af696a1ebdd2d98a56cc9345bfe20f5099896 | 67ceb35320d3d02867350bc6d460ae391e0324e8 | /practice/hard/0675-Cut_Trees_for_Golf_Event.py | e91dcd1441c759908435b4cb1b2766949823a97b | []
| no_license | mattjp/leetcode | fb11cf6016aef46843eaf0b55314e88ccd87c91a | 88ccd910dfdb0e6ca6a70fa2d37906c31f4b3d70 | refs/heads/master | 2023-01-22T20:40:48.104388 | 2022-12-26T22:03:02 | 2022-12-26T22:03:02 | 184,347,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,504 | py | class Solution:
def cutOffTree(self, forest: List[List[int]]) -> int:
"""
        0. while there are trees to cut down:
        1.   walk to the coordinates of the next tree and cut it down - do BFS
        2.   if a tree is unreachable - return -1
"""
from collections import deque
from sortedcontainers import SortedDict
def go_to_tree(grid, i, j, tree) -> int:
queue = deque([(i, j, 0)]) # (i, j, steps)
visited = set()
while queue:
row, col, steps = queue.popleft()
if (row, col) == tree:
return steps
for r,c in [(1,0), (-1,0), (0,1), (0,-1)]:
new_row, new_col = row+r, col+c
if (
new_row < len(grid) and
new_col < len(grid[0]) and
new_row > -1 and
new_col > -1 and
(new_row, new_col) not in visited and
grid[new_row][new_col] != 0
):
if (new_row, new_col) == tree:
return steps+1
visited.add((new_row, new_col))
queue.append((new_row, new_col, steps+1))
return None
trees = SortedDict()
for i in range(len(forest)):
for j in range(len(forest[i])):
if forest[i][j] > 1:
trees[forest[i][j]] = (i,j)
total_steps = 0
i = j = 0
for h,tree in trees.items():
steps = go_to_tree(forest, i, j, tree)
if steps == None:
return -1
total_steps += steps
i,j = tree
return total_steps
9514286077c40b1598552cdc24d2d2d31844d5fe | 34ed92a9593746ccbcb1a02630be1370e8524f98 | /lib/pints/pints/tests/test_mcmc_relativistic.py | 1fb0e2abb531defd9c0d3b86dccf543b66d3e108 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
]
| permissive | HOLL95/Cytochrome_SV | 87b7a680ed59681230f79e1de617621680ea0fa0 | d02b3469f3ee5a4c85d756053bc87651093abea1 | refs/heads/master | 2022-08-01T05:58:16.161510 | 2021-02-01T16:09:31 | 2021-02-01T16:09:31 | 249,424,867 | 0 | 0 | null | 2022-06-22T04:09:11 | 2020-03-23T12:29:29 | Jupyter Notebook | UTF-8 | Python | false | false | 6,142 | py | #!/usr/bin/env python3
#
# Tests the basic methods of the Relativistic MCMC routine.
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
import unittest
import numpy as np
import pints
import pints.toy
from shared import StreamCapture
class TestRelativisticMCMC(unittest.TestCase):
"""
Tests the basic methods of the Relativistic MCMC routine.
"""
def test_method(self):
# Create log pdf
log_pdf = pints.toy.GaussianLogPDF([5, 5], [[4, 1], [1, 3]])
# Create mcmc
x0 = np.array([2, 2])
sigma = [[3, 0], [0, 3]]
mcmc = pints.RelativisticMCMC(x0, sigma)
# This method needs sensitivities
self.assertTrue(mcmc.needs_sensitivities())
# Set number of leapfrog steps
ifrog = 10
mcmc.set_leapfrog_steps(ifrog)
# Perform short run
chain = []
for i in range(100 * ifrog):
x = mcmc.ask()
fx, gr = log_pdf.evaluateS1(x)
sample = mcmc.tell((fx, gr))
if i >= 50 * ifrog and sample is not None:
chain.append(sample)
if np.all(sample == x):
self.assertEqual(mcmc.current_log_pdf(), fx)
chain = np.array(chain)
self.assertEqual(chain.shape[0], 50)
self.assertEqual(chain.shape[1], len(x0))
def test_logging(self):
"""
Test logging includes name and custom fields.
"""
log_pdf = pints.toy.GaussianLogPDF([5, 5], [[4, 1], [1, 3]])
x0 = [np.array([2, 2]), np.array([8, 8])]
mcmc = pints.MCMCController(
log_pdf, 2, x0, method=pints.RelativisticMCMC)
mcmc.set_max_iterations(5)
with StreamCapture() as c:
mcmc.run()
text = c.text()
self.assertIn('Relativistic MCMC', text)
self.assertIn(' Accept.', text)
def test_flow(self):
log_pdf = pints.toy.GaussianLogPDF([5, 5], [[4, 1], [1, 3]])
x0 = np.array([2, 2])
# Test initial proposal is first point
mcmc = pints.RelativisticMCMC(x0)
self.assertTrue(np.all(mcmc.ask() == mcmc._x0))
# Repeated asks
self.assertRaises(RuntimeError, mcmc.ask)
# Tell without ask
mcmc = pints.RelativisticMCMC(x0)
self.assertRaises(RuntimeError, mcmc.tell, 0)
# Repeated tells should fail
x = mcmc.ask()
mcmc.tell(log_pdf.evaluateS1(x))
self.assertRaises(RuntimeError, mcmc.tell, log_pdf.evaluateS1(x))
# Bad starting point
mcmc = pints.RelativisticMCMC(x0)
mcmc.ask()
self.assertRaises(
ValueError, mcmc.tell, (float('-inf'), np.array([1, 1])))
def test_kinetic_energy(self):
"""
Tests kinetic energy values and derivatives
"""
x0 = np.array([2, 2])
model = pints.RelativisticMCMC(x0)
model.ask()
# kinetic energy
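        # Expected value mirrors the sampler's relativistic kinetic energy as
        # implemented: K(p) = mc^2 * sqrt(||p||^2 / (mc^2) + 1); the test's
        # mc2 = 100.0 is assumed to match the sampler's default mass and
        # speed-of-light settings.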
mc2 = 100.0
momentum = [1.0, 2.0]
squared = np.sum(np.array(momentum)**2)
ke1 = mc2 * (squared / mc2 + 1.0)**0.5
ke2 = model._kinetic_energy(momentum)
self.assertEqual(ke1, ke2)
c = 1.0
m = 1.0
mc2 = m * c**2
squared = np.sum(np.array(momentum)**2)
ke1 = mc2 * (squared / mc2 + 1.0)**0.5
model = pints.RelativisticMCMC(x0)
model.set_speed_of_light(c)
model.ask()
ke2 = model._kinetic_energy(momentum)
self.assertEqual(ke1, ke2)
def test_set_hyper_parameters(self):
"""
Tests the parameter interface for this sampler.
"""
x0 = np.array([2, 2])
mcmc = pints.RelativisticMCMC(x0)
# Test leapfrog parameters
n = mcmc.leapfrog_steps()
d = mcmc.leapfrog_step_size()
self.assertIsInstance(n, int)
self.assertTrue(len(d) == mcmc._n_parameters)
mcmc.set_leapfrog_steps(n + 1)
self.assertEqual(mcmc.leapfrog_steps(), n + 1)
self.assertRaises(ValueError, mcmc.set_leapfrog_steps, 0)
mcmc.set_leapfrog_step_size(0.5)
self.assertEqual(mcmc.leapfrog_step_size()[0], 0.5)
self.assertRaises(ValueError, mcmc.set_leapfrog_step_size, -1)
self.assertEqual(mcmc.n_hyper_parameters(), 4)
mcmc.set_hyper_parameters([n + 2, 2, 0.4, 2.3])
self.assertEqual(mcmc.leapfrog_steps(), n + 2)
self.assertEqual(mcmc.leapfrog_step_size()[0], 2)
self.assertEqual(mcmc.mass(), 0.4)
self.assertEqual(mcmc.speed_of_light(), 2.3)
mcmc.set_epsilon(0.4)
self.assertEqual(mcmc.epsilon(), 0.4)
self.assertRaises(ValueError, mcmc.set_epsilon, -0.1)
mcmc.set_leapfrog_step_size(1)
self.assertEqual(len(mcmc.scaled_epsilon()), 2)
self.assertEqual(mcmc.scaled_epsilon()[0], 0.4)
self.assertEqual(len(mcmc.divergent_iterations()), 0)
self.assertRaises(ValueError, mcmc.set_leapfrog_step_size, [1, 2, 3])
mcmc.set_leapfrog_step_size([1.5, 3])
self.assertEqual(mcmc.leapfrog_step_size()[0], 1.5)
self.assertEqual(mcmc.leapfrog_step_size()[1], 3)
c = 3.5
mcmc.set_speed_of_light(c)
self.assertEqual(mcmc.speed_of_light(), c)
self.assertRaises(ValueError, mcmc.set_speed_of_light, -0.1)
m = 2.9
mcmc.set_mass(m)
self.assertEqual(mcmc.mass(), m)
self.assertRaises(ValueError, mcmc.set_mass, -1.8)
self.assertRaises(ValueError, mcmc.set_mass, [1, 3])
def test_other_setters(self):
# Tests other setters and getters.
x0 = np.array([2, 2])
mcmc = pints.RelativisticMCMC(x0)
self.assertRaises(ValueError, mcmc.set_hamiltonian_threshold, -0.3)
threshold1 = mcmc.hamiltonian_threshold()
self.assertEqual(threshold1, 10**3)
threshold2 = 10
mcmc.set_hamiltonian_threshold(threshold2)
self.assertEqual(mcmc.hamiltonian_threshold(), threshold2)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
9bff11e3a8633333af71b3cc5a2bc2241e5e3ec0 | 68c182cbb167ec6870ec1a301958e71ce8f9bcbb | /test/functional/p2p_permissions.py | d59b0acadab68cf792b145888a6743bf9ce0b48e | [
"MIT"
]
| permissive | megamcloud/umkoin | de10e9bbe0afbdc7210db56e41f823a0805283be | 3e0d7a48f459ff09f0b9e02c3ed30563670009c8 | refs/heads/master | 2022-05-30T00:18:10.962521 | 2020-04-26T08:21:01 | 2020-04-26T08:21:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,694 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p permission message.
Test that permissions are correctly calculated and applied
"""
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.messages import (
CTransaction,
CTxInWitness,
FromHex,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
OP_TRUE,
)
from test_framework.test_node import ErrorMatch
from test_framework.test_framework import UmkoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
p2p_port,
wait_until,
)
class P2PPermissionsTests(UmkoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def run_test(self):
self.check_tx_relay()
self.checkpermission(
# default permissions (no specific permissions)
["-whitelist=127.0.0.1"],
["relay", "noban", "mempool"],
True)
self.checkpermission(
# relay permission removed (no specific permissions)
["-whitelist=127.0.0.1", "-whitelistrelay=0"],
["noban", "mempool"],
True)
self.checkpermission(
# forcerelay and relay permission added
# Legacy parameter interaction which set whitelistrelay to true
# if whitelistforcerelay is true
["-whitelist=127.0.0.1", "-whitelistforcerelay"],
["forcerelay", "relay", "noban", "mempool"],
True)
# Let's make sure permissions are merged correctly
# For this, we need to use whitebind instead of bind
# by modifying the configuration file.
ip_port = "127.0.0.1:{}".format(p2p_port(1))
self.replaceinconfig(1, "bind=127.0.0.1", "whitebind=bloomfilter,forcerelay@" + ip_port)
self.checkpermission(
            ["-whitelist=noban@127.0.0.1"],
# Check parameter interaction forcerelay should activate relay
["noban", "bloomfilter", "forcerelay", "relay"],
False)
self.replaceinconfig(1, "whitebind=bloomfilter,forcerelay@" + ip_port, "bind=127.0.0.1")
self.checkpermission(
# legacy whitelistrelay should be ignored
["-whitelist=noban,[email protected]", "-whitelistrelay"],
["noban", "mempool"],
False)
self.checkpermission(
# legacy whitelistforcerelay should be ignored
["-whitelist=noban,[email protected]", "-whitelistforcerelay"],
["noban", "mempool"],
False)
self.checkpermission(
# missing mempool permission to be considered legacy whitelisted
            ["-whitelist=noban@127.0.0.1"],
["noban"],
False)
self.checkpermission(
# all permission added
            ["-whitelist=all@127.0.0.1"],
["forcerelay", "noban", "mempool", "bloomfilter", "relay"],
False)
self.stop_node(1)
        self.nodes[1].assert_start_raises_init_error(["-whitelist=oopsie@127.0.0.1"], "Invalid P2P permission", match=ErrorMatch.PARTIAL_REGEX)
        self.nodes[1].assert_start_raises_init_error(["-whitelist=noban@127.0.0.1:230"], "Invalid netmask specified in", match=ErrorMatch.PARTIAL_REGEX)
        self.nodes[1].assert_start_raises_init_error(["-whitebind=noban@127.0.0.1/10"], "Cannot resolve -whitebind address", match=ErrorMatch.PARTIAL_REGEX)
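        # For reference, permission flags follow the pattern
        # -whitelist=<perm1>,<perm2>,...@<netmask> (or -whitebind=<perms>@<addr:port>);
        # e.g. "-whitelist=noban,mempool@127.0.0.1" grants the noban and mempool
        # permissions to peers connecting from localhost.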
def check_tx_relay(self):
block_op_true = self.nodes[0].getblock(self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_P2WSH_OP_TRUE)[0])
self.sync_all()
self.log.debug("Create a connection from a whitelisted wallet that rebroadcasts raw txs")
# A python mininode is needed to send the raw transaction directly. If a full node was used, it could only
# rebroadcast via the inv-getdata mechanism. However, even for whitelisted connections, a full node would
# currently not request a txid that is already in the mempool.
        self.restart_node(1, extra_args=["-whitelist=noban@127.0.0.1"])
p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection(P2PDataStore())
self.log.debug("Send a tx from the wallet initially")
tx = FromHex(
CTransaction(),
self.nodes[0].createrawtransaction(
inputs=[{
'txid': block_op_true['tx'][0],
'vout': 0,
}], outputs=[{
ADDRESS_BCRT1_P2WSH_OP_TRUE: 5,
}]),
)
tx.wit.vtxinwit = [CTxInWitness()]
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
txid = tx.rehash()
self.log.debug("Wait until tx is in node[1]'s mempool")
p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
self.log.debug("Check that node[1] will send the tx to node[0] even though it is already in the mempool")
connect_nodes(self.nodes[1], 0)
with self.nodes[1].assert_debug_log(["Force relaying tx {} from whitelisted peer=0".format(txid)]):
p2p_rebroadcast_wallet.send_txs_and_test([tx], self.nodes[1])
wait_until(lambda: txid in self.nodes[0].getrawmempool())
self.log.debug("Check that node[1] will not send an invalid tx to node[0]")
tx.vout[0].nValue += 1
txid = tx.rehash()
p2p_rebroadcast_wallet.send_txs_and_test(
[tx],
self.nodes[1],
success=False,
reject_reason='Not relaying non-mempool transaction {} from whitelisted peer=0'.format(txid),
)
def checkpermission(self, args, expectedPermissions, whitelisted):
self.restart_node(1, args)
connect_nodes(self.nodes[0], 1)
peerinfo = self.nodes[1].getpeerinfo()[0]
assert_equal(peerinfo['whitelisted'], whitelisted)
assert_equal(len(expectedPermissions), len(peerinfo['permissions']))
for p in expectedPermissions:
            if p not in peerinfo['permissions']:
                raise AssertionError("Expected permission %r is not granted." % p)
def replaceinconfig(self, nodeid, old, new):
with open(self.nodes[nodeid].umkoinconf, encoding="utf8") as f:
newText = f.read().replace(old, new)
with open(self.nodes[nodeid].umkoinconf, 'w', encoding="utf8") as f:
f.write(newText)
if __name__ == '__main__':
P2PPermissionsTests().main()
| [
"[email protected]"
]
| |
f7fcb553c02ffff0e4816ffbb847e1c926470726 | b55f70755712b26688b80a8ba3806a4124fbcd11 | /BinaryTree/lowest_common_ancestor.py | c5fac7a034bae171afb4a6a2bb03b6ce00e81aa2 | []
| no_license | Shanshan-IC/Algorithm_Python | a44703a0f33370c47e3e55af70aadeae08d5a1a5 | ace23976d2f1f51141498c4c4ea6bca0039b233f | refs/heads/master | 2021-09-08T07:16:59.576674 | 2018-03-08T09:24:01 | 2018-03-08T09:24:01 | 114,254,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | '''
If both nodes are in the left subtree, the LCA is on the left.
If both nodes are in the right subtree, the LCA is on the right.
If there is one on each side, the current root node is the LCA.
'''
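# A minimal usage sketch (assuming the TreeNode class described below):
#     root = TreeNode(3); root.left = TreeNode(5); root.right = TreeNode(1)
#     Solution().lowestCommonAncestor(root, root.left, root.right)  # -> root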
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param: root: The root of the binary search tree.
@param: A: A TreeNode in a Binary.
@param: B: A TreeNode in a Binary.
@return: Return the least common ancestor(LCA) of the two nodes.
"""
def lowestCommonAncestor(self, root, A, B):
if not root or root is A or root is B:
return root
left = self.lowestCommonAncestor(root.left, A, B)
right = self.lowestCommonAncestor(root.right, A, B)
if left and right:
return root
if left:
return left
if right:
return right
return None | [
"[email protected]"
]
| |
b7787491c00166a9f9516646d4c2054fe8fe1245 | 557ca4eae50206ecb8b19639cab249cb2d376f30 | /Chapter12/Ex12_3.py | 96ad465cf0df4d21b32435eb806eb5946bf1eb75 | []
| no_license | philipdongfei/Think-python-2nd | 781846f455155245e7e82900ea002f1cf490c43f | 56e2355b8d5b34ffcee61b38fbfd200fd6d4ffaf | refs/heads/master | 2021-01-09T19:57:49.658680 | 2020-03-13T06:32:11 | 2020-03-13T06:32:11 | 242,441,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | from Ex12_2 import *
def metathesis_pairs(d):
    """Print all pairs of anagrams in the same set that differ by swapping two letters."""
for anagrams in d.values():
for word1 in anagrams:
for word2 in anagrams:
if word1 < word2 and word_distance(word1, word2) == 2:
print(word1, word2)
def word_distance(word1, word2):
    """Return the number of positions at which two equal-length words differ."""
assert len(word1) == len(word2)
count = 0
for c1, c2 in zip(word1, word2):
if c1 != c2:
count += 1
return count
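# Example: 'converse' and 'conserve' are anagrams whose word_distance is 2
# (only the 'v' and 's' trade places), so metathesis_pairs would print them.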
def main():
sets = all_anagrams('words.txt')
metathesis_pairs(sets)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
5d03c3f6f21cf2a5cf29fc8907a7adfcc620d57f | 2ad41c2a31618433568c86e63f68a3ef2918d55c | /tool/Modules/cfg_scripts.py | 25ca07351b013433ffe1409fb953f7919d31d99b | [
"MIT"
]
| permissive | Iemnur/megaman-zx-traducao-ptbr | 7cad0b7f7bcfd6692fe850f3c6c4e26ab2b90f63 | f2710a06052384cf93d423681e9875c6cd424f06 | refs/heads/master | 2021-12-14T20:13:48.206022 | 2020-05-26T01:53:10 | 2020-05-26T01:53:10 | 82,298,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | '''
Created on 05/03/2013
@author: diego.hahn
'''
import os.path
import sys
python_path = os.path.dirname( sys.executable )
packages_path = os.path.join( python_path , r"Lib\site-packages" )
scripts_path = os.path.dirname( os.path.abspath( __file__ ) )
libs = [r"" , r"rhCompression", r"rhImages", r"pytable"]
with open( os.path.join( packages_path , "mylibs.pth" ), "w" ) as pth:
for lib in libs:
lib_path = os.path.join( scripts_path, lib )
if os.path.isdir( lib_path ):
print( ">>> Adding %s to pth file" % lib )
pth.write( "%s\n" % lib_path )
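# Note: at interpreter startup the site module scans *.pth files in
# site-packages and appends each listed directory to sys.path, which is what
# makes the libraries written into mylibs.pth importable.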
| [
"[email protected]"
]
| |
f50377730a35ff7aa5b58fa06bcf47fcd71189ea | 033da72a51c76e5510a06be93229a547a538cf28 | /Data Engineer with Python Track/20. Introduction to Spark SQL in Python/Chapter/01. Pyspark SQL/02-Determine the column names of a table.py | a60646c8daa0abfe3fe390558fd3a17b52d8658c | []
| no_license | ikhwan1366/Datacamp | d5dcd40c1bfeb04248977014260936b1fb1d3065 | 7738614eaebec446842d89177ae2bc30ab0f2551 | refs/heads/master | 2023-03-06T13:41:06.522721 | 2021-02-17T22:41:54 | 2021-02-17T22:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | '''
Determine the column names of a table
The video lesson showed how to run an SQL query. It also showed how to inspect the column names of a Spark table using SQL. This is important to know because in practice relational tables are typically provided without additional documentation giving the table schema.
Don't hesitate to refer to the slides available at the right of the console if you forget how something was done in the video.
Instructions
100 XP
- Use a DESCRIBE query to determine the names and types of the columns in the table schedule.
'''
# Inspect the columns in the table schedule
spark.sql("DESCRIBE schedule").show()
| [
"[email protected]"
]
| |
0aac049c8263f7e956cea14027ed8e142b6344e5 | 0931696940fc79c4562c63db72c6cabfcb20884d | /Exercises/Regular_Expresions/furniture.py | 8a02f7b386384bfbe0d6b9fe2cf832c3b0cd53d3 | []
| no_license | ivklisurova/SoftUni_Fundamentals_module | f847b9de9955c8c5bcc057bb38d57162addd6ad8 | 69242f94977c72005f04da78243a5113e79d6c33 | refs/heads/master | 2021-12-01T01:56:22.067928 | 2021-11-08T17:07:31 | 2021-11-08T17:07:31 | 253,281,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | import re
furniture = []
total_money = 0
while True:
order = input()
if order == 'Purchase':
break
    pattern = r'>{2}([a-zA-Z]+)<{2}(\d+[.]\d+|\d+)!(\d+)'
matches = re.findall(pattern, order)
for i in matches:
if len(i) == 0:
break
furniture.append(i[0])
total_money += float(i[1]) * float(i[2])
print('Bought furniture:')
[print(x) for x in furniture]
print(f'Total money spend: {total_money:.2f}')
| [
"[email protected]"
]
| |
61f623bb2311199c6f90a06eafc6177b8604e7b1 | a38856315e9a35f5eb0905a10eae6840741c468a | /stix_edh/cyber_profile.py | a1b921a93c6da80b797c6892d9627ef92aadfe44 | [
"BSD-3-Clause"
]
| permissive | emmanvg/stix-edh | bbf4cebb908ad8a7c7dd8728ebfc67284f17365d | b426f9785339ab741bb9fb21d356b36193791afc | refs/heads/master | 2020-04-11T23:35:44.934139 | 2018-08-01T16:16:15 | 2018-08-01T16:16:15 | 162,172,740 | 0 | 0 | NOASSERTION | 2018-12-17T18:22:40 | 2018-12-17T18:22:39 | null | UTF-8 | Python | false | false | 4,224 | py | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# python-stix
import stix
from mixbox import fields
# internal bindings
from stix_edh.bindings import cyber_profile
class AccessPrivilege(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.AccessPrivilegeType
_namespace = 'urn:edm:edh:cyber:v3'
privilege_action = fields.TypedField("privilegeAction", type_="stix_edh.common.NMTokens", key_name="privilege_action")
privilege_scope = fields.TypedField("privilegeScope", type_="stix_edh.common.NMTokens", multiple=True, key_name="privilege_scope")
rule_effect = fields.TypedField("ruleEffect", type_="stix_edh.common.NMTokens", key_name="rule_effect")
def __init__(self):
super(AccessPrivilege, self).__init__()
def add_privilege_scope(self, value):
from stix_edh import common
if not value:
return
nmtokens = common.NMTokens(value)
self.privilege_scope.append(nmtokens)
class ResourceDisposition(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.ResourceDispositionType
_namespace = 'urn:edm:edh:cyber:v3'
disposition_date = fields.DateField("dispositionDate", key_name="disposition_date")
disposition_process = fields.TypedField("dispositionProcess", type_="stix_edh.common.NMTokens", key_name="disposition_process")
def __init__(self):
super(ResourceDisposition, self).__init__()
class OriginalClassification(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.OriginalClassificationType
_namespace = 'urn:edm:edh:cyber:v3'
classified_by = fields.TypedField("classifiedBy", type_="stix_edh.common.NMTokens", key_name="classified_by")
classified_on = fields.DateField("classifiedOn", key_name="classified_on")
classification_reason = fields.TypedField("classificationReason", type_="stix_edh.common.NMTokens", key_name="classification_reason")
compilation_reason = fields.TypedField("compilationReason", type_="stix_edh.common.NMTokens", key_name="compilation_reason")
def __init__(self):
super(OriginalClassification, self).__init__()
class DerivativeClassification(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.DerivativeClassificationType
_namespace = 'urn:edm:edh:cyber:v3'
classified_by = fields.TypedField("classifiedBy", type_="stix_edh.common.NMTokens", key_name="classified_by")
classified_on = fields.DateField("classifiedOn", key_name="classified_on")
derived_from = fields.TypedField("derivedFrom", type_="stix_edh.common.NMTokens", key_name="derived_from")
def __init__(self):
super(DerivativeClassification, self).__init__()
class FurtherSharing(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.FurtherSharingType
_namespace = "urn:edm:edh:cyber:v3"
rule_effect = fields.TypedField("ruleEffect", key_name="rule_effect")
sharing_scope = fields.TypedField("sharingScope", type_="stix_edh.common.NMTokens", key_name="sharing_scope")
def __init__(self):
super(FurtherSharing, self).__init__()
class Declassification(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.DeclassificationType
_namespace = 'urn:edm:edh:cyber:v3'
declass_exemption = fields.TypedField("declassExemption", type_="stix_edh.common.NMTokens", key_name="declass_exemption")
declass_period = fields.IntegerField("declassPeriod", key_name="declass_period")
declass_date = fields.DateField("declassDate", key_name="declass_date")
declass_event = fields.TypedField("declassEvent", type_="stix_edh.common.NMTokens", key_name="declass_event")
def __init__(self):
super(Declassification, self).__init__()
class PublicRelease(stix.Entity):
_binding = cyber_profile
_binding_class = _binding.PublicReleaseType
_namespace = 'urn:edm:edh:cyber:v3'
released_by = fields.TypedField("releasedBy", type_="stix_edh.common.NMTokens", key_name="released_by")
released_on = fields.DateField("releasedOn", key_name="released_on")
def __init__(self):
super(PublicRelease, self).__init__()
| [
"[email protected]"
]
| |
396d3be1f2a5e259471ee0be5f9b6850177f96e3 | b648a0ff402d23a6432643879b0b81ebe0bc9685 | /benchmark/tslintbasic/thrift/run.py | 6b63c1b1ffc1c7036f1224f0530a63f3d6a08ca5 | [
"Apache-2.0"
]
| permissive | jviotti/binary-json-size-benchmark | 4712faca2724d47d23efef241983ce875dc71cee | 165b577884ef366348bf48042fddf54aacfe647a | refs/heads/main | 2023-04-18T01:40:26.141995 | 2022-12-19T13:25:35 | 2022-12-19T13:25:35 | 337,583,132 | 21 | 1 | Apache-2.0 | 2022-12-17T21:53:56 | 2021-02-10T01:18:05 | C++ | UTF-8 | Python | false | false | 581 | py | def encode(json, schema):
payload = schema.Main()
payload.rules = schema.Rules()
payload.rules.orderedImports = schema.OrderedImports()
payload.rules.orderedImports.options = schema.Options()
payload.rules.orderedImports.options.groupedImports = \
json['rules']['ordered-imports']['options']['grouped-imports']
return payload
def decode(payload):
return {
'rules': {
'ordered-imports': {
'options': {
'grouped-imports': payload.rules.orderedImports.options.groupedImports
}
}
}
}
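# A minimal round-trip sketch (hypothetical: assumes the thrift-generated
# module is importable as `schema`):
#     doc = {'rules': {'ordered-imports': {'options': {'grouped-imports': True}}}}
#     assert decode(encode(doc, schema)) == doc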
| [
"[email protected]"
]
| |
62ce19f3d0f04ce110c1dd241445d520cdfc6c0c | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/containerservice/v20210901/private_endpoint_connection.py | 2c0b95aa60f3e4a88ea7d42d3e4f40430d813fb8 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,975 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
def __init__(__self__, *,
private_link_service_connection_state: pulumi.Input['PrivateLinkServiceConnectionStateArgs'],
resource_group_name: pulumi.Input[str],
resource_name: pulumi.Input[str],
private_endpoint: Optional[pulumi.Input['PrivateEndpointArgs']] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input['PrivateLinkServiceConnectionStateArgs'] private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name: The name of the managed cluster resource.
:param pulumi.Input['PrivateEndpointArgs'] private_endpoint: The resource of private endpoint.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
"""
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "resource_name", resource_name)
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Input['PrivateLinkServiceConnectionStateArgs']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: pulumi.Input['PrivateLinkServiceConnectionStateArgs']):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> pulumi.Input[str]:
"""
The name of the managed cluster resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointArgs']]:
"""
The resource of private endpoint.
"""
return pulumi.get(self, "private_endpoint")
@private_endpoint.setter
def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointArgs']]):
pulumi.set(self, "private_endpoint", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
class PrivateEndpointConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['PrivateEndpointArgs']] private_endpoint: The resource of private endpoint.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']] private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name_: The name of the managed cluster resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["private_endpoint"] = private_endpoint
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
if private_link_service_connection_state is None and not opts.urn:
raise TypeError("Missing required property 'private_link_service_connection_state'")
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerservice/v20210901:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20200601:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200601:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20200701:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200701:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20200901:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200901:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20201101:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20201101:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20201201:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20201201:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20210201:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210201:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20210301:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210301:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20210501:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210501:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20210701:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210701:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:containerservice/v20210801:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210801:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:containerservice/v20210901:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]:
"""
The resource of private endpoint.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output['outputs.PrivateLinkServiceConnectionStateResponse']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The current provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The resource type.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
]
| |
dae76b5a56cfbe512236e47e5b204fddff746a73 | 4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97 | /sols/gray_code.py | 490b72b7d1576b6786b190e757dfced57e83460c | []
| no_license | hayeonk/leetcode | 5136824838eb17ed2e4b7004301ba5bb1037082f | 6485f8f9b5aa198e96fbb800b058d9283a28e4e2 | refs/heads/master | 2020-04-28T03:37:16.800519 | 2019-06-01T14:34:45 | 2019-06-01T14:34:45 | 174,943,756 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | class Solution(object):
def grayCode(self, n):
def getCode(n):
if n == 0:
return ["0"]
rest = getCode(n-1)
reverse = reversed(rest)
ret = [x + "0" for x in rest] + [x + "1" for x in reverse]
return ret
ret = getCode(n)
ret = [int(x, 2) for x in ret]
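        # Worked example: n = 2 reflects the n = 1 list ["00", "01"] into
        # ["000", "010", "011", "001"], i.e. [0, 2, 3, 1], where consecutive
        # values differ in exactly one bit.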
return ret | [
"[email protected]"
]
| |
df0b59323ca9a839dcf6b4c11f1da303ae237fac | ecd2aa3d12a5375498c88cfaf540e6e601b613b3 | /Facebook/Pro105. Construct Binary Tree from Preorder and Inorder Traversal.py | a39da533bff18e1cca864459d11a600e0252ce83 | []
| no_license | YoyinZyc/Leetcode_Python | abd5d90f874af5cd05dbed87f76885a1ca480173 | 9eb44afa4233fdedc2e5c72be0fdf54b25d1c45c | refs/heads/master | 2021-09-05T17:08:31.937689 | 2018-01-29T21:57:44 | 2018-01-29T21:57:44 | 103,157,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
if not preorder:
return None
root = TreeNode(preorder[0])
index = inorder.index(preorder[0])
root.left = self.buildTree(preorder[1:index + 1], inorder[:index])
root.right = self.buildTree(preorder[index + 1:], inorder[index + 1:])
return root
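        # Example: preorder = [3, 9, 20, 15, 7], inorder = [9, 3, 15, 20, 7]
        # -> root 3; inorder.index(3) == 1 splits the left subtree [9] from
        # the right subtree [15, 20, 7].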
| [
"[email protected]"
]
| |
184e8e9933bf4850ac425bc2697124c4c5776379 | 03c9cd5bd96874d6117fb17c37ac4d7450c15933 | /Opencv-Python/wechat/autojump.py | 540e6d96cb2fd16283ba2e25403877731481716d | []
| no_license | atiger808/opencv-tutorial | 603de35e97679d6beae104298ae355edfdd9036a | 2ea9bb3818284fb75f85697e36fde37b6479d1c6 | refs/heads/master | 2020-05-29T23:16:30.462022 | 2019-11-05T10:08:20 | 2019-11-05T10:08:20 | 189,425,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,751 | py | # _*_ coding: utf-8 _*_
# @Time : 2019/4/3 16:45
# @Author : Ole211
# @Site :
# @File : autojump.py
# @Software : PyCharm
import cv2
import numpy as np
import os
import time
import subprocess
import math
# os.chdir('d:\\img\\')
press_coefficient = 1.35
def get_center_coord(img):
region_lower = int(img.shape[0]*0.3)
region_upper = int(img.shape[0]*0.7)
region = img[region_lower:region_upper]
hsv_img = cv2.cvtColor(region, cv2.COLOR_BGR2HSV)
color_lower = np.array([105, 25, 45])
color_upper = np.array([135, 125, 130])
color_mask = cv2.inRange(hsv_img, color_lower, color_upper)
_, contours, hierarchy = cv2.findContours(color_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(contours)>0:
max_contour = max(contours, key=cv2.contourArea)
rect = cv2.boundingRect(max_contour)
x, y, w, h = rect
cv2.rectangle(region, (x, y), (x+w, y+h), (0, 255, 0), 3)
center_coord = (x+int(w/2), y+h-20)
cv2.circle(region, center_coord, 8, (0, 0, 255), -1)
cv2.drawContours(region, max_contour, -1, (0, 0, 255), 3)
# region = cv2.resize(region, (400, 800))
# cv2.imshow('color_mask', color_mask)
# cv2.imshow('region', region)
# cv2.waitKey()
# cv2.destroyAllWindows()
return hsv_img, color_mask, center_coord
def get_box_center(img):
region_lower = int(img.shape[0] * 0.3)
region_upper = int(img.shape[0] * 0.7)
region = img[region_lower:region_upper]
gray_img = cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
# cv2.imshow('gray', gray_img)
canny_img = cv2.Canny(gray_img, 75, 150)
y_top = np.nonzero([max(row) for row in canny_img[:400]])[0][0]
x_top = int(np.mean(np.nonzero(canny_img[y_top])))
y_bottom = y_top + 200
# for row in range(y_bottom, 768):
# if canny_img[row, x_top] != 0:
# break
box_center_coord = (x_top, (y_top + y_bottom)//2)
cv2.circle(region, box_center_coord, 8, (0, 0, 255), -1)
return canny_img, region, box_center_coord
def pullScreenshot():
os.system('adb shell screencap -p /sdcard/autojump.png')
os.system('adb pull /sdcard/autojump.png .')
def jump(distance):
    press_time = distance * press_coefficient
    press_time = max(int(press_time), 200)  # floor of 200 ms so very short jumps still register, matching the on-screen 'press time' text
cmd = 'adb shell input swipe 320 410 320 410 ' + str(press_time)
print(cmd)
os.system(cmd)
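# Calibration sketch: with press_coefficient = 1.35, a measured gap of 300 px
# maps to a press of about 405 ms, i.e. the command becomes
# 'adb shell input swipe 320 410 320 410 405'.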
def beginJump():
while True:
pullScreenshot()
time.sleep(2)
img = cv2.imread('autojump.png')
        hsv_img, color_mask, center_coord = get_center_coord(img)  # unpack in the function's actual return order
canny_img, region, box_center_coord = get_box_center(img)
distance = math.sqrt((box_center_coord[0] - center_coord[0]) ** 2 + (box_center_coord[1] - center_coord[1]) ** 2)
w, h = region.shape[:2]
text = 'press time: %.3f ms' %(max(1.35*distance, 200))
cv2.putText(region, text, (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
text0 = 'distance: %.3f pixels' % (distance)
cv2.putText(region, text0, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
cv2.line(region, center_coord, box_center_coord, (0, 0, 255), 3)
        print('piece coordinates:', center_coord)
        print('box coordinates:', box_center_coord)
        print('distance:', distance)
cv2.imwrite('region.png', region)
# cv2.imshow('color_mask', color_mask)
# cv2.imshow('hsv_img', hsv_img)
# cv2.imshow('canny_img', canny_img)
# cv2.imshow('region', region)
# cv2.waitKey()
# cv2.destroyAllWindows()
jump(distance)
time.sleep(0.2)
if __name__ == '__main__':
beginJump()
# pullScreenshot()
# if __name__ == '__main__':
# get_center_coord() | [
"[email protected]"
]
| |
7fb1bbcd1838101abf13096c7d71cc1156bf7060 | e3f2a0acc79f1891b93553ee6a95396edeb6fd60 | /setup.py | c9c0390cc3d9d040b7b7fc777d3544fa322b0332 | [
"Apache-2.0"
]
| permissive | imaginal/openprocurement.search_plugins | 5bd23b7e17365abba9f7f33da7a5c3263c440453 | a32a5e1b54c9b02fe24fae93e2e78632f77be82a | refs/heads/master | 2020-04-11T23:30:41.704868 | 2018-12-17T18:31:07 | 2018-12-17T18:31:07 | 162,170,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | from setuptools import setup, find_packages
setup(
name='openprocurement.search_plugins',
version='0.1', # NOQA
description="Plugin for OpenProcurement Search",
long_description=open("README.md").read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
],
keywords='prozorro search plugin',
author='Volodymyr Flonts',
author_email='[email protected]',
license='Apache License 2.0',
url='https://github.com/imaginal/openprocurement.search_plugins',
namespace_packages=['openprocurement'],
packages=find_packages(),
package_data={'': ['*.md', '*.txt']},
include_package_data=True,
zip_safe=False,
install_requires=[
'openprocurement.search'
],
entry_points={
}
)
| [
"[email protected]"
]
| |
8f3c48e2d207660e14f0af89a3b6c1e6fa76b6dc | a6d9710e312caf4ae96b43f0290f9135bffdf8e0 | /Unit 45/45.4.1_4/calcpkg/geometry/vector.py | 6c6892b72e03d1f068f29d7872f2b5b90fd9723f | []
| no_license | gilbutITbook/006936 | 5b245cf1c6d4580eb07344bdaa254e4615109697 | b5cd6a57cdb5bb3c2ad5e3c9471a79b3fa82bc5d | refs/heads/master | 2023-01-29T07:35:46.360283 | 2023-01-18T06:20:49 | 2023-01-18T06:20:49 | 154,229,702 | 19 | 19 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | def product(a, b):
    # Filled-in placeholder (the book leaves this stub empty); this assumes an
    # element-wise product of two equal-length vectors.
    return [x * y for x, y in zip(a, b)]
def dot(a, b):
    # Filled-in placeholder (the stub was empty): the dot product of two vectors.
    return sum(x * y for x, y in zip(a, b)) | [
"user@localhost"
]
| user@localhost |
a9597573158cd06dab3973ee6e0512978f90458b | 229d71da5bb9fcdc34ab9c3a4ff9f75ca7ea7a19 | /bitly_app/urls.py | 81c9ebb0845bfee3a27ec09d812bed36ced4f7b6 | []
| no_license | Cunarefa/Convert | 8fd1ba5aae46915b1cde31a682d6ddd1b83bbeef | 93d366656c51affc2d17c685fcd6c93345180a49 | refs/heads/master | 2023-08-29T11:42:02.784981 | 2021-09-18T10:28:44 | 2021-09-18T10:28:44 | 407,829,331 | 0 | 0 | null | 2021-09-20T18:39:30 | 2021-09-18T10:30:09 | Python | UTF-8 | Python | false | false | 133 | py | from django.urls import path
from .views import ConvertView
urlpatterns = [
path('long', ConvertView.as_view(), name='long'),
] | [
"[email protected]"
]
| |
8fd914c3d126ba141d422b9c77ea8058d4bed139 | ffe606c85de9009d2c15356f82daa524c343b925 | /11.28.cleanup/s2rarecats_prep.py | 2f94abfa78be1fcaf7c2caf0858efeb8b810ee3c | []
| no_license | jbinkleyj/story_writer | d88ff7e3360fb8afd12445d1cb237788636b3083 | dc5106a35f5fbce72f8cf0801c0ad4cbc0c9f12f | refs/heads/master | 2020-07-09T15:54:02.492373 | 2017-12-16T07:26:59 | 2017-12-16T07:26:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from preprocess import *
from arguments import s2s2catsrare as parseParams
if __name__=="__main__":
args = parseParams()
DS = load_data(args)
torch.save(DS,args.datafile)
| [
"[email protected]"
]
| |
7cff65e982c2b32cab03f10e594c8aaa54be7c8a | 3529ecaa44a53172094ba13498097057c8972723 | /Questiondir/634.find-the-derangement-of-an-array/634.find-the-derangement-of-an-array_108022165.py | 4478461c59f6bddeee62b60691de01ec47975b2e | []
| no_license | cczhong11/Leetcode-contest-code-downloader | 0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6 | db64a67869aae4f0e55e78b65a7e04f5bc2e671c | refs/heads/master | 2021-09-07T15:36:38.892742 | 2018-02-25T04:15:17 | 2018-02-25T04:15:17 | 118,612,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | class Solution(object):
def findDerangement(self, n):
"""
:type n: int
:rtype: int
"""
if n == 1:
return 0
if n == 2:
return 1
OPT = [0] * (n+1)
OPT[0] = 1
OPT[1] = 0
OPT[2] = 1
OPT[3] = 2
for i in xrange(4, n+1):
OPT[i] = (OPT[i-1] * (i-1) + (i - 1) * OPT[i-2]) % 1000000007
return OPT[n]
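# The DP above implements the derangement recurrence
# D(n) = (n - 1) * (D(n - 1) + D(n - 2)) with D(2) = 1, D(3) = 2 (mod 1e9 + 7);
# e.g. D(4) = 3 * (2 + 1) = 9.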
| [
"[email protected]"
]
| |
2a5e7f5f38b91f42700324e71b0bfacd75169326 | 49c5c461c9805be68a318810e2ebb3381643ed59 | /linkedlist/remove-loop.py | c5cee91e4abc8333b6e0eceee46a40a1b1d26633 | []
| no_license | pkdism/must-do-coding | 10a3ef756d24fec451d8ed09bfc22335635a6b13 | ccff354eebd9b2434085528922eb3bc13715530e | refs/heads/master | 2020-09-11T00:54:08.078087 | 2019-11-27T11:15:53 | 2019-11-27T11:15:53 | 221,886,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | def removeTheLoop(head):
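    # Track visited nodes in a set; the first link that points back to an
    # already-seen node closes the loop, so cutting prev.next removes it.
    # This uses O(n) extra space (Floyd's cycle detection would be O(1)).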
h = set()
prev = None
it = head
while it is not None:
if it in h:
prev.next = None
break
h.add(it)
prev = it
it = it.next
| [
"[email protected]"
]
| |
4f05de413d1b0bc99e8d424f2680575e2250630a | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnoilier.py | e97e8bf74b3364175a1104be69b64e8c77151a39 | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 449 | py | ii = [('UnitAI.py', 4), ('LeakWTI3.py', 2), ('SeniNSP.py', 1), ('LyelCPG.py', 1), ('GilmCRS.py', 1), ('AinsWRR.py', 2), ('NewmJLP.py', 1), ('CoopJBT.py', 1), ('BachARE.py', 1), ('MereHHB3.py', 2), ('WilkJMC.py', 2), ('WestJIT.py', 1), ('LyttELD3.py', 1), ('SomeMMH.py', 1), ('HaliTBC.py', 1), ('AinsWRR2.py', 1), ('ClarGE3.py', 1), ('EvarJSP.py', 1), ('SadlMLP2.py', 1), ('BowrJMM2.py', 1), ('LyelCPG3.py', 1), ('BowrJMM3.py', 1), ('DibdTBR.py', 1)] | [
"[email protected]"
]
| |
7f9dd1f1f7a9135eb1ac3be360d855691bec346d | e45f1f9f3777d625c7da773f8e55589ded528711 | /pic_scrapy/pic/spiders/PhotoSpider.py | bfbdf2f3503102ed8587cee37b600d0b0a2d4301 | []
| no_license | 631068264/learn_crawler | da973d758001b52c61aa0bb4dfc78b59a88304e4 | 65ac2cef7b42b0dce5fb726daa819a6ebc8ffafa | refs/heads/master | 2022-11-15T13:06:55.765849 | 2022-10-31T07:07:31 | 2022-10-31T07:07:31 | 77,364,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author = 'wyx'
@time = 16/10/24 15:01
@annotation = ''
"""
from urlparse import urljoin
import scrapy
from pic.items import PhotoItem
class PhotoSpider(scrapy.Spider):
start_urls = ["https://www.610hu.com/htm/girl.htm"]
name = "photo"
domain = "https://www.610hu.com"
def parse(self, response):
tds = response.css("table td")
for td in tds:
href = urljoin(self.domain, td.xpath("a/@href").extract_first())
dic = td.css("img").xpath("@src").re_first(r".*/(.*?)\.gif")
yield scrapy.Request(href, callback=self.parse_page, meta={"photo_name": dic})
def parse_page(self, response):
page_num = response.css(".pages strong").xpath("text()").re_first(r"/(\d?)")
if page_num:
for page in page_num:
yield scrapy.Request(urljoin(response.url, ("%s.htm" % page)), callback=self.parse_charter,
meta={"photo_name": response.meta["photo_name"]})
def parse_charter(self, response):
lis = response.css("ul.movieList li")
links = []
for li in lis:
charter_link = urljoin(self.domain, li.xpath("a/@href").extract_first())
charter_name = li.css("h3").xpath("text()").extract_first()
charter_time = li.css("span").xpath("text()").extract_first()
links.append(scrapy.Request(charter_link,
callback=self.parse_detail,
meta={
"photo_name": response.meta["photo_name"],
"charter_link": charter_link,
"charter_name": charter_name,
"charter_time": charter_time,
}))
return links
def parse_detail(self, response):
imgs = response.css(".picContent img")
items = []
for img in imgs:
src = img.xpath("@src").extract_first()
item = PhotoItem({
"photo_name": response.meta["photo_name"],
"charter_link": response.meta["charter_link"],
"charter_name": response.meta["charter_name"],
"charter_time": response.meta["charter_time"],
"img_url": src,
})
items.append(item)
return items
| [
"[email protected]"
]
| |
5fbe59973b26282635a73228a47461382edec61a | 756cfeed032f7d206cdbe4409f2c052bf3e44fe1 | /Tkinter/Lecture_4.py | 0ce5a5c74a62fe228e5bc2c3eee2785792f67700 | []
| no_license | Pyk017/Python | 2e470a10f95aac8bb049edf13c6a825ceab68ea5 | 57fb48356921cc6766675620b32e4099ad371bc6 | refs/heads/master | 2021-07-05T00:16:22.365638 | 2020-12-27T08:25:10 | 2020-12-27T08:25:10 | 213,291,579 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from tkinter import *
root = Tk()
label1 = Label(root, text="Enter Your Name = ")
label1.grid(row=0, column=0)
# label1.pack()
entry = Entry(root, width=25, borderwidth=5)
entry.grid(row=0, column=1)
# entry.pack()
i = 0
def my_click():
global i
label = Label(root, text="Hello " + entry.get())
i += 2
label.grid(row=i, column=0)
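# Note: each click places a new greeting label two grid rows further down
# (rows 2, 4, 6, ...), below the entry row (0) and the button row (1).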
button = Button(root, text="Click Me!", command=my_click)
button.grid(row=1, column=0)
root.mainloop() | [
"[email protected]"
]
| |
837eb31dcfcbcdeb39de0bda4bcad87c80626d95 | 993060167ec652fb3cb0c6e0c1da12ba3759ae47 | /function_scheduling_distributed_framework/consumers/base_consumer.py | 60e5c103607c25e878b526726379c9c16f9ad371 | [
"Apache-2.0"
]
| permissive | Amua/distributed_framework | f0f85fbcf813991cbc819b28a3309efd8aec320f | 0be0b18f9c494788f8ae65c69863c003988c04ec | refs/heads/master | 2020-07-02T07:30:01.791185 | 2019-08-09T08:11:55 | 2019-08-09T08:11:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,055 | py | # -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/8/8 0008 13:11
import abc
import atexit
import copy
import time
import traceback
from collections import Callable
from functools import wraps
import threading
from threading import Lock, Thread
import eventlet
import gevent
from pymongo.errors import PyMongoError
from function_scheduling_distributed_framework.concurrent_pool.bounded_threadpoolexcutor import BoundedThreadPoolExecutor
from function_scheduling_distributed_framework.concurrent_pool.custom_evenlet_pool_executor import evenlet_timeout_deco, check_evenlet_monkey_patch, CustomEventletPoolExecutor
from function_scheduling_distributed_framework.concurrent_pool.custom_gevent_pool_executor import gevent_timeout_deco, GeventPoolExecutor, check_gevent_monkey_patch
from function_scheduling_distributed_framework.concurrent_pool.custom_threadpool_executor import CustomThreadPoolExecutor, check_not_monkey
from function_scheduling_distributed_framework.consumers.redis_filter import RedisFilter
from function_scheduling_distributed_framework.factories.publisher_factotry import get_publisher
from function_scheduling_distributed_framework.utils import LoggerLevelSetterMixin, LogManager, decorators, nb_print, LoggerMixin
from function_scheduling_distributed_framework.utils import time_util
def delete_keys_and_return_new_dict(dictx: dict, keys: list):
    dict_new = copy.copy(dictx)  # Mainly to drop top-level keys such as publish_time; a shallow copy is enough.
for dict_key in keys:
try:
dict_new.pop(dict_key)
except KeyError:
pass
return dict_new
class ExceptionForRetry(Exception):
"""为了重试的,抛出错误。只是定义了一个子类,用不用都可以"""
class ExceptionForRequeue(Exception):
"""框架检测到此错误,重新放回队列中"""
class AbstractConsumer(LoggerLevelSetterMixin, metaclass=abc.ABCMeta, ):
time_interval_for_check_do_not_run_time = 60
BROKER_KIND = None
@property
@decorators.synchronized
def publisher_of_same_queue(self):
if not self._publisher_of_same_queue:
self._publisher_of_same_queue = get_publisher(self._queue_name, broker_kind=self.BROKER_KIND)
if self._msg_expire_senconds:
self._publisher_of_same_queue.set_is_add_publish_time()
return self._publisher_of_same_queue
@classmethod
def join_shedual_task_thread(cls):
"""
:return:
"""
"""
def ff():
RabbitmqConsumer('queue_test', consuming_function=f3, threads_num=20, msg_schedule_time_intercal=2, log_level=10, logger_prefix='yy平台消费', is_consuming_function_use_multi_params=True).start_consuming_message()
RabbitmqConsumer('queue_test2', consuming_function=f4, threads_num=20, msg_schedule_time_intercal=4, log_level=10, logger_prefix='zz平台消费', is_consuming_function_use_multi_params=True).start_consuming_message()
            AbstractConsumer.join_shedual_task_thread()  # When starting consumers in multiple processes, this line is needed on Linux.
if __name__ == '__main__':
[Process(target=ff).start() for _ in range(4)]
"""
ConcurrentModeDispatcher.join()
def __init__(self, queue_name, *, consuming_function: Callable = None, function_timeout=0, threads_num=50, specify_threadpool=None, concurrent_mode=1,
max_retry_times=3, log_level=10, is_print_detail_exception=True, msg_schedule_time_intercal=0.0, msg_expire_senconds=0,
logger_prefix='', create_logger_file=True, do_task_filtering=False, is_consuming_function_use_multi_params=True,
is_do_not_run_by_specify_time_effect=False, do_not_run_by_specify_time=('10:00:00', '22:00:00'), schedule_tasks_on_main_thread=False):
"""
        :param queue_name:
        :param consuming_function: the function that processes each message.
        :param function_timeout: timeout in seconds; a function running longer than this is killed automatically. 0 means no limit.
        :param threads_num:
        :param specify_threadpool: use the given thread pool, so several consumers can share one pool; when not None, threads_num has no effect.
        :param concurrent_mode: concurrency mode; threads, gevent and eventlet are currently supported: 1 = thread, 2 = gevent, 3 = eventlet.
        :param max_retry_times:
        :param log_level:
        :param is_print_detail_exception:
        :param msg_schedule_time_intercal: interval between scheduling messages, used for rate limiting.
        :param logger_prefix: logger prefix, so different consumers produce distinguishable logs.
        :param create_logger_file: whether to also create a log file.
        :param do_task_filtering: whether to filter tasks based on the function's arguments.
        :param is_consuming_function_use_multi_params: whether the consuming function takes conventional multiple parameters, rather than a single body dict whose key/value pairs represent them.
        :param is_do_not_run_by_specify_time_effect: whether the do-not-run time window takes effect.
        :param do_not_run_by_specify_time: the time window during which not to run.
        :param schedule_tasks_on_main_thread: schedule tasks directly on the main thread, which means two consumers cannot be started on the current main thread at the same time.
"""
self._queue_name = queue_name
        self.queue_name = queue_name  # A public alias, so external access does not trigger a protected-member warning.
self.consuming_function = consuming_function
self._function_timeout = function_timeout
self._threads_num = threads_num
self._specify_threadpool = specify_threadpool
        self._threadpool = None  # One extra thread is added for the message-count and heartbeat check.
self._concurrent_mode = concurrent_mode
self._max_retry_times = max_retry_times
self._is_print_detail_exception = is_print_detail_exception
self._msg_schedule_time_intercal = msg_schedule_time_intercal if msg_schedule_time_intercal > 0.001 else 0.001
self._msg_expire_senconds = msg_expire_senconds
if self._concurrent_mode not in (1, 2, 3):
            raise ValueError('Invalid concurrent mode setting')
self._concurrent_mode_dispatcher = ConcurrentModeDispatcher(self)
self._logger_prefix = logger_prefix
self._log_level = log_level
if logger_prefix != '':
logger_prefix += '--'
logger_name = f'{logger_prefix}{self.__class__.__name__}--{self._concurrent_mode_dispatcher.concurrent_name}--{queue_name}'
# nb_print(logger_name)
self.logger = LogManager(logger_name).get_logger_and_add_handlers(log_level, log_filename=f'{logger_name}.log' if create_logger_file else None)
        self.logger.info(f'{self.__class__} was instantiated')
self._do_task_filtering = do_task_filtering
self._redis_filter_key_name = f'filter:{queue_name}'
self._redis_filter = RedisFilter(self._redis_filter_key_name)
self._is_consuming_function_use_multi_params = is_consuming_function_use_multi_params
self._lock_for_pika = Lock()
        self._execute_task_times_every_minute = 0  # How many tasks were executed in the last minute.
self._lock_for_count_execute_task_times_every_minute = Lock()
self._current_time_for_execute_task_times_every_minute = time.time()
self._msg_num_in_broker = 0
self._last_timestamp_when_has_task_in_queue = 0
self._last_timestamp_print_msg_num = 0
self._is_do_not_run_by_specify_time_effect = is_do_not_run_by_specify_time_effect
        self._do_not_run_by_specify_time = do_not_run_by_specify_time  # A time window during which the consumer should not run can be configured.
self._schedule_tasks_on_main_thread = schedule_tasks_on_main_thread
self.stop_flag = False
self._publisher_of_same_queue = None
@property
@decorators.synchronized
def threadpool(self):
return self._concurrent_mode_dispatcher.build_pool()
def keep_circulating(self, time_sleep=0.001, exit_if_function_run_sucsess=False, is_display_detail_exception=True):
"""间隔一段时间,一直循环运行某个方法的装饰器
:param time_sleep :循环的间隔时间
:param is_display_detail_exception
:param exit_if_function_run_sucsess :如果成功了就退出循环
"""
def _keep_circulating(func):
# noinspection PyBroadException
@wraps(func)
def __keep_circulating(*args, **kwargs):
while 1:
if self.stop_flag:
break
try:
result = func(*args, **kwargs)
if exit_if_function_run_sucsess:
return result
except Exception as e:
                        msg = func.__name__ + ' raised an error\n ' + traceback.format_exc(limit=10) if is_display_detail_exception else str(e)
self.logger.error(msg)
finally:
time.sleep(time_sleep)
return __keep_circulating
return _keep_circulating
def start_consuming_message(self):
        self.logger.warning(f'Start consuming messages from {self._queue_name}')
# self.threadpool.submit(decorators.keep_circulating(20)(self.check_heartbeat_and_message_count))
self.threadpool.submit(self.keep_circulating(20)(self.check_heartbeat_and_message_count))
if self._schedule_tasks_on_main_thread:
# decorators.keep_circulating(1)(self._shedual_task)()
self.keep_circulating(1)(self._shedual_task)()
else:
# t = Thread(target=decorators.keep_circulating(1)(self._shedual_task))
self._concurrent_mode_dispatcher.schedulal_task_with_no_block()
@abc.abstractmethod
def _shedual_task(self):
raise NotImplementedError
def _run_consuming_function_with_confirm_and_retry(self, kw: dict, current_retry_times=0, ):
        if self._do_task_filtering and self._redis_filter.check_value_exists(kw['body']):  # Check the function's arguments and filter out tasks that already ran successfully.
            self.logger.info(f'Filtering out task {kw["body"]} via the redis key [{self._redis_filter_key_name}]')
self._confirm_consume(kw)
return
with self._lock_for_count_execute_task_times_every_minute:
self._execute_task_times_every_minute += 1
if time.time() - self._current_time_for_execute_task_times_every_minute > 60:
self.logger.info(
                    f'Executed function [ {self.consuming_function.__name__} ] {self._execute_task_times_every_minute} times within the last minute; an estimated '
                    f'{time_util.seconds_to_hour_minute_second(self._msg_num_in_broker / self._execute_task_times_every_minute * 60)} is still needed '
                    f'to finish the remaining {self._msg_num_in_broker} tasks ')
self._current_time_for_execute_task_times_every_minute = time.time()
self._execute_task_times_every_minute = 0
if current_retry_times < self._max_retry_times + 1:
# noinspection PyBroadException
t_start = time.time()
try:
function_run = self.consuming_function if self._function_timeout == 0 else self._concurrent_mode_dispatcher.timeout_deco(self._function_timeout)(self.consuming_function)
                if self._is_consuming_function_use_multi_params:  # The consuming function takes conventional multiple parameters.
function_run(**delete_keys_and_return_new_dict(kw['body'], ['publish_time', 'publish_time_format']))
else:
                    function_run(delete_keys_and_return_new_dict(kw['body'], ['publish_time', 'publish_time_format']))  # The consuming function takes a single parameter: a dict whose key/value pairs carry the arguments.
self._confirm_consume(kw)
if self._do_task_filtering:
                    self._redis_filter.add_a_value(kw['body'])  # After a successful run, add the sorted key/value string of the function's arguments to the set.
                # self.logger.debug(f'{self._concurrent_mode_dispatcher.get_concurrent_info()} function {self.consuming_function.__name__} '
                #                   f'run {current_retry_times + 1} succeeded; the run took {round(time.time() - t_start, 4)} seconds, the input was 【 {kw["body"]} 】')
                self.logger.debug(f' Function {self.consuming_function.__name__} '
                                  f'run {current_retry_times + 1} succeeded; the run took {round(time.time() - t_start, 4)} seconds, the input was 【 {kw["body"]} 】. {self._concurrent_mode_dispatcher.get_concurrent_info()}')
except Exception as e:
                if isinstance(e, (PyMongoError, ExceptionForRequeue)):  # Mongo often cannot accept inserts or is down during maintenance/backups; that case, or deliberately raising an ExceptionForRequeue, requeues the message without counting against the retry limit.
                    self.logger.critical(f'An error occurred in function [{self.consuming_function.__name__}]: {type(e)}  {e}')
                    return self._requeue(kw)
                self.logger.error(f'Function {self.consuming_function.__name__}  raised an error on run {current_retry_times + 1}; '
                                  f'the run took {round(time.time() - t_start, 4)} seconds,\n  the input was 【 {kw["body"]} 】   \n the cause was {type(e)} {e} ', exc_info=self._is_print_detail_exception)
                self._run_consuming_function_with_confirm_and_retry(kw, current_retry_times + 1)
        else:
            self.logger.critical(f'Function {self.consuming_function.__name__} still failed after reaching the maximum of {self._max_retry_times} retries; the input was 【 {kw["body"]} 】')  # Failed more than the allowed number of times, so acknowledge the message anyway.
self._confirm_consume(kw)
    @abc.abstractmethod
    def _confirm_consume(self, kw):
        """acknowledge that the message has been consumed"""
        raise NotImplementedError

    # noinspection PyUnusedLocal
    def check_heartbeat_and_message_count(self):
        self._msg_num_in_broker = self.publisher_of_same_queue.get_message_count()
        if time.time() - self._last_timestamp_print_msg_num > 60:
            self.logger.info(f'queue [{self._queue_name}] still holds [{self._msg_num_in_broker}] tasks')
            self._last_timestamp_print_msg_num = time.time()
        if self._msg_num_in_broker != 0:
            self._last_timestamp_when_has_task_in_queue = time.time()
        return self._msg_num_in_broker

    @abc.abstractmethod
    def _requeue(self, kw):
        """put the message back on the queue"""
        raise NotImplementedError

    def _submit_task(self, kw):
        if self._judge_is_daylight():
            self._requeue(kw)
            time.sleep(self.time_interval_for_check_do_not_run_time)
            return
        if self._msg_expire_senconds != 0 and time.time() - self._msg_expire_senconds > kw['body']['publish_time']:
            self.logger.warning(f'the message was published at {kw["body"]["publish_time"]} {kw["body"].get("publish_time_format", "")}, which is {round(time.time() - kw["body"]["publish_time"], 4)} seconds ago, '
                                f'older than the configured {self._msg_expire_senconds} seconds; discarding the task')
            self._confirm_consume(kw)
            return 0
        self.threadpool.submit(self._run_consuming_function_with_confirm_and_retry, kw)
        time.sleep(self._msg_schedule_time_intercal)

    def _judge_is_daylight(self):
        if self._is_do_not_run_by_specify_time_effect and self._do_not_run_by_specify_time[0] < time_util.DatetimeConverter().time_str < self._do_not_run_by_specify_time[1]:
            self.logger.warning(f'the current time {time_util.DatetimeConverter()} falls within {self._do_not_run_by_specify_time}; not running')
            return True

    def __str__(self):
        return f'consumer for queue {self.queue_name} with function {self.consuming_function}'
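

# Illustrative sketch: a minimal concrete consumer wiring the three abstract
# hooks together. The ExampleListConsumer name and the in-memory list standing
# in for a broker are hypothetical stand-ins for demonstration only; real
# subclasses talk to rabbitmq/redis/mongo instead.
class ExampleListConsumer(AbstractConsumer):
    _example_broker = []  # hypothetical stand-in for a real broker's storage

    def _shedual_task(self):
        # keep_circulating(1) at the call site already guards restarts on error.
        while True:
            if self._example_broker:
                # kw['body'] must be a dict of task arguments that also carries
                # 'publish_time', as _submit_task and the retry logic expect.
                self._submit_task({'body': self._example_broker.pop(0)})
            else:
                time.sleep(0.1)

    def _confirm_consume(self, kw):
        pass  # an in-memory list has nothing to acknowledge

    def _requeue(self, kw):
        self._example_broker.append(kw['body'])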
# noinspection PyProtectedMember
class ConcurrentModeDispatcher(LoggerMixin):
    schedulal_thread_to_be_join = []
    concurrent_mode = None
    schedual_task_always_use_thread = False

    def __init__(self, consumerx: AbstractConsumer):
        self.consumer = consumerx
        if self.__class__.concurrent_mode is not None and self.consumer._concurrent_mode != self.__class__.concurrent_mode:
            raise ValueError('two different concurrency modes cannot be set within the same interpreter')
        self._concurrent_mode = self.__class__.concurrent_mode = self.consumer._concurrent_mode
        concurrent_name = ''
        self.timeout_deco = None
        if self._concurrent_mode == 1:
            concurrent_name = 'thread'
            self.timeout_deco = decorators.timeout
        elif self._concurrent_mode == 2:
            concurrent_name = 'gevent'
            self.timeout_deco = gevent_timeout_deco
        elif self._concurrent_mode == 3:
            concurrent_name = 'eventlet'
            self.timeout_deco = evenlet_timeout_deco
        self.concurrent_name = concurrent_name
        self.logger.warning(f'{self.consumer} concurrency mode set to {self.concurrent_name}')
    def build_pool(self):
        if self.consumer._threadpool:
            return self.consumer._threadpool
        pool_type = None  # three duck-typed classes modelled on ThreadPoolExecutor; their public method names and behaviour are identical, so they can substitute for one another.
        if self._concurrent_mode == 1:
            pool_type = CustomThreadPoolExecutor
            # pool_type = BoundedThreadPoolExecutor
            check_not_monkey()
        elif self._concurrent_mode == 2:
            pool_type = GeventPoolExecutor
            check_gevent_monkey_patch()
        elif self._concurrent_mode == 3:
            pool_type = CustomEventletPoolExecutor
            check_evenlet_monkey_patch()
        self.consumer._threadpool = self.consumer._specify_threadpool if self.consumer._specify_threadpool else pool_type(self.consumer._threads_num + 1)  # one extra thread for checking the message count and heartbeat
        self.logger.warning(f'{self.concurrent_name} {self.consumer._threadpool}')
        return self.consumer._threadpool
    def schedulal_task_with_no_block(self):
        if self.schedual_task_always_use_thread:
            t = Thread(target=self.consumer.keep_circulating(1)(self.consumer._shedual_task))
            self.__class__.schedulal_thread_to_be_join.append(t)
            t.start()
        else:
            if self._concurrent_mode == 1:
                t = Thread(target=self.consumer.keep_circulating(1)(self.consumer._shedual_task))
                self.__class__.schedulal_thread_to_be_join.append(t)
                t.start()
            elif self._concurrent_mode == 2:
                g = gevent.spawn(self.consumer.keep_circulating(1)(self.consumer._shedual_task), )
                self.__class__.schedulal_thread_to_be_join.append(g)
            elif self._concurrent_mode == 3:
                g = eventlet.spawn(self.consumer.keep_circulating(1)(self.consumer._shedual_task), )
                self.__class__.schedulal_thread_to_be_join.append(g)
        atexit.register(self.join)
    @classmethod
    def join(cls):
        nb_print((cls.schedulal_thread_to_be_join, len(cls.schedulal_thread_to_be_join), 'mode:', cls.concurrent_mode))
        if cls.schedual_task_always_use_thread:
            for t in cls.schedulal_thread_to_be_join:
                nb_print(t)
                t.join()
        else:
            if cls.concurrent_mode == 1:
                for t in cls.schedulal_thread_to_be_join:
                    nb_print(t)
                    t.join()
            elif cls.concurrent_mode == 2:
                # cls.logger.info()
                nb_print(cls.schedulal_thread_to_be_join)
                gevent.joinall(cls.schedulal_thread_to_be_join, raise_error=True, )
            elif cls.concurrent_mode == 3:
                for g in cls.schedulal_thread_to_be_join:
                    # eventlet.greenthread.GreenThread.
                    nb_print(g)
                    g.wait()

    def get_concurrent_info(self):
        concurrent_info = ''
        if self._concurrent_mode == 1:
            concurrent_info = f'[{threading.current_thread()} {threading.active_count()}]'
        elif self._concurrent_mode == 2:
            concurrent_info = f'[{gevent.getcurrent()} {threading.active_count()}]'
        elif self._concurrent_mode == 3:
            # noinspection PyArgumentList
            concurrent_info = f'[{eventlet.getcurrent()} {threading.active_count()}]'
        return concurrent_info
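

# Note on the dispatch modes above: mode 1 requires that no monkey patch is
# active (check_not_monkey), while modes 2 and 3 require the gevent/eventlet
# monkey patch to be applied before any other imports, which
# check_gevent_monkey_patch / check_evenlet_monkey_patch enforce in build_pool.
# A typical gevent entry point therefore begins with
#     from gevent import monkey; monkey.patch_all()
# before importing this module.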
def wait_for_possible_has_finish_all_tasks(queue_name: str, minutes: int, send_stop_to_broker=0, broker_kind: int = 0, ):
    """
    Because consumption is asynchronous, a queue may be drained on one side while messages are still being pushed on the other, and a few trailing tasks may still be awaiting acknowledgement while consumers have not actually finished. Sometimes, though, you need to judge whether all tasks are complete; this offers an imprecise judgment, so understand the cause and the scenario before using it, and use it with care.
    :param queue_name: name of the queue
    :param minutes: how many consecutive minutes without tasks before consumption is judged complete
    :param send_stop_to_broker: send a stop flag to the broker; this makes consumers exit their scheduling loop.
    :param broker_kind: kind of message broker
    :return:
    """
    if minutes <= 1:
        raise ValueError('the window for judging tasks as probably finished must be at least 2 minutes; 10 minutes is better')
    pb = get_publisher(queue_name, broker_kind=broker_kind)
    no_task_time = 0
    while 1:
        # noinspection PyBroadException
        try:
            message_count = pb.get_message_count()
        except Exception as e:
            nb_print(e)
            message_count = -1
        if message_count == 0:
            no_task_time += 30
        else:
            no_task_time = 0
        time.sleep(30)
        if no_task_time > minutes * 60:
            break
    if send_stop_to_broker:
        pb.publish({'stop': 1})
    pb.close()
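

# Illustrative usage (a sketch; the queue name and broker_kind value here are
# assumptions): block until 'test_queue' has stayed empty for 10 consecutive
# minutes, then publish a stop flag so consumers exit the scheduling loop.
if __name__ == '__main__':
    wait_for_possible_has_finish_all_tasks('test_queue', 10, send_stop_to_broker=1, broker_kind=0)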
| [
"[email protected]"
]
| |
0cefd1e9333d717207e4845c00ae84fb4478d05e | 08a68e32dc80f99a37a30ddbbf943337546cc3d5 | /.history/count/views_20200419210055.py | c0e0fbfb2b96a0104e0cd52bbfbce5cc12136149 | []
| no_license | Space20001/word-count-project | dff1b4b44d2f7230070eef0d95dd968b655d92f7 | 795b5e8ad5c59109e96bf7a8e9192efaefa7770e | refs/heads/master | 2022-04-20T17:54:05.511449 | 2020-04-20T15:25:46 | 2020-04-20T15:25:46 | 257,327,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | from django.shortcuts import render
# Create your views here.
def home(request):
    return render(request, 'count/home.html', {})


def counted(request):
    fulltext = request.GET['fulltext']
    wordlist = fulltext.split()
    worddictionary = {}
    for word in wordlist:
        if word in worddictionary:
            worddictionary[word] += 1  # seen before: bump its count
        else:
            worddictionary[word] = 1  # first occurrence
    # passing the per-word counts to the template is the natural completion of
    # the half-written loop in this history snapshot
    return render(request, 'count/counted.html', {'fulltext': fulltext, 'count': len(wordlist), 'worddictionary': worddictionary.items()})


def about(request):
    return render(request, 'count/about.html', {'about': 'about'})
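

# A matching count/urls.py for these three views might look like the sketch
# below; the route paths and names are assumptions, not part of this file.
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.home, name='home'),
#         path('counted/', views.counted, name='counted'),
#         path('about/', views.about, name='about'),
#     ]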
| [
"[email protected]"
]
| |
f8e650b4108f33a5a304944caf20ee25f045cba5 | 8747375a4c6442a5bc317baad36ba41f5de4512e | /personal/migrations/0007_auto_20150226_0351.py | ca2beb584b49a1f858048462ec0f5e23cf67c068 | []
| no_license | raultr/perBackend | 40f73199cb722133d79d76b4389d4f613764560b | f22542f79f293de444e29ac7183a0ee9c5b86889 | refs/heads/master | 2022-12-06T10:17:29.400434 | 2017-02-14T03:23:13 | 2017-02-14T03:23:13 | 30,055,264 | 0 | 0 | null | 2022-11-22T00:26:36 | 2015-01-30T03:57:03 | JavaScript | UTF-8 | Python | false | false | 888 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('personal', '0006_personal_imagen'),
    ]

    operations = [
        migrations.AlterField(
            model_name='personal',
            name='condiciones_alta',
            field=models.CharField(default=b'', max_length=150, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='personal',
            name='cuip',
            field=models.CharField(max_length=30, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='personal',
            name='id_seguridad_social',
            field=models.CharField(max_length=20, blank=True),
            preserve_default=True,
        ),
    ]
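
# For reference, the Personal model fields implied by these AlterField
# operations would look roughly like the sketch below; this is a reconstruction
# from the migration alone, and fields from earlier migrations are omitted.
#
#     class Personal(models.Model):
#         condiciones_alta = models.CharField(default='', max_length=150, blank=True)
#         cuip = models.CharField(max_length=30, blank=True)
#         id_seguridad_social = models.CharField(max_length=20, blank=True)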
| [
"[email protected]"
]
|