| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
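Each row below follows this schema: provenance fields for one source file plus its raw `content`. As a minimal, hypothetical sketch (no dataset name or loader is stated here, and the sample record is abridged for illustration), rows with this layout can be filtered in plain Python:

```python
# Minimal sketch: filtering rows that follow the schema above.
# Field names match the column table; the sample values are abridged/illustrative.

sample_rows = [
    {
        "blob_id": "82a567e6b34c488be5c6911c9716d0e5e39267f5",
        "path": "/3.Algorithm/03. String/회문.py",
        "license_type": "no_license",
        "language": "Python",
        "is_vendor": False,
        "is_generated": False,
        "length_bytes": 644,
        "extension": "py",
        "star_events_count": 0,
        "content": "def is_pal(matrix, N, M): ...",
    },
]

def keep_row(row, max_bytes=1_000_000):
    """Keep hand-written (non-vendored, non-generated) Python files below a size cap."""
    return (
        row["language"] == "Python"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] <= max_bytes
    )

kept = [r for r in sample_rows if keep_row(r)]
print(f"kept {len(kept)} of {len(sample_rows)} rows")
```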
82a567e6b34c488be5c6911c9716d0e5e39267f5 | 51ce07a419abe50f49e7bb6a6c036af291ea2ef5 | /3.Algorithm/03. String/회문.py | 3d2cc4d08812c91f1e68a534526433192562357f | []
| no_license | salee1023/TIL | c902869e1359246b6dd926166f5ac9209af7b1aa | 2905bd331e451673cbbe87a19e658510b4fd47da | refs/heads/master | 2023-03-10T09:48:41.377704 | 2021-02-24T10:47:27 | 2021-02-24T10:47:27 | 341,129,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | def is_pal(matrix,N,M):
    # rows
for i in range(N):
for j in range(N-M+1):
string = matrix[i][j:j+M]
if string == string[::-1]:
return string
    # columns
for i in range(N):
for j in range(N-M+1):
c_string = ''
for m in range(M):
c_string += matrix[j+m][i]
if c_string == c_string[::-1]:
return c_string
# ---------------------------------------------
T = int(input())
for tc in range(1, 1 + T):
N, M = map(int, input().split())
matrix = [input() for _ in range(N)]
print(f'#{tc} {is_pal(matrix,N,M)}') | [
"[email protected]"
]
| |
da63621588aba35744cad652738150688fbf66e9 | caf8cbcafd448a301997770165b323438d119f5e | /.history/chapter01/python_03_list_20201124200454.py | 73877292c577b91f0b75b424a0b7b8670e42cef1 | [
"MIT"
]
| permissive | KustomApe/nerdape | 03e0691f675f13ce2aefa46ee230111247e90c72 | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | refs/heads/main | 2023-01-23T10:13:26.584386 | 2020-11-28T22:29:49 | 2020-11-28T22:29:49 | 309,897,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | """[About lists]]
""" | [
"[email protected]"
]
| |
a6559663df3b494d7fa2c3b72625adbfc4ff5eb5 | d6fe71e3e995c03b8f5151ab1d53411b77b325ba | /walklist_api_service/models/ping_response_payload_headers.py | b0cc9e09ae0c79a6d0ca74aee306b8002286ff8f | []
| no_license | mwilkins91/petpoint-scraper | 95468ae9951deaa8bd3bef7d88c0ff660146c1a3 | dd0c60c68fc6a7d11358aa63d28fdf07fff3c7cd | refs/heads/master | 2022-11-27T00:02:50.654404 | 2020-08-09T18:41:40 | 2020-08-09T18:41:40 | 286,180,666 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,252 | py | # coding: utf-8
"""
The Enrichment List
The THS enrichment list # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PingResponsePayloadHeaders(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'content_type': 'str'
}
attribute_map = {
'content_type': 'Content-Type'
}
def __init__(self, content_type=None): # noqa: E501
"""PingResponsePayloadHeaders - a model defined in Swagger""" # noqa: E501
self._content_type = None
self.discriminator = None
if content_type is not None:
self.content_type = content_type
@property
def content_type(self):
"""Gets the content_type of this PingResponsePayloadHeaders. # noqa: E501
:return: The content_type of this PingResponsePayloadHeaders. # noqa: E501
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this PingResponsePayloadHeaders.
:param content_type: The content_type of this PingResponsePayloadHeaders. # noqa: E501
:type: str
"""
self._content_type = content_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PingResponsePayloadHeaders, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PingResponsePayloadHeaders):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
af611b2c565ba96222cf52efd00902fede83b397 | 7cf119239091001cbe687f73018dc6a58b5b1333 | /datashufflepy-zeus/src/branch_scripts2/NEWS/ZX_CJXW_GJJRJG/ZX_CJXW_GJJRJG_GZW_DFSM.py | 61ffc39ae2d35de2d050385674f862a2e53b7081 | [
"Apache-2.0"
]
| permissive | ILKKAI/dataETL | 0f5b80c3482994f735f092a1e01fa1009bac4109 | 32f7ec3aaaf32b5074536a615cb9cd5c28bd499c | refs/heads/master | 2022-04-04T19:27:05.747852 | 2020-02-28T11:17:48 | 2020-02-28T11:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # -*- coding: utf-8 -*-
from database._mongodb import MongoClient
def data_shuffle(data):
return data
if __name__ == '__main__':
main_mongo = MongoClient(entity_code="ZX_CJXW_GJJRJG_GZW_DFSM", mongo_collection="ZX_CJXW_HY")
data_list = main_mongo.main()
for data in data_list:
re_data = data_shuffle(data)
print(re_data)
| [
"[email protected]"
]
| |
595aa5a10ef9b0fb0a73ced16c54c8e221cf046c | 2e1617aec5614ad695fd6ee8dfc0ffb77c54dad1 | /sources/Yalkut Shimoni/Nach/Yalkut_Shimoni_on_Nach.py | 3ca221c24e7c961394c09c1cbcfc94c0e0676dbc | []
| no_license | bachrach44/Sefaria-Data | ad875a552b858828ca2bbd4bbf1da8363dfef038 | b33d781c1bde12568caa01c19e5ad9ec874d160f | refs/heads/master | 2020-12-14T18:50:44.616694 | 2015-10-19T13:59:18 | 2015-10-19T13:59:18 | 17,557,774 | 0 | 0 | null | 2015-08-24T20:59:26 | 2014-03-09T04:33:13 | HTML | UTF-8 | Python | false | false | 1,248 | py | # -*- coding: utf-8 -*-
import urllib
import urllib2
from urllib2 import URLError, HTTPError
import json
import pdb
import os
import sys
p = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, p)
os.environ['DJANGO_SETTINGS_MODULE'] = "sefaria.settings"
from local_settings import *
sys.path.insert(0, SEFARIA_PROJECT_PATH)
from sefaria.model import *
def post_index(index):
url = SEFARIA_SERVER+'api/v2/raw/index/'+index["title"].replace(" ", "_")
indexJSON = json.dumps(index)
values = {
'json': indexJSON,
'apikey': API_KEY
}
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
try:
response = urllib2.urlopen(req)
print response.read()
except HTTPError, e:
print 'Error code: ', e.code
root = JaggedArrayNode()
root.key = "yalkut_on_nach"
root.add_title("Yalkut Shimoni on Nach", "en", primary=True)
root.add_title(u"""ילקות שמעוני על נ״ח""", "he", primary=True)
root.depth = 2
root.sectionNames = ["Remez", "Paragraph"]
root.heSectionNames = [u"רמז", u"פסקה"]
root.addressTypes = ["Integer", "Integer"]
root.validate()
index = {
"title": "Yalkut Shimoni on Nach",
"categories": ["Midrash"],
"schema": root.serialize()
}
post_index(index)
| [
"[email protected]"
]
| |
956b53ebd42005143cbd096e32d0ce899e750207 | f38db79439185ab6062294e1d82f6e909d2be81e | /avazacli/models/payment_allocation.py | 18823ae27ae286ff74624e1739cafa135180f970 | []
| no_license | ContainerSolutions/avazacli | 3a37f8500ad1f1843acbdbb413d4949e00ec6f91 | 49618314f15d8cb2bda36e6019670fdfbed1524f | refs/heads/master | 2020-06-18T18:44:58.594385 | 2019-07-11T14:23:10 | 2019-07-11T14:23:10 | 196,406,206 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,569 | py | # coding: utf-8
"""
Avaza API Documentation
Welcome to the autogenerated documentation & test tool for Avaza's API. <br/><br/><strong>API Security & Authentication</strong><br/>Authentication options include OAuth2 Implicit and Authorization Code flows, and Personal Access Token. All connections should be encrypted over SSL/TLS <br/><br/>You can set up and manage your api authentication credentials from within your Avaza account. (requires Administrator permissions on your Avaza account).<br/><br/> OAuth2 Authorization endpoint: https://any.avaza.com/oauth2/authorize <br/>OAuth2 Token endpoint: https://any.avaza.com/oauth2/token<br/>Base URL for subsequent API Requests: https://api.avaza.com/ <br/><br/>Blogpost about authenticating with Avaza's API: https://www.avaza.com/avaza-api-oauth2-authentication/ <br/>Blogpost on using Avaza's webhooks: https://www.avaza.com/avaza-api-webhook-notifications/<br/>The OAuth flow currently issues Access Tokens that last 1 day, and Refresh tokens that last 180 days<br/>The Api respects the security Roles assigned to the authenticating Avaza user and filters the data return appropriately. <br/><br><strong>Support</strong><br/>For API Support, and to request access please contact Avaza Support Team via our support chat. <br/><br/><strong>User Contributed Libraries:</strong><br/>Graciously contributed by 3rd party users like you. <br/>Note these are not tested or endorsesd by Avaza. We encourage you to review before use, and use at own risk.<br/> <ul><li> - <a target='blank' href='https://packagist.org/packages/debiprasad/oauth2-avaza'>PHP OAuth Client Package for Azava API (by Debiprasad Sahoo)</a></li></ul> # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PaymentAllocation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'transaction_allocation_id': 'int',
'invoice_transaction_idfk': 'int',
'payment_transaction_idfk': 'int',
'allocation_date': 'datetime',
'allocation_amount': 'float'
}
attribute_map = {
'transaction_allocation_id': 'TransactionAllocationID',
'invoice_transaction_idfk': 'InvoiceTransactionIDFK',
'payment_transaction_idfk': 'PaymentTransactionIDFK',
'allocation_date': 'AllocationDate',
'allocation_amount': 'AllocationAmount'
}
def __init__(self, transaction_allocation_id=None, invoice_transaction_idfk=None, payment_transaction_idfk=None, allocation_date=None, allocation_amount=None): # noqa: E501
"""PaymentAllocation - a model defined in Swagger""" # noqa: E501
self._transaction_allocation_id = None
self._invoice_transaction_idfk = None
self._payment_transaction_idfk = None
self._allocation_date = None
self._allocation_amount = None
self.discriminator = None
if transaction_allocation_id is not None:
self.transaction_allocation_id = transaction_allocation_id
if invoice_transaction_idfk is not None:
self.invoice_transaction_idfk = invoice_transaction_idfk
if payment_transaction_idfk is not None:
self.payment_transaction_idfk = payment_transaction_idfk
if allocation_date is not None:
self.allocation_date = allocation_date
if allocation_amount is not None:
self.allocation_amount = allocation_amount
@property
def transaction_allocation_id(self):
"""Gets the transaction_allocation_id of this PaymentAllocation. # noqa: E501
:return: The transaction_allocation_id of this PaymentAllocation. # noqa: E501
:rtype: int
"""
return self._transaction_allocation_id
@transaction_allocation_id.setter
def transaction_allocation_id(self, transaction_allocation_id):
"""Sets the transaction_allocation_id of this PaymentAllocation.
:param transaction_allocation_id: The transaction_allocation_id of this PaymentAllocation. # noqa: E501
:type: int
"""
self._transaction_allocation_id = transaction_allocation_id
@property
def invoice_transaction_idfk(self):
"""Gets the invoice_transaction_idfk of this PaymentAllocation. # noqa: E501
:return: The invoice_transaction_idfk of this PaymentAllocation. # noqa: E501
:rtype: int
"""
return self._invoice_transaction_idfk
@invoice_transaction_idfk.setter
def invoice_transaction_idfk(self, invoice_transaction_idfk):
"""Sets the invoice_transaction_idfk of this PaymentAllocation.
:param invoice_transaction_idfk: The invoice_transaction_idfk of this PaymentAllocation. # noqa: E501
:type: int
"""
self._invoice_transaction_idfk = invoice_transaction_idfk
@property
def payment_transaction_idfk(self):
"""Gets the payment_transaction_idfk of this PaymentAllocation. # noqa: E501
:return: The payment_transaction_idfk of this PaymentAllocation. # noqa: E501
:rtype: int
"""
return self._payment_transaction_idfk
@payment_transaction_idfk.setter
def payment_transaction_idfk(self, payment_transaction_idfk):
"""Sets the payment_transaction_idfk of this PaymentAllocation.
:param payment_transaction_idfk: The payment_transaction_idfk of this PaymentAllocation. # noqa: E501
:type: int
"""
self._payment_transaction_idfk = payment_transaction_idfk
@property
def allocation_date(self):
"""Gets the allocation_date of this PaymentAllocation. # noqa: E501
:return: The allocation_date of this PaymentAllocation. # noqa: E501
:rtype: datetime
"""
return self._allocation_date
@allocation_date.setter
def allocation_date(self, allocation_date):
"""Sets the allocation_date of this PaymentAllocation.
:param allocation_date: The allocation_date of this PaymentAllocation. # noqa: E501
:type: datetime
"""
self._allocation_date = allocation_date
@property
def allocation_amount(self):
"""Gets the allocation_amount of this PaymentAllocation. # noqa: E501
:return: The allocation_amount of this PaymentAllocation. # noqa: E501
:rtype: float
"""
return self._allocation_amount
@allocation_amount.setter
def allocation_amount(self, allocation_amount):
"""Sets the allocation_amount of this PaymentAllocation.
:param allocation_amount: The allocation_amount of this PaymentAllocation. # noqa: E501
:type: float
"""
self._allocation_amount = allocation_amount
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PaymentAllocation):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
0f30313b2ba18bc539916241ea7b0c1a993812bd | b77e12c0fc66cf47f83359fe8a04890669058f08 | /day_3/callMainTest.py | ad3ff1f699f7f0f03e8d47dafb7d95a992e5dff1 | []
| no_license | dydy061951/SeleniumProject | 177bb8bdd9f3f586d63c7330d4e5bcdc473cf7c8 | 857fddb74748133475e5f4583007446ab7d2184f | refs/heads/master | 2021-08-23T04:41:30.705381 | 2017-12-03T09:26:14 | 2017-12-03T09:26:14 | 112,907,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | from day_3.mainTest import printHW
printHW() | [
"51Testing"
]
| 51Testing |
729c10802c03ffac6b1d4ba1c426d90bdb7d05e5 | 89c677ead412795b56a03d7209b6dffa10dee346 | /orcamentos/core/management/commands/create_admin.py | 8b99126f0f5040af0f1caaab258f1b840a35d448 | [
"MIT"
]
| permissive | rg3915/orcamentos | 009a5115e6cdc38976285d1867c56e3907898b03 | 5cd8c4f6cbeabeb5e069c58f583c38f44c3af99a | refs/heads/master | 2022-06-21T22:54:49.575533 | 2021-08-10T21:31:38 | 2021-08-10T21:31:38 | 39,759,924 | 97 | 44 | MIT | 2022-06-16T23:17:19 | 2015-07-27T06:58:45 | HTML | UTF-8 | Python | false | false | 842 | py | from django.core.management.base import BaseCommand
from orcamentos.crm.models import Employee
class Command(BaseCommand):
help = ''' Cria um usuário admin. '''
def handle(self, *args, **kwargs):
'''
        Creates an Employee.
        We need an Employee to perform all transactions in the system.
'''
username = 'admin'
first_name = 'Admin'
last_name = 'Admin'
email = '[email protected]'
user = Employee.objects.create(
username=username,
first_name=first_name,
last_name=last_name,
email=email,
gender='I'
)
user.set_password('admin')
user.is_staff = True
user.is_superuser = True
user.is_active = True
user.save()
print('Usuário criado com sucesso.')
| [
"[email protected]"
]
| |
7a378105c860099dd61297282e87b87d6ce4006d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /EHzL3v25wYp7E4AFC_5.py | 8a5ec4b5bd16208bd51e95590a5ec8900a2b9c45 | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | """
Write a function that returns `True` if you can use the letters of the first
string to create the second string. Letters are **case sensitive**.
### Examples
can_build("aPPleAL", "PAL") ➞ True
can_build("aPPleAL", "apple") ➞ False
can_build("a", "") ➞ True
can_build("aa", "aaa") ➞ False
### Notes
Letters in the first string can be used only once.
"""
def can_build(s1, s2):
result = True
for i in s2:
        if i not in s1:
result = False
if s1.count(i) < s2.count(i):
result = False
if result is False:
return False
else:
return True
| [
"[email protected]"
]
| |
062d52021c8a30a2bffcf1ee2952f42480968f14 | 27c8fc9bc336c49dbe172df5774e786106b4d6b7 | /api/migrations/0001_initial.py | eb1b71fb66d5a2dc1eaac76e0a337a3677b5772f | []
| no_license | BunmiAdeleke123/blog | 18169c3ab8b75742bf8bc3c73a20f25f02166533 | 5dcfb10cb844b62871d442f72cc37d136092adfe | refs/heads/main | 2023-09-01T02:00:39.123781 | 2021-10-26T13:11:49 | 2021-10-26T13:11:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | # Generated by Django 3.2.8 on 2021-10-26 04:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('slug', models.SlugField()),
('description', models.TextField()),
('date_added', models.DateField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-date_added'],
},
),
]
| [
"[email protected]"
]
| |
9f8493b375639b3b737ef050cb029a002c01a974 | 02338bb8111fc1aa88e830ac09a11664720eb2d4 | /tmp/azure_rm_rule.py | b793c87171d1316e012545380daec23b6a3aca7e | []
| no_license | Fred-sun/fred_yaml | a49977b0e8505c7447df23dd80c7fef1be70e6bc | 295ca4cd2b59b8d2758f06eb7fd79920327ea524 | refs/heads/master | 2023-04-28T05:51:56.599488 | 2023-04-25T13:52:10 | 2023-04-25T13:52:10 | 131,376,340 | 0 | 1 | null | 2020-07-06T14:22:46 | 2018-04-28T05:34:49 | TSQL | UTF-8 | Python | false | false | 17,182 | py | #!/usr/bin/python
#
# Copyright (c) 2020 GuopengLin, (@t-glin)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_rule
version_added: '2.9'
short_description: Manage Azure Rule instance.
description:
- 'Create, update and delete instance of Azure Rule.'
options:
resource_group_name:
description:
- Name of the Resource group within the Azure subscription.
required: true
type: str
namespace_name:
description:
- The namespace name
required: true
type: str
topic_name:
description:
- The topic name.
required: true
type: str
subscription_name:
description:
- The subscription name.
required: true
type: str
rule_name:
description:
- The rule name.
required: true
type: str
action:
description:
- >-
Represents the filter actions which are allowed for the transformation
of a message that have been matched by a filter expression.
type: dict
suboptions:
sqlexpression:
description:
- SQL expression. e.g. MyProperty='ABC'
type: str
compatibility_level:
description:
- >-
This property is reserved for future use. An integer value showing
the compatibility level, currently hard-coded to 20.
type: integer
requires_preprocessing:
description:
- Value that indicates whether the rule action requires preprocessing.
type: bool
filter_type:
description:
- Filter type that is evaluated against a BrokeredMessage.
type: sealed-choice
sqlfilter:
description:
- Properties of sqlFilter
type: dict
suboptions:
sqlexpression:
description:
- The SQL expression. e.g. MyProperty='ABC'
type: str
compatibility_level:
description:
- >-
This property is reserved for future use. An integer value showing
the compatibility level, currently hard-coded to 20.
type: integer
requires_preprocessing:
description:
- Value that indicates whether the rule action requires preprocessing.
type: bool
properties:
description:
- dictionary object for custom filters
type: dictionary
correlation_id:
description:
- Identifier of the correlation.
type: str
message_id:
description:
- Identifier of the message.
type: str
to:
description:
- Address to send to.
type: str
reply_to:
description:
- Address of the queue to reply to.
type: str
label:
description:
- Application specific label.
type: str
session_id:
description:
- Session identifier.
type: str
reply_to_session_id:
description:
- Session identifier to reply to.
type: str
content_type:
description:
- Content type of the message.
type: str
requires_preprocessing:
description:
- Value that indicates whether the rule action requires preprocessing.
type: bool
state:
description:
- Assert the state of the Rule.
- Use C(present) to create or update an Rule and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- GuopengLin (@t-glin)
'''
EXAMPLES = '''
- name: RulesCreateCorrelationFilter
azure_rm_rule:
namespace_name: sdk-Namespace-1319
resource_group_name: resourceGroupName
rule_name: sdk-Rules-6571
subscription_name: sdk-Subscriptions-8691
topic_name: sdk-Topics-2081
properties:
correlation_filter:
properties:
topic_hint: Crop
filter_type: CorrelationFilter
- name: RulesCreateOrUpdate
azure_rm_rule:
namespace_name: sdk-Namespace-1319
resource_group_name: resourceGroupName
rule_name: sdk-Rules-6571
subscription_name: sdk-Subscriptions-8691
topic_name: sdk-Topics-2081
- name: RulesCreateSqlFilter
azure_rm_rule:
namespace_name: sdk-Namespace-1319
resource_group_name: resourceGroupName
rule_name: sdk-Rules-6571
subscription_name: sdk-Subscriptions-8691
topic_name: sdk-Topics-2081
properties:
filter_type: SqlFilter
sql_filter:
sql_expression: myproperty=test
- name: RulesDelete
azure_rm_rule:
namespace_name: sdk-Namespace-1319
resource_group_name: ArunMonocle
rule_name: sdk-Rules-6571
subscription_name: sdk-Subscriptions-8691
topic_name: sdk-Topics-2081
'''
RETURN = '''
id:
description:
- Resource Id
returned: always
type: str
sample: null
name:
description:
- Resource name
returned: always
type: str
sample: null
type:
description:
- Resource type
returned: always
type: str
sample: null
action:
description:
- >-
Represents the filter actions which are allowed for the transformation of
a message that have been matched by a filter expression.
returned: always
type: dict
sample: null
contains:
sqlexpression:
description:
- SQL expression. e.g. MyProperty='ABC'
returned: always
type: str
sample: null
compatibility_level:
description:
- >-
This property is reserved for future use. An integer value showing the
compatibility level, currently hard-coded to 20.
returned: always
type: integer
sample: null
requires_preprocessing:
description:
- Value that indicates whether the rule action requires preprocessing.
returned: always
type: bool
sample: null
filter_type:
description:
- Filter type that is evaluated against a BrokeredMessage.
returned: always
type: sealed-choice
sample: null
sqlfilter:
description:
- Properties of sqlFilter
returned: always
type: dict
sample: null
contains:
sqlexpression:
description:
- The SQL expression. e.g. MyProperty='ABC'
returned: always
type: str
sample: null
compatibility_level:
description:
- >-
This property is reserved for future use. An integer value showing the
compatibility level, currently hard-coded to 20.
returned: always
type: integer
sample: null
requires_preprocessing:
description:
- Value that indicates whether the rule action requires preprocessing.
returned: always
type: bool
sample: null
properties:
description:
- dictionary object for custom filters
returned: always
type: dictionary
sample: null
correlation_id:
description:
- Identifier of the correlation.
returned: always
type: str
sample: null
message_id:
description:
- Identifier of the message.
returned: always
type: str
sample: null
to:
description:
- Address to send to.
returned: always
type: str
sample: null
reply_to:
description:
- Address of the queue to reply to.
returned: always
type: str
sample: null
label:
description:
- Application specific label.
returned: always
type: str
sample: null
session_id:
description:
- Session identifier.
returned: always
type: str
sample: null
reply_to_session_id:
description:
- Session identifier to reply to.
returned: always
type: str
sample: null
content_type:
description:
- Content type of the message.
returned: always
type: str
sample: null
requires_preprocessing:
description:
- Value that indicates whether the rule action requires preprocessing.
returned: always
type: bool
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.service import ServiceBusManagementClient
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMRule(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
resource_group_name=dict(
type='str',
required=True
),
namespace_name=dict(
type='str',
required=True
),
topic_name=dict(
type='str',
required=True
),
subscription_name=dict(
type='str',
required=True
),
rule_name=dict(
type='str',
required=True
),
action=dict(
type='dict',
disposition='/action',
options=dict(
sqlexpression=dict(
type='str',
disposition='sqlexpression'
),
compatibility_level=dict(
type='integer',
disposition='compatibility_level'
),
requires_preprocessing=dict(
type='bool',
disposition='requires_preprocessing'
)
)
),
filter_type=dict(
type='sealed-choice',
disposition='/filter_type'
),
sqlfilter=dict(
type='dict',
disposition='/sqlfilter',
options=dict(
sqlexpression=dict(
type='str',
disposition='sqlexpression'
),
compatibility_level=dict(
type='integer',
disposition='compatibility_level'
),
requires_preprocessing=dict(
type='bool',
disposition='requires_preprocessing'
)
)
),
properties=dict(
type='dictionary',
disposition='/properties'
),
correlation_id=dict(
type='str',
disposition='/correlation_id'
),
message_id=dict(
type='str',
disposition='/message_id'
),
to=dict(
type='str',
disposition='/to'
),
reply_to=dict(
type='str',
disposition='/reply_to'
),
label=dict(
type='str',
disposition='/label'
),
session_id=dict(
type='str',
disposition='/session_id'
),
reply_to_session_id=dict(
type='str',
disposition='/reply_to_session_id'
),
content_type=dict(
type='str',
disposition='/content_type'
),
requires_preprocessing=dict(
type='bool',
disposition='/requires_preprocessing'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group_name = None
self.namespace_name = None
self.topic_name = None
self.subscription_name = None
self.rule_name = None
self.body = {}
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMRule, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(ServiceBusManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-04-01')
old_response = self.get_resource()
if not old_response:
if self.state == 'present':
self.to_do = Actions.Create
else:
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
self.results['modifiers'] = modifiers
self.results['compare'] = []
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.results['changed'] = True
if self.check_mode:
return self.results
response = self.create_update_resource()
elif self.to_do == Actions.Delete:
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_resource()
else:
self.results['changed'] = False
response = old_response
return self.results
def create_update_resource(self):
try:
response = self.mgmt_client.rules.create_or_update(resource_group_name=self.resource_group_name,
namespace_name=self.namespace_name,
topic_name=self.topic_name,
subscription_name=self.subscription_name,
rule_name=self.rule_name,
parameters=self.body)
if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Rule instance.')
self.fail('Error creating the Rule instance: {0}'.format(str(exc)))
return response.as_dict()
def delete_resource(self):
try:
response = self.mgmt_client.rules.delete(resource_group_name=self.resource_group_name,
namespace_name=self.namespace_name,
topic_name=self.topic_name,
subscription_name=self.subscription_name,
rule_name=self.rule_name)
except CloudError as e:
self.log('Error attempting to delete the Rule instance.')
self.fail('Error deleting the Rule instance: {0}'.format(str(e)))
return True
def get_resource(self):
try:
response = self.mgmt_client.rules.get(resource_group_name=self.resource_group_name,
namespace_name=self.namespace_name,
topic_name=self.topic_name,
subscription_name=self.subscription_name,
rule_name=self.rule_name)
except CloudError as e:
return False
return response.as_dict()
def main():
AzureRMRule()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
03e77f8ed10d786ef8de43b60b2bbf9005ab7364 | 4b396133518de31120045ebca90b6679b243784c | /LEVEL_3/Food_Security/LEVEL_3_Calc_Food_Security.py | 1187bd026a1baec068ab7bf30f4a595fb93c3df2 | [
"MIT"
]
| permissive | metameta-research/WaporTranslator | 55657af8eee574f02fdfdbb7bd755b3de0538255 | 2827c464268037817b309b229af8b4a69014e47b | refs/heads/master | 2020-08-27T18:42:57.025814 | 2019-10-23T12:39:16 | 2019-10-23T12:39:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,972 | py | # -*- coding: utf-8 -*-
"""
WaterSat
author: Tim Martijn Hessels
Created on Tue Oct 1 13:25:22 2019
"""
import os
import gdal
import numpy as np
import pandas as pd
import warnings
import datetime
import WaporTranslator.LEVEL_1.Input_Data as Inputs
import WaporTranslator.LEVEL_1.DataCube as DataCube
import WaporTranslator.LEVEL_2.Functions as Functions
import watertools.General.raster_conversions as RC
def main(Start_year_analyses, End_year_analyses, output_folder):
# Do not show non relevant warnings
warnings.filterwarnings("ignore")
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
Startdate = "%s-01-01" %Start_year_analyses
Enddate = "%s-12-31" %End_year_analyses
# Define dates
Dates = Functions.Get_Dekads(Start_year_analyses, End_year_analyses)
Dates_Years = pd.date_range(Startdate, Enddate, freq = "AS")
# Get path and formats
Paths = Inputs.Input_Paths()
Formats = Inputs.Input_Formats()
Conversions = Inputs.Input_Conversions()
# Set example file
example_file = os.path.join(output_folder, "LEVEL_1", "MASK", "MASK.tif")
# Open Mask
dest_mask = gdal.Open(example_file)
MASK = dest_mask.GetRasterBand(1).ReadAsArray()
# Define output folder LEVEL 3
output_folder_L3 = os.path.join(output_folder, "LEVEL_3", "Food_Security")
if not os.path.exists(output_folder_L3):
os.makedirs(output_folder_L3)
################################# Dynamic maps #################################
CropType = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.CropType), Formats.CropType, list(Dates_Years), Conversion = Conversions.CropType, Variable = 'CropType', Product = '', Unit = '-')
CropClass = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.CropClass), Formats.CropClass, list(Dates_Years), Conversion = Conversions.CropClass, Variable = 'CropClass', Product = '', Unit = '-')
ET0 = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.ET0), Formats.ET0, Dates, Conversion = Conversions.ET0, Example_Data = example_file, Mask_Data = example_file, gap_filling = 1, reprojection_type = 2, Variable = 'ET0', Product = 'WAPOR', Unit = 'mm/day')
ET = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.ET), Formats.ET, Dates, Conversion = Conversions.ET, Example_Data = example_file, Mask_Data = example_file, gap_filling = 1, reprojection_type = 2, Variable = 'ET', Product = 'WAPOR', Unit = 'mm/day')
P = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.P), Formats.P, Dates, Conversion = Conversions.P, Example_Data = example_file, Mask_Data = example_file, gap_filling = 1, reprojection_type = 2, Variable = 'P', Product = 'WAPOR', Unit = 'mm/day')
NPP = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.NPP), Formats.NPP, Dates, Conversion = Conversions.NPP, Example_Data = example_file, Mask_Data = example_file, Variable = 'NPP', Product = 'WAPOR', Unit = 'kg/ha/day')
Pcum = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Cumulative_P), Formats.Cumulative_P, Dates, Conversion = Conversions.Cumulative_P, Variable = 'Pcum', Product = '', Unit = 'mm')
ETcum = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Cumulative_ET), Formats.Cumulative_ET, Dates, Conversion = Conversions.Cumulative_ET, Variable = 'ETcum', Product = '', Unit = 'mm')
NPPcum = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Cumulative_NPP), Formats.Cumulative_NPP, Dates, Conversion = Conversions.Cumulative_NPP, Variable = 'NPPcum', Product = '', Unit = 'kg/ha')
Avail_Water_Depl = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Available_Before_Depletion), Formats.Available_Before_Depletion, Dates, Conversion = Conversions.Available_Before_Depletion, Variable = 'Available Water Depletion', Product = '', Unit = 'mm')
Critical_Soil_Moisture = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Critical_Soil_Moisture), Formats.Critical_Soil_Moisture, Dates, Conversion = Conversions.Critical_Soil_Moisture, Variable = 'Critical Soil Moisture', Product = 'SoilGrids', Unit = 'cm3/cm3')
Soil_Moisture = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Soil_Moisture), Formats.Soil_Moisture, Dates, Conversion = Conversions.Soil_Moisture, Variable = 'Soil Moisture', Product = '', Unit = 'cm3/cm3')
Crop_S1_End = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Season_End_S1), Formats.Season_End_S1, list(Dates_Years), Conversion = Conversions.Season_End_S1, Variable = 'Season 1 End', Product = '', Unit = 'DOY')
Crop_S2_End = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Season_End_S2), Formats.Season_End_S2, list(Dates_Years), Conversion = Conversions.Season_End_S2, Variable = 'Season 2 End', Product = '', Unit = 'DOY')
Per_Start = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Perenial_Start), Formats.Perenial_Start, list(Dates_Years), Conversion = Conversions.Perenial_Start, Variable = 'Perenial Start', Product = '', Unit = 'DOY')
Per_End = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Perenial_End), Formats.Perenial_End, list(Dates_Years), Conversion = Conversions.Perenial_End, Variable = 'Perenial End', Product = '', Unit = 'DOY')
################################# Static maps #################################
Clay = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Clay), Formats.Clay.format(level=6), Dates = None, Conversion = Conversions.Clay, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'Clay', Product = 'SoilGrids', Unit = 'Percentage')
Silt = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Silt), Formats.Silt.format(level=6), Dates = None, Conversion = Conversions.Silt, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'Silt', Product = 'SoilGrids', Unit = 'Percentage')
Sand = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Sand), Formats.Sand.format(level=6), Dates = None, Conversion = Conversions.Sand, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'Sand', Product = 'SoilGrids', Unit = 'Percentage')
DEM = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.DEM), Formats.DEM, Dates = None, Conversion = Conversions.DEM, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'DEM', Product = 'SRTM', Unit = 'm')
Theta_WP_Subsoil = DataCube.Rasterdata_tiffs(os.path.join(output_folder, Paths.Theta_WP_Subsoil), Formats.Theta_WP_Subsoil, Dates = None, Conversion = Conversions.Theta_WP_Subsoil, Example_Data = example_file, Mask_Data = example_file, reprojection_type = 2, Variable = 'Theta Wilting Point Subsoil', Product = 'SoilGrids', Unit = 'cm3/cm3')
################################# Calculate Irrigation yes/no #################################
Irrigation = Calc_Irrigation(Pcum, ETcum, Avail_Water_Depl, MASK)
Irrigation.Save_As_Tiff(os.path.join(output_folder_L3, "Irrigation"))
################################# Calculate Irrigation yes/no #################################
Grassland_Data = np.where(CropType.Data==3, 1, np.nan)
Grassland = DataCube.Rasterdata_Empty()
Grassland.Data = Grassland_Data * MASK
Grassland.Projection = Irrigation.Projection
Grassland.GeoTransform = Irrigation.GeoTransform
Grassland.Ordinal_time = np.array(list(map(lambda i : i.toordinal(), Dates_Years)))
Grassland.Size = Grassland_Data.shape
Grassland.Variable = "Grassland Yearly"
Grassland.Unit = "-"
Grassland.Save_As_Tiff(os.path.join(output_folder_L3, "Grassland"))
######################## Calculate days in each dekads #################################
Days_in_Dekads = np.append(ET.Ordinal_time[1:] - ET.Ordinal_time[:-1], 11)
################################# Calculate yearly irrigation maps #################################
Threshold = 10 # if this threshold or more decades were irrigation, the irrigation type is irrigation
Irrigation_year_Data = np.ones([len(Dates_Years), Irrigation.Size[1], Irrigation.Size[2]]) * np.nan
for Date_Year in Dates_Years:
Start = (Date_Year.year - Dates_Years[0].year) * 36
End = Start + 36
Irrigation_year_Data[Dates_Years == Date_Year, :, :] = np.where(np.nansum(Irrigation.Data[Start:End, :, :], axis = 0)>=Threshold, 1, 0)
Irrigation_Yearly = DataCube.Rasterdata_Empty()
Irrigation_Yearly.Data = Irrigation_year_Data * MASK
Irrigation_Yearly.Projection = Irrigation.Projection
Irrigation_Yearly.GeoTransform = Irrigation.GeoTransform
Irrigation_Yearly.Ordinal_time = np.array(list(map(lambda i : i.toordinal(), Dates_Years)))
Irrigation_Yearly.Size = Irrigation_year_Data.shape
Irrigation_Yearly.Variable = "Irrigation Yearly"
Irrigation_Yearly.Unit = "-"
Irrigation_Yearly.Save_As_Tiff(os.path.join(output_folder_L3, "Irrigation_Maps_Yearly"))
################################# Calculate Aridity Array #################################
Aridity = Calc_Aridity(ET0, P, MASK)
################################# Calculate Slope Array #################################
Slope = Calc_Slope(DEM, MASK)
################################# Calculate Crop Season and LU #################################
Season_Type = Calc_Crops(CropType, CropClass, MASK)
################################# Created AEZ numbers #################################
AEZ = Calc_AEZ(Irrigation_Yearly, Aridity, Slope, DEM, Clay, Silt, Sand, Season_Type, MASK)
AEZ.Save_As_Tiff(os.path.join(output_folder_L3, "AEZ"))
################################# Create dictionary for all AEZ #################################
L3_AEZ_AREA = dict()
L3_AEZ_Yield = dict()
################################# Calculate Areas every AEZ per year #################################
AREA_M2 = Degrees_to_m2(example_file)
for AEZ_ID in np.unique(AEZ.Data[~np.isnan(AEZ.Data)]):
L3_AEZ_AREA[int(AEZ_ID)] = np.nansum(np.where(AEZ.Data == AEZ_ID, AREA_M2[None, :, :], np.nan), axis = (1,2))/1e4 # in hectare
################################# Calculate actual biomass production #################################
C3_C4 = 0.45
Actual_Biomass_Production_Data = NPP.Data/C3_C4
Actual_Biomass_Production = DataCube.Rasterdata_Empty()
Actual_Biomass_Production.Data = Actual_Biomass_Production_Data * MASK
Actual_Biomass_Production.Projection = NPP.Projection
Actual_Biomass_Production.GeoTransform = NPP.GeoTransform
Actual_Biomass_Production.Ordinal_time = NPP.Ordinal_time
Actual_Biomass_Production.Size = Actual_Biomass_Production_Data.shape
Actual_Biomass_Production.Variable = "Actual Biomass Production"
Actual_Biomass_Production.Unit = "kg-ha-1-d-1"
Actual_Biomass_Production.Save_As_Tiff(os.path.join(output_folder_L3, "Actual_Biomass_Production"))
del Actual_Biomass_Production_Data
################################# Calculate Spatial Target Actual Biomass Production #################################
AEZ_decads = np.repeat(AEZ.Data, 36, 0).reshape([36 * AEZ.Size[0], AEZ.Size[1], AEZ.Size[2]])
L3_AEZ_NPP = dict()
for AEZ_ID in np.unique(AEZ.Data[~np.isnan(AEZ.Data)]):
L3_AEZ_NPP[int(AEZ_ID)] = np.nanpercentile(np.where(AEZ_decads == AEZ_ID, Actual_Biomass_Production.Data, np.nan), 99, axis=(1,2))
################################# Create spatial target maps #################################
NPP_target_Data = np.ones(Actual_Biomass_Production.Size) * np.nan
for AEZ_ID in np.unique(AEZ.Data[~np.isnan(AEZ.Data)]):
NPP_target_Data = np.where(AEZ_decads == AEZ_ID, L3_AEZ_NPP[int(AEZ_ID)][:, None, None], NPP_target_Data)
Target_Biomass_Production = DataCube.Rasterdata_Empty()
Target_Biomass_Production.Data = NPP_target_Data * MASK
Target_Biomass_Production.Projection = NPP.Projection
Target_Biomass_Production.GeoTransform = NPP.GeoTransform
Target_Biomass_Production.Ordinal_time = NPP.Ordinal_time
Target_Biomass_Production.Size = NPP_target_Data.shape
Target_Biomass_Production.Variable = "Target Biomass Production"
Target_Biomass_Production.Unit = "kg-ha-1-d-1"
Target_Biomass_Production.Save_As_Tiff(os.path.join(output_folder_L3, "Target_Biomass_Production"))
del NPP_target_Data
################################# Dekadal production gap Spatial #################################
Production_Gap_Spatial_Data = np.minimum(Actual_Biomass_Production.Data - Target_Biomass_Production.Data, 0)
Production_Gap_Spatial = DataCube.Rasterdata_Empty()
Production_Gap_Spatial.Data = Production_Gap_Spatial_Data * MASK
Production_Gap_Spatial.Projection = NPP.Projection
Production_Gap_Spatial.GeoTransform = NPP.GeoTransform
Production_Gap_Spatial.Ordinal_time = NPP.Ordinal_time
Production_Gap_Spatial.Size = Production_Gap_Spatial_Data.shape
Production_Gap_Spatial.Variable = "Production Gap Spatial"
Production_Gap_Spatial.Unit = "kg-ha-1-d-1"
Production_Gap_Spatial.Save_As_Tiff(os.path.join(output_folder_L3, "Production_Gap_Spatial"))
del Production_Gap_Spatial_Data
################################# Calculate 10 year mean biomass production #################################
Total_years = int(np.ceil(Actual_Biomass_Production.Size[0]/36))
Mean_Biomass_Production_Data = np.ones([36, Actual_Biomass_Production.Size[1], Actual_Biomass_Production.Size[2]]) * np.nan
for dekad in range(0,36):
IDs = np.array(range(0, Total_years)) * 36 + dekad
IDs_good = IDs[IDs<=Actual_Biomass_Production.Size[0]]
Mean_Biomass_Production_Data[dekad, :, :] = np.nanmean(Actual_Biomass_Production.Data[IDs_good,:,:], axis = 0)
Mean_Biomass_Production = DataCube.Rasterdata_Empty()
Mean_Biomass_Production.Data = Mean_Biomass_Production_Data * MASK
Mean_Biomass_Production.Projection = Actual_Biomass_Production.Projection
Mean_Biomass_Production.GeoTransform = Actual_Biomass_Production.GeoTransform
Mean_Biomass_Production.Ordinal_time = "Long_Term_Decade"
Mean_Biomass_Production.Size = Mean_Biomass_Production_Data.shape
Mean_Biomass_Production.Variable = "10Y Mean Biomass Production"
Mean_Biomass_Production.Unit = "kg-ha-1-d-1"
Mean_Biomass_Production.Save_As_Tiff(os.path.join(output_folder_L3, "Mean_Biomass_Production"))
del Mean_Biomass_Production_Data
################################# production gap Temporal #################################
Production_Gap_Temporal_Data = (Actual_Biomass_Production.Data - np.tile(Mean_Biomass_Production.Data, (Total_years, 1, 1)))
Production_Gap_Temporal = DataCube.Rasterdata_Empty()
Production_Gap_Temporal.Data = Production_Gap_Temporal_Data * MASK
Production_Gap_Temporal.Projection = NPP.Projection
Production_Gap_Temporal.GeoTransform = NPP.GeoTransform
Production_Gap_Temporal.Ordinal_time = NPP.Ordinal_time
Production_Gap_Temporal.Size = Production_Gap_Temporal_Data.shape
Production_Gap_Temporal.Variable = "Production Gap Temporal"
Production_Gap_Temporal.Unit = "kg-ha-1-d-1"
Production_Gap_Temporal.Save_As_Tiff(os.path.join(output_folder_L3, "Production_Gap_Temporal"))
del Production_Gap_Temporal_Data
################################# Soil Moisture stress #################################
Soil_Moisture_Stress_Data = np.maximum(0.0,(Critical_Soil_Moisture.Data-Soil_Moisture.Data)/(Critical_Soil_Moisture.Data-Theta_WP_Subsoil.Data[None, :, :]))
Soil_Moisture_Stress = DataCube.Rasterdata_Empty()
Soil_Moisture_Stress.Data = Soil_Moisture_Stress_Data * MASK
Soil_Moisture_Stress.Projection = NPP.Projection
Soil_Moisture_Stress.GeoTransform = NPP.GeoTransform
Soil_Moisture_Stress.Ordinal_time = NPP.Ordinal_time
Soil_Moisture_Stress.Size = Soil_Moisture_Stress_Data.shape
Soil_Moisture_Stress.Variable = "Soil Moisture Stress"
Soil_Moisture_Stress.Unit = "-"
del Soil_Moisture_Stress_Data
Soil_Moisture_Stress.Save_As_Tiff(os.path.join(output_folder_L3, "Soil_Moisture_Stress"))
################################# Production gap due to soil moisture #################################
Production_Gap_Soil_Moisture_Data = np.where(Soil_Moisture_Stress.Data<0., 0., -Actual_Biomass_Production.Data * Soil_Moisture_Stress.Data) # !!! heb hier keer van gemaakt
Production_Gap_Soil_Moisture_Data = Production_Gap_Soil_Moisture_Data.clip(-10000, 0)
Production_Gap_Soil_Moisture = DataCube.Rasterdata_Empty()
Production_Gap_Soil_Moisture.Data = Production_Gap_Soil_Moisture_Data * MASK
Production_Gap_Soil_Moisture.Projection = NPP.Projection
Production_Gap_Soil_Moisture.GeoTransform = NPP.GeoTransform
Production_Gap_Soil_Moisture.Ordinal_time = NPP.Ordinal_time
Production_Gap_Soil_Moisture.Size = Production_Gap_Soil_Moisture_Data.shape
Production_Gap_Soil_Moisture.Variable = "Production Gap Due to Soil Moisture"
Production_Gap_Soil_Moisture.Unit = "kg-ha-1-d-1"
Production_Gap_Soil_Moisture.Save_As_Tiff(os.path.join(output_folder_L3, "Production_Gap_Soil_Moisture"))
del Production_Gap_Soil_Moisture_Data
################################# Water Unlimited Biomass Production #################################
Water_Unlimited_Biomass_Production_Data = (Actual_Biomass_Production.Data - Production_Gap_Soil_Moisture.Data)/Days_in_Dekads[:, None, None]
Water_Unlimited_Biomass_Production = DataCube.Rasterdata_Empty()
Water_Unlimited_Biomass_Production.Data = Water_Unlimited_Biomass_Production_Data * MASK
Water_Unlimited_Biomass_Production.Projection = NPP.Projection
Water_Unlimited_Biomass_Production.GeoTransform = NPP.GeoTransform
Water_Unlimited_Biomass_Production.Ordinal_time = NPP.Ordinal_time
Water_Unlimited_Biomass_Production.Size = Water_Unlimited_Biomass_Production_Data.shape
Water_Unlimited_Biomass_Production.Variable = "Water Unlimited Biomass Production"
Water_Unlimited_Biomass_Production.Unit = "kg-ha-1-d-1"
Water_Unlimited_Biomass_Production.Save_As_Tiff(os.path.join(output_folder_L3, "Water_Unlimited_Biomass_Production"))
del Water_Unlimited_Biomass_Production_Data
################################# Accumulated NPP season #################################
# For perenial crop clip the season at start and end year
Accumulated_NPP_Data_Start = np.ones(Per_Start.Size) * np.nan
Accumulated_NPP_Data_End = np.ones(Per_Start.Size) * np.nan
'''
Start_Array = np.maximum(0, Per_Start.Data)
End_Array = np.minimum(35, Per_End.Data)
for Date_Year in Dates_Years:
year_diff = int(Date_Year.year - Dates_Years[0].year)
for dekad in range(0,36):
Accumulated_NPP_Data_Start[year_diff, Start_Array[year_diff, :, :] == dekad] = NPPcum.Data[int(year_diff * 36 + dekad), Start_Array[year_diff, :, :] == dekad]
Accumulated_NPP_Data_End[year_diff, End_Array[year_diff, :, :] == dekad] = NPPcum.Data[int(year_diff * 36 + dekad-1), End_Array[year_diff, :, :] == dekad]
'''
for Date_Year in Dates_Years:
year_diff = int(Date_Year.year - Dates_Years[0].year)
Harvest = np.where(Per_End.Data[year_diff, :, :]<37, 1, 0)
for dekad in range(int(np.nanmin(Per_Start.Data[year_diff, :, :])), 36):
Accumulated_NPP_Data_Start[year_diff, np.logical_and(Per_Start.Data[year_diff, :, :] == dekad, Harvest==1)] = NPPcum.Data[int(year_diff * 36 + dekad), np.logical_and(Per_Start.Data[year_diff, :, :] == dekad, Harvest==1)]
for dekad in range(0,37):
Accumulated_NPP_Data_End[year_diff, Per_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[int(year_diff * 36 + dekad-1), Per_End.Data[year_diff, :, :] == dekad]
Accumulated_NPP_Data_Per = Accumulated_NPP_Data_End - Accumulated_NPP_Data_Start
# For other crops (double and single) take the start and end of the seasons
Accumulated_NPP_Data_Start_S1 = np.ones(Per_Start.Size) * np.nan
Accumulated_NPP_Data_Start_S2 = np.ones(Per_Start.Size) * np.nan
if not np.isnan(np.nanmean(Crop_S1_End.Data)):
for Date_Year in Dates_Years:
year_diff = int(Date_Year.year - Dates_Years[0].year)
for dekad in range(0,int(np.nanmax(Crop_S2_End.Data))):
Accumulated_NPP_Data_Start_S1[year_diff, Crop_S1_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[np.minimum(NPPcum.Size[0]-1, int(year_diff * 36 + dekad)), Crop_S1_End.Data[year_diff, :, :] == dekad]
Accumulated_NPP_Data_Start_S2[year_diff, Crop_S2_End.Data[year_diff, :, :] == dekad] = NPPcum.Data[np.minimum(NPPcum.Size[0]-1, int(year_diff * 36 + dekad-1)), Crop_S2_End.Data[year_diff, :, :] == dekad]
Accumulated_NPP_Data_Start_S1[np.isnan(Accumulated_NPP_Data_Start_S1)] = 0
Accumulated_NPP_Data_Start_S2[np.isnan(Accumulated_NPP_Data_Start_S2)] = 0
# Calculate pasture
Accumulated_NPP_Data_Past = np.ones(Per_Start.Size) * np.nan
for Date_Year in Dates_Years:
year_diff = int(Date_Year.year - Dates_Years[0].year)
dekad = 35 # Always take end in pasture
Accumulated_NPP_Data_Past[year_diff, Season_Type.Data[year_diff, :, :] == 4] = NPPcum.Data[int(year_diff * 36 + dekad), Season_Type.Data[year_diff, :, :] == 4]
Accumulated_NPP_Data_Past[np.isnan(Accumulated_NPP_Data_Past)] = 0
Accumulated_NPP_Data_Per[np.isnan(Accumulated_NPP_Data_Per)] = 0
# Add all seasons
Accumulated_NPP_Data = Accumulated_NPP_Data_Start_S1 + Accumulated_NPP_Data_Start_S2 + Accumulated_NPP_Data_Per + Accumulated_NPP_Data_Past
Accumulated_NPP_Data[Accumulated_NPP_Data==0] = np.nan
Accumulated_NPP = DataCube.Rasterdata_Empty()
Accumulated_NPP.Data = Accumulated_NPP_Data * MASK
Accumulated_NPP.Projection = Per_Start.Projection
Accumulated_NPP.GeoTransform = Per_Start.GeoTransform
Accumulated_NPP.Ordinal_time = Per_Start.Ordinal_time
Accumulated_NPP.Size = Accumulated_NPP_Data.shape
Accumulated_NPP.Variable = "Accumulated NPP Season"
Accumulated_NPP.Unit = "kg-ha-1-season-1"
Accumulated_NPP.Save_As_Tiff(os.path.join(output_folder_L3, "Accumulated_NPP_Season"))
################################# Accumulated Biomass Production season #################################
Accumulated_Biomass_Production_Data = Accumulated_NPP.Data/C3_C4
Accumulated_Biomass_Production = DataCube.Rasterdata_Empty()
Accumulated_Biomass_Production.Data = Accumulated_Biomass_Production_Data * MASK
Accumulated_Biomass_Production.Projection = Per_Start.Projection
Accumulated_Biomass_Production.GeoTransform = Per_Start.GeoTransform
Accumulated_Biomass_Production.Ordinal_time = Per_Start.Ordinal_time
Accumulated_Biomass_Production.Size = Accumulated_Biomass_Production_Data.shape
Accumulated_Biomass_Production.Variable = "Accumulated Biomass Production"
Accumulated_Biomass_Production.Unit = "kg-ha-1-season-1"
Accumulated_Biomass_Production.Save_As_Tiff(os.path.join(output_folder_L3, "Accumulated_Biomass_Production_Season"))
################################# Calculate Yield season #################################
Harvest_Index = 0.35
Moisture_Index = 0.15
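    # Seasonal yield: harvest index times accumulated biomass (NPPcum / C3_C4),
    # scaled from dry matter to fresh weight with the moisture index.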
Yield_Data = Harvest_Index * ((Accumulated_NPP.Data)/C3_C4)/(1 - Moisture_Index)
Yield = DataCube.Rasterdata_Empty()
Yield.Data = Yield_Data * MASK
Yield.Projection = Per_Start.Projection
Yield.GeoTransform = Per_Start.GeoTransform
Yield.Ordinal_time = Per_Start.Ordinal_time
Yield.Size = Yield_Data.shape
Yield.Variable = "Yield Season"
Yield.Unit = "kg-ha-1-season-1"
Yield.Save_As_Tiff(os.path.join(output_folder_L3, "Yield"))
################################# Calculate Fresh Grass Yield season #################################
Yield_Fresh_Grass_Data = 0.45 * ((Accumulated_NPP.Data)/C3_C4)/(1 - 0.6)
Yield_Fresh_Grass = DataCube.Rasterdata_Empty()
Yield_Fresh_Grass.Data = Yield_Fresh_Grass_Data * MASK
Yield_Fresh_Grass.Projection = Per_Start.Projection
Yield_Fresh_Grass.GeoTransform = Per_Start.GeoTransform
Yield_Fresh_Grass.Ordinal_time = Per_Start.Ordinal_time
Yield_Fresh_Grass.Size = Yield_Fresh_Grass_Data.shape
Yield_Fresh_Grass.Variable = "Accumulated Yield Season"
Yield_Fresh_Grass.Unit = "kg-ha-1-season-1"
Yield_Fresh_Grass.Save_As_Tiff(os.path.join(output_folder_L3, "Yield_Fresh_Grass"))
################################# Calculate Mean Biomass Production over every AEZ per year #################################
L3_AEZ_Bio = dict()
for AEZ_ID in np.unique(AEZ.Data[~np.isnan(AEZ.Data)]):
L3_AEZ_Bio[int(AEZ_ID)] = np.nanmean(np.where(AEZ.Data == AEZ_ID, Accumulated_Biomass_Production.Data, np.nan), axis = (1,2))
################################# Calculate Mean Yield over every AEZ per year #################################
L3_AEZ_Yield = dict()
for AEZ_ID in np.unique(AEZ.Data[~np.isnan(AEZ.Data)]):
L3_AEZ_Yield[int(AEZ_ID)] = np.nanmean(np.where(AEZ.Data == AEZ_ID, Yield.Data, np.nan), axis = (1,2))
################################# Calculate Mean Yield Fresh Grass over every AEZ per year #################################
L3_AEZ_Yield_Fresh_Grass = dict()
for AEZ_ID in np.unique(AEZ.Data[~np.isnan(AEZ.Data)]):
L3_AEZ_Yield_Fresh_Grass[int(AEZ_ID)] = np.nanmean(np.where(AEZ.Data == AEZ_ID, Yield_Fresh_Grass.Data, np.nan), axis = (1,2))
################################# Create CSVs #################################
# Create Aridity AEZ
dict_names = AEZ_Names()
for Date_Year in Dates_Years:
year_diff = int(Date_Year.year - Dates_Years[0].year)
year = Date_Year.year
filename_logfile =os.path.join(output_folder_L3, "CSV_%s.csv" %year)
textfile = open(filename_logfile,'w')
text_first_line = "AEZ, Croptype, Rainfed/Irrigated, Soiltype, Elevation, Slope, Climate, Area (ha), Biomass Production (kg/ha/Season), Yield (ton/ha), Food Production (tonnes)\n"
textfile.write(text_first_line)
for AEZ_ID in np.unique(AEZ.Data[~np.isnan(AEZ.Data)]):
AEZ_str = str(int(AEZ_ID))
CROPTYPE = dict_names["Crop"][int(AEZ_str[0:1])]
RAINIRRI = dict_names["Irrigated"][int(AEZ_str[1:2])]
SOILTYPE = dict_names["Soil"][int(AEZ_str[2:4])]
ELEVATION = dict_names["Elevation"][int(AEZ_str[4:5])]
SLOPE = dict_names["Slope"][int(AEZ_str[5:6])]
CLIMATE = dict_names["Aridity"][int(AEZ_str[6:7])]
AREA = L3_AEZ_AREA[AEZ_ID][year_diff]
BIO = L3_AEZ_Bio[AEZ_ID][year_diff]
if int(AEZ_str[0:1]) < 4:
YIELD = L3_AEZ_Yield[AEZ_ID][year_diff]
else:
YIELD = L3_AEZ_Yield_Fresh_Grass[AEZ_ID][year_diff]
FOOD = YIELD * AREA
text_one_line = "%s, %s, %s, %s, %s, %s, %s, %.2f, %.2f, %.2f, %.2f\n" %(int(AEZ_ID), CROPTYPE, RAINIRRI, SOILTYPE, ELEVATION, SLOPE, CLIMATE, AREA, BIO, YIELD, FOOD)
textfile.write(text_one_line)
textfile.close()
return()
def Calc_Irrigation(Pcum, ETcum, Avail_Water_Depl, MASK):
Irrigation_Data = np.where(np.abs(Pcum.Data - ETcum.Data) > Avail_Water_Depl.Data, 1, 0)
Irrigation = DataCube.Rasterdata_Empty()
Irrigation.Data = Irrigation_Data * MASK
Irrigation.Projection = Pcum.Projection
Irrigation.GeoTransform = Pcum.GeoTransform
Irrigation.Ordinal_time = Pcum.Ordinal_time
Irrigation.Size = Irrigation_Data.shape
Irrigation.Variable = "Irrigation"
Irrigation.Unit = "-"
return(Irrigation)
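# Added note (not part of the original script): Calc_Irrigation marks a pixel as irrigated (1)
# whenever the gap between cumulative precipitation and cumulative ET exceeds the locally
# available water depletion. With illustrative values Pcum = 120 mm, ETcum = 210 mm and
# Avail_Water_Depl = 60 mm, |120 - 210| = 90 > 60, so that pixel would be flagged as irrigated.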
def Calc_Dekads_range(Startdate, Enddate):
# Get dekads time steps
Enddate_datetime = datetime.datetime.strptime(Enddate, "%Y-%m-%d")
Years = pd.date_range(Startdate, Enddate, freq = "AS")
Dates_dek = []
for Year in Years:
Year_nmbr = Year.year
# Find dates dekades for one year
Startdate_Year = "%d-01-01" %Year_nmbr
Enddate_Year = "%d-12-31" %Year_nmbr
day_dekad_end = 2
if Year == Years[-1]:
Enddate_Year = Enddate_datetime
            day_dekad_end = min(Enddate_datetime.day // 10, 2)  # 0, 1 or 2: the dekad of the month in which the end date falls
# Define dates
Dates = pd.date_range(Startdate_Year, Enddate_Year, freq = "MS")
# Define decade dates
for Date in Dates:
if Date != Dates[-1]:
Dates_dek.append(pd.Timestamp(datetime.datetime(Date.year, Date.month, 1)))
Dates_dek.append(pd.Timestamp(datetime.datetime(Date.year, Date.month, 11)))
Dates_dek.append(pd.Timestamp(datetime.datetime(Date.year, Date.month, 21)))
else:
Dates_dek.append(pd.Timestamp(datetime.datetime(Date.year, Date.month, 1)))
if day_dekad_end > 0:
Dates_dek.append(pd.Timestamp(datetime.datetime(Date.year, Date.month, 11)))
if day_dekad_end > 1:
Dates_dek.append(pd.Timestamp(datetime.datetime(Date.year, Date.month, 21)))
return(Dates_dek)
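# Added example (not part of the original script): Calc_Dekads_range returns the dekad start
# dates (the 1st, 11th and 21st of each month) inside the requested window, so a hypothetical
# call such as
#     Calc_Dekads_range("2019-01-01", "2019-02-15")
# yields timestamps for Jan 1, Jan 11, Jan 21, Feb 1 and Feb 11; the final month is cut off at
# the dekad that contains the end date.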
def Calc_Aridity(ET0, P, MASK):
# Calculate Aridity long term
ET0_long_term = np.nansum(ET0.Data, axis = 0)
P_long_term = np.nansum(P.Data, axis = 0)
# Calculate aridity and reproject to LU map
Aridity_index_data = ET0_long_term / P_long_term
Aridity_index = DataCube.Rasterdata_Empty()
Aridity_index.Data = Aridity_index_data * MASK
Aridity_index.Projection = ET0.Projection
Aridity_index.GeoTransform = ET0.GeoTransform
Aridity_index.Ordinal_time = ''
Aridity_index.Size = Aridity_index_data.shape
Aridity_index.Variable = "Aridity Index"
Aridity_index.Unit = "-"
return(Aridity_index)
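# Added note (not part of the original script): the index above is long-term ET0 divided by
# long-term P, so larger values mean drier conditions; with the version 2.0 thresholds in
# AEZ_Conversions a value above 2 is classed as "Arid", 0.5-2 as "Average" and below 0.5 as "Humid".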
def Calc_Slope(DEM, MASK):
rad2deg = 180.0 / np.pi # Factor to transform from rad to degree
pixel_spacing = 100 # LVL 2 of WAPOR is 100m resolution
DEM_data = DEM.Data
# Calculate slope
x, y = np.gradient(DEM_data, pixel_spacing, pixel_spacing)
hypotenuse_array = np.hypot(x,y)
Slope_data = np.arctan(hypotenuse_array) * rad2deg
Slope = DataCube.Rasterdata_Empty()
Slope.Data = Slope_data * MASK
Slope.Projection = DEM.Projection
Slope.GeoTransform = DEM.GeoTransform
Slope.Ordinal_time = ''
Slope.Size = Slope_data.shape
Slope.Variable = "Slope"
Slope.Unit = "Degrees"
return(Slope)
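# Added note (not part of the original script): the slope above is atan(sqrt((dz/dx)^2 + (dz/dy)^2))
# converted to degrees, with both gradients evaluated on the 100 m WaPOR level-2 pixel spacing.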
def Calc_Crops(CropType, CropClass, MASK):
Type_Data = CropType.Data
Class_Data = CropClass.Data
# Create Season Type map
Season_Type_Data = np.where(Type_Data == 3, 4, Class_Data)
Season_Type = DataCube.Rasterdata_Empty()
Season_Type.Data = Season_Type_Data * MASK
Season_Type.Projection = CropType.Projection
Season_Type.GeoTransform = CropType.GeoTransform
Season_Type.Ordinal_time = CropType.Ordinal_time
Season_Type.Size = Season_Type_Data.shape
Season_Type.Variable = "Season_Type"
Season_Type.Unit = "-"
return(Season_Type)
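# Added note (not part of the original script): Season_Type keeps the crop-class codes
# (1 = single season, 2 = double season, 3 = perennial, matching the labels in AEZ_Names below)
# and relabels pixels whose CropType equals 3 as class 4, which the seasonal yield section
# earlier in the script treats as pasture (fresh-grass yield instead of crop yield).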
def Calc_AEZ(Irrigation_Yearly, Aridity, Slope, DEM, Clay, Silt, Sand, Season_Type, MASK):
# Create Aridity AEZ
dict_ipi = AEZ_Conversions()
AEZ1 = np.ones(Season_Type.Size) * np.nan
AEZ2 = np.ones(Irrigation_Yearly.Size) * np.nan
AEZ3 = np.ones(Clay.Size) * np.nan
AEZ4 = np.ones(DEM.Size) * np.nan
AEZ5 = np.ones(Slope.Size) * np.nan
AEZ6 = np.ones(Aridity.Size) * np.nan
for ID, value in dict_ipi['Crop'].items():
AEZ1[Season_Type.Data==ID] = value
for ID, value in dict_ipi['Irrigated'].items():
AEZ2[Irrigation_Yearly.Data==ID] = value
for ID, value in dict_ipi['Soil'].items():
AEZ3[np.logical_and.reduce((Clay.Data > value[0][0], Clay.Data <= value[0][1],Silt.Data > value[1][0], Silt.Data <= value[1][1], Sand.Data > value[2][0], Sand.Data <= value[2][1]))] = ID
for ID, value in dict_ipi['Elevation'].items():
AEZ4[np.logical_and(DEM.Data > value[0], DEM.Data <= value[1])] = ID
for ID, value in dict_ipi['Slope'].items():
AEZ5[np.logical_and(Slope.Data > value[0], Slope.Data <= value[1])] = ID
for ID, value in dict_ipi['Aridity'].items():
AEZ6[np.logical_and(Aridity.Data > value[0], Aridity.Data <= value[1])] = ID
AEZ_Data = AEZ1 * 1000000 + AEZ2 * 100000 + AEZ3[None, :, :] * 1000 + AEZ4[None, :, :] * 100 + AEZ5[None, :, :] * 10 + AEZ6
AEZ = DataCube.Rasterdata_Empty()
AEZ.Data = AEZ_Data * MASK
AEZ.Projection = Irrigation_Yearly.Projection
AEZ.GeoTransform = Irrigation_Yearly.GeoTransform
AEZ.Ordinal_time = Irrigation_Yearly.Ordinal_time
AEZ.Size = AEZ_Data.shape
AEZ.Variable = "Agro_Ecological_Zonation"
AEZ.Unit = "AEZ"
return(AEZ)
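# Added example (not part of the original script): the AEZ code packs the six classifications
# into one 7-digit number. A pixel with crop class 1, irrigation class 2, soil class 3,
# elevation class 1, slope class 2 and aridity class 3 becomes
#     1*1000000 + 2*100000 + 3*1000 + 1*100 + 2*10 + 3 = 1203123
# which is exactly what the CSV export later unpacks again with the slices AEZ_str[0:1],
# [1:2], [2:4], [4:5], [5:6] and [6:7].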
def Degrees_to_m2(Reference_data):
"""
This functions calculated the area of each pixel in squared meter.
Parameters
----------
Reference_data: str
Path to a tiff file or nc file or memory file of which the pixel area must be defined
Returns
-------
area_in_m2: array
Array containing the area of each pixel in squared meters
"""
try:
# Get the extension of the example data
filename, file_extension = os.path.splitext(Reference_data)
# Get raster information
if str(file_extension) == '.tif':
geo_out, proj, size_X, size_Y = RC.Open_array_info(Reference_data)
elif str(file_extension) == '.nc':
geo_out, epsg, size_X, size_Y, size_Z, Time = RC.Open_nc_info(Reference_data)
except:
        # assume an already-opened gdal.Dataset was passed in instead of a file path
        geo_out = Reference_data.GetGeoTransform()
        size_X = Reference_data.RasterXSize
        size_Y = Reference_data.RasterYSize
# Calculate the difference in latitude and longitude in meters
dlat, dlon = Calc_dlat_dlon(geo_out, size_X, size_Y)
# Calculate the area in squared meters
area_in_m2 = dlat * dlon
return(area_in_m2)
def Calc_dlat_dlon(geo_out, size_X, size_Y):
"""
This functions calculated the distance between each pixel in meter.
Parameters
----------
geo_out: array
geo transform function of the array
size_X: int
size of the X axis
size_Y: int
size of the Y axis
Returns
-------
dlat: array
Array containing the vertical distance between each pixel in meters
dlon: array
Array containing the horizontal distance between each pixel in meters
"""
# Create the lat/lon rasters
lon = np.arange(size_X + 1)*geo_out[1]+geo_out[0] - 0.5 * geo_out[1]
lat = np.arange(size_Y + 1)*geo_out[5]+geo_out[3] - 0.5 * geo_out[5]
dlat_2d = np.array([lat,]*int(np.size(lon,0))).transpose()
dlon_2d = np.array([lon,]*int(np.size(lat,0)))
# Radius of the earth in meters
R_earth = 6371000
# Calculate the lat and lon in radians
lonRad = dlon_2d * np.pi/180
latRad = dlat_2d * np.pi/180
# Calculate the difference in lat and lon
lonRad_dif = abs(lonRad[:,1:] - lonRad[:,:-1])
latRad_dif = abs(latRad[:-1] - latRad[1:])
# Calculate the distance between the upper and lower pixel edge
a = np.sin(latRad_dif[:,:-1]/2) * np.sin(latRad_dif[:,:-1]/2)
    clat = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))
dlat = R_earth * clat
# Calculate the distance between the eastern and western pixel edge
b = np.cos(latRad[1:,:-1]) * np.cos(latRad[:-1,:-1]) * np.sin(lonRad_dif[:-1,:]/2) * np.sin(lonRad_dif[:-1,:]/2)
    clon = 2 * np.arctan2(np.sqrt(b), np.sqrt(1-b))
dlon = R_earth * clon
return(dlat, dlon)
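# Added usage sketch (hypothetical values, not from the original script): the two helpers above
# combine into a per-pixel area grid, e.g.
#     geo_out = (30.0, 0.001, 0, 10.0, 0, -0.001)      # ~0.001 degree (~100 m) pixels
#     dlat, dlon = Calc_dlat_dlon(geo_out, size_X=200, size_Y=100)
#     area_in_m2 = dlat * dlon                          # shape (100, 200), roughly 1.2e4 m2 per pixel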
def AEZ_Conversions(version = '2.0'):
AEZ_V1 = {
'Aridity': {1: [3, 9999],
2: [2, 3],
3: [1.5, 2],
4: [1, 1.5],
5: [0.7, 1],
6: [0.35, 0.7],
7: [-9999, 0.35]},
'Slope': {1: [8, 9999],
2: [4, 8],
3: [1, 4],
4: [-9999, 1]},
'Elevation': {1: [2000, 9999],
2: [1000, 2000],
3: [500, 1000],
4: [100, 500],
5: [-9999, 100]},
'Soil': {1: [[40, 9999],[-9999, 40],[-9999, 45]],
2: [[25, 40],[20, 9999],[20, 9999]],
3: [[25, 40],[40, 9999],[-9999, 20]],
4: [[40, 9999],[40, 9999],[-9999, 20]],
5: [[35, 9999],[-9999, 20],[45, 9999]],
6: [[25, 35],[-9999, 30],[45, 9999]],
7: [[-9999, 20],[-9999, 20],[50, 9999]],
8: [[10, 25],[-9999, 50],[-9999, 50]],
9: [[-9999, 30],[50, 80],[-9999, 50]],
10: [[-9999, 15],[80, 9999],[-9999, 20]],
11: [[-9999, 20],[-9999, 20],[70, 9999]],
12: [[-9999, 10],[-9999, 10],[90, 9999]]},
'Irrigated': {0: 1,
1: 2},
'Crop': {1: 1,
2: 2,
3: 3,
4: 4}
}
AEZ_V2 = {
'Aridity': {1: [2, 9999],
2: [0.5, 2],
3: [-9999, 0.5]},
'Slope': {1: [2, 9999],
2: [-9999, 2]},
'Elevation': {1: [1000, 9999],
2: [500, 1000],
3: [-9999, 500]},
'Soil': {1: [[35, 9999],[-9999, 9999],[-9999, 9999]],
2: [[-9999, 35],[-9999, 9999],[-9999, 55]],
3: [[-9999, 35],[-9999, 9999],[55, 9999]]},
'Irrigated': {0: 1,
1: 2},
'Crop': {1: 1,
2: 2,
3: 3,
4: 4}
}
AEZ_Conversions = dict()
AEZ_Conversions['1.0'] = AEZ_V1
AEZ_Conversions['2.0'] = AEZ_V2
return AEZ_Conversions[version]
def AEZ_Names(version = '2.0'):
AEZ_V1 = {
'Aridity': {1: "Hyper-Arid",
2: "Arid",
3: "Semi-Arid",
4: "Dry Sub-Humid",
5: "Dry Sub-Humid",
6: "Humid",
7: "Hyper-Humid"},
'Slope': {1: "Flat",
2: "Intermediate",
3: "Steep",
4: "Very Steep"},
'Elevation': {1: "Sea Level",
2: "Lowland",
3: "Intermediate",
4: "Highland",
5: "Alpine"},
'Soil': {1: "Clay",
2: "Clay Loam",
3: "Silty Clay Loam",
4: "Silty Clay",
5: "Sandy Clay",
6: "Sandy Clay Loam",
7: "Sandy Loam",
8: "Medium Loam",
9: "Silty Loam",
10: "Silt",
11: "Loamy Sand",
12: "Sand"},
'Irrigated': {1: "Irrigated",
2: "Rainfed"},
'Crop': {1: "Single",
2: "Double",
3: "Perennial",
4: "Pasture"}
}
AEZ_V2 = {
'Aridity': {1: "Arid",
2: "Average",
3: "Humid"},
'Slope': {1: "Flat",
2: "Steep"},
'Elevation': {1: "Lowland",
2: "Intermediate",
3: "Highland"},
'Soil': {1: "Clay",
2: "Loam",
3: "Sand"},
'Irrigated': {1: "Irrigated",
2: "Rainfed"},
'Crop': {1: "Single",
2: "Double",
3: "Perennial",
4: "Pasture"}
}
AEZ_Names = dict()
AEZ_Names['1.0'] = AEZ_V1
AEZ_Names['2.0'] = AEZ_V2
return AEZ_Names[version]
| [
"[email protected]"
]
| |
5a38b04405d890de23e3283d6e0d1a7fbcc357bb | 33a5bce52506b8c01ee250830f28aacc46382989 | /accounts/forms.py | e0b7f5b79f689f4afce7274ef52e5b9dc3bbbe7c | []
| no_license | stsh1119/django_request_trap | 76ecac7709fc8c7847c479afa9b2cc384cf9cd0c | c55bfb8f0316f8ac03ffb7d60da48599289c0b81 | refs/heads/main | 2023-07-06T18:58:30.063801 | 2021-07-25T08:39:42 | 2021-07-25T08:39:42 | 380,741,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
class CustomUserCreationForm(UserCreationForm):
class Meta:
model = get_user_model()
fields = ('email', 'username', )
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = get_user_model()
fields = ('email', 'username',)
| [
"[email protected]"
]
| |
bfa0d79f8e6172fe2680868e91558d1789cb9387 | cee4dd54ea44f91511a8b886b2d763604afeb95d | /app/emails.py | 7cada0fc646cae0a6c88583b4bd278ef9a973824 | []
| no_license | damnedsteven/emcn_wh | 0520ebe12b8d986905e2d7d8f7eea530f69fba96 | 0180d47e8484e691b880433f3d07f6c3068477b8 | refs/heads/master | 2021-01-19T12:29:25.767510 | 2017-04-13T02:09:18 | 2017-04-13T02:09:18 | 88,033,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | from .decorators import async
from flask_mail import Message
from app import app, mail
from flask import render_template
from config import ADMINS
@async
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
send_async_email(app, msg)
def follower_notification(followed, follower):
send_email("[microblog] %s is now following you!" % follower.username,
ADMINS[0],
[followed.email],
render_template("follower_email.txt",
user=followed, follower=follower),
render_template("follower_email.html",
user=followed, follower=follower)) | [
"[email protected]"
]
| |
5724646c2d8664888cbf6def9ea0bdcc3cc5374a | 36a782b4fe282b96803458f9f6bf44355e4cf71f | /rotkehlchen/fval.py | bb601200d30b198e9dde67eb0d36edbb5ad6be60 | [
"BSD-3-Clause"
]
| permissive | DUMIE505/rotki | 23911887c7cc741a5d682fb90c98e09828b8c66f | 046169bc068c7b29a271d6e80d978e02d0ea76ac | refs/heads/develop | 2023-01-07T20:32:57.215529 | 2020-01-29T22:58:28 | 2020-01-31T08:58:03 | 237,394,542 | 0 | 0 | BSD-3-Clause | 2022-12-27T15:35:36 | 2020-01-31T09:04:50 | null | UTF-8 | Python | false | false | 6,879 | py | from decimal import Decimal, InvalidOperation
from typing import Any, Union
from rotkehlchen.errors import ConversionError
# Here even though we got __future__ annotations using FVal does not seem to work
AcceptableFValInitInput = Union[float, bytes, Decimal, int, str, 'FVal']
AcceptableFValOtherInput = Union[int, 'FVal']
class FVal():
"""A value to represent numbers for financial applications. At the moment
we use the python Decimal library but the abstraction will help us change the
underlying implementation if needed.
At the moment we do not allow any operations against floating points. Even though
floating points could be converted to Decimals before each operation we will
use this restriction to make sure floating point numbers are rooted from the codebase first.
"""
__slots__ = ('num',)
def __init__(self, data: AcceptableFValInitInput):
try:
if isinstance(data, float):
self.num = Decimal(str(data))
elif isinstance(data, bytes):
# assume it's an ascii string and try to decode the bytes to one
self.num = Decimal(data.decode())
elif isinstance(data, bool):
# This elif has to come before the isinstance(int) check due to
# https://stackoverflow.com/questions/37888620/comparing-boolean-and-int-using-isinstance
                raise ValueError('Invalid type bool for data given to FVal constructor')
elif isinstance(data, (Decimal, int, str)):
self.num = Decimal(data)
elif isinstance(data, FVal):
self.num = data.num
else:
raise ValueError(f'Invalid type {type(data)} of data given to FVal constructor')
except InvalidOperation:
raise ValueError(
'Expected string, int, float, or Decimal to initialize an FVal.'
'Found {}.'.format(type(data)),
)
def __str__(self) -> str:
return str(self.num)
def __repr__(self) -> str:
return 'FVal({})'.format(str(self.num))
def __gt__(self, other: AcceptableFValOtherInput) -> bool:
evaluated_other = evaluate_input(other)
return self.num.compare_signal(evaluated_other) == Decimal('1')
def __lt__(self, other: AcceptableFValOtherInput) -> bool:
evaluated_other = evaluate_input(other)
return self.num.compare_signal(evaluated_other) == Decimal('-1')
def __le__(self, other: AcceptableFValOtherInput) -> bool:
evaluated_other = evaluate_input(other)
return self.num.compare_signal(evaluated_other) in (Decimal('-1'), Decimal('0'))
def __ge__(self, other: AcceptableFValOtherInput) -> bool:
evaluated_other = evaluate_input(other)
return self.num.compare_signal(evaluated_other) in (Decimal('1'), Decimal('0'))
def __eq__(self, other: object) -> bool:
evaluated_other = evaluate_input(other)
return self.num.compare_signal(evaluated_other) == Decimal('0')
def __add__(self, other: AcceptableFValOtherInput) -> 'FVal':
evaluated_other = evaluate_input(other)
return FVal(self.num.__add__(evaluated_other))
def __sub__(self, other: AcceptableFValOtherInput) -> 'FVal':
evaluated_other = evaluate_input(other)
return FVal(self.num.__sub__(evaluated_other))
def __mul__(self, other: AcceptableFValOtherInput) -> 'FVal':
evaluated_other = evaluate_input(other)
return FVal(self.num.__mul__(evaluated_other))
def __truediv__(self, other: AcceptableFValOtherInput) -> 'FVal':
evaluated_other = evaluate_input(other)
return FVal(self.num.__truediv__(evaluated_other))
def __floordiv__(self, other: AcceptableFValOtherInput) -> 'FVal':
evaluated_other = evaluate_input(other)
return FVal(self.num.__floordiv__(evaluated_other))
def __pow__(self, other: AcceptableFValOtherInput) -> 'FVal':
evaluated_other = evaluate_input(other)
return FVal(self.num.__pow__(evaluated_other))
def __radd__(self, other: AcceptableFValOtherInput) -> 'FVal':
evaluated_other = evaluate_input(other)
return FVal(self.num.__radd__(evaluated_other))
def __rsub__(self, other: AcceptableFValOtherInput) -> 'FVal':
evaluated_other = evaluate_input(other)
return FVal(self.num.__rsub__(evaluated_other))
def __rmul__(self, other: AcceptableFValOtherInput) -> 'FVal':
evaluated_other = evaluate_input(other)
return FVal(self.num.__rmul__(evaluated_other))
def __rtruediv__(self, other: AcceptableFValOtherInput) -> 'FVal':
evaluated_other = evaluate_input(other)
return FVal(self.num.__rtruediv__(evaluated_other))
def __rfloordiv__(self, other: AcceptableFValOtherInput) -> 'FVal':
evaluated_other = evaluate_input(other)
return FVal(self.num.__rfloordiv__(evaluated_other))
def __float__(self) -> float:
return float(self.num)
# --- Unary operands
def __neg__(self) -> 'FVal':
return FVal(self.num.__neg__())
def __abs__(self) -> 'FVal':
return FVal(self.num.copy_abs())
# --- Other operations
def fma(self, other: AcceptableFValOtherInput, third: AcceptableFValOtherInput) -> 'FVal':
"""
Fused multiply-add. Return self*other+third with no rounding of the
intermediate product self*other
"""
evaluated_other = evaluate_input(other)
evaluated_third = evaluate_input(third)
return FVal(self.num.fma(evaluated_other, evaluated_third))
def to_percentage(self, precision: int = 4) -> str:
return '{:.{}%}'.format(self.num, precision)
def to_int(self, exact: bool) -> int:
"""
Tries to convert to int, If `exact` is true then it will convert only if
it is a whole decimal number; i.e.: if it has got nothing after the decimal point
Raises:
ConversionError: If exact was True but the FVal is actually not an exact integer.
"""
if exact and self.num.to_integral_exact() != self.num:
raise ConversionError(f'Tried to ask for exact int from {self.num}')
return int(self.num)
def is_close(self, other: AcceptableFValInitInput, max_diff: str = "1e-6") -> bool:
evaluated_max_diff = FVal(max_diff)
if not isinstance(other, FVal):
other = FVal(other)
diff_num = abs(self.num - other.num)
return diff_num <= evaluated_max_diff.num
def evaluate_input(other: Any) -> Union[Decimal, int]:
"""Evaluate 'other' and return its Decimal representation"""
if isinstance(other, FVal):
return other.num
elif not isinstance(other, int):
raise NotImplementedError("Expected either FVal or int.")
return other
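# Illustrative usage (added here for clarity, not part of the rotki module): FVal wraps Decimal
# so financial arithmetic avoids binary floating point surprises, e.g.
#     >>> FVal('0.1') + FVal('0.2') == FVal('0.3')
#     True
#     >>> FVal(5).to_percentage()
#     '500.0000%'
#     >>> FVal('10.5').to_int(exact=False)
#     10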
| [
"[email protected]"
]
| |
cf396a701c4fabd74562aaed43cb8505fbdc5b23 | f95d2646f8428cceed98681f8ed2407d4f044941 | /numpydemo/01/day01/demo02_ndarray.py | d7decab2c5e6ccd28291779733520ff24d15eec1 | []
| no_license | q2806060/python-note | 014e1458dcfa896f2749c7ebce68b2bbe31a3bf8 | fbe107d668b44b78ae0094dbcc7e8ff8a4f8c983 | refs/heads/master | 2020-08-18T01:12:31.227654 | 2019-10-17T07:40:40 | 2019-10-17T07:40:40 | 215,731,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | """
demo02_ndarray.py
"""
import numpy as np
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8]])
print(a, a.shape)
# start 1, stop 10 (exclusive), step 2
b = np.arange(1, 10, 2)
print(b)
# create an array of 5 zeros
c = np.zeros(5, dtype='int32')
print(c, c.dtype)
# create an array of 5 ones
d = np.ones(5, dtype='int32')
print(d, d.dtype)
# create arrays e and f with the same shape as a: e all zeros, f all ones
e = np.zeros_like(a)
f = np.ones_like(a)
print(e)
print(f / 5)
| [
"[email protected]"
]
| |
5d810a67447f245b0101199251949b51ea6c8b82 | 2fe34a287bc79da86243ec7b9532e74258311cb8 | /thumbnails/conf/__init__.py | a7c1200db62bd0c6ccf546dff13cee033401bd6a | [
"MIT"
]
| permissive | INARIcompany/python-thumbnails | 44c228a1a57843bf001fa342ae25e7e79644402f | f8836a60ef3f6130e38a9b40c7f4bcb5a3d7c02d | refs/heads/master | 2023-02-14T05:15:43.521090 | 2021-01-14T20:58:41 | 2021-01-14T20:58:41 | 329,731,071 | 0 | 0 | MIT | 2021-01-14T20:49:51 | 2021-01-14T20:49:50 | null | UTF-8 | Python | false | false | 77 | py | # -*- coding: utf-8 -*-
from thumbnails.conf.wrapper import settings # noqa
| [
"[email protected]"
]
| |
ae1ad986a8d66093295f5b63687111f255f1453e | 521efcd158f4c69a686ed1c63dd8e4b0b68cc011 | /airflow/providers/google/cloud/transfers/gdrive_to_local.py | 113a389c6fbf22314e9345d4caac708ef5871c83 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
]
| permissive | coutureai/RaWorkflowOrchestrator | 33fd8e253bfea2f9a82bb122ca79e8cf9dffb003 | cd3ea2579dff7bbab0d6235fcdeba2bb9edfc01f | refs/heads/main | 2022-10-01T06:24:18.560652 | 2021-12-29T04:52:56 | 2021-12-29T04:52:56 | 184,547,783 | 5 | 12 | Apache-2.0 | 2022-11-04T00:02:55 | 2019-05-02T08:38:38 | Python | UTF-8 | Python | false | false | 3,834 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.suite.hooks.drive import GoogleDriveHook
class GoogleDriveToLocalOperator(BaseOperator):
"""
Writes a Google Drive file into local Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDriveToLocalOperator`
:param output_file: Path to downloaded file
:type output_file: str
:param folder_id: The folder id of the folder in which the Google Drive file resides
:type folder_id: str
:param file_name: The name of the file residing in Google Drive
:type file_name: str
:param drive_id: Optional. The id of the shared Google Drive in which the file resides.
:type drive_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = [
"output_file",
"folder_id",
"file_name",
"drive_id",
"impersonation_chain",
]
def __init__(
self,
*,
output_file: str,
file_name: str,
folder_id: str,
drive_id: Optional[str] = None,
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.output_file = output_file
self.folder_id = folder_id
self.drive_id = drive_id
self.file_name = file_name
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context):
self.log.info('Executing download: %s into %s', self.file_name, self.output_file)
gdrive_hook = GoogleDriveHook(
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
file_metadata = gdrive_hook.get_file_id(
folder_id=self.folder_id, file_name=self.file_name, drive_id=self.drive_id
)
with open(self.output_file, "wb") as file:
gdrive_hook.download_file(file_id=file_metadata["id"], file_handle=file)
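# Example usage sketch (added for illustration; the task id, folder id and file name below are
# placeholders rather than values taken from the Airflow codebase):
#
#     download_report = GoogleDriveToLocalOperator(
#         task_id="download_report",
#         folder_id="1A2b3C4d5E6f",
#         file_name="weekly_report.csv",
#         output_file="/tmp/weekly_report.csv",
#     )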
| [
"[email protected]"
]
| |
906ab9e89725e37b6440b90331db571633a5fd2f | 8bbeb7b5721a9dbf40caa47a96e6961ceabb0128 | /python3/396.Rotate Function(旋转函数).py | 5a9f84b92f2c5deadb136fc43540cab6fbb4e3a4 | [
"MIT"
]
| permissive | lishulongVI/leetcode | bb5b75642f69dfaec0c2ee3e06369c715125b1ba | 6731e128be0fd3c0bdfe885c1a409ac54b929597 | refs/heads/master | 2020-03-23T22:17:40.335970 | 2018-07-23T14:46:06 | 2018-07-23T14:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | py | """
Given an array of integers A and let n be its length.

Assume B_k to be the array obtained by rotating A k positions clock-wise. We define a
"rotation function" F on A as follows:

    F(k) = 0 * B_k[0] + 1 * B_k[1] + ... + (n-1) * B_k[n-1]

Calculate the maximum value of F(0), F(1), ..., F(n-1).

Note: n is guaranteed to be less than 10^5.

Example:
    A = [4, 3, 2, 6]
    F(0) = (0 * 4) + (1 * 3) + (2 * 2) + (3 * 6) = 0 + 3 + 4 + 18 = 25
    F(1) = (0 * 6) + (1 * 4) + (2 * 3) + (3 * 2) = 0 + 4 + 6 + 6 = 16
    F(2) = (0 * 2) + (1 * 6) + (2 * 4) + (3 * 3) = 0 + 6 + 8 + 9 = 23
    F(3) = (0 * 3) + (1 * 2) + (2 * 6) + (3 * 4) = 0 + 2 + 12 + 12 = 26
    So the maximum value of F(0), F(1), F(2), F(3) is F(3) = 26.
"""
class Solution:
    def maxRotateFunction(self, A):
        """
        :type A: List[int]
        :rtype: int
        """
        total, n = sum(A), len(A)
        best = f = sum(i * v for i, v in enumerate(A))  # F(0)
        for k in range(1, n):
            f += total - n * A[n - k]
            best = max(best, f)
        return best
| [
"[email protected]"
]
| |
bdc59bd33735fb55293048af6b11e36c2cd99e7f | a7b592be95dc2af9fdb56725f44e98cc59166e6f | /apps/subway/download.py | e480b2677fcf65687d50862cb2018926dab6ab96 | []
| no_license | cash2one/CRM-3 | bc864c462d155b5dc6a51a5edbd564574b3e2f94 | cedcaeb397ccadb36952534242bd296c5b4513bb | refs/heads/master | 2021-06-14T07:40:53.572013 | 2017-05-23T15:52:06 | 2017-05-23T15:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,800 | py | # coding=UTF-8
import datetime
from mongoengine.document import Document
from mongoengine.fields import IntField, DateTimeField, StringField
from apilib import get_tapi, TopError, SessionCache
from apilib.app import QNApp
from apps.common.utils.utils_log import log
from apps.common.utils.utils_datetime import time_is_someday, format_datetime, datetime_2string, time_is_recent
from apps.common.utils.utils_cacheadpter import CacheAdpter
from apps.common.cachekey import CacheKey
from apps.common.biz_utils.utils_misc import set_cache_progress, get_cache_progress
from models_account import Account
from models_adgroup import Adgroup
from models_campaign import Campaign
from models_creative import Creative
from models_keyword import Keyword
from models_item import Item
def get_klass_name(klass):
return klass.__name__.lower()
def get_klass(name):
return globals()[name]
def check_sync_struct(dl, is_created):
if is_created:
return True
elif dl.ok_time and dl.ok_time.date() < datetime.date.today():
return True
    elif not dl.init_status or not dl.inc_status:
return True
elif dl.init_status and dl.init_status != 'done':
return True
elif dl.init_status and dl.inc_status != 'done':
return True
return False
def check_global_mutual(dl_type):
    # TODO: wangqi 20140528 the mutual-exclusion handling for downloads is not fully thought through yet; optimize later
    """Skip the download when a mutual-exclusion lock is cached for the shop; the lock is split by type (structure vs. report) and its granularity could be refined."""
def _is_mutual(fn):
def __is_mutual(self = None, *args, **kwargs):
key_name = CacheKey.SUBWAY_DOWNLOAD_MUTUAL_LOCK % (self.shop_id, dl_type)
cached_lock = CacheAdpter.get(key_name, 'web')
            if cached_lock: # a lock is already cached, so skip; when there is no lock, download directly
return False
else:
CacheAdpter.set(key_name, True, 'web', 30 * 60)
result = fn(self, *args, **kwargs)
CacheAdpter.delete(key_name, 'web')
return result
return __is_mutual
return _is_mutual
def parse_status(status_str):
"""
parse_status('2013-7-30-15-20_OK')
>>> ('OK',datetime.datetime(2013,7,30,15,20))
parse_status('2013-7-30-15-20_2013-7-30-16-40_DOING')
    >>> ('DOING', datetime.datetime(2013,7,30,15,20), datetime.datetime(2013,7,30,16,40))
"""
if status_str:
temp_list = status_str.split('_')
status = temp_list[-1]
result_list = [status, datetime.datetime.strptime(temp_list[0], '%Y-%m-%d-%H-%M')]
if status == 'DOING':
result_list.append(datetime.datetime.strptime(temp_list[1], '%Y-%m-%d-%H-%M'))
return tuple(result_list)
else:
return None
DL_ORDER_LIST = ['Account', 'Campaign', 'Adgroup', 'Creative', 'Keyword', 'Item'] # controls the download order
class Downloader(Document):
"""记录了各个结构的整体状态(结构同步状态,报表同步状态)"""
shop_id = IntField(verbose_name = '店铺ID', primary_key = True)
init_status = StringField(verbose_name = "初始化状态", default = 'ready')
inc_status = StringField(verbose_name = "增量状态", default = 'ready')
ok_time = DateTimeField(verbose_name = "结构同步成功的时间")
failed_time = DateTimeField(verbose_name = "增量结构同步失败的时间")
accountrpt_status = StringField(verbose_name = '账户报表状态', db_field = 'acctrpt_status', default = '')
campaignrpt_status = StringField(verbose_name = '计划报表状态', db_field = 'camprpt_status', default = '')
adgrouprpt_status = StringField(verbose_name = '广告组报表状态', db_field = 'adgrpt_status', default = '')
keywordrpt_status = StringField(verbose_name = '关键词报表状态', db_field = 'kwrpt_status', default = '') # 单个依赖于adgroup
creativerpt_status = StringField(verbose_name = '创意报表状态', db_field = 'crtrpt_status', default = '') # 单个依赖于adgroup
api_valid_time = DateTimeField(verbose_name = "API有效时间")
top_refresh_token = StringField(verbose_name = "top的session刷新令牌")
top_refresh_time = DateTimeField(verbose_name = "top的session刷新时间")
meta = {'db_alias': "mnt-db", 'collection':'subway_downloader'}
@property
def tapi(self):
if not hasattr(self, '_tapi'):
tapi = get_tapi(shop_id = self.shop_id)
if not time_is_someday(self.api_valid_time):
is_ok, _ = QNApp.test_api(tapi)
if not is_ok:
SessionCache.del_cache(self.shop_id)
tapi = get_tapi(shop_id = self.shop_id)
is_ok, reason = QNApp.test_api(tapi)
if not is_ok:
log.error("invalid tapi, error=%s" % reason)
tapi = None
if tapi:
self.api_valid_time = datetime.datetime.now()
self.save()
self._tapi = tapi
return self._tapi
@property
def token(self):
"""获取淘宝的临时验证码"""
if not hasattr(self, '_token'):
cachekey = CacheKey.SUBWAY_TOKEN % self.shop_id
token = CacheAdpter.get(cachekey, 'web')
if not token:
token = self._get_subway_token()
CacheAdpter.set(cachekey, token, 'web', timeout = 60*60*2)
self._token = token
return self._token
def _get_subway_token(self):
temp_token = ''
if self.tapi:
try:
top_obj = self.tapi.simba_login_authsign_get()
if top_obj and hasattr(top_obj, "subway_token"):
temp_token = top_obj.subway_token
except TopError, e:
log.error('simba_login_authsign_get TopError, shop_id=%s, error=%s' % (self.shop_id, e))
return temp_token
# @property
# def refresh_token(self, sessionkey):
# import settings, md5, requests
# from urllib import urlencode
# appkey = settings.APP['web']['APP_KEY']
# appsecret = settings.APP['web']['APP_SECRET']
# try:
# refresh_token = sessionkey
# sign = md5.new('appkey%srefresh_token%ssessionkey%s%s' % (appkey, refresh_token, sessionkey, appsecret)).hexdigest().upper()
# data = {
# 'appkey':appkey,
# 'refresh_token':refresh_token,
# 'sign':sign,
# 'sessionkey':sessionkey
# }
# url_data = urlencode(data)
# r = requests.post('http://container.api.taobao.com/container/refresh?%s' % url_data).json()
# if not 're_expires_in' in r or not r['re_expires_in']:
# return sessionkey
# return r['top_session']
# except Exception, e:
# log.error('get refresh token error and the error=%s' % e)
# return sessionkey
    def check_todayrpt_isok(self, need_detail = False): # need_detail: also require keyword/creative detail reports
        """Check whether all of today's report data is OK."""
today = datetime.date.today()
if self.ok_time.date() < today:
return False
check_class_list = ['account', 'campaign', 'adgroup']
if need_detail:
check_class_list.extend(['keyword', 'creative'])
for cls_name in check_class_list:
status_result = parse_status(getattr(self, '%srpt_status' % (cls_name)))
if not status_result:
return False
elif status_result[0] not in ['OK', 'EMPTY']:
return False
elif status_result[1].date() < today:
return False
else:
continue
return True
def check_rptstatus_isok(self, cls_name):
"""判断状态是否OK"""
status_result = parse_status(getattr(self, '%srpt_status' % (cls_name)))
if status_result:
if status_result[0] in ['OK', 'EMPTY']:
return True
return False
def check_struct_isok(self, cls_name):
import copy
temp_list = copy.deepcopy(DL_ORDER_LIST)
        temp_list.insert(0, 'ready')
temp_list.append('done')
try:
return temp_list.index(self.init_status) > temp_list.index(cls_name) and temp_list.index(self.inc_status) > temp_list.index(cls_name)
except Exception, e:
log.error('cls_name not found in DL_ORDER_LIST, cls_name=%s, e=%s' % (cls_name, e))
return False
finally:
del temp_list
def __delattr__(self, *args, **kwargs): # why? by wangqi 20150928
super(Downloader, self).__delattr__(*args, **kwargs)
    def check_rpt_isok(self, need_detail = False): # need_detail: also require keyword/creative detail reports
        """Check whether all report data is OK."""
temp_result = self.check_rptstatus_isok('account') and self.check_rptstatus_isok('campaign') and self.check_rptstatus_isok('adgroup')
if not need_detail:
return temp_result
else:
return temp_result and self.check_rptstatus_isok('creative') and self.check_rptstatus_isok('keyword')
def check_status_4rpt(self, klass, check_type = 'rpt'):
"""
        0. Used for report / UDP downloads.
        1. Status OK     --> synced today?  --> yes: no need to download
                                            --> no: download
        2. Status FAILED --> download again
        3. Status DOING  --> last success today? --> yes: no need to download
                         --> still running within the time window --> wait
                         --> otherwise download again
        4. Empty status  --> initial download
"""
        # TODO: wangqi 20140711 the report-download control flow is not well written; optimize it later and make better use of the cached status
cls_name = get_klass_name(klass)
status_result = parse_status(getattr(self, '%s%s_status' % (cls_name, check_type)))
if status_result:
status, last_time = status_result[0], status_result[1]
if status in ('OK', 'EMPTY'):
if time_is_someday(last_time):
return False, None
else:
return True, last_time
elif status == 'FAILED':
return True, last_time
elif status == 'DOING':
if time_is_someday(last_time):
return False, None
else:
                    if time_is_recent(status_result[2], minutes = cls_name == 'keyword' and 40 or 10): # keyword reports are allowed a longer running window
return False, None
else:
return True, last_time
else:
            # TODO: wangqi 20151102 keep it like this for now; later drop the status field and only keep the last successful download time in the database
return True, datetime.datetime.today() - datetime.timedelta(days = klass.Report.INIT_DAYS)
def download_struct(self, klass):
return klass.struct_download(shop_id = self.shop_id, tapi = self.tapi)
def download_increase(self, klass, last_time):
if not last_time:
last_time = datetime.datetime.now()
last_sync_time = last_time - datetime.timedelta(hours = 1)
return klass.increase_download(shop_id = self.shop_id, tapi = self.tapi, last_sync_time = format_datetime(last_sync_time))
def download_rpt(self, klass, last_time, is_force = False):
attr_name = '%s%s_status' % (get_klass_name(klass), 'rpt')
setattr(self, attr_name, '%s_%s_DOING' % (datetime_2string(last_time), datetime_2string()))
self.save()
        # Special handling of report times: 1) if the last sync is too old, reset it to 15 days ago; 2) the last sync date may not exceed today; 3) before 6 am only data up to the day before yesterday is downloaded, because Taobao's data is not ready yet.
        # The last sync date is also forced back to the day before yesterday at the latest, because 1) Taobao report data is often delayed, so this automatically repairs the report on the next day, and 2) the historical 3-day conversion data is more reliable than 1-day conversion data.
last_date = last_time.date()
init_start_date = datetime.date.today() - datetime.timedelta(days = klass.Report.INIT_DAYS)
valid_rpt_days = datetime.datetime.now().hour < 6 and 2 or 1
default_end_date = datetime.date.today() - datetime.timedelta(days = valid_rpt_days)
default_start_date = default_end_date - datetime.timedelta(days = 2)
if last_date < init_start_date:
last_date = init_start_date
elif last_date > default_start_date:
last_date = default_start_date
time_scope = last_date, default_end_date
if is_force and get_klass_name(klass) in ['keyword', 'creative']:
result = klass.force_download_rpt(shop_id = self.shop_id, tapi = self.tapi, token = self.token, time_scope = time_scope)
else:
result = klass.report_download(shop_id = self.shop_id, tapi = self.tapi, token = self.token, time_scope = time_scope)
        # TODO: wangqi 2014-4-14 likewise, because of the adjusted sync times above, this has to be changed on the same basis; consider optimizing later
record_datetime = valid_rpt_days == 1 and datetime.datetime.now() or datetime.datetime.now() - datetime.timedelta(days = 1)
status_str = '%s_%s' % (result in ['OK', 'EMPTY'] and datetime_2string(record_datetime) or datetime_2string(last_time), result)
setattr(self, attr_name, status_str)
self.save()
return result
def auto_sync_struct(self, stop_status = "done"):
"""
        0. Download order: ['ready','Account','Campaign','Adgroup','Creative','Keyword','Item','done'].
        1. Two fields keep the state, init_status and inc_status; both take values from the list above.
        2. For an automatic sync, init_status decides the mode: classes before it get an incremental download, classes from it onwards get a full structure download.
        3. For incremental downloads, inc_status decides the reference time: classes before it use ok_time, later ones use failed_time.
"""
init_date = datetime.datetime.now() - datetime.timedelta(days = 28)
        if (self.ok_time and self.ok_time <= init_date) or (self.failed_time and self.failed_time <= init_date) : # if either ok_time or failed_time is old enough, fall back to a full initial download
self.init_status = 'ready'
self.inc_status = 'ready'
try:
init_index = DL_ORDER_LIST.index(self.init_status)
except ValueError:
if self.init_status == 'ready':
init_index = -1
            elif self.init_status == 'done': # everything already OK, so download increments for all classes
init_index = len(DL_ORDER_LIST)
else:
return False
try:
inc_index = DL_ORDER_LIST.index(self.inc_status)
except ValueError:
if self.inc_status in ['ready', 'done']:
inc_index = len(DL_ORDER_LIST)
else:
return False
try:
for i, cls_name in enumerate(DL_ORDER_LIST):
if cls_name == stop_status:
                    raise Exception(cls_name, i < init_index and "inc" or "init") # keep the recorded mode consistent with the other statuses
set_cache_progress(self.shop_id, 'struct_' + cls_name.lower() + '_downing')
if i < init_index:
if not self.download_increase(klass = get_klass(cls_name), last_time = i < inc_index and self.ok_time or self.failed_time):
raise Exception(cls_name, 'inc')
else:
if not self.download_struct(klass = get_klass(cls_name)):
raise Exception(cls_name, 'init')
set_cache_progress(self.shop_id, 'struct_' + cls_name.lower() + '_finished')
self.init_status = 'done'
self.inc_status = 'done'
            self.failed_time = None # download succeeded, so clear the failure time
log.info('sync ALL struct OK, shop_id=%s' % self.shop_id)
return True
except Exception, e:
log.error('sync ALL struct FAILED, shop_id=%s, stopped at %s, download %s' % (self.shop_id, e[0], e[1]))
if e[1] == 'inc':
self.failed_time = self.ok_time
self.inc_status = e[0]
else:
self.init_status = e[0]
return False
finally:
self.ok_time = datetime.datetime.now()
self.save()
def sync_rpt(self, klass, is_force = False, rpt_days = None):
cls_name = get_klass_name(klass)
        if is_force: # for a forced download use the given rpt_days, or fall back to the class's own report retention period
if rpt_days is None:
rpt_days = klass.Report.INIT_DAYS
flag, last_time = True, datetime.datetime.today() - datetime.timedelta(days = rpt_days)
else:
flag, last_time = self.check_status_4rpt(klass, check_type = 'rpt')
set_cache_progress(self.shop_id, 'report_' + cls_name.lower() + '_downing')
if flag:
result = self.download_rpt(klass = klass, last_time = last_time, is_force = is_force)
set_cache_progress(self.shop_id, 'report_' + cls_name.lower() + '_finished')
return result
else:
set_cache_progress(self.shop_id, 'report_' + cls_name.lower() + '_finished')
return True
# @check_global_mutual('struct')
def sync_all_struct(self, is_force = False):
if self.tapi:
            if is_force: # for a forced download just reset the status fields so everything is downloaded again
self.init_status = 'ready'
self.inc_status = 'ready'
self.save()
return self.auto_sync_struct()
else:
return False
def sync_all(self):
        # download everything: structure first, then all reports
if self.sync_all_struct():
self.sync_all_rpt()
return True
else:
return False
# @check_global_mutual('rpt')
def sync_all_rpt(self, detail_flag = False, is_force = False, rpt_days = None, quick_flag = False):
if self.tapi:
acct_rpt_status = self.sync_rpt(klass = Account, is_force = is_force, rpt_days = rpt_days)
if acct_rpt_status == 'EMPTY':
                if datetime.datetime.now().hour < 9: # TODO: wangqi 20140725 this branch has not been used for a long time, consider removing it; if Taobao returns empty before 9 am, mark the status as FAILED
status_str = '%s_%s' % (datetime_2string(), 'FAILED')
self.accountrpt_status = status_str
self.save()
return False
else:
return True
else:
self.sync_rpt(klass = Campaign, is_force = is_force, rpt_days = rpt_days)
if quick_flag:
return True
self.sync_rpt(klass = Adgroup, is_force = is_force, rpt_days = rpt_days)
if detail_flag:
self.sync_rpt(klass = Creative, is_force = is_force, rpt_days = rpt_days)
self.sync_rpt(klass = Keyword, is_force = is_force, rpt_days = rpt_days)
return self.check_rpt_isok(need_detail = detail_flag)
else:
return False
@staticmethod
def download_all_struct(shop_id, is_force = False):
dl, _ = Downloader.objects.get_or_create(shop_id = shop_id)
return dl.sync_all_struct(is_force = is_force)
@staticmethod
def download_all_rpt(shop_id, detail_flag = False, is_force = False, rpt_days = None):
dl , is_created = Downloader.objects.get_or_create(shop_id = shop_id)
if is_created:
dl.sync_all_struct(is_force = is_force)
return dl.sync_all_rpt(detail_flag = detail_flag, is_force = is_force, rpt_days = rpt_days)
@staticmethod
def quick_download(shop_id):
dl, _ = Downloader.objects.get_or_create(shop_id = shop_id)
if dl.tapi:
dl.auto_sync_struct(stop_status = "Adgroup")
dl.sync_all_rpt(quick_flag = True)
@staticmethod
def download_all(shop_id):
dl, _ = Downloader.objects.get_or_create(shop_id = shop_id)
if dl.tapi:
dl.sync_all()
@classmethod
def often_sync_struct(cls, shop_id):
dl, _ = Downloader.objects.get_or_create(shop_id = shop_id)
if (not dl.ok_time) or dl.ok_time <= datetime.datetime.now() - datetime.timedelta(hours = 1):
return dl.sync_all_struct()
return True
dler_coll = Downloader._get_collection()
| [
"[email protected]"
]
| |
fdfe2f9a3d3e3b660f7c7340ac430fa1a6359434 | 0b88a8e39e894fe72fd389505bdad786a6713439 | /lib/setup3.py | 51f4b3fbb0457190d4eb492ba1a479eb2d2b9855 | [
"Apache-2.0"
]
| permissive | Trevol/pytorch-retinanet | d61a76c4a1cc14667ee0149440da865a8628d81f | e9c87a6c195d814d19edeca19166a5559df7da4a | refs/heads/master | 2020-05-24T17:41:33.785410 | 2019-05-29T08:52:39 | 2019-05-29T08:52:39 | 187,391,911 | 0 | 0 | null | 2019-05-18T18:23:50 | 2019-05-18T18:23:50 | null | UTF-8 | Python | false | false | 5,010 | py |
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
from Cython.Distutils import build_ext
import subprocess
import numpy as np
import os
from os.path import join as pjoin
def find_in_path(name, path):
"Find a file in a search path"
# Adapted fom
# http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
CUDA = locate_cuda()
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
subclassing going on."""
# tell the compiler it can processes .cu
self.src_extensions.append('.cu')
# save references to the default compiler_so and _comple methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [Extension('nms.gpu_nms',
['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with
# gcc the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
)]
setup(
name='fast_rcnn',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
# python setup3.py build_ext --inplace
| [
"[email protected]"
]
| |
90751ed36f2869cf56e4df448cb42533a8bd624c | fcde32709c62b8ee86da459bb7c8eee52c848118 | /code/day17/作业00.py | 18b8f5c56d28eafa61ce240cdb8b8aa5ca0c34f6 | []
| no_license | klaus2015/py_base | 6b92d362c3d7dc0e09205a037f4d580381dac94d | ec32c731c1c2f6a0dab87f1d167397e4fa86b8de | refs/heads/master | 2022-07-28T15:49:30.383648 | 2020-05-11T15:31:43 | 2020-05-11T15:31:43 | 261,777,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | """
3. Define an Enemy class (name, attack power, defense, hit points).
   Build an enemy list and use list_helper to implement the following:
    (1) find the enemy named "灭霸" (Thanos)
    (2) find all enemies with attack power greater than 10
    (3) count how many enemies are still alive
"""
from common.list_helper import *
class Enemy:
"""
    Enemy class
"""
def __init__(self,name,hp,basic_damage,defensive):
self.name = name
self.hp = hp
self.basic_damage = basic_damage
self.defensive = defensive
def __str__(self):
return "%s,%d,,%d,,%d" % (self.name, self.hp, self.basic_damage, self.defensive)
list_enemy = [
Enemy("红骷髅", 200, 50, 5),
Enemy("灭霸", 500, 150, 20),
Enemy("海拉", 250, 100, 6),
Enemy("奥创", 0, 100, 12),
Enemy("蜘蛛侠", 0, 80, 11),
Enemy("成昆",80,30,10)
]
# re = ListHelper.find_single(list_enemy,lambda item:item.name =="灭霸")
# print(re)
re = ListHelper.find_all(list_enemy,lambda item:item.basic_damage > 10)
# result = list(re)
# for item in result:
# print(item)
print("-------------")
for item in re:
print(item)
re = ListHelper.get_count(list_enemy,lambda item:item.hp > 0)
print(re)
# check whether an enemy named "成昆" exists in the list
re = ListHelper.is_exits(list_enemy,lambda item:item.name == "成昆")
print(re)
# check whether any enemy has attack power below 5 or defense below 10
re = ListHelper.is_exits(list_enemy,lambda item:item.basic_damage < 5 or item.defensive <10)
print(re)
| [
"[email protected]"
]
| |
5f42709ff47192bb9f442f01303bd687ae7209b6 | 7f8db5b974a747632729d16c431de7aca007af00 | /0x03-python-data_structures/5-no_c.py | 2a1bab55ec3424fa4324cbf2cbf8bae79f944bf2 | []
| no_license | thomasmontoya123/holbertonschool-higher_level_programming | 6f5ceb636167efba1e36ed2dee7bf83b458f6751 | 48b7c9dccac77ccb0f57da1dc1d150f356612b13 | refs/heads/master | 2020-07-22T22:31:13.744490 | 2020-02-13T22:54:17 | 2020-02-13T22:54:17 | 207,351,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | #!/usr/bin/python3
def no_c(my_string):
string = ""
for character in my_string:
if character != 'c' and character != 'C':
string = string + character
return string
| [
"[email protected]"
]
| |
c5f0c7f55c1bb473ee3127e3fb4c8ada7b3f9263 | 32cd5452417a6637f5e087c1a0c2c6405fbaf915 | /src/app/share/caheFiles.py | bf976ef14e20e73e12adaf7d364be8fe1b80b256 | []
| no_license | 549982170/SZLife_assisttant | 766bedff61cabe73513a449525f57c8bb5a9afb6 | 014ebc526a671e3b3c972d476ba29439fd7decbf | refs/heads/master | 2021-01-01T03:58:45.182066 | 2018-04-12T10:49:46 | 2018-04-12T10:49:46 | 97,097,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # coding:utf-8
# !/user/bin/python
'''
Created on 2018年4月12日
@author: yizhiwu
内存缓存对象
'''
CACHE = {} # 函数值缓存
LOCK_DICT = {} # 线程池字典 | [
"[email protected]"
]
| |
8e50dd5499fa0d29586e52cf45552d1f423fc3df | f6c6b7ac39deb9f2cc0a7ef8fb6459ca09fc27dd | /handlers/async_tasks.py | 8b33938195c156d334b41d736e78f0fca2f49e83 | [
"MIT"
]
| permissive | via-jiang/celery-demo | 3112b28de81b3bc477629d51453d069e814e8adc | 48eaa732bc9270f6f54eab48ecc57ed98ee52e44 | refs/heads/master | 2021-09-13T18:27:42.893328 | 2018-04-16T01:48:41 | 2018-04-16T01:48:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/1/16 下午4:12
# @Author : Matrix
# @Github : https://github.com/blackmatrix7/
# @Blog : http://www.cnblogs.com/blackmatrix/
# @File : async_task.py
# @Software: PyCharm
import logging
from time import sleep
from manage import celery
__author__ = 'blackmatrix'
@celery.task
def async_send_email(send_from, send_to, subject, content):
"""
    Simulate the work of sending an email asynchronously.
:param send_from:
:param send_to:
:param subject:
:param content:
:return:
"""
logging.info('模拟异步发送邮件的操作')
logging.info(send_from, send_to, subject, content)
    # sleep to simulate a slow operation
sleep(5)
@celery.task
def async_push_message(send_to, content):
"""
    Simulate pushing a message asynchronously.
:param send_to:
:param content:
:return:
"""
logging.info('模拟异步推送消息')
logging.info('send_to: {}'.format(send_to))
logging.info('content: {}'.format(content))
    # sleep to simulate a slow operation
sleep(10)
if __name__ == '__main__':
pass
| [
"[email protected]"
]
| |
cfd915494a9b7b83019c8450e29a4f903f9f4f18 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/fk8.py | 227030f6733236cc971789868e190ee296b49776 | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'fK8':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
50319c876ad3c8f6f001c1a658cf74c13b34cf69 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_26371.py | ff1f16eecf5e83d96d4fb23f7bcb43fa298ba04c | []
| no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,843 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((562.956, 606.319, 542.196), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((598.927, 664.466, 547.104), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((645.243, 731.321, 541.835), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((592.522, 674.052, 426.156), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((759.202, 885.626, 574.56), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((595.975, 638.826, 544.225), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((595.929, 637.9, 544.437), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((620.278, 641.186, 558.718), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((641.038, 622.894, 554.318), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((635.906, 622.418, 526.652), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((651.817, 600.624, 518.694), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((661.159, 575.333, 527.009), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((572.778, 627.147, 555.985), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((752.186, 523.515, 505.736), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((812.378, 708.375, 561.014), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((812.378, 708.375, 561.014), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((784.744, 700.703, 555.708), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((758.637, 689.506, 549.735), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((731.852, 678.504, 548.081), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((704.692, 670.172, 548.629), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((677.016, 666.583, 554.659), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((650.097, 666.686, 563.922), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((881.64, 606.623, 464.7), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((417.405, 721.536, 663.307), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((650.76, 705.733, 581.794), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((650.76, 705.733, 581.794), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((644.523, 699.968, 554.244), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((640.146, 702.53, 525.844), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((631.417, 725.935, 511.35), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((547.772, 636.033, 494.28), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((711.716, 821.821, 525.234), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((585.639, 660.6, 523.058), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((585.303, 660.409, 522.636), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((579.947, 632.76, 515.926), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((586.282, 608.67, 530.694), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((599.815, 594.142, 551.617), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((619.355, 579.085, 565.787), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((639.905, 560.419, 571.567), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((659.487, 577.131, 560.173), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((625.652, 636.703, 612.493), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((692.507, 519.993, 503.511), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((601.128, 675.644, 608.623), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((602.091, 683.543, 583.391), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((606.316, 702.222, 529.232), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((610.545, 720.778, 474.952), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((533.719, 694.295, 470.952), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((672.807, 765.909, 404.604), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((546.476, 632.909, 551.211), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((558.019, 658.362, 547.204), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((570.304, 684.31, 544.861), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((581.01, 711.076, 549.984), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((595.054, 737.023, 552.579), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((609.495, 762.959, 556.117), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((590.523, 686.806, 560.614), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((632.939, 843.689, 553.686), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
]
| |
e8dc54e0d29dcbd3078510bb079021898840fe06 | 26d5c795d8aa83bf5cb3f228675ff51e2f704f57 | /scripts/tuple2json | eb4a6177f70c27c5ba91553a6c9515e04d247a90 | []
| no_license | binarymachines/mercury | 8e13bb10c67a056fe88e02f558d73f1f1b95d028 | db3e2425f4e77a44a97c740f7fff90312a1bd33f | refs/heads/master | 2023-07-08T11:35:26.867494 | 2023-06-25T00:46:23 | 2023-06-25T00:46:23 | 94,708,610 | 2 | 6 | null | 2023-02-15T21:50:06 | 2017-06-18T19:31:50 | Python | UTF-8 | Python | false | false | 2,595 | #!/usr/bin/env python
'''
Usage:
tuple2json --delimiter <delimiter> --keys=<key>... [--skip <num_lines>] [--limit=<limit>]
tuple2json --delimiter <delimiter> --datafile <file> --keys=<key>... [--skip <num_lines>] [--limit=<limit>]
'''
'''
+mdoc+
tuple2json takes a list of tuples (represented as CSV records with the specified delimiter) and turns them
into a list of corresponding key:value JSON records whose keys are the comma-separated, ORDERED list of names
passed to the --keys parameter.
If the --datafile option is set, tuple2json reads its input records from the <file> parameter; if not,
it reads them from standard input.
tuple2json assumes a headerless CSV file; it depends solely on the keys passed to it. If you are transforming a CSV
file which contains a header, you must either remove it before passing the data, or use the --skip parameter;
otherwise the first record it generates will be a nonsense record.
tuple2json is often used in conjunction with tuplegen to turn a set of lists into a single JSONL file.
+mdoc+
'''
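# A hedged usage sketch (the field names `sku` and `price` and the sample rows
# are illustrative, not taken from the original documentation):
#
#   $ printf 'A123,9.99\nB456,4.50\n' | tuple2json --delimiter ',' --keys=sku,price
#   {"sku": "A123", "price": "9.99"}
#   {"sku": "B456", "price": "4.50"}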
import os, sys
import json
import docopt
from snap import common
from mercury.utils import read_stdin
def generate_dict_from_tuple(line: str, delimiter: str, keys: list):
# TODO: decide what to do if key count and token count do not match
output_record = {}
tokens = line.split(delimiter)
index = 0
for key in keys:
output_record[key] = tokens[index]
index += 1
return output_record
def main(args):
keys = args['--keys'][0].split(',')
delimiter = args['<delimiter>']
limit = int(args.get('--limit') or -1)
skip_count = int(args.get('<num_lines>') or -1)
line_count = 0
if args['--datafile']:
with open(args['<file>'], 'r') as f:
for line in f:
if line_count == limit:
break
if line_count < skip_count:
line_count += 1
continue
record = generate_dict_from_tuple(line.strip(), delimiter, keys)
print(json.dumps(record))
line_count += 1
else: # read data from standard input
for line in read_stdin():
if line_count == limit:
break
if line_count < skip_count:
line_count += 1
continue
record = generate_dict_from_tuple(line.strip(), delimiter, keys)
print(json.dumps(record))
line_count += 1
if __name__ == '__main__':
args = docopt.docopt(__doc__)
main(args) | [
"[email protected]"
]
| ||
c91f3a8f8c97bee6dfad05db9551a7fd74a02a78 | 59880d47a533cf1f45f927adafff22d5ffb4796a | /Python/fullStackDjango/fullStackBooks/apps/books/migrations/0002_auto_20170524_0029.py | 9208e593e61e618fa8fa5d072fff9f74e252ea46 | []
| no_license | mightymcalpine/DojoAssignments | 2bc7bb791630040dbb62da917a26b74bbdd574e4 | 9c0d80953f6ddbe840314f3d333b5f4590e0c9f4 | refs/heads/master | 2021-01-18T00:07:07.128554 | 2017-06-05T16:38:35 | 2017-06-05T16:38:35 | 84,257,743 | 0 | 0 | null | 2017-06-02T05:34:36 | 2017-03-07T23:47:27 | Python | UTF-8 | Python | false | false | 383 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-24 00:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('books', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='booksDB',
new_name='bookDB',
),
]
| [
"[email protected]"
]
| |
2fece7e214d7330dd51e1119ce24086e0cdf399f | 02b9a9f40f56502c94d064654e030c521c2f325a | /django_task/utils.py | 533c749435edbf9ae8321797f72884ec9c9c8e23 | [
"MIT"
]
| permissive | samlex20/django-task | 7c454922cf667853bc8678bfbb871e0f014b33b1 | 9966dd9a3366c10b3658298ff29c62250e5ec46f | refs/heads/master | 2020-05-30T21:48:11.947777 | 2019-06-03T04:50:53 | 2019-06-03T04:50:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,459 | py | from __future__ import unicode_literals
import uuid
import os
import time
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils import formats
from django.utils import timezone
from django.apps import apps
def get_object_by_uuid_or_404(model, uuid_pk):
"""
Calls get_object_or_404(model, pk=uuid_pk)
but also prevents "badly formed hexadecimal UUID string" unhandled exception
"""
try:
uuid.UUID(uuid_pk)
except Exception as e:
raise Http404(str(e))
return get_object_or_404(model, pk=uuid_pk)
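
# For example (hypothetical model and request objects, not part of this module):
#   task = get_object_by_uuid_or_404(MyTask, request.GET['id'])
#   # a malformed value such as "not-a-uuid" raises Http404 instead of an
#   # unhandled "badly formed hexadecimal UUID string" ValueError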
def format_datetime(dt, include_time=True):
"""
Apply datetime format suggested for all admin views.
Here we adopt the following rule:
1) format date according to active localization
2) append time in military format
"""
if dt is None:
return ''
# convert to localtime
try:
dt = timezone.localtime(dt)
except ValueError:
# Probably 'astimezone() cannot be applied to a naive datetime'
pass
text = formats.date_format(dt, use_l10n=True, format='SHORT_DATE_FORMAT')
if include_time:
text += dt.strftime(' %H:%M:%S')
return text
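
# For example, with the "en-us" locale active and include_time=True, a UTC
# datetime(2020, 5, 3, 14, 7, 9) would typically render as "05/03/2020 14:07:09"
# (illustrative; the date part and hour depend on the active locale and timezone).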
def remove_file_and_cleanup(filepath):
"""
    Removes the specified file, then its containing folder if left empty
"""
folder = os.path.dirname(filepath)
# remove file
if os.path.isfile(filepath):
os.remove(filepath)
# finally, remove folder if empty
if os.path.isdir(folder) and len(os.listdir(folder)) <= 0:
os.rmdir(folder)
def get_model_from_id(model_cls, id, timeout=1000, retry_count=10):
"""
    Retrieve a record by primary key, retrying up to `retry_count` times spread
    over `timeout` milliseconds; returns None if the record never appears.
"""
dt = timeout / retry_count
for i in range(retry_count):
try:
task = model_cls.objects.get(id=id)
return task
except model_cls.DoesNotExist:
pass
time.sleep(dt / 1000.0)
return None
def revoke_pending_tasks():
from .models import Task
models = apps.get_models()
task_models = [model for model in models if issubclass(model, Task) and model != Task]
counter = 0
for model in task_models:
queryset = model.objects.filter(status__in=Task.TASK_STATUS_PENDING_VALUES)
n = queryset.count()
print('revoking %s objects (%d) ...' % (model.__name__, n))
#model.objects.all().delete()
queryset.update(status='REVOKED')
counter += n
return counter
| [
"[email protected]"
]
| |
45e666a317a1cd2194028b38c269dcc81f2249a7 | 02406958bffadbce13240ea1fb5013bc005fa332 | /src/main/python/matrixext.py | 2b40104f311e9b6f6405f5791190fb618f232aec | []
| no_license | Jcamilorada/Networks | 4b03209f0324e7a00c5236b7215158684ea969ee | 1200529e501f9366bc38bb02b1d45f3079c976d3 | refs/heads/master | 2021-01-10T16:36:06.311138 | 2015-11-17T03:05:44 | 2015-11-17T03:05:44 | 45,878,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,925 | py | from BeautifulSoup import BeautifulSoup
import urllib2
import re
from subprocess import call
from os import walk, path
from joblib import Parallel, delayed
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
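
# The helpers below scrape FIFA World Cup match pages for the per-match
# "passing distribution" PDFs, download them, and convert each PDF into two
# CSV files (header and body regions) by shelling out to the tabula CLI,
# processing the PDFs in parallel with joblib.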
def download_file(download_url):
req = urllib2.Request(download_url, headers=hdr)
try:
page = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e.fp.read()
index = download_url.rfind('/')
file_name = download_url[index + 1:]
f = open(file_name, 'w')
f.write(page.read())
f.close()
def download_pass_file(match_url):
req = urllib2.Request(match_url, headers=hdr)
try:
page = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e.fp.read()
html_page = page.read()
soup = BeautifulSoup(html_page)
for link in soup.findAll('a', attrs={'href': re.compile("passingdistribution\.pdf$")}):
download_file(link.get('href'))
def get_fifa_pass_distributions(championship_url, base_url):
req = urllib2.Request(championship_url, headers=hdr)
try:
page = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print e.fp.read()
html_page = page.read()
soup = BeautifulSoup(html_page)
matches = []
for link in soup.findAll('a', attrs={'href': re.compile("^/worldcup/matches/round=")}):
download_pass_file(base_url + link.get('href'))
def export_fifa_pass_dist_to_csv(file_path, output_path):
file_name = file_path[file_path.rfind('/') + 1:file_path.rfind('.pdf')]
file_name_header = 'header_' + file_name + '.csv'
file_name_body = 'body_' + file_name + '.csv'
call(['jruby -S tabula '+ file_path + ' -a 20.00,12.75,200.5,561 ' + '-o ' + output_path+file_name_header], shell=True)
call(['jruby -S tabula ' + file_path + ' -a 200.00,12.75,700.5,561 ' + '-o ' + output_path+file_name_body], shell=True)
def export_fifa_dir(files_path, output_path):
file_names = []
for root, dirs, files in walk(files_path):
for name in files:
if 'pdf' in name:
file_names.append(path.join(root, name))
else:
continue
n_jobs = 10
Parallel(n_jobs=n_jobs, verbose=50)(delayed(export_fifa_pass_dist_to_csv)(file_name, output_path) for file_name in file_names)
export_fifa_dir(
'/Users/developer3/git/Networks/fifa_2014_pass_distributions/pdf_raw_data/',
'/Users/developer3/git/Networks/fifa_2014_pass_distributions/csv/'
)
#get_fifa_pass_distributions('http://www.fifa.com/worldcup/archive/brazil2014/matches/index.html', 'http://www.fifa.com/')
| [
"[email protected]"
]
| |
960761f93b8ca58af370503f84744e74126847fb | 74f68049235709841416010ec1e18e8085762113 | /.history/recursion_20191203122123.py | 7190b3a32cacbf96d5a3743a23aa3f6f8bcaf7a3 | []
| no_license | camilooob/pythonisfun | 749692517a6c6b147c428996c01fb3b2fa1aef14 | 55453e1f80f92c3756ee5f0338f93fc2a0d9beec | refs/heads/master | 2020-09-21T21:59:44.308526 | 2020-01-20T17:41:56 | 2020-01-20T17:41:56 | 224,947,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | def rec_count (number):
print (number)
# Base case
if number == 0:
return
rec_count (number + 1) # A recursive call with a different argument
print (number)
rec_count (5)
| [
"[email protected]"
]
| |
3f06ec717874a0469c361153d673f147c29904fb | b5a6f10c886fba6584d2ac7b4a29c69975826dbb | /clients/python/pricemonitor_api_client/models/get_all_domains_v3_api_response.py | 12e4f77837ed38236957ce51bf022e5fd7b9404f | []
| no_license | Patagona/pricemonitor-clients | 8c4f842ca3d4e459c77ac329ad488cb3e4c858bf | cf2d689bf9ed6ddea9501324cada918c3a88b4f8 | refs/heads/master | 2023-08-31T20:12:58.844253 | 2023-08-31T15:26:25 | 2023-08-31T15:26:25 | 279,618,794 | 1 | 1 | null | 2023-07-03T13:55:28 | 2020-07-14T15:09:38 | Python | UTF-8 | Python | false | false | 3,671 | py | # coding: utf-8
"""
Pricemonitor API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 0.0.6561
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pricemonitor_api_client.configuration import Configuration
class GetAllDomainsV3ApiResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'data': 'ComPatagonaPricemonitorShareApiGetAllDomainsV3'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None, local_vars_configuration=None): # noqa: E501
"""GetAllDomainsV3ApiResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._data = None
self.discriminator = None
self.data = data
@property
def data(self):
"""Gets the data of this GetAllDomainsV3ApiResponse. # noqa: E501
:return: The data of this GetAllDomainsV3ApiResponse. # noqa: E501
:rtype: ComPatagonaPricemonitorShareApiGetAllDomainsV3
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this GetAllDomainsV3ApiResponse.
:param data: The data of this GetAllDomainsV3ApiResponse. # noqa: E501
:type: ComPatagonaPricemonitorShareApiGetAllDomainsV3
"""
if self.local_vars_configuration.client_side_validation and data is None: # noqa: E501
raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetAllDomainsV3ApiResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, GetAllDomainsV3ApiResponse):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
]
| |
648499454a893c6b90a2797d465b43402210e6f4 | e3db01c4f135b4a350e0a326a88015ce99e632f3 | /verce-hpc-pe/src/prov/serializers/provjson.py | a8ce82138203cae2c8e6207c841befd287536974 | [
"MIT"
]
| permissive | KNMI/VERCE | a5eb51fb09051e6bffdb6639798a5732a0e07470 | c2f9eaa70ecf1621a218afc5d73ca1304ca8ed36 | refs/heads/master | 2021-05-04T10:14:31.072177 | 2019-01-07T10:19:14 | 2019-01-07T10:19:14 | 51,840,168 | 2 | 4 | MIT | 2019-01-07T10:19:15 | 2016-02-16T14:05:03 | JavaScript | UTF-8 | Python | false | false | 13,610 | py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
__author__ = 'Trung Dong Huynh'
__email__ = '[email protected]'
import logging
logger = logging.getLogger(__name__)
from collections import defaultdict
import datetime
import io
import json
from prov.serializers import Serializer, Error
from prov.constants import *
from prov.model import (Literal, Identifier, QualifiedName,
Namespace, ProvDocument, ProvBundle, first,
parse_xsd_datetime)
class ProvJSONException(Error):
pass
class AnonymousIDGenerator:
def __init__(self):
self._cache = {}
self._count = 0
def get_anon_id(self, obj, local_prefix='id'):
if obj not in self._cache:
self._count += 1
self._cache[obj] = Identifier(
'_:%s%d' % (local_prefix, self._count)
)
return self._cache[obj]
# Reverse map for prov.model.XSD_DATATYPE_PARSERS
LITERAL_XSDTYPE_MAP = {
float: 'xsd:double',
int: 'xsd:int'
# boolean, string values are supported natively by PROV-JSON
# datetime values are converted separately
}
# Add long on Python 2
if six.integer_types[-1] not in LITERAL_XSDTYPE_MAP:
LITERAL_XSDTYPE_MAP[six.integer_types[-1]] = 'xsd:long'
class ProvJSONSerializer(Serializer):
"""
PROV-JSON serializer for :class:`~prov.model.ProvDocument`
"""
def serialize(self, stream, **kwargs):
"""
Serializes a :class:`~prov.model.ProvDocument` instance to
`PROV-JSON <https://provenance.ecs.soton.ac.uk/prov-json/>`_.
:param stream: Where to save the output.
"""
if six.PY2:
buf = io.BytesIO()
try:
json.dump(self.document, buf, cls=ProvJSONEncoder,
**kwargs)
buf.seek(0, 0)
# Right now this is a bytestream. If the object to stream to is
# a text object is must be decoded. We assume utf-8 here which
# should be fine for almost every case.
if isinstance(stream, io.TextIOBase):
stream.write(buf.read().decode('utf-8'))
else:
stream.write(buf.read())
finally:
buf.close()
else:
buf = io.StringIO()
try:
json.dump(self.document, buf, cls=ProvJSONEncoder,
**kwargs)
buf.seek(0, 0)
# Right now this is a bytestream. If the object to stream to is
# a text object is must be decoded. We assume utf-8 here which
# should be fine for almost every case.
if isinstance(stream, io.TextIOBase):
stream.write(buf.read())
else:
stream.write(buf.read().encode('utf-8'))
finally:
buf.close()
def deserialize(self, stream, **kwargs):
"""
Deserialize from the `PROV JSON
<https://provenance.ecs.soton.ac.uk/prov-json/>`_ representation to a
:class:`~prov.model.ProvDocument` instance.
:param stream: Input data.
"""
if not isinstance(stream, io.TextIOBase):
buf = io.StringIO(stream.read().decode('utf-8'))
stream = buf
return json.load(stream, cls=ProvJSONDecoder, **kwargs)
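
# A hedged usage sketch (the namespace URI and entity name are illustrative):
#
#   import io
#   from prov.model import ProvDocument
#   doc = ProvDocument()
#   doc.add_namespace('ex', 'http://example.org/')
#   doc.entity('ex:article')
#   buf = io.StringIO()
#   ProvJSONSerializer(doc).serialize(buf)   # buf now holds the PROV-JSON text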
class ProvJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ProvDocument):
return encode_json_document(o)
else:
return super(ProvJSONEncoder, self).encode(o)
class ProvJSONDecoder(json.JSONDecoder):
def decode(self, s, *args, **kwargs):
container = super(ProvJSONDecoder, self).decode(s, *args, **kwargs)
document = ProvDocument()
decode_json_document(container, document)
return document
# Encoding/decoding functions
def valid_qualified_name(bundle, value):
if value is None:
return None
qualified_name = bundle.valid_qualified_name(value)
return qualified_name
def encode_json_document(document):
container = encode_json_container(document)
for bundle in document.bundles:
# encoding the sub-bundle
bundle_json = encode_json_container(bundle)
container['bundle'][six.text_type(bundle.identifier)] = bundle_json
return container
def encode_json_container(bundle):
container = defaultdict(dict)
prefixes = {}
for namespace in bundle._namespaces.get_registered_namespaces():
prefixes[namespace.prefix] = namespace.uri
if bundle._namespaces._default:
prefixes['default'] = bundle._namespaces._default.uri
if prefixes:
container['prefix'] = prefixes
id_generator = AnonymousIDGenerator()
real_or_anon_id = \
lambda r: (
r._identifier if r._identifier else id_generator.get_anon_id(r)
)
for record in bundle._records:
rec_type = record.get_type()
rec_label = PROV_N_MAP[rec_type]
identifier = six.text_type(real_or_anon_id(record))
record_json = {}
if record._attributes:
for (attr, values) in record._attributes.items():
if not values:
continue
attr_name = six.text_type(attr)
if attr in PROV_ATTRIBUTE_QNAMES:
# TODO: QName export
record_json[attr_name] = six.text_type(first(values))
elif attr in PROV_ATTRIBUTE_LITERALS:
record_json[attr_name] = first(values).isoformat()
else:
if len(values) == 1:
# single value
record_json[attr_name] = encode_json_representation(
first(values)
)
else:
# multiple values
record_json[attr_name] = list(
encode_json_representation(value)
for value in values
)
# Check if the container already has the id of the record
if identifier not in container[rec_label]:
# this is the first instance, just put in the new record
container[rec_label][identifier] = record_json
else:
# the container already has some record(s) of the same identifier
# check if this is the second instance
current_content = container[rec_label][identifier]
if hasattr(current_content, 'items'):
# this is a dict, make it a singleton list
container[rec_label][identifier] = [current_content]
# now append the new record to the list
container[rec_label][identifier].append(record_json)
return container
def decode_json_document(content, document):
bundles = dict()
if 'bundle' in content:
bundles = content['bundle']
del content['bundle']
decode_json_container(content, document)
for bundle_id, bundle_content in bundles.items():
bundle = ProvBundle(document=document)
decode_json_container(bundle_content, bundle)
document.add_bundle(bundle, bundle.valid_qualified_name(bundle_id))
def decode_json_container(jc, bundle):
if 'prefix' in jc:
prefixes = jc['prefix']
for prefix, uri in prefixes.items():
if prefix != 'default':
bundle.add_namespace(Namespace(prefix, uri))
else:
bundle.set_default_namespace(uri)
del jc['prefix']
for rec_type_str in jc:
rec_type = PROV_RECORD_IDS_MAP[rec_type_str]
for rec_id, content in jc[rec_type_str].items():
if hasattr(content, 'items'): # it is a dict
# There is only one element, create a singleton list
elements = [content]
else:
# expect it to be a list of dictionaries
elements = content
for element in elements:
attributes = dict()
other_attributes = []
# this is for the multiple-entity membership hack to come
membership_extra_members = None
for attr_name, values in element.items():
attr = (
PROV_ATTRIBUTES_ID_MAP[attr_name]
if attr_name in PROV_ATTRIBUTES_ID_MAP
else valid_qualified_name(bundle, attr_name)
)
if attr in PROV_ATTRIBUTES:
if isinstance(values, list):
# only one value is allowed
if len(values) > 1:
# unless it is the membership hack
if rec_type == PROV_MEMBERSHIP and \
attr == PROV_ATTR_ENTITY:
# This is a membership relation with
# multiple entities
# HACK: create multiple membership
# relations, one for each entity
# Store all the extra entities
membership_extra_members = values[1:]
# Create the first membership relation as
# normal for the first entity
value = values[0]
else:
error_msg = (
'The prov package does not support PROV'
' attributes having multiple values.'
)
logger.error(error_msg)
raise ProvJSONException(error_msg)
else:
value = values[0]
else:
value = values
value = (
valid_qualified_name(bundle, value)
if attr in PROV_ATTRIBUTE_QNAMES
else parse_xsd_datetime(value)
)
attributes[attr] = value
else:
if isinstance(values, list):
other_attributes.extend(
(
attr,
decode_json_representation(value, bundle)
)
for value in values
)
else:
# single value
other_attributes.append(
(
attr,
decode_json_representation(values, bundle)
)
)
bundle.new_record(
rec_type, rec_id, attributes, other_attributes
)
# HACK: creating extra (unidentified) membership relations
if membership_extra_members:
collection = attributes[PROV_ATTR_COLLECTION]
for member in membership_extra_members:
bundle.membership(
collection, valid_qualified_name(bundle, member)
)
def encode_json_representation(value):
if isinstance(value, Literal):
return literal_json_representation(value)
elif isinstance(value, datetime.datetime):
return {'$': value.isoformat(), 'type': 'xsd:dateTime'}
elif isinstance(value, QualifiedName):
# TODO Manage prefix in the whole structure consistently
# TODO QName export
return {'$': str(value), 'type': PROV_QUALIFIEDNAME._str}
elif isinstance(value, Identifier):
return {'$': value.uri, 'type': 'xsd:anyURI'}
elif type(value) in LITERAL_XSDTYPE_MAP:
return {'$': value, 'type': LITERAL_XSDTYPE_MAP[type(value)]}
else:
return value
def decode_json_representation(literal, bundle):
if isinstance(literal, dict):
# complex type
value = literal['$']
datatype = literal['type'] if 'type' in literal else None
datatype = valid_qualified_name(bundle, datatype)
langtag = literal['lang'] if 'lang' in literal else None
if datatype == XSD_ANYURI:
return Identifier(value)
elif datatype == PROV_QUALIFIEDNAME:
return valid_qualified_name(bundle, value)
else:
# The literal of standard Python types is not converted here
# It will be automatically converted when added to a record by
# _auto_literal_conversion()
return Literal(value, datatype, langtag)
else:
# simple type, just return it
return literal
def literal_json_representation(literal):
# TODO: QName export
value, datatype, langtag = literal.value, literal.datatype, literal.langtag
if langtag:
return {'$': value, 'lang': langtag}
else:
return {'$': value, 'type': six.text_type(datatype)}
| [
"[email protected]"
]
| |
58e97efc151d9f3a2da1fd1d8c4410862d39e548 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.5/django/contrib/localflavor/ar/forms.py | 05e085c6b238aa4077d4a3d12a5509635708e82d | []
| no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.5/django/contrib/localflavor/ar/forms.py | [
"[email protected]"
]
| |
ddbc423127a5ff18708f91ef19571a84ab3e11c5 | 483d0589f1efc10af3cbffe44b8abfe8031bf776 | /descarteslabs/third_party/funcsigs/funcsigs/__init__.py | 78600102e03ac1514a7c19b08f3162208df98f7d | [
"Apache-2.0"
]
| permissive | grpecunia/descarteslabs-python | 3f4129748c8b251c76cf094d34485ea2e563d174 | c99c3091f2629c758a27de4d8fd6e4a39a7b3013 | refs/heads/master | 2020-09-11T14:51:00.390072 | 2019-11-15T20:41:27 | 2019-11-15T21:02:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,787 | py | # Copyright 2001-2013 Python Software Foundation; All Rights Reserved
"""Function signature objects for callables
Back port of Python 3.3's function signature tools from the inspect module,
modified to be compatible with Python 2.6, 2.7 and 3.3+.
"""
from __future__ import absolute_import, division, print_function
import itertools
import functools
import re
import types
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from .version import __version__ # noqa: F401
__all__ = ["BoundArguments", "Parameter", "Signature", "signature"]
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (
_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType,
)
def formatannotation(annotation, base_module=None):
if isinstance(annotation, type):
if annotation.__module__ in ("builtins", "__builtin__", base_module):
return annotation.__name__
return annotation.__module__ + "." + annotation.__name__
return repr(annotation)
def _get_user_defined_method(cls, method_name, *nested):
try:
if cls is type:
return
meth = getattr(cls, method_name)
for name in nested:
meth = getattr(meth, name, meth)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
# Once '__signature__' will be added to 'C'-level
# callables, this check won't be necessary
return meth
def signature(obj):
"""Get a signature object for the passed callable."""
if not callable(obj):
raise TypeError("{0!r} is not a callable object".format(obj))
if isinstance(obj, types.MethodType):
sig = signature(obj.__func__)
if obj.__self__ is None:
# Unbound method - preserve as-is.
return sig
else:
# Bound method. Eat self - if we can.
params = tuple(sig.parameters.values())
if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
raise ValueError("invalid method signature")
kind = params[0].kind
if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
# Drop first parameter:
# '(p1, p2[, ...])' -> '(p2[, ...])'
params = params[1:]
else:
if kind is not _VAR_POSITIONAL:
# Unless we add a new parameter type we never
# get here
raise ValueError("invalid argument type")
# It's a var-positional parameter.
# Do nothing. '(*args[, ...])' -> '(*args[, ...])'
return sig.replace(parameters=params)
try:
sig = obj.__signature__
except AttributeError:
pass
else:
if sig is not None:
return sig
try:
# Was this function wrapped by a decorator?
wrapped = obj.__wrapped__
except AttributeError:
pass
else:
return signature(wrapped)
if isinstance(obj, types.FunctionType):
return Signature.from_function(obj)
if isinstance(obj, functools.partial):
sig = signature(obj.func)
new_params = OrderedDict(sig.parameters.items())
partial_args = obj.args or ()
partial_keywords = obj.keywords or {}
try:
ba = sig.bind_partial(*partial_args, **partial_keywords)
except TypeError:
msg = "partial object {0!r} has incorrect arguments".format(obj)
raise ValueError(msg)
for arg_name, arg_value in ba.arguments.items():
param = new_params[arg_name]
if arg_name in partial_keywords:
# We set a new default value, because the following code
# is correct:
#
# >>> def foo(a): print(a)
# >>> print(partial(partial(foo, a=10), a=20)())
# 20
# >>> print(partial(partial(foo, a=10), a=20)(a=30))
# 30
#
# So, with 'partial' objects, passing a keyword argument is
# like setting a new default value for the corresponding
# parameter
#
# We also mark this parameter with '_partial_kwarg'
# flag. Later, in '_bind', the 'default' value of this
# parameter will be added to 'kwargs', to simulate
# the 'functools.partial' real call.
new_params[arg_name] = param.replace(
default=arg_value, _partial_kwarg=True
)
elif (
param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL)
and not param._partial_kwarg
):
new_params.pop(arg_name)
return sig.replace(parameters=list(new_params.values()))
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _get_user_defined_method(type(obj), "__call__")
if call is not None:
sig = signature(call)
else:
# Now we check if the 'obj' class has a '__new__' method
new = _get_user_defined_method(obj, "__new__")
if new is not None:
sig = signature(new)
else:
# Finally, we should have at least __init__ implemented
init = _get_user_defined_method(obj, "__init__")
if init is not None:
sig = signature(init)
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _get_user_defined_method(type(obj), "__call__", "im_func")
if call is not None:
sig = signature(call)
if sig is not None:
# For classes and objects we skip the first parameter of their
# __call__, __new__, or __init__ methods
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = "no signature found for builtin function {0!r}".format(obj)
raise ValueError(msg)
raise ValueError("callable {0!r} is not supported by signature".format(obj))
class _void(object):
"""A private marker - used in Parameter & Signature"""
class _empty(object):
pass
class _ParameterKind(int):
def __new__(self, *args, **kwargs):
obj = int.__new__(self, *args)
obj._name = kwargs["name"]
return obj
def __str__(self):
return self._name
def __repr__(self):
return "<_ParameterKind: {0!r}>".format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name="POSITIONAL_ONLY")
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name="POSITIONAL_OR_KEYWORD")
_VAR_POSITIONAL = _ParameterKind(2, name="VAR_POSITIONAL")
_KEYWORD_ONLY = _ParameterKind(3, name="KEYWORD_ONLY")
_VAR_KEYWORD = _ParameterKind(4, name="VAR_KEYWORD")
class Parameter(object):
"""Represents a parameter in a function signature.
Has the following public attributes:
* name : str
The name of the parameter as a string.
* default : object
The default value for the parameter if specified. If the
parameter has no default value, this attribute is not set.
* annotation
The annotation for the parameter if specified. If the
parameter has no annotation, this attribute is not set.
* kind : str
Describes how argument values are bound to the parameter.
Possible values: `Parameter.POSITIONAL_ONLY`,
`Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
`Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
"""
__slots__ = ("_name", "_kind", "_default", "_annotation", "_partial_kwarg")
POSITIONAL_ONLY = _POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = _VAR_POSITIONAL
KEYWORD_ONLY = _KEYWORD_ONLY
VAR_KEYWORD = _VAR_KEYWORD
empty = _empty
def __init__(
self, name, kind, default=_empty, annotation=_empty, _partial_kwarg=False
):
if kind not in (
_POSITIONAL_ONLY,
_POSITIONAL_OR_KEYWORD,
_VAR_POSITIONAL,
_KEYWORD_ONLY,
_VAR_KEYWORD,
):
raise ValueError("invalid value for 'Parameter.kind' attribute")
self._kind = kind
if default is not _empty:
if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
msg = "{0} parameters cannot have default values".format(kind)
raise ValueError(msg)
self._default = default
self._annotation = annotation
if name is None:
if kind != _POSITIONAL_ONLY:
raise ValueError(
"None is not a valid name for a " "non-positional-only parameter"
)
self._name = name
else:
name = str(name)
if kind != _POSITIONAL_ONLY and not re.match(r"[a-z_]\w*$", name, re.I):
msg = "{0!r} is not a valid parameter name".format(name)
raise ValueError(msg)
self._name = name
self._partial_kwarg = _partial_kwarg
@property
def name(self):
return self._name
@property
def default(self):
return self._default
@property
def annotation(self):
return self._annotation
@property
def kind(self):
return self._kind
def replace(
self,
name=_void,
kind=_void,
annotation=_void,
default=_void,
_partial_kwarg=_void,
):
"""Creates a customized copy of the Parameter."""
if name is _void:
name = self._name
if kind is _void:
kind = self._kind
if annotation is _void:
annotation = self._annotation
if default is _void:
default = self._default
if _partial_kwarg is _void:
_partial_kwarg = self._partial_kwarg
return type(self)(
name,
kind,
default=default,
annotation=annotation,
_partial_kwarg=_partial_kwarg,
)
def __str__(self):
kind = self.kind
formatted = self._name
if kind == _POSITIONAL_ONLY:
if formatted is None:
formatted = ""
formatted = "<{0}>".format(formatted)
# Add annotation and default value
if self._annotation is not _empty:
formatted = "{0}:{1}".format(formatted, formatannotation(self._annotation))
if self._default is not _empty:
formatted = "{0}={1}".format(formatted, repr(self._default))
if kind == _VAR_POSITIONAL:
formatted = "*" + formatted
elif kind == _VAR_KEYWORD:
formatted = "**" + formatted
return formatted
def __repr__(self):
return "<{0} at {1:#x} {2!r}>".format(
self.__class__.__name__, id(self), self.name
)
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
return (
issubclass(other.__class__, Parameter)
and self._name == other._name
and self._kind == other._kind
and self._default == other._default
and self._annotation == other._annotation
)
def __ne__(self, other):
return not self.__eq__(other)
class BoundArguments(object):
"""Result of `Signature.bind` call. Holds the mapping of arguments
to the function's parameters.
Has the following public attributes:
* arguments : OrderedDict
An ordered mutable mapping of parameters' names to arguments' values.
Does not contain arguments' default values.
* signature : Signature
The Signature object that created this instance.
* args : tuple
Tuple of positional arguments values.
* kwargs : dict
Dict of keyword arguments values.
"""
def __init__(self, signature, arguments):
self.arguments = arguments
self._signature = signature
@property
def signature(self):
return self._signature
@property
def args(self):
args = []
for param_name, param in self._signature.parameters.items():
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or param._partial_kwarg:
# Keyword arguments mapped by 'functools.partial'
# (Parameter._partial_kwarg is True) are mapped
# in 'BoundArguments.kwargs', along with VAR_KEYWORD &
# KEYWORD_ONLY
break
try:
arg = self.arguments[param_name]
except KeyError:
# We're done here. Other arguments
# will be mapped in 'BoundArguments.kwargs'
break
else:
if param.kind == _VAR_POSITIONAL:
# *args
args.extend(arg)
else:
# plain argument
args.append(arg)
return tuple(args)
@property
def kwargs(self):
kwargs = {}
kwargs_started = False
for param_name, param in self._signature.parameters.items():
if not kwargs_started:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or param._partial_kwarg:
kwargs_started = True
else:
if param_name not in self.arguments:
kwargs_started = True
continue
if not kwargs_started:
continue
try:
arg = self.arguments[param_name]
except KeyError:
pass
else:
if param.kind == _VAR_KEYWORD:
# **kwargs
kwargs.update(arg)
else:
# plain keyword argument
kwargs[param_name] = arg
return kwargs
def apply_defaults(self):
"""Set default values for missing arguments.
For variable-positional arguments (*args) the default is an
empty tuple.
For variable-keyword arguments (**kwargs) the default is an
empty dict.
"""
arguments = self.arguments
new_arguments = []
for name, param in self._signature.parameters.items():
try:
new_arguments.append((name, arguments[name]))
except KeyError:
if param.default is not _empty:
val = param.default
elif param.kind is _VAR_POSITIONAL:
val = ()
elif param.kind is _VAR_KEYWORD:
val = {}
else:
# This BoundArguments was likely produced by
# Signature.bind_partial().
continue
new_arguments.append((name, val))
self.arguments = OrderedDict(new_arguments)
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
return (
issubclass(other.__class__, BoundArguments)
and self.signature == other.signature
and self.arguments == other.arguments
)
def __ne__(self, other):
return not self.__eq__(other)
class Signature(object):
"""A Signature object represents the overall signature of a function.
It stores a Parameter object for each parameter accepted by the
function, as well as information specific to the function itself.
A Signature object has the following public attributes and methods:
* parameters : OrderedDict
An ordered mapping of parameters' names to the corresponding
Parameter objects (keyword-only arguments are in the same order
as listed in `code.co_varnames`).
* return_annotation : object
The annotation for the return type of the function if specified.
If the function has no annotation for its return type, this
attribute is not set.
* bind(*args, **kwargs) -> BoundArguments
Creates a mapping from positional and keyword arguments to
parameters.
* bind_partial(*args, **kwargs) -> BoundArguments
Creates a partial mapping from positional and keyword arguments
to parameters (simulating 'functools.partial' behavior.)
"""
__slots__ = ("_return_annotation", "_parameters")
_parameter_cls = Parameter
_bound_arguments_cls = BoundArguments
empty = _empty
def __init__(
self, parameters=None, return_annotation=_empty, __validate_parameters__=True
):
"""Constructs Signature from the given list of Parameter
objects and 'return_annotation'. All arguments are optional.
"""
if parameters is None:
params = OrderedDict()
else:
if __validate_parameters__:
params = OrderedDict()
top_kind = _POSITIONAL_ONLY
for idx, param in enumerate(parameters):
kind = param.kind
if kind < top_kind:
msg = "wrong parameter order: {0} before {1}"
msg = msg.format(top_kind, param.kind)
raise ValueError(msg)
else:
top_kind = kind
name = param.name
if name is None:
name = str(idx)
param = param.replace(name=name)
if name in params:
msg = "duplicate parameter name: {0!r}".format(name)
raise ValueError(msg)
params[name] = param
else:
params = OrderedDict(((param.name, param) for param in parameters))
self._parameters = params
self._return_annotation = return_annotation
@classmethod
def from_function(cls, func):
"""Constructs Signature for the given python function"""
if not isinstance(func, types.FunctionType):
raise TypeError("{0!r} is not a Python function".format(func))
Parameter = cls._parameter_cls
# Parameter information.
func_code = func.__code__
pos_count = func_code.co_argcount
arg_names = func_code.co_varnames
positional = tuple(arg_names[:pos_count])
keyword_only_count = getattr(func_code, "co_kwonlyargcount", 0)
keyword_only = arg_names[pos_count : (pos_count + keyword_only_count)]
annotations = getattr(func, "__annotations__", {})
defaults = func.__defaults__
kwdefaults = getattr(func, "__kwdefaults__", None)
if defaults:
pos_default_count = len(defaults)
else:
pos_default_count = 0
parameters = []
# Non-keyword-only parameters w/o defaults.
non_default_count = pos_count - pos_default_count
for name in positional[:non_default_count]:
annotation = annotations.get(name, _empty)
parameters.append(
Parameter(name, annotation=annotation, kind=_POSITIONAL_OR_KEYWORD)
)
# ... w/ defaults.
for offset, name in enumerate(positional[non_default_count:]):
annotation = annotations.get(name, _empty)
parameters.append(
Parameter(
name,
annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD,
default=defaults[offset],
)
)
# *args
if func_code.co_flags & 0x04:
name = arg_names[pos_count + keyword_only_count]
annotation = annotations.get(name, _empty)
parameters.append(
Parameter(name, annotation=annotation, kind=_VAR_POSITIONAL)
)
# Keyword-only parameters.
for name in keyword_only:
default = _empty
if kwdefaults is not None:
default = kwdefaults.get(name, _empty)
annotation = annotations.get(name, _empty)
parameters.append(
Parameter(
name, annotation=annotation, kind=_KEYWORD_ONLY, default=default
)
)
# **kwargs
if func_code.co_flags & 0x08:
index = pos_count + keyword_only_count
if func_code.co_flags & 0x04:
index += 1
name = arg_names[index]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation, kind=_VAR_KEYWORD))
return cls(
parameters,
return_annotation=annotations.get("return", _empty),
__validate_parameters__=False,
)
@property
def parameters(self):
try:
return types.MappingProxyType(self._parameters)
except AttributeError:
return OrderedDict(self._parameters.items())
@property
def return_annotation(self):
return self._return_annotation
def replace(self, parameters=_void, return_annotation=_void):
"""Creates a customized copy of the Signature.
Pass 'parameters' and/or 'return_annotation' arguments
to override them in the new copy.
"""
if parameters is _void:
parameters = self.parameters.values()
if return_annotation is _void:
return_annotation = self._return_annotation
return type(self)(parameters, return_annotation=return_annotation)
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
if (
not issubclass(type(other), Signature)
or self.return_annotation != other.return_annotation
or len(self.parameters) != len(other.parameters)
):
return False
other_positions = dict(
(param, idx) for idx, param in enumerate(other.parameters.keys())
)
for idx, (param_name, param) in enumerate(self.parameters.items()):
if param.kind == _KEYWORD_ONLY:
try:
other_param = other.parameters[param_name]
except KeyError:
return False
else:
if param != other_param:
return False
else:
try:
other_idx = other_positions[param_name]
except KeyError:
return False
else:
if idx != other_idx or param != other.parameters[param_name]:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def _bind(self, args, kwargs, partial=False):
"""Private method. Don't use directly."""
arguments = OrderedDict()
parameters = iter(self.parameters.values())
parameters_ex = ()
arg_vals = iter(args)
if partial:
# Support for binding arguments to 'functools.partial' objects.
# See 'functools.partial' case in 'signature()' implementation
# for details.
for param_name, param in self.parameters.items():
if param._partial_kwarg and param_name not in kwargs:
# Simulating 'functools.partial' behavior
kwargs[param_name] = param.default
while True:
# Let's iterate through the positional arguments and corresponding
# parameters
try:
arg_val = next(arg_vals)
except StopIteration:
# No more positional arguments
try:
param = next(parameters)
except StopIteration:
# No more parameters. That's it. Just need to check that
# we have no `kwargs` after this while loop
break
else:
if param.kind == _VAR_POSITIONAL:
# That's OK, just empty *args. Let's start parsing
# kwargs
break
elif param.name in kwargs:
if param.kind == _POSITIONAL_ONLY:
msg = (
"{arg!r} parameter is positional only, "
"but was passed as a keyword"
)
msg = msg.format(arg=param.name)
raise TypeError(msg)
parameters_ex = (param,)
break
elif param.kind == _VAR_KEYWORD or param.default is not _empty:
# That's fine too - we have a default value for this
# parameter. So, lets start parsing `kwargs`, starting
# with the current parameter
parameters_ex = (param,)
break
else:
# No default, not VAR_KEYWORD, not VAR_POSITIONAL,
# not in `kwargs`
if partial:
parameters_ex = (param,)
break
else:
msg = "missing a required argument: {arg!r}"
msg = msg.format(arg=param.name)
raise TypeError(msg)
else:
# We have a positional argument to process
try:
param = next(parameters)
except StopIteration:
raise TypeError("too many positional arguments")
else:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
# Looks like we have no parameter for this positional
# argument
raise TypeError("too many positional arguments")
if param.kind == _VAR_POSITIONAL:
# We have an '*args'-like argument, let's fill it with
# all positional arguments we have left and move on to
# the next phase
values = [arg_val]
values.extend(arg_vals)
arguments[param.name] = tuple(values)
break
if param.name in kwargs:
raise TypeError(
"multiple values for argument "
"{arg!r}".format(arg=param.name)
)
arguments[param.name] = arg_val
# Now, we iterate through the remaining parameters to process
# keyword arguments
kwargs_param = None
for param in itertools.chain(parameters_ex, parameters):
if param.kind == _VAR_KEYWORD:
# Memorize that we have a '**kwargs'-like parameter
kwargs_param = param
continue
if param.kind == _VAR_POSITIONAL:
# Named arguments don't refer to '*args'-like parameters.
# We only arrive here if the positional arguments ended
# before reaching the last parameter before *args.
continue
param_name = param.name
try:
arg_val = kwargs.pop(param_name)
except KeyError:
# We have no value for this parameter. It's fine though,
# if it has a default value, or it is an '*args'-like
# parameter, left alone by the processing of positional
# arguments.
if (
not partial
and param.kind != _VAR_POSITIONAL
and param.default is _empty
):
raise TypeError(
"missing a required argument: {arg!r}".format(arg=param_name)
)
else:
if param.kind == _POSITIONAL_ONLY:
# This should never happen in case of a properly built
# Signature object (but let's have this check here
# to ensure correct behaviour just in case)
raise TypeError(
"{arg!r} parameter is positional only, "
"but was passed as a keyword".format(arg=param.name)
)
arguments[param_name] = arg_val
if kwargs:
if kwargs_param is not None:
# Process our '**kwargs'-like parameter
arguments[kwargs_param.name] = kwargs
else:
raise TypeError(
"got an unexpected keyword argument {arg!r}".format(
arg=next(iter(kwargs))
)
)
return self._bound_arguments_cls(self, arguments)
def bind(*args, **kwargs):
"""Get a BoundArguments object, that maps the passed `args`
and `kwargs` to the function's signature. Raises `TypeError`
if the passed arguments can not be bound.
"""
return args[0]._bind(args[1:], kwargs)
def bind_partial(self, *args, **kwargs):
"""Get a BoundArguments object, that partially maps the
passed `args` and `kwargs` to the function's signature.
Raises `TypeError` if the passed arguments can not be bound.
"""
return self._bind(args, kwargs, partial=True)
def __str__(self):
result = []
render_kw_only_separator = True
for idx, param in enumerate(self.parameters.values()):
formatted = str(param)
kind = param.kind
if kind == _VAR_POSITIONAL:
# OK, we have an '*args'-like parameter, so we won't need
# a '*' to separate keyword-only arguments
render_kw_only_separator = False
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
# We have a keyword-only parameter to render and we haven't
# rendered an '*args'-like parameter before, so add a '*'
# separator to the parameters list ("foo(arg1, *, arg2)" case)
result.append("*")
# This condition should be only triggered once, so
# reset the flag
render_kw_only_separator = False
result.append(formatted)
rendered = "({0})".format(", ".join(result))
if self.return_annotation is not _empty:
anno = formatannotation(self.return_annotation)
rendered += " -> {0}".format(anno)
return rendered
| [
"[email protected]"
]
| |
7ffb45eecdac616fe9c5ce26f57f0bae55224092 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/number/test_recorder.py | 635354b11760014111d91d22eb6242e3d9db539a | [
"Apache-2.0"
]
| permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 2,035 | py | """The tests for number recorder."""
from __future__ import annotations
from datetime import timedelta
from unittest.mock import patch
import pytest
from homeassistant.components import number
from homeassistant.components.number import ATTR_MAX, ATTR_MIN, ATTR_MODE, ATTR_STEP
from homeassistant.components.recorder import Recorder
from homeassistant.components.recorder.history import get_significant_states
from homeassistant.const import ATTR_FRIENDLY_NAME, Platform
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.recorder.common import async_wait_recording_done
@pytest.fixture(autouse=True)
async def number_only() -> None:
"""Enable only the number platform."""
with patch(
"homeassistant.components.demo.COMPONENTS_WITH_CONFIG_ENTRY_DEMO_PLATFORM",
[Platform.NUMBER],
):
yield
async def test_exclude_attributes(recorder_mock: Recorder, hass: HomeAssistant) -> None:
"""Test number registered attributes to be excluded."""
assert await async_setup_component(hass, "homeassistant", {})
await async_setup_component(
hass, number.DOMAIN, {number.DOMAIN: {"platform": "demo"}}
)
await hass.async_block_till_done()
now = dt_util.utcnow()
async_fire_time_changed(hass, now + timedelta(minutes=5))
await hass.async_block_till_done()
await async_wait_recording_done(hass)
states = await hass.async_add_executor_job(
get_significant_states, hass, now, None, hass.states.async_entity_ids()
)
assert len(states) > 1
for entity_states in states.values():
for state in entity_states:
assert ATTR_MIN not in state.attributes
assert ATTR_MAX not in state.attributes
assert ATTR_STEP not in state.attributes
assert ATTR_MODE not in state.attributes
assert ATTR_FRIENDLY_NAME in state.attributes
| [
"[email protected]"
]
| |
2688ed15e9167a2616e98fcead68c08c2428e039 | d53a274a61ffe894a6e5648edf86f78145f0af7b | /tests/modes/test_stream.py | 921a3c083d2c4c932abd4ccd6c99093791ebc99f | [
"MIT"
]
| permissive | deresmos/delogger | 8611d88714ffffddb5fa9bc12586fffb89fb8c11 | c185e4fd844414d561f521103975b95bd31aff43 | refs/heads/main | 2021-06-02T23:10:02.814473 | 2020-10-19T13:53:27 | 2020-10-19T13:53:27 | 153,235,270 | 5 | 0 | MIT | 2020-10-19T13:53:29 | 2018-10-16T06:40:46 | Python | UTF-8 | Python | false | false | 1,391 | py | from pathlib import Path
from delogger import Delogger
from delogger.modes.stream import StreamColorDebugMode
from delogger.modes.stream import StreamDebugMode
from delogger.modes.stream import StreamInfoMode
from tests.lib.base import Assert
from tests.lib.base import DeloggerTestBase
class TestStreamMode(DeloggerTestBase):
def test_stream_info_mode(self, capsys):
delogger = Delogger("stream_info_mode")
delogger.load_modes(StreamInfoMode())
logger = delogger.get_logger()
self.execute_log(logger)
self.check_normal_stream_log(logger, capsys, is_color=False)
Assert._bool(not Path(self.OUTPUT_DIRPATH).is_dir())
def test_stream_debug_mode(self, capsys):
delogger = Delogger(name="stream_debug_mode")
delogger.load_modes(StreamDebugMode())
logger = delogger.get_logger()
self.execute_log(logger)
self.check_debug_stream_log(logger, capsys, is_color=False)
Assert._bool(not Path(self.OUTPUT_DIRPATH).is_dir())
def test_stream_color_debug_mode(self, capsys):
delogger = Delogger(name="stream_color_debug_mode")
delogger.load_modes(StreamColorDebugMode())
logger = delogger.get_logger()
self.execute_log(logger)
self.check_debug_stream_log(logger, capsys, is_color=True)
Assert._bool(not Path(self.OUTPUT_DIRPATH).is_dir())
| [
"[email protected]"
]
| |
ceef2844a259de469756bc457bcfb2a756811678 | 8e1668e35a8df9968ab14d16db089b51dbe6dd51 | /python/algorithms/sort/merge_sort.py | 167273e24de284bb4cbd1cbfa418f619369e139c | []
| no_license | Chalmiller/competitive_programming | f1ec0184d1ff247201522ab90ca8e66b3f326afc | b437080d1ba977c023baf08b7dc5c3946784e183 | refs/heads/master | 2021-03-24T05:11:59.383916 | 2020-08-24T22:07:41 | 2020-08-24T22:07:41 | 247,519,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | def merge(S1, S2, S):
"""Merge two sorted Python lists S1 and S2 into properly sized list S."""
i = j = 0
while i + j < len(S):
if j == len(S2) or (i < len(S1) and S1[i] < S2[j]):
S[i+j] = S1[i] # copy ith element of S1 as next item of S
i += 1
else:
S[i+j] = S2[j] # copy jth element of S2 as next item of S
j += 1
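# Illustrative example (added commentary, not in the original file): merge() expects S to
# already be sized to hold both inputs, e.g.
#   out = [None] * 5
#   merge([1, 4], [2, 3, 5], out)   # out becomes [1, 2, 3, 4, 5]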
def merge_sort(S):
"""Sort the elements of Python list S using the merge-sort algorithm."""
n = len(S)
if n < 2:
return # list is already sorted
# divide
mid = n // 2
S1 = S[0:mid] # copy of first half
S2 = S[mid:n] # copy of second half
# conquer (with recursion)
merge_sort(S1) # sort copy of first half
merge_sort(S2) # sort copy of second half
# merge results
merge(S1, S2, S) # merge sorted halves back into S | [
"[email protected]"
]
| |
d9fbe4fbc19c9b645f05143eda745eb788154644 | bf9d325c1154632cd56b860c35fb743f9750e93f | /Configurations/VBSjjlnu/Full2018v7/conf_fit_v4/samples.py | c34dd7e0be32ecd498d6e6dd1ca9633debde4dd3 | []
| no_license | dermotmoran/PlotsConfigurations | 3b1577f068215e08683d9309d3485459f8130ca6 | 062417c706918ccfc5b53bb5cf232b547d245f1b | refs/heads/master | 2023-03-15T11:44:29.444071 | 2023-03-08T10:10:53 | 2023-03-08T10:10:53 | 240,536,872 | 0 | 3 | null | 2020-12-21T15:47:04 | 2020-02-14T15:17:29 | Python | UTF-8 | Python | false | false | 16,871 | py | import os
import subprocess
import string
from LatinoAnalysis.Tools.commonTools import *
def nanoGetSampleFiles(inputDir, sample):
return getSampleFiles(inputDir, sample, True, 'nanoLatino_')
############################################
############ MORE MC STAT ##################
############################################
def CombineBaseW(samples, proc, samplelist):
newbaseW = getBaseWnAOD(directory_bkg, 'Autumn18_102X_nAODv7_Full2018v7', samplelist)
for s in samplelist:
addSampleWeight(samples, proc, s, newbaseW+'/baseW')
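# Added note (commentary, not from the original config): CombineBaseW looks up a common
# per-event normalisation (baseW) computed over all listed extensions of a sample and
# re-applies it to each of them, as done below for the two DYJetsToLL_M-50 extensions:
#   CombineBaseW(samples, 'DY', ['DYJetsToLL_M-50', 'DYJetsToLL_M-50_ext2'])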
##############################################
###### Tree Directory according to site ######
##############################################
samples={}
# Steps
mcSteps = 'MCl1loose2018v7__MCCorr2018v7__MCCombJJLNu2018'
dataSteps = 'DATAl1loose2018v7__DATACombJJLNu2018'
SITE=os.uname()[1]
xrootdPath=''
if 'cern' in SITE :
#xrootdPath='root://eoscms.cern.ch/'
treeBaseDir = '/eos/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano/'
treeBaseDir_SMP = '/eos/cms/store/group/phys_smp/VJets_NLO_VBSanalyses/'
directory_bkg = treeBaseDir + 'Autumn18_102X_nAODv7_Full2018v7/' + mcSteps
directory_signal = treeBaseDir_SMP + 'Autumn18_102X_nAODv7_Full2018v7/' + mcSteps
directory_data = treeBaseDir + 'Run2018_102X_nAODv7_Full2018v7/' + dataSteps
################################################
############ NUMBER OF LEPTONS #################
################################################
Nlep='1'
#Nlep='3'
#Nlep='4'
################################################
############### Lepton WP ######################
################################################
eleWP='mvaFall17V1Iso_WP90'
muWP='cut_Tight_HWWW'
LepWPCut_1l = '(Lepton_isTightElectron_'+eleWP+'[0]>0.5 || Lepton_isTightMuon_'+muWP+'[0]>0.5)'
LepWPWeight_1l = 'Lepton_tightElectron_'+eleWP+'_IdIsoSF'+'[0]*\
Lepton_tightMuon_'+muWP+'_IdIsoSF'+'[0]'
LepWPCut = LepWPCut_1l
LepWPWeight = LepWPWeight_1l
################################################
############ BASIC MC WEIGHTS ##################
################################################
XSWeight = 'XSWeight'
SFweight1l = 'puWeight*\
TriggerEffWeight_1l*\
Lepton_RecoSF[0]'
SFweight = SFweight1l+'*'+LepWPWeight_1l+'*'+LepWPCut_1l
SFweight += '* btagSF * PUJetIdSF * BoostedWtagSF_nominal'
GenLepMatch = 'Lepton_genmatched[0]'
####
# NVTX reweighting
# SFweight += '*nvtx_reweighting'
################################################
############ MET FILTERS ###################
################################################
METFilter_MC = 'METFilter_MC'
METFilter_DATA = 'METFilter_DATA'
################################################
############ DATA DECLARATION ##################
################################################
DataRun = [
['A','Run2018A-02Apr2020-v1'] ,
['B','Run2018B-02Apr2020-v1'] ,
['C','Run2018C-02Apr2020-v1'] ,
['D','Run2018D-02Apr2020-v1'] ,
]
DataSets = ['SingleMuon','EGamma']
DataTrig = {
'SingleMuon' : 'Trigger_sngMu' ,
'EGamma' : '!Trigger_sngMu && Trigger_sngEl'
}
###########################################
############# BACKGROUNDS ###############
##########################################
########### DY ############
DY_photon_filter = '( !(Sum$(PhotonGen_isPrompt==1 && PhotonGen_pt>15 && abs(PhotonGen_eta)<2.6) > 0 && Sum$(LeptonGen_isPrompt==1 && LeptonGen_pt>15)>=2) )'
samples['DY'] = { 'name' : nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-50') #Don't use LO(_ext0)! DYMVA Training!
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-50_ext2')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-10to50-LO_ext1')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-50_HT-70to100')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-50_HT-100to200')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-50_HT-200to400')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-50_HT-400to600')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-50_HT-600to800')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-50_HT-800to1200')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-50_HT-1200to2500')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-50_HT-2500toInf')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-4to50_HT-100to200')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-4to50_HT-200to400')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-4to50_HT-400to600')
+ nanoGetSampleFiles(directory_bkg,'DYJetsToLL_M-4to50_HT-600toInf'),
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC + '*' + DY_photon_filter,
'FilesPerJob' : 6,
'EventsPerJob' : 70000,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
}
CombineBaseW(samples, 'DY', ['DYJetsToLL_M-50', 'DYJetsToLL_M-50_ext2'])
addSampleWeight(samples,'DY','DYJetsToLL_M-50','DY_NLO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_ext2','DY_NLO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-10to50-LO_ext1','DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50', '(LHE_HT < 70)')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_ext2', '(LHE_HT < 70)')
addSampleWeight(samples,'DY','DYJetsToLL_M-10to50-LO_ext1', '(LHE_HT < 100)')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-70to100', 'DY_LO_pTllrw') #Are LO!
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-100to200', 'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-200to400', 'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-400to600', 'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-600to800', 'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-800to1200', 'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-1200to2500', 'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-50_HT-2500toInf', 'DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-4to50_HT-100to200','DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-4to50_HT-200to400','DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-4to50_HT-400to600','DY_LO_pTllrw')
addSampleWeight(samples,'DY','DYJetsToLL_M-4to50_HT-600toInf','DY_LO_pTllrw')
################################
############ Top ############
samples['top'] = { 'name' : nanoGetSampleFiles(directory_bkg,'TTTo2L2Nu')
+ nanoGetSampleFiles(directory_bkg,'ST_s-channel_ext1')
+ nanoGetSampleFiles(directory_bkg,'ST_t-channel_antitop')
+ nanoGetSampleFiles(directory_bkg,'ST_t-channel_top')
+ nanoGetSampleFiles(directory_bkg,'ST_tW_antitop_ext1')
+ nanoGetSampleFiles(directory_bkg,'ST_tW_top_ext1')
+ nanoGetSampleFiles(directory_bkg,'TTToSemiLeptonic')
+ nanoGetSampleFiles(directory_bkg,'TTZjets')
+ nanoGetSampleFiles(directory_bkg,'TTWjets'),
# + nanoGetSampleFiles(directory_bkg,'TTWJetsToLNu'), #also this is available
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC ,
'FilesPerJob' : 4,
'EventsPerJob' : 70000,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
}
addSampleWeight(samples,'top','TTTo2L2Nu','Top_pTrw')
addSampleWeight(samples,'top','TTToSemiLeptonic','Top_pTrw')
#addSampleWeight(samples,'top','TTZjets','Top_pTrw')
#addSampleWeight(samples,'top','TTWjets','Top_pTrw')
#Not corrected in baseW, so we should correct the XS here
addSampleWeight(samples,'top','ST_t-channel_top', "100. / 32.4 ") # N.B We are using inclusive sample with leptonic-only XS
addSampleWeight(samples,'top','ST_t-channel_antitop', "100. / 32.4")
################################
### Wjets samples
samples['Wjets_HT'] = { 'name' :
# nanoGetSampleFiles(directory_bkg, 'WJetsToLNu-LO_ext1')
nanoGetSampleFiles(directory_bkg, 'WJetsToLNu-LO')
+ nanoGetSampleFiles(directory_bkg, 'WJetsToLNu_HT70_100')
+ nanoGetSampleFiles(directory_bkg, 'WJetsToLNu_HT100_200')
+ nanoGetSampleFiles(directory_bkg, 'WJetsToLNu_HT200_400')
+ nanoGetSampleFiles(directory_bkg, 'WJetsToLNu_HT400_600')
+ nanoGetSampleFiles(directory_bkg, 'WJetsToLNu_HT600_800')
+ nanoGetSampleFiles(directory_bkg, 'WJetsToLNu_HT800_1200')
+ nanoGetSampleFiles(directory_bkg, 'WJetsToLNu_HT1200_2500')
+ nanoGetSampleFiles(directory_bkg, 'WJetsToLNu_HT2500_inf')
,
'weight': XSWeight+'*'+SFweight+'*'+METFilter_MC+'*'+GenLepMatch,
'FilesPerJob' : 5,
'EventsPerJob' : 70000
# 'subsamples': {
# "boost1" : "(VBS_category==0) && (deltaeta_vbs < 5)",
# "boost2" : "(VBS_category==0) && (deltaeta_vbs >= 5)",
# "deta1_jpt1": "(VBS_category==1) && (deltaeta_vbs < 5 ) && vbs_1_pt < 75",
# "deta2_jpt1": "(VBS_category==1) && (deltaeta_vbs >= 5) && vbs_1_pt < 75",
# "deta1_jpt2": "(VBS_category==1) && (deltaeta_vbs < 4 ) && ( vbs_1_pt >= 75 && vbs_1_pt <150)",
# "deta2_jpt2": "(VBS_category==1) && (deltaeta_vbs >= 4) && ( vbs_1_pt >= 75 && vbs_1_pt <150)",
# "jpt3": "(VBS_category==1) && ( vbs_1_pt >= 150)",
# }
}
# Fix Wjets binned + LO
addSampleWeight(samples,'Wjets_HT', 'WJetsToLNu-LO', '(LHE_HT < 70)')
############
# HT stiching corrections 2018
addSampleWeight(samples,'Wjets_HT', 'WJetsToLNu_HT100_200', '0.993')
addSampleWeight(samples,'Wjets_HT', 'WJetsToLNu_HT200_400', '1.002')
addSampleWeight(samples,'Wjets_HT', 'WJetsToLNu_HT400_600', '1.009')
addSampleWeight(samples,'Wjets_HT', 'WJetsToLNu_HT600_800', '1.120')
addSampleWeight(samples,'Wjets_HT', 'WJetsToLNu_HT800_1200', '1.202')
addSampleWeight(samples,'Wjets_HT', 'WJetsToLNu_HT1200_2500', '1.332')
addSampleWeight(samples,'Wjets_HT', 'WJetsToLNu_HT2500_inf', '4.200')
###############################################
samples['VV'] = { 'name' :
nanoGetSampleFiles(directory_signal,'WmToLNu_WmTo2J_QCD') +
nanoGetSampleFiles(directory_signal,'WmToLNu_ZTo2J_QCD',) +
nanoGetSampleFiles(directory_signal,'WmTo2J_ZTo2L_QCD', ) +
nanoGetSampleFiles(directory_signal,'WpTo2J_WmToLNu_QCD') +
nanoGetSampleFiles(directory_signal,'WpTo2J_ZTo2L_QCD', ) +
nanoGetSampleFiles(directory_signal,'WpToLNu_WpTo2J_QCD') +
nanoGetSampleFiles(directory_signal,'WpToLNu_WmTo2J_QCD') +
nanoGetSampleFiles(directory_signal,'WpToLNu_ZTo2J_QCD',) +
nanoGetSampleFiles(directory_signal,'ZTo2L_ZTo2J_QCD', ) ,
'weight': XSWeight+'*'+SFweight+'*'+METFilter_MC+'*'+GenLepMatch , # TO BE CORRECTED: + '* ewknloW',
'FilesPerJob' : 8,
'EventsPerJob' : 70000,
}
############ VVV ############
samples['VVV'] = { 'name' : nanoGetSampleFiles(directory_bkg,'ZZZ')
+ nanoGetSampleFiles(directory_bkg,'WZZ')
+ nanoGetSampleFiles(directory_bkg,'WWZ')
+ nanoGetSampleFiles(directory_bkg,'WWW'),
#+ nanoGetSampleFiles(directory_bkg,'WWG'), #should this be included? or is it already taken into account in the WW sample?
'weight' : XSWeight+'*'+SFweight+'*'+METFilter_MC+'*'+GenLepMatch ,
'FilesPerJob' : 8,
'EventsPerJob' : 70000,
}
############## VBF-V ########
samples['VBF-V'] = { 'name' :
nanoGetSampleFiles(directory_bkg,'WLNuJJ_EWK') +
nanoGetSampleFiles(directory_bkg,'EWKZ2Jets_ZToLL_M-50'),
'weight' : XSWeight+'*'+SFweight+'*'+METFilter_MC+'*'+GenLepMatch,
'FilesPerJob' : 8,
'EventsPerJob' : 70000,
}
##################################################
############ Vg ###################################
samples['Vg'] = { 'name' : nanoGetSampleFiles(directory_bkg,'Wg_MADGRAPHMLM')
+ nanoGetSampleFiles(directory_bkg,'ZGToLLG'),
'weight' : XSWeight+'*'+SFweight+'*'+METFilter_MC+'*(Gen_ZGstar_mass <= 0)',
'FilesPerJob' : 8,
'EventsPerJob' : 70000,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
}
# the following baseW correction is needed in both v5 and v6 (for Zg, Not for ZGToLLG)
#addSampleWeight(samples, 'Vg', 'Zg', '0.448')
############ VgS ############
samples['VgS'] = { 'name' : nanoGetSampleFiles(directory_bkg,'Wg_MADGRAPHMLM')
+ nanoGetSampleFiles(directory_bkg,'ZGToLLG')
+ nanoGetSampleFiles(directory_bkg,'WZTo3LNu_mllmin01'),
'weight' : XSWeight+'*'+SFweight+'*'+GenLepMatch+'*'+METFilter_MC + ' * (gstarLow * 0.94 + gstarHigh * 1.14)',
'FilesPerJob' : 8,
'EventsPerJob' : 70000,
'suppressNegative' :['all'],
'suppressNegativeNuisances' :['all'],
# 'subsamples': {
# 'L': 'gstarLow',
# 'H': 'gstarHigh'
# }
}
addSampleWeight(samples,'VgS','Wg_MADGRAPHMLM', '(Gen_ZGstar_mass > 0 && Gen_ZGstar_mass < 0.1)')
addSampleWeight(samples,'VgS','ZGToLLG', '(Gen_ZGstar_mass > 0)') # *0.448 XS correction for Zg
addSampleWeight(samples,'VgS','WZTo3LNu_mllmin01', '(Gen_ZGstar_mass > 0.1)')
##########################################
################ SIGNALS #################
##########################################
#
samples['VBS'] = { 'name' :
nanoGetSampleFiles(directory_signal,'WmToLNu_ZTo2J',) +
nanoGetSampleFiles(directory_signal,'WmTo2J_ZTo2L', ) +
nanoGetSampleFiles(directory_signal,'WpTo2J_ZTo2L', ) +
nanoGetSampleFiles(directory_signal,'WpToLNu_ZTo2J',) +
nanoGetSampleFiles(directory_signal,'WpToLNu_WpTo2J') +
nanoGetSampleFiles(directory_signal,'WmToLNu_WmTo2J') +
nanoGetSampleFiles(directory_signal,'WpToLNu_WmTo2J') +
nanoGetSampleFiles(directory_signal,'WpTo2J_WmToLNu') +
nanoGetSampleFiles(directory_signal,'ZTo2L_ZTo2J', ),
'weight': XSWeight+'*'+SFweight+'*'+METFilter_MC+'*'+GenLepMatch,
'FilesPerJob' : 10,
'EventsPerJob' : 70000,
}
# Then corrected
#fakeW = 'fakeW_ele_mvaFall17V1Iso_WP90_mu_cut_Tight_HWWW_mu20_ele35'
### Fakes
samples['Fake'] = {
'name': [],
'weight': METFilter_DATA+'* fake_weight_corrected',
'weights': [],
'isData': ['all'],
'FilesPerJob' : 10,
}
for _, sd in DataRun:
for pd in DataSets:
# BE Careful --> we use directory_data because the Lepton tight cut was not applied in post-processing
files = nanoGetSampleFiles(directory_data, pd + '_' + sd)
samples['Fake']['name'].extend(files)
samples['Fake']['weights'].extend([DataTrig[pd]] * len(files))
#########################################
################ DATA ###################
#########################################
samples['DATA'] = { 'name': [ ] ,
'weight' : METFilter_DATA+'*'+LepWPCut,
'weights' : [ ],
'isData': ['all'],
'FilesPerJob' : 10,
}
for Run in DataRun :
for DataSet in DataSets :
FileTarget = nanoGetSampleFiles(directory_data,DataSet+'_'+Run[1])
for iFile in FileTarget:
samples['DATA']['name'].append(iFile)
samples['DATA']['weights'].append(DataTrig[DataSet])
samples = { key:v for key,v in samples.items() if key in ["VgS"]}
| [
"[email protected]"
]
| |
23e3d90f8c6fac25f16b1ffcc6d2d56accb6bb1b | 66c3ff83c3e3e63bf8642742356f6c1817a30eca | /.vim/tmp/neocomplete/buffer_cache/=+home=+dante=+proyectos=+django-1.9=+mysite=+pruebaarbol=+models.py | 745addfd4f6e0f9b47c28f906f2c9fe123424849 | []
| no_license | pacifi/vim | 0a708e8bc741b4510a8da37da0d0e1eabb05ec83 | 22e706704357b961acb584e74689c7080e86a800 | refs/heads/master | 2021-05-20T17:18:10.481921 | 2020-08-06T12:38:58 | 2020-08-06T12:38:58 | 30,074,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | {'from', '__future__', 'import', 'unicode_literals', 'django', 'db', 'models', 'treebeard', 'mp_tree', 'MP_Node', 'ns_tree', 'NS_Node', 'al_tree', 'AL_Node', 'class', 'Category', 'name', 'CharField', 'max_length', 'node_order_by', 'def', '__str__', 'self', 'return', 's', '__unicode__'}
| [
"[email protected]"
]
| |
c3adb838965981ec18e6903f217ac461b3ef093c | 35fc084d330e62575c12bc714cbf414c082e9f8a | /ramdisk/target/common/usr/lib/python3.7/site-packages/typepy/checker/_ipaddress.py | 4ae9e749ecdb997ae763955301b1fca4c387ae1e | [
"Python-2.0"
]
| permissive | BM1880-BIRD/bm1880-system-sdk | 8de97c6c0985b3bee8b06fb5fd2ee8daec693665 | eff2d6f5442676c04a221a62139864658208f57e | refs/heads/master | 2022-04-08T09:20:47.919696 | 2020-03-09T02:43:08 | 2020-03-09T02:43:08 | 159,283,885 | 29 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
from __future__ import absolute_import
from ._checker import (
TypeChecker,
TypeCheckerStrictLevel,
)
from ._common import isstring
class IpAddressTypeCheckerStrictLevel0(TypeCheckerStrictLevel):
def is_instance(self):
return self._is_ipaddress(self._value)
def is_valid_after_convert(self, converted_value):
return self._is_ipaddress(converted_value)
@staticmethod
def _is_ipaddress(value):
import ipaddress
return isinstance(
value, (ipaddress.IPv4Address, ipaddress.IPv6Address))
class IpAddressTypeCheckerStrictLevel1(IpAddressTypeCheckerStrictLevel0):
def is_exclude_instance(self):
return (
isstring(self._value) or
super(IpAddressTypeCheckerStrictLevel1, self).is_exclude_instance()
)
class IpAddressTypeChecker(TypeChecker):
def __init__(self, value, strict_level):
super(IpAddressTypeChecker, self).__init__(
value=value,
checker_mapping={
0: IpAddressTypeCheckerStrictLevel0,
1: IpAddressTypeCheckerStrictLevel1,
},
strict_level=strict_level)
| [
"[email protected]"
]
| |
f49a70a71099e94c0c3d967c18ae18bd219fede9 | c8036f589aac6da4dfe88ea2c86935045d06b73d | /falconcv/models/__init__.py | 89107a84cdd4a9134c9440f0812452a774eacd16 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"BSD-2-Clause"
]
| permissive | CP-NEMO/FalconCV | 40740825becaada01460716e4db531619b830603 | 2662cb9aa68d91b52818626bb817568503bd6ef5 | refs/heads/master | 2022-09-21T18:55:52.366534 | 2020-06-08T03:27:47 | 2020-06-08T03:27:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | from .api_installer import ApiInstaller
from .model_builder import ModelBuilder
from .api_model import ApiModel
| [
"[email protected]"
]
| |
e7b9210164d0d161651c3bb898343d30f63cee3b | 696972c107ba96341875bab03dbb92e9337e2924 | /train_bert/train12_aux.py | caff89211d0cae53a9f6714674554104742fa1f9 | []
| no_license | Dongfeng-He/nb | 577d3d5a5f0ec585f132946eb9b6475f6e6856bb | 1d65be2f98a72ae1bd58363bba42b5f1e8e7ac49 | refs/heads/master | 2020-05-29T15:37:29.882797 | 2019-08-09T02:11:25 | 2019-08-09T02:11:25 | 189,225,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,444 | py | import os
import pandas as pd
from evaluation import *
import random
import copy
import torch
from torch import nn
from torch.utils import data
from torch.nn import functional as F
import numpy as np
import time
import math
import gc
from pytorch_pretrained_bert import convert_tf_checkpoint_to_pytorch
from pytorch_pretrained_bert import BertTokenizer, BertAdam, BertModel
from pytorch_pretrained_bert import BertConfig
from pytorch_pretrained_bert.modeling import BertPreTrainedModel
from apex import amp
class BertNeuralNet(BertPreTrainedModel):
def __init__(self, config):
super(BertNeuralNet, self).__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
dense_size = config.hidden_size
        # output layers
self.linear_out = nn.Linear(dense_size, 1)
self.linear_aux_out = nn.Linear(dense_size, 5)
self.linear_identity_out = nn.Linear(dense_size, 9)
self.linear_np_out = nn.Linear(dense_size, 4)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
bert_output = self.dropout(pooled_output)
        # concatenation (only the pooled BERT output here)
hidden = bert_output
        # output layers: with an explicit sigmoid use BCELoss, without sigmoid use BCEWithLogitsLoss
result = self.linear_out(hidden)
aux_result = self.linear_aux_out(hidden)
identity_result = self.linear_identity_out(hidden)
np_result = self.linear_np_out(hidden)
out = torch.cat([result, aux_result, identity_result, np_result], 1)
return out
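# Added note (commentary, not part of the original script): the concatenated output has
# 1 + 5 + 9 + 4 = 19 columns:
#   [:, 0]      main toxicity logit
#   [:, 1:6]    five auxiliary toxicity-type logits
#   [:, 6:15]   nine identity logits
#   [:, 15:19]  four subgroup (pp/np/pn/nn) logits
# This matches the slicing used in Trainer.custom_loss further below.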
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=2, logits=True, reduce=False):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.logits = logits
self.reduce = reduce
def forward(self, inputs, targets):
if self.logits:
bce_loss = nn.BCEWithLogitsLoss(reduction="none")(inputs, targets)
else:
bce_loss = nn.BCELoss(reduction="none")(inputs, targets)
pt = torch.exp(-bce_loss)
focal_loss = self.alpha * (1-pt)**self.gamma * bce_loss
#focal_loss = (1 - pt) ** self.gamma * bce_loss
if self.reduce:
return torch.mean(focal_loss)
else:
return focal_loss
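# Added usage sketch (commentary, not part of the original script): with logits=True and
# reduce=False the module takes raw logits and returns per-element losses, so the caller
# applies its own sample weights and reduction, e.g.
#   criterion = FocalLoss(alpha=1, gamma=2, logits=True, reduce=False)
#   loss = torch.mean(criterion(pred_logits, targets) * sample_weights)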
class Trainer:
def __init__(self, data_dir, model_name, epochs=4, batch_size=64, base_batch_size=32, part=1., seed=1234, debug_mode=False):
self.device = torch.device('cuda')
self.data_dir = data_dir
self.debug_mode = debug_mode
self.model_name = model_name
self.seed = seed
self.identity_list = ['male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish', 'muslim', 'black', 'white', 'psychiatric_or_mental_illness']
self.toxicity_type_list = ['severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat']
if part == 1.:
self.weight_dict = {"severe_toxicity": 1000, "obscene": 235, "identity_attack": 236, "insult": 22,
"threat": 646, "male": 45, "female": 35, "homosexual_gay_or_lesbian": 176, "christian": 50,
"jewish": 249, "muslim": 91, "black": 130, "white": 75, "psychiatric_or_mental_illness": 442,
"pp": 101, "np": 13, "pn": 20, "nn": 1,
"pp_male": 431, "np_male": 50, "pn_male": 17, "nn_male": 1,
"pp_female": 384, "np_female": 39, "pn_female": 17, "nn_female": 1,
"pp_homosexual_gay_or_lesbian": 900, "np_homosexual_gay_or_lesbian": 219, "pn_homosexual_gay_or_lesbian": 17, "nn_homosexual_gay_or_lesbian": 1,
"pp_christian": 859, "np_christian": 54, "pn_christian": 17, "nn_christian": 1,
"pp_jewish": 2365, "np_jewish": 278, "pn_jewish": 17, "nn_jewish": 1,
"pp_muslim": 606, "np_muslim": 108, "pn_muslim": 17, "nn_muslim": 1,
"pp_black": 586, "np_black": 167, "pn_black": 17, "nn_black": 1,
"pp_white": 387, "np_white": 94, "pn_white": 17, "nn_white": 1,
"pp_psychiatric_or_mental_illness": 2874, "np_psychiatric_or_mental_illness": 523, "pn_psychiatric_or_mental_illness": 17, "nn_psychiatric_or_mental_illness": 1}
else:
self.weight_dict = {"severe_toxicity": 1000, "obscene": 196, "identity_attack": 278, "insult": 22,
"threat": 609, "male": 45, "female": 33, "homosexual_gay_or_lesbian": 198, "christian": 48,
"jewish": 243, "muslim": 133, "black": 131, "white": 90, "psychiatric_or_mental_illness": 369,
"pp": 107, "np": 13, "pn": 19, "nn": 1,
"pp_male": 434, "np_male": 51, "pn_male": 17, "nn_male": 1,
"pp_female": 324, "np_female": 37, "pn_female": 17, "nn_female": 1,
"pp_homosexual_gay_or_lesbian": 1055, "np_homosexual_gay_or_lesbian": 244, "pn_homosexual_gay_or_lesbian": 17, "nn_homosexual_gay_or_lesbian": 1,
"pp_christian": 986, "np_christian": 50, "pn_christian": 17, "nn_christian": 1,
"pp_jewish": 2680, "np_jewish": 268, "pn_jewish": 16, "nn_jewish": 1,
"pp_muslim": 772, "np_muslim": 161, "pn_muslim": 17, "nn_muslim": 1,
"pp_black": 633, "np_black": 165, "pn_black": 17, "nn_black": 1,
"pp_white": 465, "np_white": 111, "pn_white": 17, "nn_white": 1,
"pp_psychiatric_or_mental_illness": 2748, "np_psychiatric_or_mental_illness": 427, "pn_psychiatric_or_mental_illness": 16, "nn_psychiatric_or_mental_illness": 1}
self.stopwords = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n“”’\'∞θ÷α•à−β∅³π‘₹´°£€\×™√²—'
self.seed_everything()
self.max_len = 220
self.epochs = epochs
self.base_batch_size = base_batch_size
self.batch_size = batch_size
self.split_ratio = 0.90
self.sample_num = 1804874
if not self.debug_mode:
self.train_df = pd.read_csv(os.path.join(self.data_dir, "train.csv")).sample(int(self.sample_num * part), random_state=1234).fillna(0.)
self.test_df = pd.read_csv(os.path.join(self.data_dir, "test.csv"))
else:
self.train_df = pd.read_csv(os.path.join(self.data_dir, "train.csv")).head(1000).fillna(0.)
self.test_df = pd.read_csv(os.path.join(self.data_dir, "test.csv")).head(1000)
self.train_len = int(len(self.train_df) * self.split_ratio)
self.evaluator = self.init_evaluator()
self.bert_config = BertConfig(os.path.join(self.data_dir, "uncased_L-12_H-768_A-12/bert_config.json"))
self.bert_model_path = os.path.join(self.data_dir, "uncased_L-12_H-768_A-12/")
def seed_everything(self):
random.seed(self.seed)
os.environ['PYTHONHASHSEED'] = str(self.seed)
np.random.seed(self.seed)
torch.manual_seed(self.seed)
torch.cuda.manual_seed(self.seed)
torch.backends.cudnn.deterministic = True
def init_evaluator(self):
        # initialize the scoring (evaluator) class
y_true = self.train_df['target'].values
y_identity = self.train_df[self.identity_list].values
valid_y_true = y_true[self.train_len:]
valid_y_identity = y_identity[self.train_len:]
        evaluator = JigsawEvaluator(valid_y_true, valid_y_identity)  # y_true must be 0 or 1, not graded values
return evaluator
def convert_lines(self, text_series, max_seq_length, bert_tokenizer):
max_seq_length -= 2
all_tokens = []
for text in text_series:
tokens = bert_tokenizer.tokenize(text)
if len(tokens) > max_seq_length:
tokens = tokens[:max_seq_length]
one_token = bert_tokenizer.convert_tokens_to_ids(["[CLS]"] + tokens + ["[SEP]"]) + [0] * (max_seq_length - len(tokens))
all_tokens.append(one_token)
return np.array(all_tokens)
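    # Added note (commentary, not part of the original script): each comment is encoded as
    # [CLS] tok_1 ... tok_n [SEP] followed by zero padding; the earlier "max_seq_length -= 2"
    # reserves room for the two special tokens, so every row ends up exactly self.max_len ids long.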
def create_dataloader(self):
        # read inputs and labels
train_comments = self.train_df["comment_text"].astype(str)
train_label = self.train_df["target"].values
train_type_labels = self.train_df[self.toxicity_type_list].values
        # new subgroup ("np") task labels
train_np_labels = np.zeros((len(self.train_df), 4))
train_np_identity_labels = np.zeros((len(self.train_df), len(self.identity_list) * 4))
train_df_copy = self.train_df[self.identity_list + ["target"]]
for column in self.identity_list + ["target"]:
train_df_copy[column] = np.where(train_df_copy[column] > 0.5, True, False)
pp_label_bool = train_df_copy["target"] & np.where(train_df_copy[self.identity_list].sum(axis=1) > 0, True, False)
np_label_bool = ~train_df_copy["target"] & np.where(train_df_copy[self.identity_list].sum(axis=1) > 0, True, False)
pn_label_bool = train_df_copy["target"] & np.where((train_df_copy[self.identity_list]).sum(axis=1) == 0, True, False)
nn_label_bool = ~train_df_copy["target"] & np.where((train_df_copy[self.identity_list]).sum(axis=1) == 0, True, False)
train_np_labels[:, 0] = np.where(pp_label_bool > 0, 1, 0)
train_np_labels[:, 1] = np.where(np_label_bool > 0, 1, 0)
train_np_labels[:, 2] = np.where(pn_label_bool > 0, 1, 0)
train_np_labels[:, 3] = np.where(nn_label_bool > 0, 1, 0)
for i, column in enumerate(self.identity_list):
pp_label_bool = train_df_copy["target"] & train_df_copy[column]
np_label_bool = ~train_df_copy["target"] & train_df_copy[column]
pn_label_bool = train_df_copy["target"] & (~train_df_copy[column])
nn_label_bool = ~train_df_copy["target"] & (~train_df_copy[column])
train_np_identity_labels[:, i * 4 + 0] = np.where(pp_label_bool > 0, 1, 0)
train_np_identity_labels[:, i * 4 + 1] = np.where(np_label_bool > 0, 1, 0)
train_np_identity_labels[:, i * 4 + 2] = np.where(pn_label_bool > 0, 1, 0)
train_np_identity_labels[:, i * 4 + 3] = np.where(nn_label_bool > 0, 1, 0)
        # raw identity values
train_identity_values = self.train_df[self.identity_list].fillna(0.).values
        # sum of all raw identity values
train_identity_sum = train_identity_values.sum(axis=1)
        # cap the identity sum at 1 (sigmoid-style target)
train_identity_sum_label = np.where(train_identity_sum > 1, 1, train_identity_sum)
        # binary (0/1) identity values
train_identity_binary = copy.deepcopy(self.train_df[self.identity_list])
for column in self.identity_list:
train_identity_binary[column] = np.where(train_identity_binary[column] > 0.5, 1, 0)
        # counts as 1 if any binary identity flag is set
train_identity_binary_sum = train_identity_binary.sum(axis=1)
train_identity_or_binary = np.where(train_identity_binary_sum >= 1, 1, 0)
        # all identity labels
train_identity_type_labels = train_identity_values
train_identity_type_binary_lables = train_identity_binary
train_identity_sum_label = train_identity_sum_label
train_identity_binary_label = train_identity_or_binary
        # tokenizer: encode the training texts
bert_tokenizer = BertTokenizer.from_pretrained(self.bert_model_path, cache_dir=None, do_lower_case=True)
train_bert_tokens = self.convert_lines(self.train_df["comment_text"].fillna("DUMMY_VALUE"), self.max_len, bert_tokenizer)
        # split into training and validation sets
valid_tokens = train_bert_tokens[self.train_len:]
valid_label = train_label[self.train_len:]
valid_type_labels = train_type_labels[self.train_len:]
train_tokens = train_bert_tokens[:self.train_len]
train_label = train_label[:self.train_len]
train_type_labels = train_type_labels[:self.train_len]
valid_identity_type_labels = train_identity_type_labels[self.train_len:]
train_identity_type_labels = train_identity_type_labels[:self.train_len]
valid_identity_type_binary_lables = train_identity_type_binary_lables[self.train_len:]
train_identity_type_binary_lables = train_identity_type_binary_lables[:self.train_len]
valid_identity_sum_label = train_identity_sum_label[self.train_len:]
train_identity_sum_label = train_identity_sum_label[:self.train_len]
valid_identity_binary_label = train_identity_binary_label[self.train_len:]
train_identity_binary_label = train_identity_binary_label[:self.train_len]
valid_np_labels = train_np_labels[self.train_len:]
train_np_labels = train_np_labels[:self.train_len]
valid_np_identity_labels = train_np_identity_labels[self.train_len:]
train_np_identity_labels = train_np_identity_labels[:self.train_len]
        # compute sample weights
target_weight, aux_weight, identity_weight, np_weight, np_identity_weight = self.cal_sample_weights()
        # convert the tokenized data into tensors
train_x_tensor = torch.tensor(train_tokens, dtype=torch.long)
valid_x_tensor = torch.tensor(valid_tokens, dtype=torch.long)
train_y_tensor = torch.tensor(np.hstack([train_label[:, np.newaxis], train_type_labels, train_identity_type_labels, train_np_labels]), dtype=torch.float32)
valid_y_tensor = torch.tensor(np.hstack([valid_label[:, np.newaxis], valid_type_labels, valid_identity_type_labels, valid_np_labels]), dtype=torch.float32)
target_weight_tensor = torch.tensor(target_weight, dtype=torch.float32)
aux_weight_tensor = torch.tensor(aux_weight, dtype=torch.float32)
identity_weight_tensor = torch.tensor(identity_weight, dtype=torch.float32)
np_weight_tensor = torch.tensor(np_weight, dtype=torch.float32)
np_identity_weight_tensor = torch.tensor(np_identity_weight, dtype=torch.float32)
train_attention_mask_tensor = train_x_tensor > 0
valid_attention_mask_tensor = valid_x_tensor > 0
if torch.cuda.is_available():
train_x_tensor = train_x_tensor.to(self.device)
valid_x_tensor = valid_x_tensor.to(self.device)
train_y_tensor = train_y_tensor.to(self.device)
valid_y_tensor = valid_y_tensor.to(self.device)
target_weight_tensor = target_weight_tensor.to(self.device)
aux_weight_tensor = aux_weight_tensor.to(self.device)
identity_weight_tensor = identity_weight_tensor.to(self.device)
train_attention_mask_tensor = train_attention_mask_tensor.to(self.device)
valid_attention_mask_tensor = valid_attention_mask_tensor.to(self.device)
np_weight_tensor = np_weight_tensor.to(self.device)
np_identity_weight_tensor = np_identity_weight_tensor.to(self.device)
        # wrap the tensors into a dataset; inputs and labels stay aligned, and when loaded via the dataloader dataset[:-1] is x and dataset[-1] is y
train_dataset = data.TensorDataset(train_x_tensor, train_y_tensor, target_weight_tensor, aux_weight_tensor, identity_weight_tensor, train_attention_mask_tensor, np_weight_tensor)
valid_dataset = data.TensorDataset(valid_x_tensor, valid_y_tensor, valid_attention_mask_tensor)
        # wrap the datasets into dataloaders
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.base_batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=self.base_batch_size, shuffle=False)
        # return the training data loaders
return train_loader, valid_loader
def cal_sample_weights(self):
# aux weight
aux_weight = np.zeros((len(self.train_df), len(self.toxicity_type_list)))
for i, column in enumerate(self.toxicity_type_list):
weight = math.pow(self.weight_dict[column], 0.5)
aux_weight[:, i] = np.where(self.train_df[column] > 0.5, weight, 1)
# identity weight
identity_weight = np.zeros((len(self.train_df), len(self.identity_list)))
for i, column in enumerate(self.identity_list):
weight = math.pow(self.weight_dict[column], 0.5)
identity_weight[:, i] = np.where(self.train_df[column] > 0.5, weight, 1)
# np weight
np_weight = np.zeros((len(self.train_df), 4))
np_identity_weight = np.zeros((len(self.train_df), len(self.identity_list) * 4))
train_df_copy = self.train_df[self.identity_list + ["target"]]
for column in self.identity_list + ["target"]:
train_df_copy[column] = np.where(train_df_copy[column] > 0.5, True, False)
pp_label_bool = train_df_copy["target"] & np.where(train_df_copy[self.identity_list].sum(axis=1) > 0, True, False)
np_label_bool = ~train_df_copy["target"] & np.where(train_df_copy[self.identity_list].sum(axis=1) > 0, True, False)
pn_label_bool = train_df_copy["target"] & np.where((train_df_copy[self.identity_list]).sum(axis=1) == 0, True, False)
nn_label_bool = ~train_df_copy["target"] & np.where((train_df_copy[self.identity_list]).sum(axis=1) == 0, True, False)
np_weight[:, 0] = np.where(pp_label_bool > 0, 1, 1)
np_weight[:, 1] = np.where(np_label_bool > 0, 1, 1)
np_weight[:, 2] = np.where(pn_label_bool > 0, 1, 1)
np_weight[:, 3] = np.where(nn_label_bool > 0, 1, 1)
for i, column in enumerate(self.identity_list):
pp_label_bool = train_df_copy["target"] & train_df_copy[column]
np_label_bool = ~train_df_copy["target"] & train_df_copy[column]
pn_label_bool = train_df_copy["target"] & (~train_df_copy[column])
nn_label_bool = ~train_df_copy["target"] & (~train_df_copy[column])
np_identity_weight[:, i * 4 + 0] = np.where(pp_label_bool > 0, self.weight_dict["pp_%s" % column], 1)
np_identity_weight[:, i * 4 + 1] = np.where(np_label_bool > 0, self.weight_dict["np_%s" % column], 1)
np_identity_weight[:, i * 4 + 2] = np.where(pn_label_bool > 0, self.weight_dict["pn_%s" % column], 1)
np_identity_weight[:, i * 4 + 3] = np.where(nn_label_bool > 0, self.weight_dict["nn_%s" % column], 1)
# target weight
for column in self.identity_list + ["target"]:
self.train_df[column] = np.where(self.train_df[column] > 0.5, True, False)
target_weight = np.ones(len(self.train_df))
target_weight += self.train_df["target"]
if False:
target_weight += (~self.train_df["target"]) * self.train_df[self.identity_list].sum(axis=1)
target_weight += self.train_df["target"] * (~self.train_df[self.identity_list]).sum(axis=1) * 5
else:
target_weight += (~self.train_df["target"]) * np.where(self.train_df[self.identity_list].sum(axis=1) > 0, 1, 0) * 3
target_weight += self.train_df["target"] * np.where((~self.train_df[self.identity_list]).sum(axis=1) > 0, 1, 0) * 3
target_weight /= target_weight.mean()
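        # Added note (commentary, not part of the original script): this up-weights the
        # subgroups that drive the Jigsaw bias metric (non-toxic comments that mention an
        # identity and toxic comments that do not), then normalises the weights to mean 1.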
        # keep only the training split
target_weight = np.array(target_weight)
target_weight = target_weight[:self.train_len]
aux_weight = aux_weight[:self.train_len, :]
identity_weight = identity_weight[:self.train_len, :]
np_weight = np_weight[:self.train_len, :]
np_identity_weight = np_identity_weight[:self.train_len, :]
return target_weight, aux_weight, identity_weight, np_weight, np_identity_weight
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def custom_loss(self, y_pred, y_batch, epoch, target_weight=1., aux_weight=1., identity_weight=1., np_weight=1.):
target_pred = y_pred[:, 0]
target_true = y_batch[:, 0]
aux_pred = y_pred[:, 1: 6]
aux_true = y_batch[:, 1: 6]
identity_pred = y_pred[:, 6: 15]
identity_true = y_batch[:, 6: 15]
np_pred = y_pred[:, 15: 19]
np_true = y_batch[:, 15: 19]
if epoch > 9:
target_loss = FocalLoss()(target_pred, target_true)
else:
target_loss = nn.BCEWithLogitsLoss(reduction="none")(target_pred, target_true)
target_loss = torch.mean(target_loss * target_weight)
if epoch > 9:
aux_loss = FocalLoss()(aux_pred, aux_true)
else:
aux_loss = nn.BCEWithLogitsLoss(reduction="none")(aux_pred, aux_true)
aux_loss = torch.mean(aux_loss * aux_weight)
if epoch > 9:
identity_loss = FocalLoss()(identity_pred, identity_true)
else:
identity_loss = nn.BCEWithLogitsLoss(reduction="none")(identity_pred, identity_true)
identity_loss = torch.mean(identity_loss * identity_weight)
if epoch > 9:
np_loss = FocalLoss()(np_pred, np_true)
else:
np_loss = nn.BCEWithLogitsLoss(reduction="none")(np_pred, np_true)
np_loss = torch.mean(np_loss * np_weight)
return target_loss, aux_loss, identity_loss, np_loss
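    # Added note (commentary, not part of the original script): train() below only combines
    # the first two terms (loss = target_loss * 0.7 + aux_loss * 0.3); identity_loss and
    # np_loss are computed here but not added to the optimised loss in this version.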
def train(self):
if self.debug_mode: self.epochs = 1
        # build the dataloaders
train_loader, valid_loader = self.create_dataloader()
        # training setup
self.seed_everything()
lr = 2e-5
accumulation_steps = math.ceil(self.batch_size / self.base_batch_size)
        # convert the pretrained TF BERT checkpoint to PyTorch format
if os.path.exists(self.bert_model_path + "pytorch_model.bin") is False:
convert_tf_checkpoint_to_pytorch.convert_tf_checkpoint_to_pytorch(
self.bert_model_path + 'bert_model.ckpt',
self.bert_model_path + 'bert_config.json',
self.bert_model_path + 'pytorch_model.bin')
        # load the pretrained model
model = BertNeuralNet.from_pretrained(self.bert_model_path, cache_dir=None)
model.zero_grad()
model = model.to(self.device)
        # set a different weight_decay for different parameter groups
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
epoch_steps = int(self.train_len / self.base_batch_size / accumulation_steps)
num_train_optimization_steps = int(self.epochs * epoch_steps)
valid_every = math.floor(epoch_steps * accumulation_steps / 5)
optimizer = BertAdam(optimizer_grouped_parameters, lr=lr, warmup=0.02, t_total=num_train_optimization_steps)
        # gradually decaying learning rate
#scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: 0.6 ** epoch)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0)
        # start training
for epoch in range(self.epochs):
train_start_time = time.time()
model.train()
optimizer.zero_grad()
            # load each batch and train on it
for i, batch_data in enumerate(train_loader):
x_batch = batch_data[0]
y_batch = batch_data[1]
target_weight_batch = batch_data[2]
aux_weight_batch = batch_data[3]
identity_weight_batch = batch_data[4]
x_mask = batch_data[5]
np_weight_batch = batch_data[6]
y_pred = model(x_batch, attention_mask=x_mask, labels=None)
target_loss, aux_loss, identity_loss, np_loss = self.custom_loss(y_pred, y_batch, epoch, target_weight_batch, aux_weight_batch, identity_weight_batch, np_weight_batch)
loss = target_loss * 0.7 + aux_loss * 0.3
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if (i + 1) % accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
                # validation
if (i + 1) % valid_every == 0:
valid_start_time = time.time()
model.eval()
y_pred = np.zeros((len(self.train_df) - self.train_len))
for j, valid_batch_data in enumerate(valid_loader):
x_batch = valid_batch_data[0]
x_mask = valid_batch_data[2]
batch_y_pred = self.sigmoid(model(x_batch, attention_mask=x_mask, labels=None).detach().cpu().numpy())[:, 0]
y_pred[j * self.base_batch_size: (j + 1) * self.base_batch_size] = batch_y_pred
                    # compute the score
auc_score = self.evaluator.get_final_metric(y_pred)
print("epoch: %d duration: %d min auc_score: %.4f" % (epoch, int((time.time() - train_start_time) / 60), auc_score))
if not self.debug_mode:
state_dict = model.state_dict()
stage = int((i + 1) / valid_every)
train_duration = int((time.time() - train_start_time) / 60)
valid_duration = int((time.time() - valid_start_time) / 60)
train_start_time = time.time()
# model[bert][seed][epoch][stage][model_name][stage_train_duration][valid_duration][score].bin
model_name = "model2/model_%s_%d_%d_%d_%dmin_%dmin_%.4f.bin" % (self.model_name, self.seed, epoch + 1, stage, train_duration, valid_duration, auc_score)
torch.save(state_dict, os.path.join(self.data_dir, model_name))
model.train()
            # delete training-related inputs and the model to free memory
training_history = [train_loader, valid_loader, model, optimizer, param_optimizer, optimizer_grouped_parameters]
for variable in training_history:
del variable
gc.collect()
if __name__ == "__main__":
data_dir = "/Users/hedongfeng/PycharmProjects/unintended_bias/data/"
trainer = Trainer(data_dir, "model_name1", debug_mode=True)
trainer.train()
| [
"[email protected]"
]
| |
a5b0fc1063e34fd3268bf0dbb5dee86d3b3a34f8 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/python/keras/testing_utils.py | 57c5585363b515e8ffb00717a04f25b25e9cec10 | []
| no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f948b539999a928461c50492838f0aa264bc8aa39b722055b4f4a8317d5e8e0b
size 25030
| [
"github@cuba12345"
]
| github@cuba12345 |
9ae2fca88541a98fc212a099fea1c9bbf40bfec5 | d4e573e8eae32db155fe5931b3e2dcd3aa48969b | /indigo/lib/python2.7/dist-packages/rocon_app_manager_msgs/srv/_Init.py | c91a36b46fa4aec26c1d464d23d1f93378458753 | []
| no_license | javierdiazp/myros | ee52b0a7c972d559a1a377f8de4eb37878b8a99b | 7571febdfa881872cae6378bf7266deca7901529 | refs/heads/master | 2022-11-09T09:24:47.708988 | 2016-11-10T16:56:28 | 2016-11-10T16:56:28 | 73,733,895 | 0 | 1 | null | 2022-10-25T05:16:35 | 2016-11-14T18:19:06 | C++ | UTF-8 | Python | false | false | 7,687 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rocon_app_manager_msgs/InitRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class InitRequest(genpy.Message):
_md5sum = "c1f3d28f1b044c871e6eff2e9fc3c667"
_type = "rocon_app_manager_msgs/InitRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
string name
"""
__slots__ = ['name']
_slot_types = ['string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
name
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(InitRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.name is None:
self.name = ''
else:
self.name = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8')
else:
self.name = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8')
else:
self.name = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rocon_app_manager_msgs/InitResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class InitResponse(genpy.Message):
_md5sum = "eb13ac1f1354ccecb7941ee8fa2192e8"
_type = "rocon_app_manager_msgs/InitResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """bool result
"""
__slots__ = ['result']
_slot_types = ['bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
result
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(InitResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.result is None:
self.result = False
else:
self.result = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_struct_B.pack(self.result))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.result,) = _struct_B.unpack(str[start:end])
self.result = bool(self.result)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_struct_B.pack(self.result))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.result,) = _struct_B.unpack(str[start:end])
self.result = bool(self.result)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_B = struct.Struct("<B")
class Init(object):
_type = 'rocon_app_manager_msgs/Init'
_md5sum = 'ee91d68745ef4d7a247816a59dffedf2'
_request_class = InitRequest
_response_class = InitResponse
| [
"[email protected]"
]
| |
1a2ff067aa33a3067f665f3d85d66752e7419587 | 22d487f52d50cbd07487a0f9c2fb40d9b0673d24 | /tehbot/plugins/shadowlamb/__init__.py | 1591618c6d027b8b211d86fa320ea098dcbf2d5d | [
"MIT"
]
| permissive | LostandFound5306/tehbot | 8ca76c7dc65c1c9586cd4f2c80639678eeec8fee | f5807379ff4599518f1f9bb46d549c4e1ee18375 | refs/heads/master | 2021-10-25T00:46:12.476077 | 2019-03-30T14:30:14 | 2019-03-30T14:30:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,286 | py | # -*- coding: utf-8 -*-
from tehbot.plugins import *
from tehbot.plugins import say, me
import tehbot.plugins as plugins
import irc.client
import model
import threading
import time
from random import randint, random
import os.path
#import gettext
#t = gettext.translation("Shadowlamb", os.path.dirname(__file__) + "/i18n")
#_ = t.gettext
_ = lambda x: x
class ShadowlambPlugin(StandardPlugin, PrivilegedPlugin):
"""What, you don't know what shadowlamb is??"""
def __init__(self):
StandardPlugin.__init__(self)
def command(self, connection, event, extra, dbconn):
return "Shadowlamb is teh greatest!"
register_plugin("sl", ShadowlambPlugin())
class ShadowlambHandler(PrefixHandler, AuthedPlugin):
def command_prefix(self):
#return "+"
return u'\u00a5';
def __init__(self):
PrefixHandler.__init__(self)
AuthedPlugin.__init__(self)
self.cmd2action = {
"start" : self.start,
"reset" : self.reset,
"s" : self.status,
"status" : self.status,
"a" : self.attributes,
"attributes" : self.attributes,
"kw" : self.known_words,
"known_words" : self.known_words,
"kp" : self.known_places,
"known_places" : self.known_places,
"time" : self.show_time,
"p" : self.party_status,
"party" : self.party_status,
}
def postinit(self, dbconn):
model.init()
self.quit = False
self.thread = threading.Thread(target=self.timerfunc)
self.thread.start()
def deinit(self, dbconn):
self.quit = True
self.thread.join()
def cmd(self, args):
return u"\x02%s%s\x02" % (self.command_prefix(), args)
def sltime(self):
with model.db_session:
return model.Shadowlamb[1].time
def announce(self, msg, dbconn):
where = {"WeChall IRC" : ["#net-force"]}
for network in where:
for conn in self.tehbot.core.reactor.connections:
if conn.name != network:
continue
for ch in where[network]:
if ch in conn.channels:
plugins.say(conn, ch, msg, dbconn)
def player_age(self, player):
d = self.sltime() - player.birthday
years = d / 60 / 60 / 24 / 365
return max(1, int(round(years)))
def give_item(self, player, item):
pi = model.PlayerItem(base=item, player=player)
player.inventory += pi
return [_("You received 1x%s" % item.name)]
def give_word(self, player, word):
if word in player.known_words:
return []
player.known_words += word
return [_("You know a new Word: \x02%s\x02" % word.name)]
def give_place(self, player, place):
if place in player.known_places:
return []
player.known_places += place
return [_("You know a new Place: \x02%s\x02" % place.name)]
def party_push_action(self, party, action, target=None, eta=None):
with_events = action is not model.PartyAction.delete
party.last_action = party.action
party.last_target = party.target
party.last_eta = party.eta
party.action = action
if target is not None:
party.target = target
party.eta = self.sltime() + (eta or 0)
#if with_events:
#self.announce_party(party
def timerfunc(self):
while not self.quit:
nxt = time.time() + 1
while time.time() < nxt:
time.sleep(0.1)
with model.db_session:
model.Shadowlamb[1].time += 1
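    # Added note (commentary, not part of the original plugin): timerfunc runs on the
    # background thread started in postinit() and advances the in-game clock by one second
    # per real second until deinit() sets self.quit.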
def start(self, connection, event, extra, dbconn, msg_type):
with model.db_session:
genders = [to_utf8(g.name) for g in model.Gender.select()]
races = [to_utf8(r.name) for r in model.Race.select(lambda x: not x.is_npc)]
parser = plugins.ThrowingArgumentParser(prog="start", description=self.__doc__)
parser.add_argument("gender", metavar="gender", choices=genders, help=", ".join(genders))
parser.add_argument("race", metavar="race", choices=races, help=", ".join(races))
try:
pargs = parser.parse_args(extra["args"], decode=False)
if parser.help_requested:
return parser.format_help().strip()
except Exception as e:
return u"Error: %s" % exc2str(e)
def random_birthday(s):
tm = time.gmtime(s)
return time.mktime((tm.tm_year, randint(1, 12), randint(1, 31), randint(0, 23), randint(0, 59), randint(0, 61), 0, 1, -1))
def random_value(val, p):
rand = 1.0 + p * (2 * random() - 1)
return val * rand
with model.db_session:
network_id = self.tehbot.settings.network_id(connection)
r = model.Race.get(name=pargs.race)
p = model.Player(
network_id=network_id,
nick=event.source.nick,
name=event.source.nick,
gender=model.Gender.get(name=pargs.gender),
race=r,
birthday = random_birthday(self.sltime() - random_value(r.age, 0.2) * 365 * 24 * 60 * 60),
height = random_value(r.height, 0.2),
own_weight = random_value(r.own_weight, 0.2),
options = {},
hp = 0,
mp = 0,
distance = 0,
xp = 0,
xp_level = 0,
karma = 0,
bad_karma = 0,
level = 0,
bounty = 0,
bounty_done = 0,
quests_done = 0,
nuyen = 0,
known_words = [],
known_spells = [],
known_places = "",
bank_nuyen = 0,
bank_items = "",
effects = "",
const_vars = "",
combat_ai = "",
game_ai = "",
feelings = model.Feelings(),
attributes = model.Attributes(),
skills = model.Skills(),
knowledge = model.Knowledge(),
lock = 0,
transport = 0,
stats = model.PlayerStats(),
equipment = model.Equipment(),
)
p.init_player()
party = model.Party(options={})
p.party = party
msgs = [
_("Your character has been created with \x02Age\x02: %dyears, \x02Height\x02: %.2fm, \x02Weight\x02: %.2fkg.") % (self.player_age(p), p.height, p.own_weight),
]
notices = [
_("You wake up in a bright room... It seems like it is past noon...looks like you are in a hotel room."),
_("What happened... You can`t remember anything.... Gosh, you even forgot your name."),
_("You check your %s and find a pen from 'Renraku Inc.'. You leave your room and walk to the counter. Use %s to talk with the hotelier." % (self.cmd("inventory"), self.cmd("talk"))),
_("Use %s to see all available commands. Check %s to browse the Shadowlamb help files. Use %s to see the help for a command or subject." % (self.cmd("commands"), self.cmd("help"), self.cmd("help <word>")))
]
notices += self.give_item(p, model.Item.get(name="Pen"))
notices += self.give_word(p, model.Word.get(name="Shadowrun"))
notices += self.give_word(p, model.Word.get(name="Renraku"))
notices += self.give_place(p, model.Place.get(name="Redmond_Hotel"))
self.party_push_action(party, model.PartyAction.inside, "Redmond_Hotel")
if msg_type == "notice":
notices = msgs + notices
msgs = []
else:
for m in msgs:
if irc.client.is_channel(event.target):
plugins.say_nick(connection, event.target, event.source.nick, m, dbconn)
else:
plugins.say(connection, event.source.nick, m, dbconn)
for m in notices:
plugins.notice(connection, event.source.nick, m, dbconn)
self.announce("Welcome a new player: %s the %s %s." % (p.fullname(), p.gender.name, p.race.name), dbconn)
return None
def reset(self, args, player):
parser = plugins.ThrowingArgumentParser(prog="reset")
parser.add_argument("confirmation", nargs="?")
try:
pargs = parser.parse_args(args)
if parser.help_requested:
return parser.format_help().strip()
except Exception as e:
return u"Error: %s" % exc2str(e)
if not pargs.confirmation:
player.set_option("deletion_started", True)
return "This will completely delete your character. Type %s to confirm." % self.cmd("reset i_am_sure")
if not player.option("deletion_started"):
return "Deletion not started yet. Type %s to start deletion." % self.cmd("reset")
if "".join(pargs.confirmation) != "i_am_sure":
player.set_option("deletion_started", False)
return "Deletion aborted. Type %s to start deletion again." % self.cmd("reset")
player.delete()
return "Your character has been deleted. You may issue %s again." % self.cmd("start")
def status(self, args, player):
# male troll L0(0). HP:35/35, Atk:22.8, Def:0.1, Dmg:1.8-7.5, Arm(M/F):0/0, XP:0, Karma:0, ¥:50, Weight:0g/18.5kg.
# female fairy L0(0). HP:10/10, MP:36/36, Atk:2.8, Def:1.5, Dmg:-0.2-2.5, Arm(M/F):0/0, XP:0, Karma:0, ¥:0, Weight:0g/3.5kg.
# male gremlin L0(0). HP:30/30, Atk:20.6, Def:0.8, Dmg:1-5.5, Arm(M/F):0/0, XP:0, Karma:0, ¥:0, Weight:50g/12.5kg.
attack, defense, min_dmg, max_dmg, marm, farm = player.combat_stats()
return "%s %s L%d(%d): \x02HP\x02:%.2f/%.2f, \x02MP\x02:%.2f/%.2f, \x02Atk\x02:%.2f, " \
u"\x02Def\x02:%.2f, \x02Dmg\x02:%.2f–%.2f, \x02Arm\x02(M/F):%.2f/%.2f, " \
u"\x02XP\x02:%.2f, \x02Karma\x02:%d, \x02¥\x02:%.2f, \x02Weight\x02:%.2f/%.2fkg" % (
player.gender.name,
player.race.name,
player.level,
player.effective_level(),
player.hp,
player.max_hp(),
player.mp,
player.max_mp(),
attack,
defense,
min_dmg,
max_dmg,
marm,
farm,
player.xp,
player.karma,
player.nuyen,
player.weight(),
player.max_weight(),
)
def attributes(self, args, player):
# Your attributes: body:1(5), strength:0(4), quickness:2(3), wisdom:0(1), intelligence:0, luck:0, reputation:0(2), essence:6(5.5).
# Your attributes: body:4, strength:3(4), quickness:1, wisdom:0(1), intelligence:0, luck:0, reputation:2, essence:5.5.
lst = []
attr = ["body", "magic", "strength", "quickness", "wisdom", "intelligence", "charisma", "luck", "reputation", "essence"]
for a in attr:
base = getattr(player.race.base_attributes, a)
now = getattr(player, a)()
if now >= 0:
lst.append("\x02%s\x02:%g%s" % (a, base, "" if base == now else "(%g)" % now))
return "Your attributes: %s." % ", ".join(lst)
def known_words(self, args, player):
words = []
for w in player.known_words:
words.append((w.id, "\x02%d\x02-%s" % (w.id, w.name)))
return "Known Words: %s." % ", ".join(w[1] for w in sorted(words))
def known_places(self, args, player):
places = []
for p in player.known_places:
places.append((p.id, "\x02%d\x02-%s" % (p.id, p.name)))
return "Known Places: %s." % ", ".join(p[1] for p in sorted(places))
def show_time(self, args, player):
now = time.gmtime(self.sltime())
return time.strftime("It is %H:%M:%S, %Y-%m-%d in Shadowland", now)
def party_status(self, args, player):
if player.party.action == model.PartyAction.inside:
return "You are \x02inside\x02 %s." % player.party.target
return "hu!? should never get here"
def execute(self, connection, event, extra, dbconn):
cmd = extra["cmd"].lower()
msg_type = "say_nick" if irc.client.is_channel(event.target) else "notice"
with model.db_session:
p = model.Player.get(name=event.source.nick, network_id=self.tehbot.settings.network_id(connection))
if p is not None and not irc.client.is_channel(event.target):
msg_type = p.option("msg_type", msg_type)
            if cmd not in self.cmd2action:
return [(msg_type, "The command is not available for your current action or location. Try %s to see all currently available commands." % self.cmd("commands [--long]"))]
if cmd == "start" and p:
return [(msg_type, "Your character has been created already. You can type %s to start over." % self.cmd("reset"))]
if cmd != "start" and not p:
return [(msg_type, "You haven't started the game yet. Type %s to begin playing." % self.cmd("start"))]
try:
if cmd == "start":
res = self.start(connection, event, extra, dbconn, msg_type)
else:
res = self.cmd2action[cmd](extra["args"], p)
except Exception as e:
import traceback
traceback.print_exc()
return [(msg_type, u"Error: %s" % exc2str(e))]
if isinstance(res, list):
return [(msg_type, m) for m in res]
if isinstance(res, basestring):
return [(msg_type, res)]
register_prefix_handler(ShadowlambHandler())
| [
"[email protected]"
]
| |
80e572201fdf14e9dc184eee509e4fb9a400124e | fd899e63d4c33261911f0e35fb8cb286332b7a95 | /algorithm_202108/baek1075.py | b968e46f31d92d03a5084f6f6149807b322ee73b | []
| no_license | choyj0920/algorithm | 948452d652455973e928ef537729174109972252 | e1287799e7073232cbf8904cea7c348a2f2a9c30 | refs/heads/master | 2023-05-25T22:23:11.044049 | 2021-09-08T03:40:42 | 2021-08-13T08:04:46 | 225,736,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | # baek1075 나누기
n=int(input().rstrip()[:-2]+'00')
f=int(input())
print(format((f-n%f)%f,'02')) | [
"[email protected]"
]
| |
5be7597d9b79497f129d448a582e4da7ba3720fe | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r9/Gen/DecFiles/options/11164450.py | aa5dc4673014eec3a42cde5a51ad6a896418a48f | []
| no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/11164450.py generated: Fri, 27 Mar 2015 16:10:17
#
# Event Type: 11164450
#
# ASCII decay Descriptor: {[[B0]nos -> (D- => K+ pi- pi- pi0) pi+]cc, [[B0]os -> (D+ => K- pi+ pi+ pi0) pi-]cc}
#
from Configurables import Generation
Generation().EventType = 11164450
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_Dpi,Kpipipi0=phsp,DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
| [
"[email protected]"
]
| |
b79d7f133f726c28e34a6124100816702b418473 | d0320f7a0d23cf6dbc93bed6af5cf22625a1998c | /QueensPuzzleFaster.py | afa69f2f96a4ebb46146776d6760744c448c41ad | [
"MIT"
]
| permissive | cmcclana/CIS2001-Fall2017 | f8ba05cd20406bd3e743d1abd13320dfb0aaf234 | d601f1ef87fb49852ce7a7223537baa5f3a3e664 | refs/heads/master | 2021-08-08T06:59:37.543336 | 2017-11-09T20:40:44 | 2017-11-09T20:40:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,766 | py | class QueensPuzzle:
QUEEN = 'Q'
SPACE = ' '
def __init__(self, number_of_queens):
self.number_of_queens_on_board = 0
        # Build each row as its own list; [[...] * n] * n aliases one row object n
        # times, so placing a queen would show up in every row when printing.
        self.board = [[QueensPuzzle.SPACE] * number_of_queens for _ in range(number_of_queens)]
self.diagonals_minus = [True] * number_of_queens * 2
self.diagonals_plus = [True] * number_of_queens * 2
self.rows = [True] * number_of_queens
self.total_number_of_solutions = 0
self.total_queens = number_of_queens
def Print(self):
for row in self.board:
print('-' * ( len(self.board) * 2 + 1) )
print('|', end="")
for character in row:
print( character, end='|')
print()
print('-' * ( len(self.board) * 2 + 1) )
print()
def IsRowOpen(self, row_number):
return self.rows[row_number]
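    # (Comment added) Squares on the same "\" diagonal share (row - col) and squares
    # on the same "/" diagonal share (row + col); indexing the two 2N-sized boolean
    # lists by those values gives O(1) safety checks (negative indices wrap around
    # without colliding with the positive ones).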
def IsDiagonalOpen(self, row_number, col_number):
return self.diagonals_minus[( row_number - col_number )] and self.diagonals_plus[( row_number + col_number )]
def CanPlaceQueen(self, row_number, col_number):
return self.IsRowOpen(row_number) and self.IsDiagonalOpen(row_number, col_number)
def Solve(self):
if ( self.number_of_queens_on_board == len(self.board) ):
# self.Print()
# for row in range(len(self.board)):
# for col in range(len(self.board)):
# if self.board[row][col] == QueensPuzzle.QUEEN:
# print( "%d" % (row-col), end=" ")
# print()
self.total_number_of_solutions += 1
#print(self.total_number_of_solutions)
else:
for row in range(len(self.board)):
if self.CanPlaceQueen(row, self.number_of_queens_on_board):
self.rows[row] = False
self.diagonals_minus[(row - self.number_of_queens_on_board )] = False
self.diagonals_plus[(row + self.number_of_queens_on_board )] = False
self.board[row][self.number_of_queens_on_board] = QueensPuzzle.QUEEN
self.number_of_queens_on_board += 1
self.Solve()
self.number_of_queens_on_board -= 1
self.board[row][self.number_of_queens_on_board] = QueensPuzzle.SPACE
self.rows[row] = True
self.diagonals_minus[(row - self.number_of_queens_on_board )] = True
self.diagonals_plus[(row + self.number_of_queens_on_board )] = True
number_of_queens = int( input("How many queens do you want to try and put on the board?"))
queensPuzzle = QueensPuzzle(number_of_queens)
queensPuzzle.Solve()
print("Total Solutions: %d" % queensPuzzle.total_number_of_solutions ) | [
"[email protected]"
]
| |
170f7f94ed25f2759dcdc8e16985bde2afe610d5 | 5f67c696967456c063e5f8a0d14cf18cf845ad38 | /archiv/_python/py4inf/open.py | 8431e67ce7563c4ff42278b098951cc12a1fedb3 | []
| no_license | wuxi20/Pythonista | 3f2abf8c40fd6554a4d7596982c510e6ba3d6d38 | acf12d264615749f605a0a6b6ea7ab72442e049c | refs/heads/master | 2020-04-02T01:17:39.264328 | 2019-04-16T18:26:59 | 2019-04-16T18:26:59 | 153,848,116 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | fhand = open('mbox.txt')
count = 0
for line in fhand:
count = count + 1
print('Line Count:', count)
| [
"[email protected]"
]
| |
b33d3bf4771d52646ef6acbedbd83ff655913fe4 | 8fbe463322c675d1e1b11adbf5ddfbca77a71c3b | /utils/code.py | 56280c1e875f22c62163b1ab9e26a6ab05dc25c5 | []
| no_license | qhuydtvt/tk-poll | 7cdcd456c3fdca226a31868297df891d8ba2a890 | 1a0b9898f7ae9e3ab9f4d81ee8ee737b581de0d6 | refs/heads/master | 2021-05-06T13:36:04.976284 | 2018-10-28T09:42:42 | 2018-10-28T09:42:42 | 113,245,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | from shortuuid import ShortUUID
import string
alphabet = string.ascii_uppercase
helper = ShortUUID(alphabet=alphabet)
def code(length):
return helper.random(length=length)
def code_6():
return code(6)
| [
"[email protected]"
]
| |
e705fa50c065351bd59b8171e8b499d80879553e | 98e79aff0c504ae703df1b4b6a621caedb5be4de | /v1.2/client/gwElements/__init__.py | 1d3b635476e73e5a0e6ec743178e066ab5ce82e2 | []
| no_license | lotaku/board-game | 26b97b3f6cb241e55852b6d60da3073a9347e4d3 | d90fe5ffabef0cdfa0538be14f69afdc0ca0badc | refs/heads/master | 2020-06-03T05:00:59.617051 | 2014-05-18T14:44:00 | 2014-05-18T14:44:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | #import gameWin
| [
"[email protected]"
]
| |
fcd708af9bfc4b3f389194c77969d032fa8bedc7 | b5e10b2cfe261beca4a275d5b4b562acaa12bd06 | /zentral/contrib/monolith/events/__init__.py | c0fbc28569df9cce42affaf22b924388cb7df0d3 | [
"Apache-2.0"
]
| permissive | mikemcdonald/zentral | 29dc0a6b3284be00ccc99ca3eb4ac2f4474c12a7 | 4aa03937abfbcea6480aa04bd99f4da7b8dfc923 | refs/heads/master | 2021-06-24T16:16:26.216665 | 2017-09-11T18:53:21 | 2017-09-11T18:53:21 | 103,157,487 | 0 | 1 | null | 2017-09-11T16:02:43 | 2017-09-11T16:02:43 | null | UTF-8 | Python | false | false | 3,038 | py | import logging
from zentral.core.events.base import BaseEvent, EventMetadata, EventRequest, register_event_type
logger = logging.getLogger('zentral.contrib.monolith.events')
ALL_EVENTS_SEARCH_DICT = {"tag": "monolith"}
class MonolithMunkiRequestEvent(BaseEvent):
event_type = "monolith_munki_request"
tags = ["monolith", "heartbeat"]
register_event_type(MonolithMunkiRequestEvent)
class MonolithSyncCatalogsRequestEvent(BaseEvent):
event_type = "monolith_sync_catalogs_request"
tags = ["monolith"]
register_event_type(MonolithSyncCatalogsRequestEvent)
class MonolithUpdateCacheServerRequestEvent(BaseEvent):
event_type = "monolith_update_cache_server_request"
tags = ["monolith"]
register_event_type(MonolithUpdateCacheServerRequestEvent)
class MonolithRepositoryUpdateEvent(BaseEvent):
event_type = "monolith_repository_update"
tags = ["monolith"]
payload_aggregations = [
("action", {"type": "terms", "bucket_number": 4, "label": "Decisions"}),
]
register_event_type(MonolithRepositoryUpdateEvent)
# Utility functions
def post_monolith_munki_request(msn, user_agent, ip, **payload):
MonolithMunkiRequestEvent.post_machine_request_payloads(msn, user_agent, ip, [payload])
def post_monolith_sync_catalogs_request(user_agent, ip):
event_class = MonolithSyncCatalogsRequestEvent
if user_agent or ip:
request = EventRequest(user_agent, ip)
else:
request = None
metadata = EventMetadata(event_class.event_type,
request=request,
tags=event_class.tags)
event = event_class(metadata, {})
event.post()
def post_monolith_cache_server_update_request(user_agent, ip, cache_server=None, errors=None):
event_class = MonolithUpdateCacheServerRequestEvent
if user_agent or ip:
request = EventRequest(user_agent, ip)
else:
request = None
metadata = EventMetadata(event_class.event_type,
request=request,
tags=event_class.tags)
if cache_server:
payload = cache_server.serialize()
payload["status"] = 0
else:
# flatten errors
payload = {"errors": {attr: ", ".join(err) for attr, err in errors.items()}}
payload["status"] = 1
event = event_class(metadata, payload)
event.post()
def post_monolith_repository_updates(repository, payloads, request=None):
event_class = MonolithRepositoryUpdateEvent
repository_serialized_info = repository.serialize_for_event()
if request:
request = EventRequest.build_from_request(request)
for index, payload in enumerate(payloads):
metadata = EventMetadata(event_class.event_type,
index=index,
request=request,
tags=event_class.tags)
payload.update({"repository": repository_serialized_info})
event = event_class(metadata, payload)
event.post()
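# A minimal usage sketch (added; the serial number, user agent, IP address and payload
# keys below are illustrative values only, not taken from this module):
#
# post_monolith_munki_request("C02ABC123XYZ", "munki/5.1", "203.0.113.7",
#                             type="sync_catalogs", status="ok")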
| [
"[email protected]"
]
| |
ff0eb43da6776fc0eac6b6f8c96830917c6afff1 | ba602dc67ad7bb50133aeb312f3c6c54627b3dec | /data/3955/WA_py/508593.py | 06675fcbb26d3e323e2a68115c546a11b7a4de8d | []
| no_license | Dearyyyyy/TCG | 0d21d89275906157372d775f33309ce337e6bc95 | 7b80de16de2d3f5d95a7c4ed95d45a9e38882e67 | refs/heads/master | 2020-12-27T23:19:44.845918 | 2020-02-04T01:59:23 | 2020-02-04T01:59:23 | 238,101,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | # coding=utf-8
while True:
a,b=input().split(" ")
a=str(a)
b=str(b)
for j in a:
c=j
break
for p in b:
d=p
if c==d:
print("YES")
else:
print("NO") | [
"[email protected]"
]
| |
c0512504cf1270b966a95b5ebacf96088a6be4f1 | 89f0df65abe01e273fd7cf0606727c777352ba47 | /Python/code_comp/Prog_camp/py_temp/app.py | 0c661bc68a19723f5a486a52948b941385912ac2 | []
| no_license | cqann/PRGM | 486122601b959cfbf7d9d2dc2a37caa858cf15a8 | 7387dafb65895528c042a3f1ab605fa5325056ce | refs/heads/master | 2022-02-16T00:59:32.342327 | 2022-01-27T16:55:46 | 2022-01-27T16:55:46 | 226,111,892 | 0 | 1 | null | 2020-11-16T17:41:44 | 2019-12-05T13:45:21 | Python | UTF-8 | Python | false | false | 15 | py | print("asdasd") | [
"[email protected]"
]
| |
45b72754c05463b85f4f32701ecd1784ceb7c7ed | fb7f1533b03d5ea083da8c7dce448c914f25d5a3 | /bookbuilder/book/migrations/0003_chapter_image_paragraph.py | cc6c0e1cfbab3c14f2533278220074b4f6f12cee | []
| no_license | Mark-Seaman/Book-Builder | 7fccfe60afe10564e666fd125ae22b4362abfac2 | 18b5c14c11000da576ea16908b019de0aab50d0b | refs/heads/master | 2023-01-09T14:23:25.641654 | 2020-11-11T22:52:39 | 2020-11-11T22:52:39 | 293,887,524 | 0 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | # Generated by Django 3.1.1 on 2020-09-24 17:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('book', '0002_auto_20200915_1812'),
]
operations = [
migrations.CreateModel(
name='Chapter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('chapter_num', models.IntegerField()),
('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.book')),
],
),
migrations.CreateModel(
name='Paragraph',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('order', models.IntegerField()),
('chapter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.chapter')),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('src', models.CharField(max_length=100)),
('alt', models.CharField(max_length=100)),
('order', models.IntegerField()),
('chapter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.chapter')),
],
),
]
| [
"[email protected]"
]
| |
ceabf86ddd55006ceff709e98ee806d5ec567bd0 | a54007706a09b387690f79fd7ffd889decad42f1 | /day32/18_特殊的where方法.py | dd0736b5dc3881b37cdcb9223e7e78cb823cfa21 | []
| no_license | lvah/201903python | d425534544a1f91e5b80b5ff0de5ca34037fe6e9 | 1415fcb7697dfa2884d94dcd8963477e12fe0624 | refs/heads/master | 2020-07-06T16:45:37.882819 | 2019-09-08T10:13:07 | 2019-09-08T10:13:07 | 203,082,401 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | import pandas as pd
import numpy as np
import string
# &**********series中的where方法运行结果和numpy中完全不同;
s1 = pd.Series(np.arange(5), index=list(string.ascii_lowercase[:5]))
# print(s1.where(s1 > 3))
# 对象中不大于3的元素赋值为10;
print(s1.where(s1 > 3, 10))
# 对象中大于3的元素赋值为10;
print(s1.mask(s1 > 3, 10))
| [
"[email protected]"
]
| |
324f72699782843650fe7fd88f2690d0f6747fcb | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/lifesciences/v2beta/lifesciences-v2beta-py/google/cloud/lifesciences_v2beta/services/workflows_service_v2_beta/transports/grpc_asyncio.py | 26c6624f2b74e07859902273ec771ea853a3ab51 | [
"Apache-2.0"
]
| permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,640 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.lifesciences_v2beta.types import workflows
from google.longrunning import operations_pb2 # type: ignore
from .base import WorkflowsServiceV2BetaTransport, DEFAULT_CLIENT_INFO
from .grpc import WorkflowsServiceV2BetaGrpcTransport
class WorkflowsServiceV2BetaGrpcAsyncIOTransport(WorkflowsServiceV2BetaTransport):
"""gRPC AsyncIO backend transport for WorkflowsServiceV2Beta.
A service for running workflows, such as pipelines consisting
of Docker containers.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'lifesciences.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'lifesciences.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def run_pipeline(self) -> Callable[
[workflows.RunPipelineRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the run pipeline method over gRPC.
Runs a pipeline. The returned Operation's [metadata]
[google.longrunning.Operation.metadata] field will contain a
[google.cloud.lifesciences.v2beta.Metadata][google.cloud.lifesciences.v2beta.Metadata]
object describing the status of the pipeline execution. The
[response][google.longrunning.Operation.response] field will
contain a
[google.cloud.lifesciences.v2beta.RunPipelineResponse][google.cloud.lifesciences.v2beta.RunPipelineResponse]
object if the pipeline completes successfully.
**Note:** Before you can use this method, the *Life Sciences
Service Agent* must have access to your project. This is done
automatically when the Cloud Life Sciences API is first enabled,
but if you delete this permission you must disable and re-enable
the API to grant the Life Sciences Service Agent the required
permissions. Authorization requires the following `Google
IAM <https://cloud.google.com/iam/>`__ permission:
- ``lifesciences.workflows.run``
Returns:
Callable[[~.RunPipelineRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'run_pipeline' not in self._stubs:
self._stubs['run_pipeline'] = self.grpc_channel.unary_unary(
'/google.cloud.lifesciences.v2beta.WorkflowsServiceV2Beta/RunPipeline',
request_serializer=workflows.RunPipelineRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['run_pipeline']
__all__ = (
'WorkflowsServiceV2BetaGrpcAsyncIOTransport',
)
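
# A minimal wiring sketch (added; not part of the generated file). It assumes that
# application-default credentials are available and that the async client class is
# re-exported at the package root, which is the usual layout for generated clients:
#
#   from google.cloud.lifesciences_v2beta import WorkflowsServiceV2BetaAsyncClient
#
#   transport = WorkflowsServiceV2BetaGrpcAsyncIOTransport()
#   client = WorkflowsServiceV2BetaAsyncClient(transport=transport)
#   operation = await client.run_pipeline(request={"parent": "projects/my-proj/locations/us-central1"})
#   response = await operation.result()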
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
fdc71274dc682931542826201880a243b8d96ffc | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/leetCode/DP/BitwiseORsOfSubarray.py | 436f6200d70c69b2173aec53cf6d08c850f9a229 | []
| no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,238 | py | """
We have an array A of non-negative integers.
For every (contiguous) subarray B = [A[i], A[i+1], ..., A[j]] (with i <= j), we take the bitwise OR of all the elements in B, obtaining a result A[i] | A[i+1] | ... | A[j].
Return the number of possible results. (Results that occur more than once are only counted once in the final answer.)
Example 1:
Input: [0]
Output: 1
Explanation:
There is only one possible result: 0.
Example 2:
Input: [1,1,2]
Output: 3
Explanation:
The possible subarrays are [1], [1], [2], [1, 1], [1, 2], [1, 1, 2].
These yield the results 1, 1, 2, 1, 3, 3.
There are 3 unique values, so the answer is 3.
Example 3:
Input: [1,2,4]
Output: 6
Explanation:
The possible results are 1, 2, 3, 4, 6, and 7.
OR together every subarray and count how many unique results there are.

The idea is DP.

A first, wrong attempt:

[1, 1, 2, 2, 4]
A[0] = {1}
Build on A[0]: if a value is not already in A[0], add it; otherwise just inherit A[0].
A[1] = {1}
A[2] = {1, 2, 3}
A[3] = {1, 2, 3}
Up to here nothing looks wrong, because only ORs of adjacent elements happen to
have been taken.
A[4] = {1, 2, 3, 4, 5, 6, 7}
Here it breaks: 4 should not be OR-ed with that many values.

At this point it is unclear how to continue: if the previous results are merged in
as well, how do we guarantee that no OR ever combines two non-adjacent elements?
E.g. for
[1, 2, 4]
the OR of just [1, 4] must never be computed.

Re-worked, it should be:

[1]
A[0] = {1}
[ 1] [1, 1]
A[1] = {1}
Note: OR the new element with the previous step's values, but do not copy the
previous step's values themselves into the current set,
[ 2] [1, 1, 2] [ 1, 2]
A[2] = {2, 3}
i.e. build on the previous step without letting its results leak into this one and
skew the final answer.
---
For the final output, take one union over all the per-position sets.

Test address:
https://leetcode.com/contest/weekly-contest-100/problems/bitwise-ors-of-subarrays/
Accepted.
"""
class Solution(object):
    def subarrayBitwiseORs(self, A):
        """
        :type A: List[int]
        :rtype: int
        """
        if not A:
            return 0
        # dp[i] holds every distinct OR value of a subarray that ends at index i.
        dp = [{A[0]}]
        for i in range(1, len(A)):
            new = {A[i]}
            for j in dp[i - 1]:
                new.add(j | A[i])
            dp.append(new)
        # The answer is the number of distinct values across all ending positions.
        return len(set().union(*dp))
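
# A quick self-check (added, not part of the original exercise template); the expected
# counts come from the examples in the problem statement above.
if __name__ == "__main__":
    s = Solution()
    assert s.subarrayBitwiseORs([0]) == 1
    assert s.subarrayBitwiseORs([1, 1, 2]) == 3
    assert s.subarrayBitwiseORs([1, 2, 4]) == 6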
| [
"[email protected]"
]
| |
9a22c52ec3248feaf483c3d56b8667dd2f1e8c3d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_coding.py | a254547576f9229c8cf3428e8dfbf7c636522ad4 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py |
from xai.brain.wordbase.nouns._cod import _COD
# class header
class _CODING(_COD, ):
def __init__(self,):
_COD.__init__(self)
self.name = "CODING"
self.specie = 'nouns'
self.basic = "cod"
self.jsondata = {}
| [
"[email protected]"
]
| |
505b98c848c3caaceaa003ae39780cba0e75b26c | 23e55ab51e322a3c0f967976a84f42f70f8ab701 | /tensorflow/python/distribute/distribute_lib_test.py | bac623ada52edd601957dc1e7ebf2d63f452fb72 | [
"Apache-2.0"
]
| permissive | thangnvit/tensorflow | f58e7c2f95690f337361aa2973f2b84ac7e7f947 | c83887196eb717af66a7b3f008e970b4a226ff8f | refs/heads/master | 2021-02-21T17:51:56.030461 | 2020-03-06T07:55:33 | 2020-03-06T07:58:38 | 245,362,540 | 3 | 0 | Apache-2.0 | 2020-03-06T08:05:41 | 2020-03-06T08:05:40 | null | UTF-8 | Python | false | false | 21,712 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test DistributionStrategy, ReplicaContext, and supporting APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class _TestReplicaContext(distribute_lib.ReplicaContext):
def merge_call(self, fn, *args, **kwargs):
return kwargs["test_arg"]
def _get_test_variable(name, synchronization, aggregation):
return {
"name": name,
"synchronization": synchronization,
"aggregation": aggregation
}
def _test_input_fn(input_context):
del input_context
return dataset_ops.DatasetV2.from_tensors(1.).repeat()
class _TestStrategy(distribute_lib.Strategy):
def __init__(self):
super(_TestStrategy, self).__init__(_TestExtended(self))
class _TestExtended(distribute_lib.StrategyExtendedV1):
def __init__(self, distribute):
super(_TestExtended, self).__init__(distribute)
worker_device_pairs = [("", ["/device:CPU:0"])]
self._input_workers = input_lib.InputWorkers(worker_device_pairs)
def _call_for_each_replica(self, fn, args, kwargs):
with _TestReplicaContext(
self._container_strategy(),
replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
return fn(*args, **kwargs)
def _create_variable(self, next_creator, **kwargs):
return _get_test_variable(kwargs["name"], kwargs["synchronization"],
kwargs["aggregation"])
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
[distribute_lib.InputContext()],
self._container_strategy())
def _experimental_distribute_datasets_from_function(self, dataset_fn):
return dataset_fn(distribute_lib.InputContext())
def _local_results(self, value):
return (value,)
def _reduce_to(self, reduce_op, value, destinations, experimental_hints):
del reduce_op, destinations, experimental_hints
return value
def _experimental_make_numpy_dataset(self, numpy_input, session):
del session
return dataset_ops.DatasetV2.from_tensor_slices(numpy_input)
def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
initial_loop_values=None):
# TODO(tomhennigan) This is missing many things (e.g. ctx.run_op).
ctx = input_lib.MultiStepContext()
for _ in range(iterations):
fn(ctx, iterator.get_next())
return ctx
def _update(self, var, fn, args, kwargs, group):
# The implementations of _update() and _update_non_slot() are identical
# except _update() passes `var` as the first argument to `fn()`.
return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._unwrap, result)
def _assert_in_default_state(t):
t.assertIs(ds_context._get_default_replica_context(),
ds_context.get_replica_context())
t.assertIs(None, ds_context.get_cross_replica_context())
t.assertFalse(ds_context.in_cross_replica_context())
t.assertIs(ds_context._get_default_strategy(), ds_context.get_strategy())
t.assertFalse(ds_context.has_strategy())
def _run_in_and_out_of_scope(unbound_test_method):
def wrapper(test_case):
dist = _TestStrategy()
# Running in the default (replica) scope should be supported.
_assert_in_default_state(test_case)
unbound_test_method(test_case, dist)
# As well as running in the strategy scope.
with dist.scope():
unbound_test_method(test_case, dist)
_assert_in_default_state(test_case)
# When run under a different strategy the test method should fail.
another_strategy = _TestStrategy()
msg = "Mixing different .*Strategy objects"
with test_case.assertRaisesRegexp(RuntimeError, msg):
with another_strategy.scope():
unbound_test_method(test_case, dist)
return wrapper
class TestStrategyTest(test.TestCase):
def testCallForEachReplica(self):
_assert_in_default_state(self)
dist = _TestStrategy()
def run_fn():
replica_context = ds_context.get_replica_context()
self.assertTrue(replica_context is not None)
self.assertIs(None, ds_context.get_cross_replica_context())
self.assertFalse(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
self.assertEqual("foo", replica_context.merge_call(None, test_arg="foo"))
expected_value = _get_test_variable(
"bar", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="bar"))
dist.extended.call_for_each_replica(run_fn)
with dist.scope():
dist.extended.call_for_each_replica(run_fn)
_assert_in_default_state(self)
def testScope(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
self.assertIs(None, ds_context.get_replica_context())
self.assertIs(dist, ds_context.get_cross_replica_context())
self.assertTrue(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="baz"))
_assert_in_default_state(self)
def testScopeDeviceNestingError(self):
_assert_in_default_state(self)
dist = _TestStrategy()
# Open a device scope with dist.scope().
dist.extended._default_device = "/device:GPU:0"
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with ops.device("/device:CPU:0"):
with self.assertRaisesRegexp(RuntimeError, "Device scope nesting error"):
scope.__exit__(None, None, None)
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testScopeVarCreatorNestingError(self):
def creator(next_creator, **kwargs):
return next_creator(**kwargs)
_assert_in_default_state(self)
dist = _TestStrategy()
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with variable_scope.variable_creator_scope(creator):
with self.assertRaisesRegexp(RuntimeError,
"Variable creator scope nesting error"):
scope.__exit__(None, None, None)
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testScopeVarScopeNestingError(self):
# We create a new graph here to simplify clean-up, since the error
# we are triggering happens in the middle of scope.__exit__() and
# leaves us in a weird state.
with ops.Graph().as_default():
_assert_in_default_state(self)
dist = _TestStrategy()
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with variable_scope.variable_scope("AA"):
with self.assertRaisesRegexp(RuntimeError,
"Variable scope nesting error"):
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testSettingSynchronizationAndAggregation(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.ON_WRITE,
variable_scope.VariableAggregation.MEAN)
self.assertDictEqual(
expected_value,
variable_scope.variable(
1.0,
name="baz",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN))
_assert_in_default_state(self)
def testSetStrategy(self):
_assert_in_default_state(self)
dist = _TestStrategy()
dist2 = _TestStrategy()
ds_context.experimental_set_strategy(dist)
self.assertIs(None, ds_context.get_replica_context())
self.assertIs(dist, ds_context.get_cross_replica_context())
self.assertTrue(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="baz"))
ds_context.experimental_set_strategy(dist2)
self.assertIs(dist2, ds_context.get_strategy())
ds_context.experimental_set_strategy(None)
_assert_in_default_state(self)
def testSetStrategyInScope(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
with self.assertRaisesRegexp(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(_TestStrategy())
with self.assertRaisesRegexp(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(dist)
with self.assertRaisesRegexp(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(None)
_assert_in_default_state(self)
def testSameScopeNesting(self):
_assert_in_default_state(self)
dist = _TestStrategy()
scope_a = dist.scope()
with scope_a:
self.assertIs(dist, ds_context.get_strategy())
scope_b = dist.scope()
with scope_b:
self.assertIs(dist, ds_context.get_strategy())
with scope_a:
self.assertIs(dist, ds_context.get_strategy())
self.assertIs(dist, ds_context.get_strategy())
self.assertIs(dist, ds_context.get_strategy())
dist2 = _TestStrategy()
scope2 = dist2.scope()
with self.assertRaisesRegexp(
RuntimeError,
"Mixing different tf.distribute.Strategy objects"):
with scope2:
pass
_assert_in_default_state(self)
with scope_b:
self.assertIs(dist, ds_context.get_strategy())
_assert_in_default_state(self)
@_run_in_and_out_of_scope
def testMakeInputFnIterator(self, dist):
self.assertIsNotNone(dist.make_input_fn_iterator(_test_input_fn))
@_run_in_and_out_of_scope
def testReduce(self, dist):
x = constant_op.constant(1.)
x_r = dist.reduce(reduce_util.ReduceOp.MEAN, x, axis=None)
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
def testReductions_acceptStringOps(self):
dist = _TestStrategy()
for op in ("mean", "MEAN", "sum", "SUM"):
x = constant_op.constant(1.)
y = constant_op.constant(1.)
x_r = dist.reduce(op, x, axis=None)
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
x_r = dist.extended.reduce_to(op, x, "/CPU:0")
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
x_r, y_r = dist.extended.batch_reduce_to(op,
((x, "/CPU:0"), (y, "/CPU:0")))
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
self.assertEqual(self.evaluate(y), self.evaluate(y_r))
@_run_in_and_out_of_scope
def testExperimentalMakeNumpyDataset(self, dist):
numpy_input = np.ones([10], dtype=np.float32)
dataset = dist.experimental_make_numpy_dataset(numpy_input)
self.assertEqual(
self.evaluate(dataset.reduce(0., lambda a, b: a + b)), 10.)
@_run_in_and_out_of_scope
def testExperimentalRunStepsOnIterator(self, dist):
all_inputs = []
dataset = dataset_ops.Dataset.from_tensors(1.).repeat()
dist.extended.experimental_run_steps_on_iterator(
lambda _, inputs: all_inputs.append(self.evaluate(inputs)),
dataset_ops.make_one_shot_iterator(dataset))
self.assertEqual(all_inputs, [1.])
@_run_in_and_out_of_scope
def testReduceTo(self, dist):
x = constant_op.constant(1.)
x_r = dist.extended.reduce_to(reduce_util.ReduceOp.MEAN, x, "/CPU:0")
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
@_run_in_and_out_of_scope
def testBatchReduceTo(self, dist):
x = constant_op.constant(1.)
y = constant_op.constant(1.)
x_r, y_r = dist.extended.batch_reduce_to(reduce_util.ReduceOp.MEAN,
((x, "/CPU:0"), (y, "/CPU:0")))
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
self.assertEqual(self.evaluate(y), self.evaluate(y_r))
@_run_in_and_out_of_scope
def testUpdate(self, dist):
with dist.scope():
v = variables.Variable(1.)
t = constant_op.constant(2.)
def assign_fn(vv, tt):
self.assertIs(vv, v)
self.assertIs(tt, t)
dist.extended.update(v, assign_fn, (t,))
@_run_in_and_out_of_scope
def testUpdateAutoGraph(self, dist):
with dist.scope():
v = variables.Variable(1.)
t = constant_op.constant(2.)
def assign_fn(unused_vv, unused_tt):
self.assertTrue(converter_testing.is_inside_generated_code())
@def_function.function # AutoGraph is default-on only within tf.function
def test_fn():
dist.extended.update(v, assign_fn, (t,))
test_fn()
@_run_in_and_out_of_scope
def testUpdateNonSlot(self, dist):
t = constant_op.constant(2.)
update_calls = []
dist.extended.update_non_slot(t, lambda: update_calls.append(1))
self.assertEqual(len(update_calls), 1)
@_run_in_and_out_of_scope
def testUpdateNonSlotAutoGraph(self, dist):
t = constant_op.constant(2.)
def update_fn():
self.assertTrue(converter_testing.is_inside_generated_code())
@def_function.function # AutoGraph is default-on only within tf.function
def test_fn():
dist.extended.update_non_slot(t, update_fn)
test_fn()
# _TestStrategy2 is like _TestStrategy, except it doesn't change variable
# creation.
class _TestStrategy2(distribute_lib.Strategy):
def __init__(self):
super(_TestStrategy2, self).__init__(_TestExtended2(self))
class _TestExtended2(_TestExtended):
def _create_variable(self, next_creator, **kwargs):
return next_creator(**kwargs)
class DefaultDistributionStrategyTest(test.TestCase, parameterized.TestCase):
def testMergeCall(self):
_assert_in_default_state(self)
def merge_fn(dist, s):
self.assertIs(ds_context._get_default_strategy(), dist)
self.assertIs(None, ds_context.get_replica_context())
self.assertIs(dist, ds_context.get_cross_replica_context())
self.assertTrue(ds_context.in_cross_replica_context())
self.assertIs(dist, ds_context.get_strategy())
self.assertFalse(ds_context.has_strategy())
return "foo_" + s
replica_ctx = ds_context.get_replica_context()
self.assertIs(ds_context._get_default_replica_context(), replica_ctx)
self.assertEqual("foo_bar", replica_ctx.merge_call(merge_fn, args=("bar",)))
_assert_in_default_state(self)
def testMergeCallAutoGraph(self):
_assert_in_default_state(self)
def merge_fn(_, s):
self.assertTrue(converter_testing.is_inside_generated_code())
return s
@def_function.function # AutoGraph is default-on only within tf.function
def test_fn():
replica_ctx = ds_context.get_replica_context()
replica_ctx.merge_call(merge_fn, args=("bar",))
test_fn()
def testScopeMostlyNoOp(self):
_assert_in_default_state(self)
test_strategy = _TestStrategy2()
with test_strategy.scope():
variable_scope.variable(1.0, name="before")
default_strategy = ds_context._get_default_strategy()
scope = default_strategy.scope()
with scope:
_assert_in_default_state(self)
with test_strategy.scope():
with self.assertRaisesRegexp(
RuntimeError, "Mixing different tf.distribute.Strategy objects"):
variable_scope.variable(1.0, name="error")
with scope:
_assert_in_default_state(self)
with test_strategy.scope():
with self.assertRaisesRegexp(
RuntimeError, "Mixing different tf.distribute.Strategy objects"):
variable_scope.variable(1.0, name="also_error")
_assert_in_default_state(self)
_assert_in_default_state(self)
with test_strategy.scope():
variable_scope.variable(1.0, name="after")
def testExperimentalRunV2(self):
default_strategy = ds_context._get_default_strategy()
dataset = dataset_ops.Dataset.range(10).batch(2)
iterator = default_strategy.extended._make_dataset_iterator(dataset)
next_val = iterator.get_next()
def train_step(input_data):
return input_data
for _ in range(2):
default_strategy.experimental_run_v2(train_step, args=(next_val,))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testDistributedDatasets(self):
default_strategy = ds_context._get_default_strategy()
if context.executing_eagerly():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset = default_strategy.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
next_val = next(iter(dist_dataset))
else:
dataset_fn = lambda _: dataset_ops.DatasetV1.range(10).batch(2)
dist_dataset = default_strategy.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
iterator = dist_dataset.make_initializable_iterator()
self.evaluate(iterator.initializer)
next_val = iterator.get_next()
self.assertAllEqual([0, 1], self.evaluate(next_val))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testDistributedDatasetsFromFunction(self):
default_strategy = ds_context._get_default_strategy()
if context.executing_eagerly():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset_from_func = \
default_strategy.experimental_distribute_datasets_from_function(
dataset_fn)
next_val = next(iter(dist_dataset_from_func))
self.assertAllEqual([0, 1], self.evaluate(next_val))
else:
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset_from_func = \
default_strategy.experimental_distribute_datasets_from_function(
dataset_fn)
dataset_ops.make_initializable_iterator(dist_dataset_from_func)
class InputContextTest(test.TestCase):
def testProperties(self):
input_context = distribute_lib.InputContext(
num_input_pipelines=2, input_pipeline_id=1, num_replicas_in_sync=6)
self.assertEqual(6, input_context.num_replicas_in_sync)
self.assertEqual(1, input_context.input_pipeline_id)
self.assertEqual(2, input_context.num_input_pipelines)
def testPerReplicaBatchSize(self):
input_context = distribute_lib.InputContext(
num_input_pipelines=2, input_pipeline_id=1, num_replicas_in_sync=6)
self.assertEqual(2, input_context.get_per_replica_batch_size(12))
with self.assertRaises(ValueError):
input_context.get_per_replica_batch_size(13)
def testStr(self):
input_context = distribute_lib.InputContext(
num_input_pipelines=1, input_pipeline_id=0, num_replicas_in_sync=42)
self.assertEqual(
"tf.distribute.InputContext(input pipeline id 0, total: 1)",
str(input_context))
input_context = distribute_lib.InputContext(
num_input_pipelines=3, input_pipeline_id=1, num_replicas_in_sync=42)
self.assertEqual(
"tf.distribute.InputContext(input pipeline id 1, total: 3)",
str(input_context))
if __name__ == "__main__":
test.main()
| [
"[email protected]"
]
| |
9b803a8bef5841f4caa1aca59c39e166c2b74190 | dcab6930a95a3c5530e9b9bfba0e495667c98599 | /Data_Analysis/Data_camp_lecture/Manipulation(Pandas)/summarystatistics_ex4.py | e9819acf44e289ed025cdce3b8e86af349c1d586 | []
| no_license | wxlovolxw/GIWON-S-STUDY | 7db1fb30dfc16c8bc60592d0696434f1482ecdde | 6a622b5d372741b4f9d215f649235353f3e645cd | refs/heads/master | 2023-06-23T06:05:03.058692 | 2021-07-26T05:37:35 | 2021-07-26T05:37:35 | 283,812,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | # Sort sales_1_1 by date
sales_1_1 = sales_1_1.sort_values("date",ascending=True)
# Get the cumulative sum of weekly_sales, add as cum_weekly_sales col
sales_1_1["cum_weekly_sales"] = sales_1_1.weekly_sales.cumsum()
# Get the cumulative max of weekly_sales, add as cum_max_sales col
sales_1_1["cum_max_sales"] = sales_1_1.weekly_sales.cummax()
# See the columns you calculated
print(sales_1_1[["date", "weekly_sales", "cum_weekly_sales", "cum_max_sales"]])
| [
"[email protected]"
]
| |
491113ea1a9970929b7916b82c56331f33432aee | abfcee924f57ee2011443703d4869f828e548910 | /account_move_import/__openerp__.py | be0b63316135994bd728cd6f5bb5313233dd3bcd | []
| no_license | Comunitea/external_modules | fb68cbf84cee1c6aa748f4f10e2999b9bb6aadf5 | 9718281e31b4a4f6395d8bed54adf02799df6221 | refs/heads/8.0 | 2023-09-03T17:18:37.652200 | 2022-02-10T09:33:33 | 2022-02-10T09:33:33 | 49,890,295 | 4 | 33 | null | 2022-10-10T11:34:24 | 2016-01-18T16:47:40 | Python | UTF-8 | Python | false | false | 1,427 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
#
# Copyright (c) 2009-2015 Noviat nv/sa (www.noviat.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Move Import',
'version': '8.0.0.4.1',
'license': 'AGPL-3',
'author': 'Noviat',
'website': 'http://www.noviat.com',
'category': 'Accounting & Finance',
'summary': 'Import Accounting Entries',
'depends': ['account'],
'data': [
'views/account_move.xml',
'wizard/import_move_line_wizard.xml',
],
'demo': [
'demo/account_move.xml',
],
'installable': True,
}
| [
"[email protected]"
]
| |
1b735b2356f26bdb52cb9b1903b806556a163b23 | 6fce025097cebfd9d1dd37f6611e7fdfdbea90e6 | /rainfields/band_quest/data_loader.py | 1e4559f99a809d5068facd1ae4e57f3b0fd4f369 | []
| no_license | ANU-WALD/pluvi_pondus | ec0439d19acdcf4fdf712d6b14a1714297d661b2 | ff8680f7115ab2cb75138bf6705abb59618e47d1 | refs/heads/master | 2021-07-01T14:32:14.501631 | 2020-08-22T09:41:28 | 2020-08-22T09:41:28 | 138,804,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,872 | py | import xarray as xr
import tensorflow as tf
import numpy as np
import datetime
import os
import random
tf.compat.v1.enable_eager_execution()
class gen:
def __call__(self, fname, band, jitter=False):
dsg = xr.open_dataset(fname.decode("utf-8"))
for t in dsg.time:
d = datetime.datetime.utcfromtimestamp(t.astype(int) * 1e-9)
if not os.path.isfile("/data/pluvi_pondus/Rainfields/310_{}.prcp-c10.nc".format(d.strftime("%Y%m%d_%H%M%S"))):
continue
if np.datetime64(d) not in dsg.time.data:
continue
rf_fp = "/data/pluvi_pondus/Rainfields/310_{}.prcp-c10.nc".format(d.strftime("%Y%m%d_%H%M%S"))
dsp = xr.open_dataset(rf_fp)
prec = dsp['precipitation'].data[2::2, 402::2]
b = dsg['B{}'.format(band)].sel(time=t).data[2::2, 402::2]
# Added Normalisation
b = b / 273
yield (b[:, :, None], prec[:, :, None])
dsg.close()
def CompleteFNames(fnames, band):
d = None
if band in [8, 12]:
d = '0812'
elif band in [9, 10]:
d = '0910'
elif band in [11, 13]:
d = '1113'
elif band in [14, 15]:
d = '1415'
return [path.format(d) for path in fnames]
def HimfieldsDataset(fnames, band, batch_size=2):
fnames = CompleteFNames(fnames, band)
print(fnames)
ds = tf.data.Dataset.from_tensor_slices(fnames)
ds = ds.interleave(lambda fname: tf.data.Dataset.from_generator(gen(), (tf.float32, tf.float32), (tf.TensorShape([1024, 1024, 1]), tf.TensorShape([1024, 1024, 1])), args=(fname, band)), cycle_length=len(fnames), block_length=1, num_parallel_calls=None)
ds = ds.shuffle(128, seed=None)
ds = ds.batch(batch_size)
return ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
| [
"[email protected]"
]
| |
782fb8abbf0dca149bb77484bf1ff34f827f910f | 52fc25b679bfb962a17c18420d16692706f8697e | /WebScrape3.py | 9b2b919f854ffe3786389c0ee5b7495e3988c244 | []
| no_license | adanque/Data-Gathering-Techniques-using-APIs-and-Web-scraping-with-Python | 15fecd4da2f71a065cff6be2f2aa67410bc0bb75 | 7d3dc205e4aba5bd9444513fdce465d31805f178 | refs/heads/main | 2023-03-15T08:42:27.212870 | 2021-03-21T21:08:01 | 2021-03-21T21:08:01 | 349,256,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | #from BeautifulSoup import BeautifulSoup
from urllib.request import urlopen
from bs4 import BeautifulSoup as bs
soup = bs(
    urlopen('http://www.kitco.com/kitco-gold-index.html').read()) | [
"[email protected]"
]
| |
30731d061dac5ddc8fbcbd873b5ac8a79e08f327 | 665d9bad46e68f779512640e582d2522867b0dba | /Heap/378. Kth Smallest Element in a Sorted Matrix.py | 3569b8b060beb9f019c34d33633df7b42a22d291 | []
| no_license | RahatIbnRafiq/leetcodeProblems | 6fd1e9726b14b7ad3571e5a4af5665b72f7aee0a | 2d35590d189938e0705a21be110e75e0b209ea96 | refs/heads/master | 2021-11-25T11:58:20.498911 | 2021-11-25T00:43:04 | 2021-11-25T00:43:04 | 72,865,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | import heapq
class Solution(object):
def kthSmallest(self, matrix, k):
return list(heapq.merge(*matrix))[k-1]
| [
"[email protected]"
]
| |
6f46684631905e99275e3e85d175b2b16e8a63d0 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merraRF882/391-tideGauge.py | 17fa5644f4a0e20042369b1e1ddd12b589bc5156 | []
| no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,456 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
"""
run KFOLD method for regression
"""
#defining directories
dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
#cd to the lagged predictors directory
os.chdir(dir_in)
x = 391
y = 392
#empty dataframe for model validation
df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse'])
#looping through
for tg in range(x,y):
os.chdir(dir_in)
#filter only .csv files
tgNames = []
for file in glob.glob("*.csv"):
tgNames.append(file)
tg_name = sorted(tgNames)[tg]
print(tg_name)
##########################################
#check if this tg is already taken care of
##########################################
os.chdir(dir_out)
if os.path.isfile(tg_name):
print("this tide gauge is already taken care of")
return "file already analyzed!"
os.chdir(dir_in)
#load predictor
pred = pd.read_csv(tg_name)
pred.drop('Unnamed: 0', axis = 1, inplace = True)
#add squared and cubed wind terms (as in WPI model)
pickTerms = lambda x: x.startswith('wnd')
wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
wnd_sqr = pred[wndTerms]**2
wnd_cbd = pred[wndTerms]**3
pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
#standardize predictor data
dat = pred.iloc[:,1:]
scaler = StandardScaler()
print(scaler.fit(dat))
dat_standardized = pd.DataFrame(scaler.transform(dat), \
columns = dat.columns)
pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
#load surge data
os.chdir(surge_path)
surge = pd.read_csv(tg_name)
surge.drop('Unnamed: 0', axis = 1, inplace = True)
#remove duplicated surge rows
surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
surge.reset_index(inplace = True)
surge.drop('index', axis = 1, inplace = True)
#adjust surge time format to match that of pred
time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
#merge predictors and surge to find common time frame
pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
pred_surge.sort_values(by = 'date', inplace = True)
#find rows that have nans and remove them
row_nan = pred_surge[pred_surge.isna().any(axis =1)]
pred_surge.drop(row_nan.index, axis = 0, inplace = True)
pred_surge.reset_index(inplace = True)
pred_surge.drop('index', axis = 1, inplace = True)
#in case pred and surge don't overlap
if pred_surge.shape[0] == 0:
print('-'*80)
print('Predictors and Surge don''t overlap')
print('-'*80)
continue
pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
pred_surge['date'])), \
columns = ['date'])
#prepare data for training/testing
X = pred_surge.iloc[:,1:-1]
y = pd.DataFrame(pred_surge['surge'])
y = y.reset_index()
y.drop(['index'], axis = 1, inplace = True)
#apply PCA
pca = PCA(.95)
pca.fit(X)
X_pca = pca.transform(X)
#apply 10 fold cross validation
kf = KFold(n_splits=10, random_state=29)
metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
for train_index, test_index in kf.split(X):
X_train, X_test = X_pca[train_index], X_pca[test_index]
y_train, y_test = y['surge'][train_index], y['surge'][test_index]
#train regression model
rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
min_samples_leaf = 1)
rf.fit(X_train, y_train)
#predictions
predictions = rf.predict(X_test)
# pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
# pd.DataFrame(np.array(y_test))], \
# axis = 1)
# pred_obs.columns = ['pred', 'obs']
# combo = pd.concat([combo, pred_obs], axis = 0)
#evaluation matrix - check p value
if stats.pearsonr(y_test, predictions)[1] >= 0.05:
print("insignificant correlation!")
continue
else:
print(stats.pearsonr(y_test, predictions))
metric_corr.append(stats.pearsonr(y_test, predictions)[0])
print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
print()
metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
#number of years used to train/test model
num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
pred_surge['date'][0]).days/365
longitude = surge['lon'][0]
latitude = surge['lat'][0]
num_pc = X_pca.shape[1] #number of principal components
corr = np.mean(metric_corr)
rmse = np.mean(metric_rmse)
print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
np.mean(metric_rmse), '\n')
#original size and pca size of matrix added
new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse']
df = pd.concat([df, new_df], axis = 0)
#save df as cs - in case of interruption
os.chdir(dir_out)
df.to_csv(tg_name)
#run script
validateRF()
| [
"[email protected]"
]
| |
488dc76a0442a7b08a9df5d702b3718760e75d5e | 9851c3f47c1aa165bc0d239074fe238f82055875 | /LeetCode/0412. Fizz Buzz/solution.py | bc2af790c4bad5cd76258e058f6079a7df8e9841 | [
"Apache-2.0"
]
| permissive | InnoFang/algo-set | 12f886dbec0da664327d26bcaf02c1316151a643 | 2419a7d720bea1fd6ff3b75c38342a0ace18b205 | refs/heads/master | 2023-03-16T09:51:24.631068 | 2023-03-13T11:08:54 | 2023-03-13T11:08:54 | 86,413,001 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | """
8 / 8 test cases passed.
Runtime: 28 ms
Memory Usage: 15.2 MB
"""
from typing import List

class Solution:
def fizzBuzz(self, n: int) -> List[str]:
return ["FizzBuzz" if i % 3 == 0 and i % 5 == 0 else \
"Fizz" if i % 3 == 0 else \
"Buzz" if i % 5 == 0 else \
str(i) for i in range(1, n + 1)]
| [
"[email protected]"
]
| |
9d0ed4a9bba517cc8e6767aa4c0fff77878212c4 | 1edfd072fae205d766e7c488f1af64f3af9fc23a | /src/python/sensors/microphone/microphone.py | c2f1fa4bf16a131848871f8e709d0692804cbd54 | []
| no_license | kth-social-robotics/multisensoryprocessing | 17fc96eb3776642de1075103eeb461125020c892 | 867abe6c921fbf930ac26e0f43a8be0404817bcd | refs/heads/master | 2021-01-21T11:50:16.348566 | 2018-11-05T14:48:42 | 2018-11-05T14:48:42 | 102,027,696 | 4 | 2 | null | 2018-02-20T15:14:22 | 2017-08-31T17:39:58 | C++ | UTF-8 | Python | false | false | 2,429 | py | import pyaudio
import sys
import time
import msgpack
sys.path.append('../..')
import numpy as np
import re
from shared import create_zmq_server, MessageQueue
import sys
import wave
import datetime
if len(sys.argv) != 2:
exit('please only supply sound card name')
device_names_string = sys.argv[1]
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
CHUNK = 2000
zmq_socket_1, zmq_server_addr_1 = create_zmq_server()
zmq_socket_2, zmq_server_addr_2 = create_zmq_server()
mq = MessageQueue('microphone-sensor')
p = pyaudio.PyAudio()
device_index = None
for i in range(p.get_device_count()):
device = p.get_device_info_by_index(i)
if device['name'].startswith('[{}]'.format(device_names_string)):
device_index = i
if not device_index:
exit('please connect a proper soundcard')
device_names = device_names_string.split(',')
mq.publish(
exchange='sensors',
routing_key='microphone.new_sensor.{}'.format(device_names[0]),
body={'address': zmq_server_addr_1, 'file_type': 'audio'}
)
mq.publish(
exchange='sensors',
routing_key='microphone.new_sensor.{}'.format(device_names[1]),
body={'address': zmq_server_addr_2, 'file_type': 'audio'}
)
session_name = datetime.datetime.now().isoformat().replace('.', '_').replace(':', '_') + device_names_string
# Let's be on the safe side and recording this to the computer...
waveFile = wave.open('{}.wav'.format(session_name), 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(p.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
def callback(in_data, frame_count, time_info, status):
result = np.fromstring(in_data, dtype=np.uint16)
result = np.reshape(result, (frame_count, 2))
the_time = mq.get_shifted_time()
zmq_socket_1.send(msgpack.packb((result[:, 0].tobytes(), the_time)))
zmq_socket_2.send(msgpack.packb((result[:, 1].tobytes(), the_time)))
waveFile.writeframes(in_data)
return None, pyaudio.paContinue
stream = p.open(
format=FORMAT,
channels=CHANNELS,
rate=RATE,
input_device_index=device_index,
input=True,
frames_per_buffer=CHUNK,
stream_callback=callback
)
try:
input('[*] Serving at {} and {}. To exit press enter'.format(zmq_server_addr_1, zmq_server_addr_2))
finally:
waveFile.close()
stream.stop_stream()
stream.close()
zmq_socket_1.send(b'CLOSE')
zmq_socket_2.send(b'CLOSE')
zmq_socket_1.close()
zmq_socket_2.close()
| [
"[email protected]"
]
| |
bc7bec551e2c03787c3416668a264b12cadc4258 | c18e1fa174e1b0e6d56e9f1a8a3708099c3cd248 | /learning_from_mcvine/res_sims/Ei_30/E-7.98574177896_hkl-1.2631196834,-0.985399386564,0.344825718364/run.py | c8c8bb10719d8e3c5cea76da9babf3f432f8b649 | []
| no_license | pnave95/ORNL_public_research | e0662657b41969f3f3dc263ea4c62a042d85547a | 58cad7508f9d29c17af5419f05522c2f724e717e | refs/heads/master | 2021-01-20T00:39:58.308250 | 2017-06-22T20:03:57 | 2017-06-22T20:03:57 | 89,160,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | #!/usr/bin/env python
import mcvine.cli
from numpy import array
from mcvine_workflow.singlextal.resolution import use_res_comps as urc
beam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'
instrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')
samplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E-7.98574177896_hkl-1.2631196834,-0.985399386564,0.344825718364/sample/sampleassembly.xml'
psi = 9.102125241479965e-05
hkl2Q = array([[ -6.60765593e-01, 9.34283256e-01, -7.78047243e-17],
[ 6.60638026e-01, 4.67231832e-01, -8.09165116e-01],
[ -6.60638026e-01, -4.67231832e-01, -8.09165116e-01]])
pp = array([ 2.71888109, 1.26794542, -0.36478751])
pixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))
t_m2p = 0.0068059207318674521
Q = array([-0.04417126, -1.80163508, 0.51832987])
E = -7.9857417789580012
hkl_projection = array([-0.94671581, -0.60236987, 0.47499947])
urc.run(
beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,
Q, E, hkl_projection, Nbuffer=100000)
| [
"[email protected]"
]
| |
dee6e4cddedf7e291e576951014671d28cee09bb | 9b4bd7bb36d6e2d63973c724ca1ceb1c5e123ee1 | /launcher/deployment/migrations/0014_auto__add_field_deployment_remote_container_id__add_field_deployment_r.py | 5ad9d4cab5e81057db503de2651b3868113f73af | []
| no_license | zennro/launcher | 39de39345a15b1f544222503e5cf82992c1e62c8 | 25651d1ffa29adad18d1e003f69720bea9671d7c | refs/heads/master | 2020-04-20T19:16:59.492609 | 2014-02-27T16:22:30 | 2014-02-27T16:22:30 | 17,900,826 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,171 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Deployment.remote_container_id'
db.add_column(u'deployment_deployment', 'remote_container_id',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
# Adding field 'Deployment.remote_app_id'
db.add_column(u'deployment_deployment', 'remote_app_id',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Deployment.remote_container_id'
db.delete_column(u'deployment_deployment', 'remote_container_id')
# Deleting field 'Deployment.remote_app_id'
db.delete_column(u'deployment_deployment', 'remote_app_id')
models = {
u'deployment.deployment': {
'Meta': {'object_name': 'Deployment'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deploy_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'expiration_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'launch_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'deployments'", 'to': u"orm['deployment.Project']"}),
'reminder_mail_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'remote_app_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'remote_container_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Deploying'", 'max_length': '50'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'deployment.deploymenterrorlog': {
'Meta': {'object_name': 'DeploymentErrorLog'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deployment': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'error_log'", 'unique': 'True', 'to': u"orm['deployment.Deployment']"}),
'error_log': ('django.db.models.fields.TextField', [], {}),
'http_status': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'deployment.project': {
'Meta': {'ordering': "['name']", 'object_name': 'Project'},
'default_password': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'default_username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'github_url': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ports': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'status': ('model_utils.fields.StatusField', [], {'default': "'Inactive'", 'max_length': '100', u'no_check_for_status': 'True'}),
'survey_form_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['deployment'] | [
"[email protected]"
]
| |
e7959142b07770d2de4bb62d1aee359a7aaf3c7b | 161e01f92e3810edff17054851259e54a1432921 | /rapid7vmconsole/models/resources_user_account.py | 4c2e48f07118ad180885bdef93b98cfe414424cf | [
"MIT"
]
| permissive | Tofuhippo/vm-console-client-python | f3ffa3257b1928791fef090404377b43c3ff28d5 | 3c856923be1caf22c29a5d309713b8940546b57b | refs/heads/master | 2020-06-26T04:07:37.251955 | 2019-07-29T20:45:40 | 2019-07-29T20:45:40 | 199,524,127 | 0 | 0 | MIT | 2019-07-29T20:42:37 | 2019-07-29T20:42:37 | null | UTF-8 | Python | false | false | 4,187 | py | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from rapid7vmconsole.models.link import Link # noqa: F401,E501
from rapid7vmconsole.models.user_account import UserAccount # noqa: F401,E501
class ResourcesUserAccount(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'links': 'list[Link]',
'resources': 'list[UserAccount]'
}
attribute_map = {
'links': 'links',
'resources': 'resources'
}
def __init__(self, links=None, resources=None): # noqa: E501
"""ResourcesUserAccount - a model defined in Swagger""" # noqa: E501
self._links = None
self._resources = None
self.discriminator = None
if links is not None:
self.links = links
if resources is not None:
self.resources = resources
@property
def links(self):
"""Gets the links of this ResourcesUserAccount. # noqa: E501
Hypermedia links to corresponding or related resources. # noqa: E501
:return: The links of this ResourcesUserAccount. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ResourcesUserAccount.
Hypermedia links to corresponding or related resources. # noqa: E501
:param links: The links of this ResourcesUserAccount. # noqa: E501
:type: list[Link]
"""
self._links = links
@property
def resources(self):
"""Gets the resources of this ResourcesUserAccount. # noqa: E501
The resources returned. # noqa: E501
:return: The resources of this ResourcesUserAccount. # noqa: E501
:rtype: list[UserAccount]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this ResourcesUserAccount.
The resources returned. # noqa: E501
:param resources: The resources of this ResourcesUserAccount. # noqa: E501
:type: list[UserAccount]
"""
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResourcesUserAccount, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourcesUserAccount):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
aef74e6af7e67ac384a7f081ccb14a6821297285 | 7f76ae284ab2649def3da9d609beb4dbad9cb57d | /SnakesLadders/make_game.py | 718247d55956e1bcbfbca7d83b279b140d16efda | [
"CC0-1.0"
]
| permissive | robclewley/DataScotties | 0f696fe32debe1aee4f5fdc8e5fac4d9b94eeb99 | 63cca1c2fb5ffd75f4c99507ac497ae7cefec04d | refs/heads/master | 2021-01-10T05:17:12.412352 | 2016-02-29T23:51:11 | 2016-02-29T23:51:11 | 50,062,479 | 9 | 17 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | import snakesladders as SL
def make_game_from_dict(setup):
"""
`setup` parameter is a dictionary, e.g. loaded from JSON file
Returns a GameFSM object configured from the dictionary.
"""
game = SL.GameFSM(setup['size'])
for s1, s2 in setup['snakes']:
game.all_states[s1].link = s2
for l1, l2 in setup['ladders']:
game.all_states[l1].link = l2
game.make_state_kinds()
return game
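
# Minimal usage sketch (not part of the original module). The 'size',
# 'snakes' and 'ladders' keys follow make_game_from_dict() above; the
# board layout below is made-up example data, and SL.GameFSM is assumed
# to expose all_states/make_state_kinds as used in that function.
if __name__ == '__main__':
    example_setup = {
        'size': 20,
        'snakes': [(17, 4), (12, 3)],   # snake head -> tail
        'ladders': [(2, 14), (7, 16)],  # ladder bottom -> top
    }
    game = make_game_from_dict(example_setup)
    print(game)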
| [
"[email protected]"
]
| |
c39c260b4df726acbb3317afe5c554771a0f41df | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc073/A/4905897.py | 82d6df3f77d1a5ec614d18c5565e9bea8047e7fd | []
| no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | n = input()
if n.count('9') > 0:
print('Yes')
else:
print('No') | [
"[email protected]"
]
| |
9359c407b694b3443b2748371e2164ab388b93b7 | b155be1edeac8183736ababc64b52f07f15e3269 | /appengine/swarming/handlers_exceptions.py | dcacd94a38946b90180532a2ab8c9ffdfdf66fd9 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | luci/luci-py | d9ef3325a2b193d3f127363c012fe60860ea91fd | 10cc5fdcca53e2a1690867acbe6fce099273f092 | refs/heads/main | 2022-11-26T09:32:20.640834 | 2022-11-24T15:11:30 | 2022-11-24T15:11:30 | 33,140,918 | 84 | 36 | Apache-2.0 | 2022-11-23T13:56:13 | 2015-03-30T18:39:14 | Python | UTF-8 | Python | false | false | 477 | py | # Copyright 2021 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Exceptions raised by methods called by prpc/endpoints handlers."""
class BadRequestException(Exception):
"""The request is invalid."""
class PermissionException(Exception):
"""Permission requirements are not fulfilled."""
class InternalException(Exception):
"""Unexpected error occurred."""
| [
"[email protected]"
]
| |
0fc9798741c58e929507d74f4a8591a6aef58c50 | 817defeeecbe0f9da401671dadecc3a4319e1915 | /ex43.py | b6ba10aa1a9b22214466786e2d7b1d0a76e65ce1 | []
| no_license | mrech/LearnPython_TheHardWay | 5099cb3f21b7f64b7c936efe2381fff00178e2be | 47b049c1f6539b9257a53e442a5ba701de3f75a3 | refs/heads/master | 2020-04-16T15:30:44.013671 | 2019-02-20T17:33:54 | 2019-02-20T17:33:54 | 165,705,302 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,564 | py | # Gothons from Planet Percal #25
'''
* noun (classes)
- verbs (functions)
* Map
- next_scene
- opening_scene
* Engine
- play
* Scene
-enter
* Death
* Central Corridor
* Laser Weapon Armory
* The Bridge
* Escape Pod
'''
# Basic imports for the game
from sys import exit # exit the interpreter
from random import randint
# base class Scene that will have the common things that all scenes do
class Scene(object):
def enter(self):
print('This scene is not yet configured. Subclass it and implement enter().')
exit(1)
class Engine(object):
def __init__(self, scene_map):
self.scene_map = scene_map # Engine has-a scene_map of some kind
def play(self):
current_scene = self.scene_map.opening_scene()
while True:
print('\n--------')
next_scene_name = current_scene.enter()
current_scene = self.scene_map.next_scene(next_scene_name)
# first scene Death
class Death(Scene):
quips = [
'You died. You kinda suck at this.',
'Such a loser.',
'I have a small puppy that\'s better at this.'
]
def enter(self):
print(Death.quips[randint(0, len(self.quips)-1)])
exit(1)
# CentralCorridor the start of the game
# I'm doing the scenes for the game before the Map because I need to reference them later
class CentralCorridor(Scene):
def enter(self):
print("The Gothons of Planet Percal #25 have invaded your ship and destroyed")
print("your entire crew. You are the last surviving member and your last")
print("mission is to get the neutron destruct bomb from the Weapons Armory,")
print("put it in the bridge, and blow the ship up after getting into an ")
print("escape pod.")
print("\n")
print("You're running down the central corridor to the Weapons Armory when")
print(
"a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume")
print("flowing around his hate filed body. He's blocking the door to the")
print("Armory and about to pull a weapon to blast you.")
action = input("> ")
if action == "shoot!":
print(
"Quick on the draw you yank out your blaster and fire it at the Gothon.")
print("His clown costume is flowing and moving around his body, which throws")
print(
"off your aim. Your laser hits his costume but misses him entirely. This")
print("makes him fly into a rage and blast you repeatedly in the face until")
print("you are dead. Then he eats you.")
return 'death'
elif action == "dodge!":
print("Like a world class boxer you dodge, weave, slip and slide right")
print("as the Gothon's blaster cranks a laser past your head.")
print("In the middle of your artful dodge your foot slips and you")
print("bang your head on the metal wall and pass out.")
print("You wake up shortly after only to die as the Gothon stomps on")
print("your head and eats you.")
return 'death'
elif action == "tell ajoke":
print("Lucky for you they made you learn Gothon insults in the academy.")
print("You tell the one Gothon joke you know:")
print("Ljb jfkdlas tyjkdfak jkfldsaufd, fullrearfek fdfas thiirs.")
print(
"The Gothon stops, tries not to laugh, then busts out laughing and can't move.")
print("While he's laughing you run up and shoot him square in the head")
print("putting him down, then jump through the Weapon Armory door.")
return 'laser_weapon_armory'
else:
print("DOES NOT COMPUTE!")
return 'central_corridor'
class LaserWeaponArmory(Scene):
def enter(self):
print("You do a dive roll into the Wapon Armory, crouch and scan the room")
print("for more Gothons that might be hiding. It's dead quiet, too quiet.")
print("You stand up and run to the far side of the room and find the")
print("neutron bomb in its container. There's a keypad lock on the box")
print("and you need the code to get the bomb out. If you get the code")
print("wrong 10 times then the lcok closes forever and you can't")
print("get the bomb. The code is 3 digits.")
# Permutation with repetitions 9**3
code = "{}{}{}".format(randint(1, 9), randint(1, 9), randint(1, 9))
guess = input("[keypad]> ")
guesses = 0
while guess != code and guesses < 10:
print("BZZZZZEDDD")
guesses += 1
guess = input("[keypad]> ")
if guess == code:
print("The container clicks open and the seal breaks, letting gas out.")
print("You grab the neutron bomb and run as fast as you can to the")
print("bridge where you must place it in the right spot.")
return 'the_bridge'
else:
print("The lock buzzes one last time and then you hear a sickening")
print("melting sound as the mechanism is fused together.")
print("You decided to sit there, and finally the Gothons blow up the")
print("ship from their ship and you die.")
return 'death'
class TheBridge(Scene):
def enter(self):
print("You burst onto the Bridge with the neutron destruct bomb")
print("under your arm and surprise 5 Gothons who are trying to")
print("take control of the ship. Each of them has an even uglier")
print("clown costume than the last. They haven't pulled their")
print("weapons out yet, as they see the active bomb under your")
print("arm and don't want to set it off.")
action = input("> ")
if action == "throw the bomb":
print("In a panic you throw the bomb at the group of Gothons")
print("and make a leap for the door. Right as you drop it a")
print("Gothon shoots you right in the back killing you.")
return 'death'
elif action == "slowly place the bomb":
print("You point your blaster at the bomb under your arm")
print("and the Gothons put their hands up and start to sweat.")
print("Now that the bomb is placed you run to the escape pod to")
return 'escape_pod'
else:
print("DOES NOT COMPUTE!")
return 'the_bridge'
class EscapePod(Scene):
def enter(self):
print("You rush through the ship desperately tring to make it")
print("You get to the chamber with the escape pods, and now need")
print("to pick one to take. Some of them could be damaged but you")
print("don't gave time to look. There's 5 pods, which one")
print("do you take?")
good_pod = randint(1, 5)
guess = input("[pod #]> ")
if int(guess) != good_pod:
print("You jump into pod {} and hit the eject button.".format(guess))
print("The pod escapes out into the void of space.")
return 'death'
else:
print("You jump into pod {} and hit the eject button.".format(guess))
print("The pod easily slides out into space heading to")
print("the planet below. The Gothon ship explode. You won!")
return 'finished'
# scene shown when the player wins; ends the program cleanly, like Death does
class Finished(Scene):
    def enter(self):
        print("You won! Good job.")
        exit(0)
class Map(object):
# Map's methods
# storing each scene by name in a dictionary, ref. to it Map.scenes
# Map comes after the scenes, because the dictionary has to refer to them so they have to exist
scenes = {
'central_corridor': CentralCorridor(),
'laser_weapon_armory': LaserWeaponArmory(),
'the_bridge': TheBridge(),
'escape_pod': EscapePod(),
'death': Death(),
'finished': Finished()
}
def __init__(self, start_scene):
self.start_scene = start_scene
def next_scene(self, scene_name):
return Map.scenes.get(scene_name)
def opening_scene(self):
return self.next_scene(self.start_scene)
a_map = Map('central_corridor') # code that runs the game by making map
a_game = Engine(a_map) # handing that map to an Engine
a_game.play() # running - play to make the game work
| [
"[email protected]"
]
| |
55c96c6259dbae33f993fa4591c05bbd163957fc | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/41/usersdata/138/21859/submittedfiles/gravitacional.py | e1574ee30ef0e241c176b4af46ebad86bb520b4d | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import funcoes
# INPUT
dimensao = input('Digite a dimensao das matrizes: ')
matrizA = input('Digite a Matriz A como uma única linha entre aspas: ')
matrizD = input('Digite a Matriz D como uma única linha entre aspas: ')
alfa = input('Digite o valor de alfa: ')
# PREPARING THE INPUT
T = np.zeros((dimensao,dimensao))
A = np.fromstring(matrizA, sep=' ').reshape(dimensao, dimensao)
d = np.fromstring(matrizD, sep=' ').reshape(dimensao, dimensao)
# start here...
# BEGIN
def somaC(m):
    # column sums of matrix m
    b = []
    for j in range(0, m.shape[1], 1):
        soma = 0
        for i in range(0, m.shape[0], 1):
            soma = soma + m[i, j]
        b.append(soma)
    return b
def somaL(m):
    # row sums of matrix m
    b = []
    for i in range(0, m.shape[0], 1):
        soma = 0
        for j in range(0, m.shape[1], 1):
            soma = soma + m[i, j]
        b.append(soma)
    return b
def matrizT(a, o, d):
    # a: column sums, o: row sums, d: distance matrix (formula kept as authored)
    T = np.zeros((d.shape[0], d.shape[1]))
    for i in range(0, d.shape[0], 1):
        for j in range(0, d.shape[1], 1):
            soma = 0
            for k in range(0, dimensao, 1):
                soma = soma + (a[k] * (1 / d[i, k]))
            Tc = (o[i]) * (a[i]) * ((1 / d[i, j]) ** alfa)
            T[i, j] = Tc / soma
    return T
# OUTPUT
T = matrizT(somaC(A), somaL(A), d)
somatorio = sum(sum(T))
print('%.4f' % somatorio)
| [
"[email protected]"
]
| |
7d2bda3c4063f35013dcfc03874bce7500ebf9a5 | 4f3d283fc8f07af65d857370294986dbc37c520c | /kata/done/masodfok2.py | 4a39751880ab6565e0832f3ad85bfe51d95b1aa1 | []
| no_license | csxeba/wiw | 69a36ed6fee4240d6ed545e4f4e6d60c46090921 | 7251eeaaa98424a95c5837bddd6979ddbf0dd1ec | refs/heads/master | 2020-06-28T11:46:21.993337 | 2017-02-16T21:20:25 | 2017-02-16T21:20:25 | 67,785,378 | 0 | 0 | null | 2016-09-13T09:52:33 | 2016-09-09T09:22:54 | Python | UTF-8 | Python | false | false | 965 | py | """
This is a documentation string at the beginning of our program.
This is where you usually describe what the program does.
This program defines a quadratic equation solver
function and solves a few quadratic equations.
Write a function that expects the coefficients a, b and c
of the quadratic equation as parameters, for the form
ax**2 + bx + c = 0.
-b +- sqrt(b**2 - 4ac)
----------------------
          2a
You can take square roots with the imported sqrt() function.
"""
from math import sqrt
def megoldo(a, b, c):
"""Másodfokú egyenlet megoldó.
Visszatér a másodfokú egyenlet listába rendezett két gyökével."""
pass
def main():
"""Megoldandó másodfokú egyenletek"""
egyenletek = [
[1, -3, -10],
[2, -9, 4],
[1, -3, -4],
[1, -7, 0],
[1, -2, 3],
[1, -3, 2],
[4, -11, 6]
]
# Oldd meg és írasd ki ciklussal!
if __name__ == '__main__':
    main() | [
"[email protected]"
]
| |
1436bed40ecd073c5238666b7406512170c8414c | d7cfe98faeb0fe1b4ce02d54d8bbedaca82764f7 | /1106_문제풀이/swea_5251_최소이동거리_solution(heap).py | 2182c4588a300d2baac9a7f0efef028ae351d25d | []
| no_license | Anseik/algorithm | 27cb5c8ec9692cf705a8cea1d60e079a7d78ef72 | 925404006b84178682206fbbb3b989dcf4c3dee9 | refs/heads/master | 2023-02-26T00:02:01.696624 | 2021-02-03T14:10:28 | 2021-02-03T14:10:28 | 301,753,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | import sys
import heapq
sys.stdin = open('swea_5251_최소이동거리_solution.txt')
def dijkstra_heap():
dist = [987654321] * (V + 1)
visited = [False] * (V + 1)
heap = []
dist[0] = 0
heapq.heappush(heap, (0, 0))
while heap:
w, v = heapq.heappop(heap)
if not visited[v]:
visited[v] = True
dist[v] = w
for i in range(V + 1):
if not visited[i] and dist[i] > dist[v] + adj[v][i]:
heapq.heappush(heap, (dist[v] + adj[v][i], i))
return dist[V]
T = int(input())
for tc in range(1, T + 1):
V, E = map(int, input().split())
adj = [[987654321] * (V + 1) for _ in range(V + 1)]
for i in range(E):
st, ed, w = map(int, input().split())
adj[st][ed] = w
ans = dijkstra_heap()
print('#{} {}'.format(tc, ans)) | [
"[email protected]"
]
| |
352f6e17c51d8f3ef0e4cc8edce87cce6667407d | bb981602d111b709efec6279c3fccc6ef9efcc13 | /blog/migrations/0001_initial.py | 35b8244eae51e2a60ba29dd8edcdd84583a02924 | []
| no_license | sompodsign/shampad_blog_pro | 3897e5f95b48341b4058a5e42bb1ea70f1f9866e | b031b950e778b1534f433a33b84b37e93186e9b2 | refs/heads/main | 2023-02-27T13:15:00.984530 | 2021-02-11T16:06:01 | 2021-02-11T16:06:01 | 337,414,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,548 | py | # Generated by Django 3.1.5 on 2021-01-24 04:04
import ckeditor.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import taggit.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('taggit', '0003_taggeditem_add_unique_index'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('subheading', models.CharField(max_length=300, null=True)),
('slug', models.SlugField(max_length=250, unique_for_date='publish')),
('body', ckeditor.fields.RichTextField(blank=True, null=True)),
('publish', models.DateTimeField(default=django.utils.timezone.now)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
],
options={
'ordering': ('-publish',),
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('email', models.EmailField(max_length=254)),
('body', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.post')),
],
options={
'ordering': ('created',),
},
),
]
| [
"[email protected]"
]
| |
b0b78586d69b34335c3a959605e75ab6c38817cf | 73758dde83d1a1823c103e1a4ba71e7c95168f71 | /nsd2004/devweb/mysite/mysite/settings.py | 58e918a61f92bb516161549dc5c59d8d67ec1e17 | []
| no_license | tonggh220/md_5_nsd_notes | 07ffdee7c23963a7a461f2a2340143b0e97bd9e1 | a58a021ad4c7fbdf7df327424dc518f4044c5116 | refs/heads/master | 2023-07-02T01:34:38.798929 | 2021-05-12T08:48:40 | 2021-05-12T08:48:40 | 393,885,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,240 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w)u!*(f(^*=k_^m&%cu8+%qyp)z($2!$r=nr68&_&xbllq&d*n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dj2004',
'USER': 'root',
'PASSWORD': 'tedu.cn',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
2d1650cd972e452861a4ceffa9d5cd93f683468b | c8c4f6c72ffc801cc24d69617c170c3ee093dc86 | /ReadMeCleaner.py | 8ae4a9379ea46720a301f31a7d8736eb1dde9e5f | []
| no_license | PasaOpasen/MathClasses | 831d85eab3c038841a40ae3e7b0896dcf6706993 | d11f124e09217fdc6deccfc59feb1a81378be1af | refs/heads/master | 2021-12-14T17:54:36.235687 | 2021-11-30T08:15:21 | 2021-11-30T08:15:21 | 237,803,522 | 8 | 3 | null | 2020-12-03T14:07:40 | 2020-02-02T16:53:44 | C# | UTF-8 | Python | false | false | 246 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 16:10:36 2020
@author: qtckp
"""
with open('README.md', 'r') as f:
t = f.readlines()
with open('README.md', 'w') as f:
for l in t:
f.write(l.lstrip() if l != '\n' else l)
| [
"[email protected]"
]
| |
827ef53c1ea908502f11b3dce5f71710fb9c9100 | 93bd129c0d189124bb690670b22c4a80edda95b2 | /pg/libs/log_lib.py | 41f888b4bebff25a45783e32903571945e0d7155 | []
| no_license | vdeandrade/32id-tomo | 0690564d263392f52c3d239bd7aec7a0bfc507e1 | ea0f5debf121648c1366c50f0a239ee938e6a32f | refs/heads/master | 2020-12-22T16:00:35.300527 | 2020-01-28T21:51:35 | 2020-01-28T21:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | '''
Log Lib for Sector 2-BM
'''
import logging
# Logging defines
__GREEN = "\033[92m"
__RED = '\033[91m'
__YELLOW = '\033[33m'
__ENDC = '\033[0m'
logger = None
info_extra={'endColor': __ENDC, 'color': __GREEN}
warn_extra={'endColor': __ENDC, 'color': __YELLOW}
error_extra={'endColor': __ENDC, 'color': __RED}
def info(msg):
global logger
global info_extra
logger.info(msg, extra=info_extra)
def error(msg):
global logger
global error_extra
logger.error(msg, extra=error_extra)
def warning(msg):
global logger
global warn_extra
logger.warning(msg, extra=warn_extra)
def setup_logger(log_name, stream_to_console=True):
global logger
global info_extra
global warn_extra
global error_extra
info_extra['logger_name'] = log_name
warn_extra['logger_name'] = log_name
error_extra['logger_name'] = log_name
logger = logging.getLogger(log_name)
fHandler = logging.FileHandler(log_name)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(color)s %(message)s %(endColor)s")
fHandler.setFormatter(formatter)
logger.addHandler(fHandler)
if stream_to_console:
ch = logging.StreamHandler()
ch.setFormatter(formatter)
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
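
# Minimal self-test sketch (not part of the original module): call
# setup_logger() once with a log file name (the name below is made up),
# then use the module-level info/warning/error helpers.
if __name__ == "__main__":
    setup_logger("log_lib_demo.log")
    info("informational message")
    warning("warning message")
    error("error message")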
| [
"[email protected]"
]
| |
54ddf4c157f62fca30b578f135e52359bccbe23c | a644ae249712bddb9cb1b8d2a75812c107736fe5 | /test/test2.py | 362c1e5a84349bcf063528ac0c493fc057941bfd | [
"MIT"
]
| permissive | linsalrob/SEED_Servers_Python | a834b7ce763e9d1e89cb76530e847f3fe6422df8 | a2d2aa8c64547e94c3d6d031ebba46b8f9ed5716 | refs/heads/master | 2021-06-02T17:40:30.301307 | 2020-02-04T20:37:01 | 2020-02-04T20:37:01 | 38,650,052 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
import sys
from servers.SAP import SAPserver
server=SAPserver()
genomeID = '83333.1'
sys.stderr.write("Genome: " + str(genomeID) + "\n")
prots = server.all_proteins( {"-id" : genomeID} )
print("protein length " + str(len(prots)))
| [
"[email protected]"
]
| |
96b61046248a3e72e93e236aec968705b8d8a09c | cc78de009a8e7805f9f6a852774e0384b11bfdcb | /testcase/common/basePage/basePage.py | e16a2bdc7ec581f43c1f2ee9d521c66c74831b5f | []
| no_license | williamzxl/app_test_many_devices | c1806e54c17a84f18a04c3808604633c2deba052 | dd5434018fadd11d5462903cafaafbb5b387c24a | refs/heads/master | 2020-03-29T21:20:51.239295 | 2019-03-05T03:13:56 | 2019-03-05T03:13:56 | 150,361,766 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,861 | py | from testcase.common.basePage.web_view import WebView
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
from utils.log import logger
class BasePage(WebView):
def __init__(self, page=None, browser_type=None):
if page:
self.driver = page.driver
else:
super(BasePage, self).__init__(browser_type=browser_type)
def get_driver(self):
return self.driver
def open(self, appium_url, desired_caps):
try:
logger.info("Open appium_url: {}".format(appium_url))
logger.info("desired_caps:{}".format(desired_caps))
self.get(appium_url, desired_caps)
except:
logger.warning("Cant open appium url:{}".format(appium_url))
raise ValueError("Connect appium failed!")
def find_element(self, *loc):
try:
WebDriverWait(self.driver, 30).until(EC.visibility_of_element_located(loc))
logger.info("Success return self.driver.find_element(*loc):{}".format(loc))
return self.driver.find_element(*loc)
except TimeoutException:
logger.error("In {} cant find {}".format(self, loc))
return False
def find_elements(self, *loc):
try:
WebDriverWait(self.driver, 30).until(EC.visibility_of_element_located(loc))
logger.info("Success return self.driver.find_element(*loc):{}".format(loc))
return self.driver.find_elements(*loc)
except TimeoutException:
# print("In {} cant find {}".format(self, loc))
logger.error("In {} cant find {}".format(self, loc))
return False
# def script(self, src):
# self.driver.execute_script(src)
def sendKeys(self, loc, value, clear_first=True, click_first=True):
try:
# loc = getattr(self, "_{}".format(loc))
if click_first:
# self.find_element(*loc).click()
loc.click()
if clear_first:
# self.find_element(*loc).clear()
loc.clear()
# self.find_element(*loc).send_keys(value)
loc.send_keys(value)
except AttributeError:
logger.error("{} page cant find {} element".format(self, loc))
def get_url(self):
return self.driver.current_url
def getEleText(self,ele):
return ele.text
def getEleSize(self, ele):
return ele.size
def getEleLocation(self, ele):
return ele.location
def is_selected(self, element):
element.is_selected()
def is_enabled(self, element):
element.is_enabled()
def is_displayed(self, element):
element.is_displayed()
def enter(self, element):
element.send_keys(Keys.RETURN)
def click(self, element):
element.click()
def submit(self):
pass
def getEleAttribute(self, element, attribute):
return element.get_attribute(attribute)
# def getAttribute(self, ele, name):
# return ele.get_attribute(name)
def getText(self, element):
try:
return element.text
except SyntaxError:
logger.error("No such element TEXT")
def getTitle(self):
return self.driver.title
def getCurrentUrl(self):
return self.driver.current_url
def get_contexts(self):
return self.driver.contexts()
def get_current_context(self):
return self.driver.current_context()
def get_context(self):
return self.driver.context()
def page_source(self):
return self.driver.page_source
def page_source_test(self):
return self.driver.page_source
if __name__ == "__main__":
test = BasePage()
test.open() | [
"[email protected]"
]
| |
986c55e0d84ee0bd44afcf5e2e73e30436d3f834 | a3746020cf091f433beb41bde1b62818b4de569b | /new_rule/ticket-rules/oracle/DML_SORT.py | 3010c2c185dc831b9fbf2b84a1610544000dec4e | []
| no_license | kk71/sqlaudit | 59bab5765a67f56f1dd2f3103812051c5acbbc49 | 747aaa02573a9c2b46a9e14415d27c0ab8e6158c | refs/heads/master | 2023-02-04T18:38:46.125746 | 2020-06-05T09:49:46 | 2020-06-05T09:49:46 | 323,559,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | import re
def code(rule, entries, **kwargs):
single_sql: dict = kwargs.get("single_sql")
sql_text: str = single_sql["sql_text_no_comment"]
dml_sort = re.compile("(\\s)?((update )|(delete )).*order by")
if dml_sort.search(sql_text):
return -rule.weight, []
return None, []
code_hole.append(code)
| [
"[email protected]"
]
| |
24d3b4732a400d5973fc072d2830077bbc9fe7ec | dd143f56fbceecf039d139e2bc07f27f4837be82 | /manage.py | b0b78f6ced3bddcde196e42c492dc4e98c6f29ec | []
| no_license | maxweaker/OVSMS-backend | 1468924b109bb8717f6337664c0601d2af18ea7c | 7372e40e62c6eaf0c9fd309a47b798c1795e6007 | refs/heads/master | 2023-06-05T11:46:48.472605 | 2021-06-18T14:48:49 | 2021-06-18T14:48:49 | 378,168,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'OVSMS.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
305b4d8a0ed416ed5c175894f2c49dbe30f16039 | c570dcfc3ec166f73719a81b02262bf2885b458b | /setup.py | efa6fdfa8b383ac34d7981c70b9a5883ca632879 | []
| no_license | thatch45/archinator | 9eb139a064b8dafe3a63ec9a27177a21b96357ef | 2e4d9874b0cd9fee68f90ebe4bf2c20dfb1fb220 | refs/heads/master | 2020-06-01T03:52:43.793091 | 2013-06-24T05:24:04 | 2013-06-24T05:24:04 | 10,711,450 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | #!/usr/bin/env python
from distutils.core import setup
setup(name='archinator',
version='0.5.0',
description='Virtual machine generator for ArchLinux',
author='Thomas S Hatch',
author_email='[email protected]',
url='https://github.com/thatch45/archinator',
packages=[
'archinator',
'archinator.utils',
],
scripts=['scripts/archinator'],
)
| [
"[email protected]"
]
| |
eaa85e885404219c0a831b463a0c456fffe1d7f0 | 4546398a18590e4e182629fb55d185547dd6df0a | /2023/problems/millifaersla/submissions/partially_accepted/strings_0.py | a370ff95a0790d6ce6228aa8fa0be95e244d3908 | []
| no_license | ForritunarkeppniFramhaldsskolanna/Keppnir | 352341fa97c6349af65b513c03171f3e706f7db2 | 65c8eb5358d8a49f956edf76c2d47b9372accc3c | refs/heads/master | 2023-04-28T15:33:36.396225 | 2023-04-23T15:00:15 | 2023-04-23T15:00:15 | 78,303,702 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | #!/usr/bin/python3
a = input()
b = input()
c = input()
if a < b and a < c:
print("Monnei")
elif b < a and b < c:
print("Fjee")
else:
print("Dolladollabilljoll")
| [
"[email protected]"
]
| |
18939fdae293b7a96059b4ed05b61fab9a65a3e3 | c4e97f2eb1081d8fad5e64872c3d6acf9a89d445 | /Solutions/0135_candy.py | af76e71034174d61806ca337a98996a7aa0af28e | []
| no_license | YoupengLi/leetcode-sorting | 0efb3f4d7269c76a3ed11caa3ab48c8ab65fea25 | 3d9e0ad2f6ed92ec969556f75d97c51ea4854719 | refs/heads/master | 2020-05-18T23:28:51.363862 | 2019-09-12T00:42:14 | 2019-09-12T00:42:14 | 184,712,501 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,312 | py | # -*- coding: utf-8 -*-
# @Time : 2019/7/18 0018 09:54
# @Author : Youpeng Li
# @Site :
# @File : 0135_candy.py
# @Software: PyCharm
'''
135. Candy
There are N children standing in a line. Each child is assigned a rating value.
You are giving candies to these children subjected to the following requirements:
Each child must have at least one candy.
Children with a higher rating get more candies than their neighbors.
What is the minimum candies you must give?
Example 1:
Input: [1,0,2]
Output: 5
Explanation: You can allocate to the first, second and third child with 2, 1, 2 candies respectively.
Example 2:
Input: [1,2,2]
Output: 4
Explanation: You can allocate to the first, second and third child with 1, 2, 1 candies respectively.
The third child gets 1 candy because it satisfies the above two conditions.
'''
class Solution:
def candy(self, ratings: 'List[int]') -> 'int':
if not ratings:
return 0
res = [1] * len(ratings)
lbase, rbase = 1, 1
for i in range(1, len(ratings)): # scan from left to right
lbase = lbase + 1 if ratings[i] > ratings[i - 1] else 1
res[i] = lbase
for i in range(len(ratings) - 2, -1, -1): # scan from right to left
rbase = rbase + 1 if ratings[i] > ratings[i + 1] else 1
res[i] = max(rbase, res[i])
return sum(res)
def candy_1(self, ratings: 'List[int]') -> 'int':
peak = down = up = 0
res = 1
for i in range(1, len(ratings)):
if ratings[i - 1] < ratings[i]:
up += 1
down = 0
peak = up
res += 1 + up
elif ratings[i - 1] == ratings[i]:
up = down = peak = 0
res += 1
else:
up = 0
down += 1
res += 1 + down + ((-1) if peak >= down else 0)
return res
if __name__ == "__main__":
a = Solution()
ratings = [1, 0, 2]
print(a.candy(ratings))
print(a.candy_1(ratings))
ratings = [1, 2, 2]
print(a.candy(ratings))
print(a.candy_1(ratings))
ratings = [1, 2, 3, 2, 1, 0]
print(a.candy(ratings))
print(a.candy_1(ratings)) | [
"[email protected]"
]
| |
fcfd0e41293518178f353971f8e706f2fb7b44c2 | 8bbeb7b5721a9dbf40caa47a96e6961ceabb0128 | /python3/212.Word Search II(单词搜索 II).py | b33e11ee9eccfd5d5831b32835b4be8c3ca57304 | [
"MIT"
]
| permissive | lishulongVI/leetcode | bb5b75642f69dfaec0c2ee3e06369c715125b1ba | 6731e128be0fd3c0bdfe885c1a409ac54b929597 | refs/heads/master | 2020-03-23T22:17:40.335970 | 2018-07-23T14:46:06 | 2018-07-23T14:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,685 | py | """
Given a 2D board and a list of words from the dictionary, find all words in the board.
Each word must be constructed from letters of sequentially adjacent cells, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once in a word.
Example:
Input:
words = ["oath","pea","eat","rain"] and board =
[
  ['o','a','a','n'],
  ['e','t','a','e'],
  ['i','h','k','r'],
  ['i','f','l','v']
]
Output: ["eat","oath"]
Note:
You may assume that all inputs consist of lowercase letters a-z.
Hints (from the Chinese version of the statement):
- You need to optimize the backtracking algorithm to pass the larger test cases. Can you stop backtracking earlier?
- If the current prefix is not a prefix of any word in the list, backtracking can stop immediately. What data structure supports that check efficiently? Would a hash table work, and why? What about a trie (prefix tree)? To learn how to implement a basic one, see "Implement Trie (Prefix Tree)" (/problems/implement-trie-prefix-tree/description/).
"""
class Solution:
def findWords(self, board, words):
"""
:type board: List[List[str]]
:type words: List[str]
:rtype: List[str]
"""
| [
"[email protected]"
]
| |
a1c525970930a33a6c0f1bf1920e3c29220e62b8 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/M/mgrollins/eu_location_scraper.py | f3f5904113f77d083f6938f71bc20e35e5486059 | []
| no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | import scraperwiki
import lxml.html
from lxml import etree
html = scraperwiki.scrape("http://www.edd.ca.gov/jobs_and_training/Experience_Unlimited_Local_Information.htm")
root = lxml.html.fromstring(html)
locations = []
#for lel in root.cssselect("div.main_content"):
print "in lel loop"
for el in root.cssselect("div.content_left_column h2"):
if el.text_content() != "More Information":
locations.append(el.text_content())
print "in el loop"
# for lel in el.cssselect("*"):
# print lel.text_content()
# break
# place holder
for loc in locations:
print loc +",",
# scraperwiki.sqlite.save(unique_keys = ['locations'], data = locations)
# print lxml.html.tostring(el) | [
"[email protected]"
]
| |
566aba3d4ea799a084e1bf8b391feb92af2aee30 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /ec2_write_f/security-group-egres_revoke.py | 36344b5fae7779de59a4902f9442cdb2b9e42c4b | []
| no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
    revoke-security-group-egress : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/revoke-security-group-egress.html
"""
write_parameter("ec2", "revoke-security-group-egress") | [
"[email protected]"
]
| |
825f13b7aa37b7b3cd56b91a7ee8f3c5a8daf35d | 1a166165ab8287d01cbb377a13efdb5eff5dfef0 | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/operations/_connection_monitors_operations.py | 7af61de42b8c2663b9b4e05b55533dcf559929a0 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
]
| permissive | manoj0806/azure-sdk-for-python | 7a14b202ff80f528abd068bf50334e91001a9686 | aab999792db1132232b2f297c76800590a901142 | refs/heads/master | 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 | MIT | 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null | UTF-8 | Python | false | false | 45,729 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations(object):
"""ConnectionMonitorsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectionMonitor')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionMonitorResult"]
"""Create or update a connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters that define the operation to create a connection monitor.
:type parameters: ~azure.mgmt.network.v2020_06_01.models.ConnectionMonitor
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionMonitorResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_06_01.models.ConnectionMonitorResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
"""Gets a connection monitor by name.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
"""Update tags of the specified connection monitor.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters supplied to update connection monitor tags.
:type parameters: ~azure.mgmt.network.v2020_06_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _stop_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def begin_stop(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stops the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def _start_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def begin_start(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Starts the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def _query_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorQueryResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._query_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def begin_query(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionMonitorQueryResult"]
"""Query a snapshot of the most recent connection states.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name given to the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_06_01.models.ConnectionMonitorQueryResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._query_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ConnectionMonitorListResult"]
"""Lists all connection monitors for the specified Network Watcher.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.ConnectionMonitorListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'} # type: ignore
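# A usage sketch (an assumption for illustration, not part of the generated code):
# this operations class is normally reached through NetworkManagementClient rather
# than instantiated directly, and the resource names below are placeholders.
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     result = client.connection_monitors.begin_query(
#         "<resource-group>", "<network-watcher>", "<connection-monitor>").result()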
| [
"[email protected]"
]
| |
29e1406fefde1df7fe0c61d695ea159a957875f9 | 81579ecd0678d652bbb57ff97529631fcfb74b12 | /corehq/ex-submodules/dimagi/utils/tests/test_rate_limit.py | 184a6cec42531b4957a7d54581d3e65eb21f137c | [
"BSD-3-Clause"
]
| permissive | dungeonmaster51/commcare-hq | 64fece73671b03c1bca48cb9d1a58764d92796ea | 1c70ce416564efa496fb4ef6e9130c188aea0f40 | refs/heads/master | 2022-12-03T21:50:26.035495 | 2020-08-11T07:34:59 | 2020-08-11T07:34:59 | 279,546,551 | 1 | 0 | BSD-3-Clause | 2020-07-31T06:13:03 | 2020-07-14T09:51:32 | Python | UTF-8 | Python | false | false | 1,707 | py | from dimagi.utils.rate_limit import rate_limit, DomainRateLimiter
from django.test import SimpleTestCase
# import the datetime module and not datetime.datetime:
# "datetime" has to be the datetime module since the tests/__init__.py file
# just imports * from all test files and the json_format_datetime doctest
# expects datetime to be the datetime module
import datetime
class RateLimitTestCase(SimpleTestCase):
def test_rate_limit(self):
start = datetime.datetime.utcnow()
rate_limit_count = 0
iteration_count = 0
while (datetime.datetime.utcnow() - start) < datetime.timedelta(seconds=5):
# Only allow 10 actions every 3 seconds in an 5 second period of time
if rate_limit('rate-limit-test', actions_allowed=10, how_often=3):
rate_limit_count += 1
iteration_count += 1
self.assertEqual(rate_limit_count, 20)
self.assertGreater(iteration_count, 20)
def test_domain_rate_limit(self):
rate_limiter = DomainRateLimiter('rate-limit-domain-', 10, 3)
domains = ('d1', 'd2')
domain_counts = {domain: 0 for domain in domains}
start = datetime.datetime.utcnow()
iteration_count = 0
while (datetime.datetime.utcnow() - start) < datetime.timedelta(seconds=5):
# Only allow 10 actions every 3 seconds in an 5 second period of time
for domain in domains:
if rate_limiter.can_perform_action(domain):
domain_counts[domain] += 1
iteration_count += 1
for domain in domains:
self.assertEqual(domain_counts[domain], 20)
self.assertGreater(iteration_count, 20)
| [
"[email protected]"
]
| |
96316fd59416a2a1748dfc80c77c04bf5370fef9 | b415e5f6f8da01961853a14fad31957a51500847 | /handle_input.py | 7f0c5dd670ee179cafbba09a4b01598859dc62e3 | []
| no_license | Zireael07/veins-of-the-earth-bearlib | 4ec1f6c477a503bb5549a3871023caafe345887f | 2b6c4ec6140a1e94551990e4de096a27d5b3d5f6 | refs/heads/master | 2020-04-05T14:05:46.445845 | 2018-07-01T19:01:06 | 2018-07-01T19:01:06 | 94,756,259 | 15 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,660 | py | from bearlibterminal import terminal as blt
import constants
from game_states import GameStates
import gui_menus
from renderer import pix_to_iso
from map_common import map_check_for_items, find_unexplored_closest, Directions, find_exit_for_pos
from tile_lookups import TileTypes, get_index
import game_vars
#def initialize_player(player):
# global PLAYER
# PLAYER = player
# nowhere else to put it?
def get_top_log_string_index():
# msg_num = -constants.NUM_MESSAGES
check = -4
#print("Checking " + str(check))
if not game_vars.message_history:
return None
if len(game_vars.message_history) < 4:
check = -len(game_vars.message_history)
if game_vars.message_history[check]:
return check
def click_on_msg_log(m_y):
log_h = blt.state(blt.TK_HEIGHT) - (constants.NUM_MESSAGES)
# which line?
if m_y == log_h:
# print("Clicked over line #1")
check = get_top_log_string_index()
if check is not None:
print(game_vars.message_history[check])
gui_menus.display_dmg_window(check)
elif m_y == log_h + 1:
check = get_top_log_string_index()
if check is not None:
print(game_vars.message_history[check + 1])
gui_menus.display_dmg_window(check + 1)
elif m_y == log_h + 2:
check = get_top_log_string_index()
if check is not None:
print(game_vars.message_history[check + 2])
gui_menus.display_dmg_window(check + 2)
elif m_y == log_h + 3:
check = get_top_log_string_index()
if check is not None:
print(game_vars.message_history[check + 3])
gui_menus.display_dmg_window(check + 3)
# player input
def game_handle_mouse_input(key):
# left key
if key == blt.TK_MOUSE_LEFT:
pix_x = blt.state(blt.TK_MOUSE_PIXEL_X)
pix_y = blt.state(blt.TK_MOUSE_PIXEL_Y)
m_x = blt.state(blt.TK_MOUSE_X)
m_y = blt.state(blt.TK_MOUSE_Y)
# did we click over the message log?
if m_x < 40:
click_on_msg_log(m_y)
# press over map
else:
# fake an offset of camera offset * cell width
pix_x = pix_x - game_vars.camera.offset[0] * blt.state(blt.TK_CELL_WIDTH)
# fake an offset of camera offset * cell height
pix_y = pix_y - game_vars.camera.offset[1] * blt.state(blt.TK_CELL_HEIGHT)
click_x, click_y = pix_to_iso(pix_x, pix_y)
if click_x >= 0 and click_x < len(game_vars.level.current_map):
if click_y >= 0 and click_y < len(game_vars.level.current_map[0]):
print "Clicked on tile " + str(click_x) + " " + str(click_y)
if click_x != game_vars.player.x or click_y != game_vars.player.y:
moved = game_vars.player.creature.move_towards(click_x, click_y, game_vars.level.current_map)
if (moved[0]):
game_vars.camera.move(moved[1], moved[2])
game_vars.fov_recompute = True
return "player-moved"
# pressed right mouse button
if key == blt.TK_MOUSE_RIGHT:
pix_x = blt.state(blt.TK_MOUSE_PIXEL_X)
pix_y = blt.state(blt.TK_MOUSE_PIXEL_Y)
print "Right clicked on tile " + str(pix_to_iso(pix_x, pix_y))
return "mouse_click"
# these directions mean that up goes up on the displayed map
KEY_TO_DIR = {
blt.TK_UP: Directions.NORTHWEST, blt.TK_DOWN: Directions.SOUTHEAST,
blt.TK_LEFT: Directions.SOUTHWEST, blt.TK_RIGHT: Directions.NORTHEAST,
blt.TK_HOME: Directions.WEST, blt.TK_PAGEUP: Directions.NORTH,
blt.TK_PAGEDOWN: Directions.SOUTH, blt.TK_END: Directions.EAST,
blt.TK_PERIOD: Directions.CENTER,
# numpad
blt.TK_KP_8: Directions.NORTHWEST, blt.TK_KP_2: Directions.SOUTHEAST,
blt.TK_KP_4: Directions.SOUTHWEST, blt.TK_KP_6: Directions.NORTHEAST,
blt.TK_KP_7: Directions.WEST, blt.TK_KP_9: Directions.NORTH,
blt.TK_KP_1: Directions.SOUTH, blt.TK_KP_3: Directions.EAST,
blt.TK_KP_ENTER: Directions.CENTER
}
KEY_TO_DIR_VI = {
blt.TK_K: Directions.NORTHWEST, blt.TK_J: Directions.SOUTHEAST,
blt.TK_H: Directions.SOUTHWEST, blt.TK_L: Directions.NORTHEAST,
blt.TK_Y: Directions.WEST, blt.TK_U: Directions.NORTH,
blt.TK_B: Directions.SOUTH, blt.TK_N: Directions.EAST,
blt.TK_PERIOD: Directions.CENTER,
}
def get_up_key():
# no way to convert the integer back to string...
#src = KEY_TO_DIR if not constants.VI_KEYS else KEY_TO_DIR_VI
#return str(src.keys()[0])
if constants.VI_KEYS:
return "K"
else:
return "UP/KP 8"
def game_key_move(key):
src = KEY_TO_DIR if not constants.VI_KEYS else KEY_TO_DIR_VI
if src[key] != Directions.CENTER and game_vars.player.creature.move(src[key][0], src[key][1], game_vars.level.current_map):
game_vars.camera.move(src[key][0], src[key][1])
game_vars.fov_recompute = True
# clear move queue
game_vars.player.creature.move_queue = []
# switch off a-e
game_vars.player.creature.player.autoexplore = False
elif src[key] == Directions.CENTER:
#print("Pass turn")
# clear move queue
game_vars.player.creature.move_queue = []
# switch off a-e
game_vars.player.creature.player.autoexplore = False
return "player-moved"
def game_player_turn_input(key):
if blt.check(blt.TK_SHIFT) and key == blt.TK_PERIOD:
if game_vars.level.current_map[game_vars.player.x][game_vars.player.y] == get_index(TileTypes.STAIRS): # .stairs:
if hasattr(game_vars.level, 'exits'):
dest = find_exit_for_pos(game_vars.player.x, game_vars.player.y)
game_vars.game_obj.next_level(dest)
else:
game_vars.game_obj.next_level()
return "redraw"
#return "player-moved"L
if not blt.check(blt.TK_SHIFT) and not constants.VI_KEYS and key in KEY_TO_DIR or constants.VI_KEYS and key in KEY_TO_DIR_VI:
return game_key_move(key)
if blt.check(blt.TK_SHIFT) and key == blt.TK_COMMA:
if game_vars.level.current_map[game_vars.player.x][game_vars.player.y] == get_index(TileTypes.STAIRS_UP):
game_vars.game_obj.previous_level(game_vars.level.gen_type)
return "redraw"
#return "player-moved"
# items
if key == blt.TK_G:
ents = map_check_for_items(game_vars.player.x, game_vars.player.y, game_vars.level.current_entities)
if ents is not None:
if len(ents) > 1:
chosen_item = gui_menus.pickup_menu(ents)
if chosen_item is not None:
chosen_item.item.pick_up(game_vars.player)
return "player-moved"
else:
ents[0].item.pick_up(game_vars.player)
return "player-moved"
if key == blt.TK_D:
chosen_item = gui_menus.drop_menu(game_vars.player)
if chosen_item is not None:
game_vars.player.container.inventory[chosen_item].item.drop(game_vars.player)
return "player-moved"
if key == blt.TK_I:
chosen_item = gui_menus.inventory_menu("Inventory", game_vars.player)
if chosen_item is not None:
if chosen_item.item:
action = gui_menus.item_actions_menu(chosen_item)
if action is None:
return
# actions
if action == 0:
chosen_item.item.use(game_vars.player)
return "player-moved"
elif action == 1:
chosen_item.item.drop(game_vars.player)
return "player-moved"
if key == blt.TK_C:
gui_menus.character_sheet_menu("Character sheet", game_vars.player)
if key == blt.TK_R:
game_vars.player.creature.player.rest_start(30)
return "player-moved"
# # testing
# if key == blt.TK_M:
# if game_vars.player.creature.player.autoexplore:
#
# # do we have a queue?
# if len(game_vars.player.creature.player.move_queue) > 1:
# print("We have a queue")
# moved = game_vars.player.creature.player.moves_from_queue()
#
# if (moved[0]):
# game_vars.camera.move(moved[1], moved[2])
# game_vars.fov_recompute = True
#
# return "player-moved"
#
# else:
# print("No queue!")
# else:
# print("Not autoexploring")
if key == blt.TK_E:
# toggle
if not game_vars.player.creature.player.autoexplore:
game_vars.player.creature.player.autoexplore = True
else:
game_vars.player.creature.player.autoexplore = False
if game_vars.player.creature.player.autoexplore:
# do we have a queue?
if len(game_vars.player.creature.move_queue) > 1:
print("We have a queue")
moved = game_vars.player.creature.moves_from_queue()
else:
x, y = find_unexplored_closest(game_vars.player.x, game_vars.player.y, game_vars.level.current_map,
game_vars.level.current_explored)
print("Closest unexplored is " + str(x) + " " + str(y))
game_vars.player.creature.move_towards_path_queue(x, y, game_vars.level.current_map)
moved = game_vars.player.creature.moves_from_queue()
if moved is not None and moved[0]:
game_vars.camera.move(moved[1], moved[2])
game_vars.fov_recompute = True
return "player-moved"
# test
if key == blt.TK_M:
return "redraw"
def game_handle_keys(key):
#key = blt.read()
if key in (blt.TK_ESCAPE, blt.TK_CLOSE):
return "QUIT"
if game_vars.game_state == GameStates.MAIN_MENU:
if key not in (blt.TK_S, blt.TK_L):
return "QUIT"
# don't handle keys at all if resting
if not game_vars.player.creature.player.resting:
if not constants.VI_KEYS and key == blt.TK_L or constants.VI_KEYS and blt.check(blt.TK_SHIFT) and key == blt.TK_M:
gui_menus.log_menu("Log history", 0, 26)
if blt.check(blt.TK_SHIFT) and key == blt.TK_SLASH:
gui_menus.help_menu()
# Debugging
if blt.check(blt.TK_SHIFT) and key == blt.TK_GRAVE:
# print("Debug mode on")
# constants.DEBUG = True
return gui_menus.debug_menu(game_vars.player)
# Toggle labels
if key == blt.TK_TAB:
#print("Toggle labels")
game_vars.labels = not game_vars.labels
print("Labels: " + " " + str(game_vars.labels))
blt.layer(4)
            blt.clear_area(0, 0, blt.state(blt.TK_WIDTH), blt.state(blt.TK_HEIGHT))
if game_vars.labels:
return "redraw"
if key == blt.TK_MOUSE_LEFT or key == blt.TK_MOUSE_RIGHT:
#print("Mouse input")
return game_handle_mouse_input(key)
if game_vars.game_state == GameStates.PLAYER_TURN:
#print (game_player_turn_input(key))
return game_player_turn_input(key)
return "no-action"
global ACT
ACT = None
def get_fake_action():
return ACT
def fake_action(action):
global ACT
ACT = action
def reset_fake_action():
global ACT
ACT = None | [
"[email protected]"
]
| |
555fc6f9037745f8d467cab753ff6fb8e6cb83c3 | 12f43487042025b5d27a1fba104bf1a7ce1f8cee | /src/single_sided_node_v1.py | f5027365074a79801182b0c123a7d54d0e967595 | []
| no_license | goerz-research/trajoct | e6c22a591fea98c3e287d135265a532001423fa9 | 601ef68465dff77552838c38bcbdfef510325289 | refs/heads/master | 2021-09-16T10:50:07.023190 | 2018-06-19T18:24:27 | 2018-06-19T18:24:27 | 79,431,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,161 | py | """Description of nodes consisting of a single-sided cavity with an atom
inside"""
import sympy
from sympy import symbols, sqrt
from qnet.algebra.hilbert_space_algebra import LocalSpace
from qnet.algebra.operator_algebra import Destroy, LocalSigma
from qnet.algebra.circuit_algebra import SLH, identity_matrix


def dagger(op):
    return op.adjoint()


def syms_ops(node_index, n_cavity):
    """Define symbols and operators for a single node, required to write the
    SLH for a single node"""
    HilAtom = LocalSpace('q%d' % int(node_index), basis=('g', 'e'),
                         order_index=(2*node_index))
    HilCavity = LocalSpace('c%d' % int(node_index), dimension=n_cavity,
                           order_index=(2*node_index+1))
    Sym = {}
    Sym['Delta'] = symbols(r'Delta_%s' % node_index, real=True)
    Sym['g'] = symbols(r'g_%s' % node_index, positive=True)
    Sym['Omega'] = symbols(r'Omega_%s' % node_index)
    Sym['I'] = sympy.I
    Sym['kappa'] = symbols(r'kappa', positive=True)
    Op = {}
    Op['a'] = Destroy(hs=HilCavity)
    Op['|g><g|'] = LocalSigma('g', 'g', hs=HilAtom)
    Op['|e><e|'] = LocalSigma('e', 'e', hs=HilAtom)
    Op['|e><g|'] = LocalSigma('e', 'g', hs=HilAtom)
    return Sym, Op
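
# Editor's note (illustrative, not in the original source): the two dicts
# returned above are keyed by short strings, e.g.
#
#     Sym, Op = syms_ops(0, n_cavity=3)
#     Sym['Delta']     # sympy symbol Delta_0 (atom-cavity detuning)
#     Op['a']          # cavity annihilation operator on the cavity space
#     Op['|e><g|']     # qubit raising operator on the atom space
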

def node_hamiltonian(Sym, Op):
    """Symbolic Hamiltonian for a single node, in the RWA"""
    # Symbols
    Δ, g, Ω, I = (Sym['Delta'], Sym['g'], Sym['Omega'], Sym['I'])
    δ = g**2 / Δ
    # Cavity operators
    Op_a = Op['a']; Op_a_dag = dagger(Op_a); Op_n = Op_a_dag * Op_a
    # Qubit operators
    Op_gg = Op['|g><g|']; Op_eg = Op['|e><g|']; Op_ge = dagger(Op_eg)
    # Hamiltonian
    H = -δ * Op_n + (g**2/Δ) * Op_n * Op_gg \
        -I * (g / (2*Δ)) * Ω * (Op_eg*Op_a - Op_ge*Op_a_dag)
    return H
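
# Editor's note (illustrative, not in the original source): in standard notation
# the expression above is the effective dispersive Hamiltonian
#
#     H = -delta * a^dag a + (g^2/Delta) * a^dag a |g><g|
#         - i (g/(2*Delta)) * Omega * (|e><g| a - |g><e| a^dag),  with delta = g^2/Delta,
#
# i.e. a qubit-state-dependent cavity shift plus a cavity-mediated drive term.
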

def node_slh(node_index, n_cavity):
    """SLH description for a single node with the given `node_index` (which
    will become the subscript in all symbols) and `n_cavity` number of levels
    for the cavity
    """
    Sym, Op = syms_ops(node_index, n_cavity)
    S = identity_matrix(1)
    κ = Sym['kappa']
    L = [sqrt(2 * κ) * Op['a'], ]
    H = node_hamiltonian(Sym, Op)
    return SLH(S, L, H)
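
# --- Editor's illustrative usage sketch (not part of the original module) ---
# The node index and cavity dimension below are arbitrary example values.
def _example_usage():
    slh = node_slh(node_index=0, n_cavity=3)
    print(slh)  # SLH triple: identity scattering, L = sqrt(2*kappa)*a, H as above
    return slh
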
| [
"[email protected]"
]
| |
8f7e338e2a4ee08be8821b94e9b121b3d4183900 | 1a59a9076c1e9f1eb98e24ff41a4c1c95e2b353e | /xcp2k/classes/_program_run_info48.py | ad7fd8d53c9700a84b6260b359b618ea1c560e29 | []
| no_license | Roolthasiva/xcp2k | 66b2f30ebeae1a946b81f71d22f97ea4076e11dc | fc3b5885503c6f6dc549efeb4f89f61c8b6b8242 | refs/heads/master | 2022-12-23T06:03:14.033521 | 2020-10-07T08:01:48 | 2020-10-07T08:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._each400 import _each400


class _program_run_info48(InputSection):

    def __init__(self):
        InputSection.__init__(self)
        self.Section_parameters = None
        self.Add_last = None
        self.Common_iteration_levels = None
        self.Filename = None
        self.Log_print_key = None
        self.EACH = _each400()
        self._name = "PROGRAM_RUN_INFO"
        self._keywords = {'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Filename': 'FILENAME', 'Log_print_key': 'LOG_PRINT_KEY'}
        self._subsections = {'EACH': 'EACH'}
        self._attributes = ['Section_parameters']
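
# --- Editor's illustrative sketch (not part of the generated class) ---
# A typical use is to instantiate the section and set the keyword attributes it
# declares above; the concrete values here are placeholders, not values taken
# from this repository.
def _example_usage():
    section = _program_run_info48()
    section.Section_parameters = "MEDIUM"
    section.Filename = "run_info"
    return section
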
| [
"[email protected]"
]
| |
ecf84eae3c1133e0064c0034147e1be38ac43df8 | 3e24611b7315b5ad588b2128570f1341b9c968e8 | /pacbiolib/pacbio/pythonpkgs/pbfalcon/lib/python2.7/site-packages/pbfalcon/cli/hgap_run.py | 39e6bd872b1c9ab4cffaeaca7d0be2a224123c6d | [
"BSD-2-Clause"
]
| permissive | bioCKO/lpp_Script | dc327be88c7d12243e25557f7da68d963917aa90 | 0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2 | refs/heads/master | 2022-02-27T12:35:05.979231 | 2019-08-27T05:56:33 | 2019-08-27T05:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | from falcon_polish.pypeflow import hgap
import argparse
import sys


def main(argv=sys.argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--logging',
                        help='.ini or .json config file for Python logging module')
    parser.add_argument('config',
                        help='.ini or .json of HGAP config. Available sections: "general", "hgap", "falcon", "pbsmrtpipe", "blasr", "quiver", ...')
    args = parser.parse_args(argv[1:])
    return hgap.run(args.config, args.logging)


if __name__ == "__main__":
    main(sys.argv)
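
# Editor's note (illustrative, not part of the original file): the module is a
# command-line entry point; a typical invocation would look like
#
#     python -m pbfalcon.cli.hgap_run --logging logging.ini hgap_cfg.json
#
# where both file names are placeholders, not paths shipped with this package.
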
| [
"[email protected]"
]
|