blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 3–616) | content_id (stringlengths 40–40) | detected_licenses (sequencelengths 0–112) | license_type (stringclasses 2 values) | repo_name (stringlengths 5–115) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses 777 values) | visit_date (timestamp[us] 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us] 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us] 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64 4.92k – 681M, nullable ⌀) | star_events_count (int64 0 – 209k) | fork_events_count (int64 0 – 110k) | gha_license_id (stringclasses 22 values) | gha_event_created_at (timestamp[us] 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us] 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable ⌀) | gha_language (stringclasses 149 values) | src_encoding (stringclasses 26 values) | language (stringclasses 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 3 – 10.2M) | extension (stringclasses 188 values) | content (stringlengths 3 – 10.2M) | authors (sequencelengths 1–1) | author_id (stringlengths 1–132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2d2699072eae36c651fe088d627f69f90b657d58 | b39ec77a8f5a5779edcecf5a09c39224472fd139 | /Clase03/contar.py | 0b60d0a7381ff2765efd0891e4b3ce879ffe2a47 | [] | no_license | GonzaloMonteodorisio/ejercicios-python-unsam | 76b6288491ccba8f44b819c26bed4811268e995e | 37ba16197107717a4c582eb552175e1c981c286b | refs/heads/main | 2023-07-28T07:18:10.178029 | 2021-09-15T05:42:46 | 2021-09-15T05:42:46 | 406,627,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | for n in range(10,0,-1):
print(n, end=' ') | [
"[email protected]"
] | |
3c70c973d79447bece9afe2d49c5fd583a4173dd | 4bfe4afd1b1e11f9a03d8e3640aa297c875c076d | /demos/basic.py | 9a86954581726ae9f13bad67294d6355e90d696a | [] | no_license | pankajti/capstone | 81cdd2187e71e8d1bf327579b574ea7cf91a7e76 | af57a52d34dbcdd40e8e81f1d72c142263a98893 | refs/heads/master | 2021-03-02T09:49:51.054153 | 2020-07-09T02:28:58 | 2020-07-09T02:28:58 | 245,857,468 | 0 | 0 | null | 2020-03-22T00:54:01 | 2020-03-08T17:26:43 | null | UTF-8 | Python | false | false | 240 | py | from tensorflow.keras.layers import Dense,SimpleRNN
from tensorflow.keras import Sequential
import numpy as np
from tensorflow.keras.utils import plot_model
model = Sequential()
# an input shape is assumed here (hypothetical value); without it the Sequential
# model is never built, and plot_model raises before it can draw the graph
model.add(Dense(2, input_shape=(4,)))
model.add(Dense(1))
plot_model(model)
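# note: plot_model writes model.png by default and needs pydot + graphviz installed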
| [
"[email protected]"
] | |
4faba1910def77457e265813a6749d9fcdc2c9fa | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /ec2_write_3/managed-prefix-list_create.py | a37a54b7d58925db27ffcd48c98d760451977f82 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_three_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/create-managed-prefix-list.html
if __name__ == '__main__':
"""
delete-managed-prefix-list : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/delete-managed-prefix-list.html
describe-managed-prefix-lists : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-managed-prefix-lists.html
modify-managed-prefix-list : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-managed-prefix-list.html
"""
parameter_display_string = """
# prefix-list-name : A name for the prefix list.
Constraints: Up to 255 characters in length. The name cannot start with com.amazonaws .
# max-entries : The maximum number of entries for the prefix list.
# address-family : The IP address type.
Valid Values: IPv4 | IPv6
"""
add_option_dict = {}
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
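    # ex: plausible inputs (hypothetical values): prefix-list-name="my-list", max-entries=10, address-family="IPv4"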
write_three_parameter("ec2", "create-managed-prefix-list", "prefix-list-name", "max-entries", "address-family", add_option_dict)
| [
"[email protected]"
] | |
5c80ed9e14391ad32e4cc6fd9fcae8dce388c672 | 479518429066a4200b0c9ffbc42f22620dee1749 | /app.py | 5074f7904d2af983e17faf125c1a1f1f6874b9a4 | [] | no_license | nikhilkumarsingh/nitdhack | d2b4871c2aa3ef461c409a2f75e4f346759f1797 | 633ddf770c19fb8b0dd66479bc8e865e36181ffa | refs/heads/master | 2021-01-19T21:33:27.880021 | 2017-04-18T23:43:06 | 2017-04-18T23:43:06 | 88,665,337 | 0 | 1 | null | 2018-10-03T05:33:57 | 2017-04-18T19:59:40 | JavaScript | UTF-8 | Python | false | false | 1,291 | py | import flask
import requests

app = flask.Flask(__name__, static_folder='static')
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
@app.route('/')
def home():
return flask.render_template('index.html')
def NearbySearch(lat,lng,keyword,radius=1000):
key="AIzaSyApuFoKxVMRQ2einlsA0rkx2S4WJjJIh34"
url="https://maps.googleapis.com/maps/api/place/nearbysearch/json?"
url+="location=%f,%f&" % (lat,lng)
url+="radius=%i&" % radius
url+="type=%s&" % keyword
url+="key=%s" % key
response=requests.get(url)
json_dict=response.json()
res=json_dict['results']
info_pack=[]
for x in res:
placeid = x['place_id']
url = "https://maps.googleapis.com/maps/api/place/details/json?placeid={}&key={}".format(placeid,key)
r = requests.get(url).json()['result']
info = {}
info['name'] = r['name']
info['lat'] = r['geometry']['location']['lat']
info['lng'] = r['geometry']['location']['lng']
info_pack.append(info)
return info_pack
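# Shape of the list returned above (illustrative values):
#   [{'name': 'Some Clinic', 'lat': 28.01, 'lng': 76.02}, ...]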
@app.route('/query', methods = ['POST'])
def query():
if flask.request.method == 'POST':
# lat,lang =
lat, lang = 28,76
data = {'locations':NearbySearch(lat,lng,'doctor')}
print(flask.request.form['query'])
return data
if __name__ == "__main__":
app.run(debug = True, port=5003)
| [
"[email protected]"
] | |
48201b6182773eb907fb42c0093c1f0bf47efc96 | 853c189602a667990eda858db98d163fb597caa1 | /tfx/orchestration/experimental/core/constants.py | e5a5208afa70ef912a346dc02d4fe9ccce962866 | [
"Apache-2.0"
] | permissive | swap-10/tfx | 9bef96fc592810ed2d7dfa5dd60044c9ac481e02 | 8e80ce2486b4d7b219dcff906d6930e62c5fdd45 | refs/heads/master | 2023-07-15T22:54:18.642120 | 2021-09-06T06:17:48 | 2021-09-06T06:17:48 | 402,296,955 | 0 | 0 | Apache-2.0 | 2021-09-02T05:49:03 | 2021-09-02T05:09:23 | null | UTF-8 | Python | false | false | 823 | py | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants shared across modules."""
EXECUTION_ERROR_MSG_KEY = '__execution_error_msg__'
IMPORTER_NODE_TYPE = 'tfx.dsl.components.common.importer.Importer'
RESOLVER_NODE_TYPE = 'tfx.dsl.components.common.resolver.Resolver'
| [
"[email protected]"
] | |
1186138ee1bd98ce6cc3c24b6d4b5d7158920d79 | f81099738d3ab7d4a4773a04ed9e36e493632590 | /angelos-portfolio/test/test_domain_update.py | 2ccd8c81f1a7ea5f7e2d64656a9b8ccd5a5df49a | [
"MIT"
] | permissive | kristoffer-paulsson/angelos | eff35753e4d7e4465d2aadac39265f206b09fcf9 | d789f47766fe3a63a6752b92e4ea955f420dbaf9 | refs/heads/master | 2022-05-05T15:16:59.340527 | 2022-03-27T16:05:51 | 2022-03-27T16:05:51 | 142,691,235 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | #
# Copyright (c) 2018-2020 by Kristoffer Paulsson <[email protected]>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
"""Security tests putting the policies to the test."""
from unittest import TestCase
from angelos.common.policy import evaluate
from angelos.lib.policy.types import PersonData
from angelos.portfolio.domain.create import CreateDomain
from angelos.portfolio.domain.update import UpdateDomain
from angelos.portfolio.entity.create import CreatePersonEntity
from test.fixture.generate import Generate
class TestUpdateDomain(TestCase):
def test_perform(self):
data = PersonData(**Generate.person_data()[0])
portfolio = CreatePersonEntity().perform(data)
CreateDomain().perform(portfolio)
self.assertIsNotNone(portfolio.domain)
with evaluate("Domain:Update") as report:
domain = UpdateDomain().perform(portfolio)
self.assertIs(domain, portfolio.domain)
self.assertTrue(report) | [
"[email protected]"
] | |
482b54447b3f7cd5d3fb519221920951b5b68ed0 | d9764a604c85c134ff217747d243eac8fe28e792 | /src/demo2.py | e3c0801f18c91206c2e18df08c2caacf8e0007bf | [] | no_license | afcarl/INF421-project | 5a0130c3ba6e0c767323001048d3f191379dbc6e | dc6eef684f6d277b6a9bbbc227a9e20a1525e115 | refs/heads/master | 2020-03-19T21:21:53.465240 | 2017-08-14T13:39:52 | 2017-08-14T13:39:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | #!/usr/bin/env python3
"""
Special notes :
This implementation supports MULTIPLE shortest path.
(except for the number_of_possible_locations_with_mindist_simple function)
"""
import random
from Graph import Graph
from algo import *
from unused import *
from Dijkstra import *
from util import timeit
from reach import reach
####################
data = '/Users/louisabraham/Downloads/RoadNetworks/data/france.in'
logging = '/Users/louisabraham/Downloads/RoadNetworks/vis/points.js'
hour = 3600000
# We can control the display of chronos using timeit.activated
timeit.activated = True
####################
# graph importation
g = Graph.from_file(data)
# we chose a random starting point
v = random.choice(list(g.keys()))
#
# # Question 1.1
# print(number_of_possible_locations(g, v, 1 * hour))
#
# # the same result is computed
# print(number_of_possible_locations_with_mindist_dijkstra(
# g, v, 1 * hour, 0))
# print(number_of_possible_locations_with_mindist_dijkstra(
# g, v, 1 * hour, 0))
print(number_of_possible_locations_with_mindist_dijkstra(
g, v, 1 * hour, 2 * hour, logging=logging))
input()
g.generate_converse()
print(number_of_possible_locations_with_mindist_dijkstra(
g.converse, v, 1 * hour, 2 * hour, logging=logging))
# print(reach(g, v))
#
# # We can free memory like this
# dijkstra.clean()
| [
"[email protected]"
] | |
136b1182e8e9b3bb6006d82097af6a64457a1413 | 817965ef6ee70672eabedbbafe336ca07d6443ff | /0x0B-python-input_output/8-load_from_json_file.py | 34f8ae593948ca8fc24e3410cf357a351c626b5f | [] | no_license | julianfrancor/holbertonschool-higher_level_programming | f021086eb2a86b366c391452b13581c87587a3a8 | bd2a291c725ba09d88e9a629d0b22cf4ed7122e7 | refs/heads/master | 2022-12-23T05:27:27.942300 | 2020-09-24T21:22:56 | 2020-09-24T21:22:56 | 257,935,813 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | #!/usr/bin/python3
"""
function that creates an Object from a “JSON file”
"""
import json
def load_from_json_file(filename):
"""
    Args:
        filename: JSON file from which the object is going to be read

    json.dumps() converts a Python object into a JSON string.
    json.dump() writes a Python object as JSON directly to an open file.
    json.loads() expects to get its text from a string object.
    json.load() expects to get its text from an open file and converts it.
"""
with open(filename, mode="r", encoding="UTF8") as file:
return json.load(file)
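# Usage sketch (the file name is hypothetical):
#   my_list = load_from_json_file("my_list.json")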
| [
"[email protected]"
] | |
d6b4abc7fbe0628b62ce4ae5c4de91acedb25971 | 962feeffee41625ff841f6590f97bb09cef9be4c | /torch_glow/tests/nodes/avgpool3d_test.py | 93e26349ac4e677a2d89d2388568725436963f2f | [
"Apache-2.0"
] | permissive | SushantDaga/glow | 8c4c3fbc58c3ae760bdd8e1df2e8c05a72ff07bc | aab22c3e0421dadd29950c2ebfa88b86027cecf5 | refs/heads/master | 2022-11-03T08:39:33.958233 | 2020-06-19T17:03:14 | 2020-06-19T17:05:42 | 273,568,864 | 2 | 0 | Apache-2.0 | 2020-06-19T19:12:31 | 2020-06-19T19:12:30 | null | UTF-8 | Python | false | false | 860 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests.utils import jitVsGlow
import unittest
class TestAvgPool3d(unittest.TestCase):
def test_avg_pool3d_basic(self):
"""Basic test of the PyTorch avg_pool3d Node on Glow."""
def test_f(inputs):
return F.avg_pool3d(inputs, 3)
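        # 5-D input in NCDHW layout: (batch, channels, depth, height, width)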
inputs = torch.randn(1, 4, 5, 5, 5)
jitVsGlow(test_f, inputs, expected_fused_ops={"aten::avg_pool3d"})
def test_avg_pool3d_with_args(self):
"""Test of the PyTorch avg_pool3d Node with arguments on Glow."""
def test_f(inputs):
return F.avg_pool3d(inputs, padding=2, kernel_size=(4, 7, 7))
inputs = torch.randn(1, 4, 10, 10, 10)
jitVsGlow(test_f, inputs, expected_fused_ops={"aten::avg_pool3d"})
| [
"[email protected]"
] | |
e9f935855c936f7be736e9cada0f8dfb9d5cbf2c | 6f444f025f27a10dd7b1bf61083ea2832ffcb196 | /backend/location/api/v1/serializers.py | f4a37f977e26a6abd08e6dffcee6108c10dadd98 | [] | no_license | crowdbotics-apps/ledger-wallet-29295 | 2fe0eee9e06cb1f5c8e514ad650df8276aac789b | d96542a71685ce6d335882c10cf840355c8252f7 | refs/heads/master | 2023-06-24T00:46:30.889717 | 2021-07-30T20:37:03 | 2021-07-30T20:37:03 | 391,182,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | from rest_framework import serializers
from location.models import TaskLocation, CustomerLocation, TaskerLocation, MapLocation
class CustomerLocationSerializer(serializers.ModelSerializer):
class Meta:
model = CustomerLocation
fields = "__all__"
class MapLocationSerializer(serializers.ModelSerializer):
class Meta:
model = MapLocation
fields = "__all__"
class TaskerLocationSerializer(serializers.ModelSerializer):
class Meta:
model = TaskerLocation
fields = "__all__"
class TaskLocationSerializer(serializers.ModelSerializer):
class Meta:
model = TaskLocation
fields = "__all__"
| [
"[email protected]"
] | |
8b95e2ada92485e2e3e8915583d7b6c7899d04f7 | 5022b48f311ba4710e1851855552b9546a3142c5 | /unittest/case_test.py | 3b355326b97f14c7a95801f1b8d7f47cb5b04d82 | [] | no_license | 18786262315/python_lx | a7a15a294312b8382c3d1fd97a8d0ede38f1c5a5 | a870d49cc4ca6efd1b54c2b89dfbf5e3d911a568 | refs/heads/master | 2020-03-21T12:37:30.748759 | 2020-03-18T09:31:31 | 2020-03-18T09:31:31 | 138,563,274 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,228 | py | '''
unittest conditional assertions
tester: cc
This file is a translation reference only; it does not cover concrete usage.
'''
SkipTest() # Raise this exception in a test to skip it.
_ShouldStop() # Stop the test run.
_UnexpectedSuccess() # The test was expected to fail, but it did not fail.
skip() # Unconditionally skip a test.
skipIf(condition, reason) # Skip the test if the condition is true.
skipUnless(condition, reason) # Skip the test if the condition is false.
expectedFailure(test_item) # Mark the test as an expected failure; if it fails when run, it is not counted as a failed case.
_is_subtype(expected, basetype) # Check whether a type matches the expected base type.
addTypeEqualityFunc(typeobj, function) # Register a type-specific equality check function for a custom class.
addCleanup(function, *args, **kwargs) # Add a cleanup to run after tearDown() for each test case; registered functions run in last-in, first-out (LIFO) order. If setUp() fails, tearDown() is not executed, but the cleanups added via addCleanup() still run.
setUp() # Executed before each test case. Any exception other than unittest.SkipTest or AssertionError is treated as an error rather than a failure, and terminates execution of the current test case.
tearDown() # Executed after setUp() has run, whether or not the test case succeeded. If the code in tearDown() raises an exception (other than unittest.SkipTest or AssertionError), an extra error is counted.
setUpClass(cls) and tearDownClass(cls) # Executed before/after the test cases of the class are run; they must be defined with the classmethod decorator.
countTestCases() # Returns the number of test cases; for a TestCase instance this is always 1.
defaultTestResult() # If no result argument is provided to the run() method, returns a TestResult object containing the results of this test case.
shortDescription() # Returns the description of the test case, i.e. the function's docstring, or None if there is none. Can be used to describe the test in result output.
id() # Returns the identifier of the test case, usually in the format module.class.function. Can be used in test result output.
subTest(msg=_subtest_msg_sentinel, **params) # Returns a context manager that runs the enclosed code block as a subtest identified by the optional message and keyword arguments. A failure in a subtest marks the test case as failed, but execution resumes at the end of the enclosing block, allowing further test code to run.
run(result=None) # Runs a test case, collecting the results into the result variable; the results are not returned to the caller. If result is None, the results are available via the defaultTestResult() method mentioned above.
doCleanups() # Unconditionally force-calls the functions added with addCleanup(); useful when setUp() fails but cleanup still has to happen, or when the cleanups should run before tearDown().
debug() # Unlike the run method, which stores results in a result object, the debug method runs the test case and reports exceptions to the caller.
fail(msg=None) # Unconditionally declare a test case failed; msg is the failure message.
assertEqual(set1, set2, msg=None) # Check that two values are equal.
assertFalse(expr, msg=None) # Check that the expression is false.
assertTrue(expr, msg=None) # Check that the expression is true.
assertAlmostEqual and assertNotAlmostEqual(first, second, places=None, msg=None, delta=None) # Check whether two values are, or are not, approximately equal; places is the number of decimal places of precision.
assertSequenceEqual(seq1, seq2, msg=None, seq_type=None) # Equality assertion for ordered sequences, such as tuples and lists.
assertListEqual(list1, list2, msg=None) # Equality assertion specific to lists.
assertTupleEqual(tuple1, tuple2, msg=None) # Equality assertion specific to tuples.
assertSetEqual(set1, set2, msg=None) # Equality assertion specific to sets.
assertIn and assertNotIn(member, container, msg=None) # Check whether a is, or is not, present in b.
assertIs and assertIsNot(expr1, expr2, msg=None) # Check whether a is, or is not, b.
assertDictEqual(d1, d2, msg=None) # Check that two dictionaries are equal.
assertDictContainsSubset(subset, dictionary, msg=None) # Check that the dictionary is a superset of the given subset.
assertCountEqual(first, second, msg=None) # Check that two unordered sequences contain the same elements.
assertMultiLineEqual(first, second, msg=None) # Assert that two multi-line strings are equal.
assertLess(a, b, msg=None) # Assert a < b.
assertLessEqual(a, b, msg=None) # Assert a <= b.
assertGreater(a, b, msg=None) # Assert a > b.
assertGreaterEqual(a, b, msg=None) # Assert a >= b.
assertIsNone and assertIsNotNone(obj, msg=None) # Check whether obj is, or is not, None.
assertIsInstance(a, b) and assertNotIsInstance(a, b) # Same idea as assertTrue; the type b may be a single type or a tuple of types.
assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs) # Assert that the message in the raised exception matches the regular expression.
assertWarnsRegex(expected_warning, expected_regex, *args, **kwargs) # Assert that the message in the triggered warning matches the regexp. Works like assertWarns(), but only a warning whose message matches the regular expression counts as a successful match.
assertRegex and assertNotRegex(text, expected_regex, msg=None) # Check whether the text matches the regular expression.
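# Minimal usage sketch of a few of the assertions above (illustrative addition,
# not part of the original translation notes):
import unittest

class _DemoTest(unittest.TestCase):
    def setUp(self):
        self.items = [1, 2, 3]

    def test_membership_and_order(self):
        self.assertIn(2, self.items)
        self.assertNotIn(9, self.items)
        self.assertLess(self.items[0], self.items[-1])

    def test_even_numbers_as_subtests(self):
        for n in (0, 2, 4):
            with self.subTest(n=n):
                self.assertEqual(n % 2, 0)

if __name__ == '__main__':
    unittest.main()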
| [
"[email protected]"
] | |
8904819a1aed374abfd0b4aa31c6e9d42770301e | be8190250b78ced1dbc94ae8c9774299621c3905 | /boxtree/pyfmmlib_integration.py | 812eae4aa4d044bb5f198e8d8b8674066014057b | [
"MIT"
] | permissive | Dracogenius17/boxtree | 45b03f67df5c9faaffccec25d480dde787cd15ac | 313159f001c5d8c4bbe68de65ed1077a954ce2f9 | refs/heads/master | 2021-05-02T02:36:42.958288 | 2017-12-13T16:44:23 | 2017-12-13T16:44:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,929 | py | from __future__ import division
"""Integration between boxtree and pyfmmlib."""
__copyright__ = "Copyright (C) 2013 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
from pytools import memoize_method
import logging
logger = logging.getLogger(__name__)
__doc__ = """Integrates :mod:`boxtree` with
`pyfmmlib <http://pypi.python.org/pypi/pyfmmlib>`_.
"""
class FMMLibExpansionWrangler(object):
"""Implements the :class:`boxtree.fmm.ExpansionWranglerInterface`
by using pyfmmlib.
"""
# {{{ constructor
def __init__(self, tree, helmholtz_k, fmm_level_to_nterms=None, ifgrad=False,
dipole_vec=None, dipoles_already_reordered=False, nterms=None):
"""
:arg fmm_level_to_nterms: a callable that, upon being passed the tree
and the tree level as an integer, returns the value of *nterms* for the
multipole and local expansions on that level.
"""
if nterms is not None and fmm_level_to_nterms is not None:
raise TypeError("may specify either fmm_level_to_nterms or nterms, "
"but not both")
if nterms is not None:
from warnings import warn
warn("Passing nterms is deprecated. Pass fmm_level_to_nterms instead.",
DeprecationWarning, stacklevel=2)
def fmm_level_to_nterms(tree, level):
return nterms
self.tree = tree
if helmholtz_k == 0:
self.eqn_letter = "l"
self.kernel_kwargs = {}
self.rscale_factor = 1
else:
self.eqn_letter = "h"
self.kernel_kwargs = {"zk": helmholtz_k}
self.rscale_factor = abs(helmholtz_k)
self.level_nterms = np.array([
fmm_level_to_nterms(tree, lev) for lev in range(tree.nlevels)
], dtype=np.int32)
if helmholtz_k:
logger.info("expansion orders by level used in Helmholtz FMM: %s",
self.level_nterms)
self.dtype = np.complex128
self.ifgrad = ifgrad
self.dim = tree.dimensions
if dipole_vec is not None:
assert dipole_vec.shape == (self.dim, self.tree.nsources)
if not dipoles_already_reordered:
dipole_vec = self.reorder_sources(dipole_vec)
self.dipole_vec = dipole_vec.copy(order="F")
self.dp_suffix = "_dp"
else:
self.dipole_vec = None
self.dp_suffix = ""
# }}}
def level_to_rscale(self, level):
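        # rscale tracks the box size at this level (root_extent * 2**-level),
        # additionally scaled by |k| for Helmholtz via rscale_factor, then
        # clamped at 1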
result = self.tree.root_extent * 2 ** -level * self.rscale_factor
if abs(result) > 1:
result = 1
return result
@memoize_method
def projection_quad_extra_kwargs(self, level=None, nterms=None):
if level is None and nterms is None:
raise TypeError("must pass exactly one of level or nterms")
if level is not None and nterms is not None:
raise TypeError("must pass exactly one of level or nterms")
if level is not None:
nterms = self.level_nterms[level]
common_extra_kwargs = {}
if self.dim == 3 and self.eqn_letter == "h":
nquad = max(6, int(2.5*nterms))
from pyfmmlib import legewhts
xnodes, weights = legewhts(nquad, ifwhts=1)
common_extra_kwargs = {
"xnodes": xnodes,
"wts": weights,
}
return common_extra_kwargs
# {{{ overridable target lists for the benefit of the QBX FMM
def box_target_starts(self):
return self.tree.box_target_starts
def box_target_counts_nonchild(self):
return self.tree.box_target_counts_nonchild
def targets(self):
return self.tree.targets
# }}}
# {{{ routine getters
def get_routine(self, name, suffix=""):
import pyfmmlib
return getattr(pyfmmlib, "%s%s%s" % (
self.eqn_letter,
name % self.dim,
suffix))
def get_vec_routine(self, name):
return self.get_routine(name, "_vec")
def get_translation_routine(self, name, vec_suffix="_vec"):
suffix = ""
if self.dim == 3:
suffix = "quadu"
suffix += vec_suffix
rout = self.get_routine(name, suffix)
if self.dim == 2:
def wrapper(*args, **kwargs):
# not used
kwargs.pop("level_for_projection", None)
return rout(*args, **kwargs)
else:
def wrapper(*args, **kwargs):
kwargs.pop("level_for_projection", None)
nterms2 = kwargs["nterms2"]
kwargs.update(self.projection_quad_extra_kwargs(nterms=nterms2))
val, ier = rout(*args, **kwargs)
if (ier != 0).any():
raise RuntimeError("%s failed with nonzero ier" % name)
return val
            # Doesn't work in Py2
# from functools import update_wrapper
# update_wrapper(wrapper, rout)
return wrapper
def get_direct_eval_routine(self):
if self.dim == 2:
rout = self.get_vec_routine("potgrad%ddall" + self.dp_suffix)
def wrapper(*args, **kwargs):
kwargs["ifgrad"] = self.ifgrad
kwargs["ifhess"] = False
pot, grad, hess = rout(*args, **kwargs)
if not self.ifgrad:
grad = 0
return pot, grad
            # Doesn't work in Py2
# from functools import update_wrapper
# update_wrapper(wrapper, rout)
return wrapper
elif self.dim == 3:
rout = self.get_vec_routine("potfld%ddall" + self.dp_suffix)
def wrapper(*args, **kwargs):
kwargs["iffld"] = self.ifgrad
pot, fld = rout(*args, **kwargs)
if self.ifgrad:
grad = -fld
else:
grad = 0
return pot, grad
            # Doesn't work in Py2
# from functools import update_wrapper
# update_wrapper(wrapper, rout)
return wrapper
else:
raise ValueError("unsupported dimensionality")
def get_expn_eval_routine(self, expn_kind):
name = "%%dd%seval" % expn_kind
rout = self.get_routine(name, "_vec")
if self.dim == 2:
def wrapper(*args, **kwargs):
kwargs["ifgrad"] = self.ifgrad
kwargs["ifhess"] = False
pot, grad, hess = rout(*args, **kwargs)
if not self.ifgrad:
grad = 0
return pot, grad
            # Doesn't work in Py2
# from functools import update_wrapper
# update_wrapper(wrapper, rout)
return wrapper
elif self.dim == 3:
def wrapper(*args, **kwargs):
kwargs["iffld"] = self.ifgrad
pot, fld, ier = rout(*args, **kwargs)
if (ier != 0).any():
raise RuntimeError("%s failed with nonzero ier" % name)
if self.ifgrad:
grad = -fld
else:
grad = 0
return pot, grad
            # Doesn't work in Py2
# from functools import update_wrapper
# update_wrapper(wrapper, rout)
return wrapper
else:
raise ValueError("unsupported dimensionality")
# }}}
# {{{ data vector utilities
def expansion_shape(self, nterms):
if self.dim == 2 and self.eqn_letter == "l":
return (nterms+1,)
elif self.dim == 2 and self.eqn_letter == "h":
return (2*nterms+1,)
elif self.dim == 3:
# This is the transpose of the Fortran format, to
# minimize mismatch between C and Fortran orders.
return (2*nterms+1, nterms+1,)
else:
raise ValueError("unsupported dimensionality")
def _expansions_level_starts(self, order_to_size):
result = [0]
for lev in range(self.tree.nlevels):
lev_nboxes = (
self.tree.level_start_box_nrs[lev+1]
- self.tree.level_start_box_nrs[lev])
expn_size = order_to_size(self.level_nterms[lev])
result.append(
result[-1]
+ expn_size * lev_nboxes)
return result
@memoize_method
def multipole_expansions_level_starts(self):
from pytools import product
return self._expansions_level_starts(
lambda nterms: product(self.expansion_shape(nterms)))
@memoize_method
def local_expansions_level_starts(self):
from pytools import product
return self._expansions_level_starts(
lambda nterms: product(self.expansion_shape(nterms)))
def multipole_expansions_view(self, mpole_exps, level):
box_start, box_stop = self.tree.level_start_box_nrs[level:level+2]
expn_start, expn_stop = \
self.multipole_expansions_level_starts()[level:level+2]
return (box_start,
mpole_exps[expn_start:expn_stop].reshape(
box_stop-box_start,
*self.expansion_shape(self.level_nterms[level])))
def local_expansions_view(self, local_exps, level):
box_start, box_stop = self.tree.level_start_box_nrs[level:level+2]
expn_start, expn_stop = \
self.local_expansions_level_starts()[level:level+2]
return (box_start,
local_exps[expn_start:expn_stop].reshape(
box_stop-box_start,
*self.expansion_shape(self.level_nterms[level])))
def multipole_expansion_zeros(self):
return np.zeros(
self.multipole_expansions_level_starts()[-1],
dtype=self.dtype)
def local_expansion_zeros(self):
return np.zeros(
self.local_expansions_level_starts()[-1],
dtype=self.dtype)
def output_zeros(self):
if self.ifgrad:
from pytools import make_obj_array
return make_obj_array([
np.zeros(self.tree.ntargets, self.dtype)
for i in range(1 + self.dim)])
else:
return np.zeros(self.tree.ntargets, self.dtype)
def add_potgrad_onto_output(self, output, output_slice, pot, grad):
if self.ifgrad:
output[0, output_slice] += pot
output[1:, output_slice] += grad
else:
output[output_slice] += pot
# }}}
# {{{ source/target particle wrangling
def _get_source_slice(self, ibox):
pstart = self.tree.box_source_starts[ibox]
return slice(
pstart, pstart + self.tree.box_source_counts_nonchild[ibox])
def _get_target_slice(self, ibox):
pstart = self.box_target_starts()[ibox]
return slice(
pstart, pstart + self.box_target_counts_nonchild()[ibox])
@memoize_method
def _get_single_sources_array(self):
return np.array([
self.tree.sources[idim]
for idim in range(self.dim)
], order="F")
def _get_sources(self, pslice):
return self._get_single_sources_array()[:, pslice]
@memoize_method
def _get_single_targets_array(self):
return np.array([
self.targets()[idim]
for idim in range(self.dim)
], order="F")
def _get_targets(self, pslice):
return self._get_single_targets_array()[:, pslice]
# }}}
def reorder_sources(self, source_array):
return source_array[..., self.tree.user_source_ids]
def reorder_potentials(self, potentials):
return potentials[self.tree.sorted_target_ids]
def get_source_kwargs(self, src_weights, pslice):
if self.dipole_vec is None:
return {
"charge": src_weights[pslice],
}
else:
if self.eqn_letter == "l" and self.dim == 2:
return {
"dipstr": -src_weights[pslice] * (
self.dipole_vec[0, pslice]
+ 1j * self.dipole_vec[1, pslice])
}
else:
return {
"dipstr": src_weights[pslice],
"dipvec": self.dipole_vec[:, pslice],
}
def form_multipoles(self, level_start_source_box_nrs, source_boxes, src_weights):
formmp = self.get_routine("%ddformmp" + self.dp_suffix)
mpoles = self.multipole_expansion_zeros()
for lev in range(self.tree.nlevels):
start, stop = level_start_source_box_nrs[lev:lev+2]
if start == stop:
continue
level_start_ibox, mpoles_view = self.multipole_expansions_view(
mpoles, lev)
rscale = self.level_to_rscale(lev)
for src_ibox in source_boxes[start:stop]:
pslice = self._get_source_slice(src_ibox)
if pslice.stop - pslice.start == 0:
continue
kwargs = {}
kwargs.update(self.kernel_kwargs)
kwargs.update(self.get_source_kwargs(src_weights, pslice))
ier, mpole = formmp(
rscale=rscale,
source=self._get_sources(pslice),
center=self.tree.box_centers[:, src_ibox],
nterms=self.level_nterms[lev],
**kwargs)
if ier:
raise RuntimeError("formmp failed")
mpoles_view[src_ibox-level_start_ibox] = mpole.T
return mpoles
def coarsen_multipoles(self, level_start_source_parent_box_nrs,
source_parent_boxes, mpoles):
tree = self.tree
mpmp = self.get_translation_routine("%ddmpmp")
# nlevels-1 is the last valid level index
# nlevels-2 is the last valid level that could have children
#
# 3 is the last relevant source_level.
# 2 is the last relevant target_level.
# (because no level 1 box will be well-separated from another)
for source_level in range(tree.nlevels-1, 2, -1):
target_level = source_level - 1
start, stop = level_start_source_parent_box_nrs[
target_level:target_level+2]
source_level_start_ibox, source_mpoles_view = \
self.multipole_expansions_view(mpoles, source_level)
target_level_start_ibox, target_mpoles_view = \
self.multipole_expansions_view(mpoles, target_level)
source_rscale = self.level_to_rscale(source_level)
target_rscale = self.level_to_rscale(target_level)
for ibox in source_parent_boxes[start:stop]:
parent_center = tree.box_centers[:, ibox]
for child in tree.box_child_ids[:, ibox]:
if child:
child_center = tree.box_centers[:, child]
kwargs = {}
if self.dim == 3 and self.eqn_letter == "h":
kwargs["radius"] = tree.root_extent * 2**(-target_level)
kwargs.update(self.kernel_kwargs)
new_mp = mpmp(
rscale1=source_rscale,
center1=child_center,
expn1=source_mpoles_view[
child - source_level_start_ibox].T,
rscale2=target_rscale,
center2=parent_center,
nterms2=self.level_nterms[target_level],
**kwargs)
target_mpoles_view[
ibox - target_level_start_ibox] += new_mp[..., 0].T
def eval_direct(self, target_boxes, neighbor_sources_starts,
neighbor_sources_lists, src_weights):
output = self.output_zeros()
ev = self.get_direct_eval_routine()
for itgt_box, tgt_ibox in enumerate(target_boxes):
tgt_pslice = self._get_target_slice(tgt_ibox)
if tgt_pslice.stop - tgt_pslice.start == 0:
continue
#tgt_result = np.zeros(tgt_pslice.stop - tgt_pslice.start, self.dtype)
tgt_pot_result = 0
tgt_grad_result = 0
start, end = neighbor_sources_starts[itgt_box:itgt_box+2]
for src_ibox in neighbor_sources_lists[start:end]:
src_pslice = self._get_source_slice(src_ibox)
if src_pslice.stop - src_pslice.start == 0:
continue
kwargs = {}
kwargs.update(self.kernel_kwargs)
kwargs.update(self.get_source_kwargs(src_weights, src_pslice))
tmp_pot, tmp_grad = ev(
sources=self._get_sources(src_pslice),
targets=self._get_targets(tgt_pslice),
**kwargs)
tgt_pot_result += tmp_pot
tgt_grad_result += tmp_grad
self.add_potgrad_onto_output(
output, tgt_pslice, tgt_pot_result, tgt_grad_result)
return output
def multipole_to_local(self,
level_start_target_or_target_parent_box_nrs,
target_or_target_parent_boxes,
starts, lists, mpole_exps):
tree = self.tree
local_exps = self.local_expansion_zeros()
mploc = self.get_translation_routine("%ddmploc", vec_suffix="_imany")
for lev in range(self.tree.nlevels):
lstart, lstop = level_start_target_or_target_parent_box_nrs[lev:lev+2]
if lstart == lstop:
continue
starts_on_lvl = starts[lstart:lstop+1]
source_level_start_ibox, source_mpoles_view = \
self.multipole_expansions_view(mpole_exps, lev)
target_level_start_ibox, target_local_exps_view = \
self.local_expansions_view(local_exps, lev)
ntgt_boxes = lstop-lstart
itgt_box_vec = np.arange(ntgt_boxes)
tgt_ibox_vec = target_or_target_parent_boxes[lstart:lstop]
nsrc_boxes_per_tgt_box = (
starts[lstart + itgt_box_vec+1] - starts[lstart + itgt_box_vec])
nsrc_boxes = np.sum(nsrc_boxes_per_tgt_box)
src_boxes_starts = np.empty(ntgt_boxes+1, dtype=np.int32)
src_boxes_starts[0] = 0
src_boxes_starts[1:] = np.cumsum(nsrc_boxes_per_tgt_box)
rscale = self.level_to_rscale(lev)
rscale1 = np.ones(nsrc_boxes) * rscale
rscale1_offsets = np.arange(nsrc_boxes)
kwargs = {}
if self.dim == 3 and self.eqn_letter == "h":
kwargs["radius"] = (
tree.root_extent * 2**(-lev)
* np.ones(ntgt_boxes))
rscale2 = np.ones(ntgt_boxes, np.float64) * rscale
# These get max'd/added onto: pass initialized versions.
if self.dim == 3:
ier = np.zeros(ntgt_boxes, dtype=np.int32)
kwargs["ier"] = ier
expn2 = np.zeros(
(ntgt_boxes,) + self.expansion_shape(self.level_nterms[lev]),
dtype=self.dtype)
kwargs.update(self.kernel_kwargs)
expn2 = mploc(
rscale1=rscale1,
rscale1_offsets=rscale1_offsets,
rscale1_starts=src_boxes_starts,
center1=tree.box_centers,
center1_offsets=lists,
center1_starts=starts_on_lvl,
expn1=source_mpoles_view.T,
expn1_offsets=lists - source_level_start_ibox,
expn1_starts=starts_on_lvl,
rscale2=rscale2,
# FIXME: wrong layout, will copy
center2=tree.box_centers[:, tgt_ibox_vec],
expn2=expn2.T,
nterms2=self.level_nterms[lev],
**kwargs).T
target_local_exps_view[tgt_ibox_vec - target_level_start_ibox] += expn2
return local_exps
def eval_multipoles(self, level_start_target_box_nrs, target_boxes,
sep_smaller_nonsiblings_by_level, mpole_exps):
output = self.output_zeros()
mpeval = self.get_expn_eval_routine("mp")
for isrc_level, ssn in enumerate(sep_smaller_nonsiblings_by_level):
source_level_start_ibox, source_mpoles_view = \
self.multipole_expansions_view(mpole_exps, isrc_level)
rscale = self.level_to_rscale(isrc_level)
for itgt_box, tgt_ibox in enumerate(target_boxes):
tgt_pslice = self._get_target_slice(tgt_ibox)
if tgt_pslice.stop - tgt_pslice.start == 0:
continue
tgt_pot = 0
tgt_grad = 0
start, end = ssn.starts[itgt_box:itgt_box+2]
for src_ibox in ssn.lists[start:end]:
tmp_pot, tmp_grad = mpeval(
rscale=rscale,
center=self.tree.box_centers[:, src_ibox],
expn=source_mpoles_view[
src_ibox - source_level_start_ibox].T,
ztarg=self._get_targets(tgt_pslice),
**self.kernel_kwargs)
tgt_pot = tgt_pot + tmp_pot
tgt_grad = tgt_grad + tmp_grad
self.add_potgrad_onto_output(
output, tgt_pslice, tgt_pot, tgt_grad)
return output
def form_locals(self,
level_start_target_or_target_parent_box_nrs,
target_or_target_parent_boxes, starts, lists, src_weights):
local_exps = self.local_expansion_zeros()
formta = self.get_routine("%ddformta" + self.dp_suffix)
for lev in range(self.tree.nlevels):
lev_start, lev_stop = \
level_start_target_or_target_parent_box_nrs[lev:lev+2]
if lev_start == lev_stop:
continue
target_level_start_ibox, target_local_exps_view = \
self.local_expansions_view(local_exps, lev)
rscale = self.level_to_rscale(lev)
for itgt_box, tgt_ibox in enumerate(
target_or_target_parent_boxes[lev_start:lev_stop]):
start, end = starts[lev_start+itgt_box:lev_start+itgt_box+2]
contrib = 0
for src_ibox in lists[start:end]:
src_pslice = self._get_source_slice(src_ibox)
tgt_center = self.tree.box_centers[:, tgt_ibox]
if src_pslice.stop - src_pslice.start == 0:
continue
kwargs = {}
kwargs.update(self.kernel_kwargs)
kwargs.update(self.get_source_kwargs(src_weights, src_pslice))
ier, mpole = formta(
rscale=rscale,
source=self._get_sources(src_pslice),
center=tgt_center,
nterms=self.level_nterms[lev],
**kwargs)
if ier:
raise RuntimeError("formta failed")
contrib = contrib + mpole.T
target_local_exps_view[tgt_ibox-target_level_start_ibox] = contrib
return local_exps
def refine_locals(self, level_start_target_or_target_parent_box_nrs,
target_or_target_parent_boxes, local_exps):
locloc = self.get_translation_routine("%ddlocloc")
for target_lev in range(1, self.tree.nlevels):
start, stop = level_start_target_or_target_parent_box_nrs[
target_lev:target_lev+2]
source_lev = target_lev - 1
source_level_start_ibox, source_local_exps_view = \
self.local_expansions_view(local_exps, source_lev)
target_level_start_ibox, target_local_exps_view = \
self.local_expansions_view(local_exps, target_lev)
source_rscale = self.level_to_rscale(source_lev)
target_rscale = self.level_to_rscale(target_lev)
for tgt_ibox in target_or_target_parent_boxes[start:stop]:
tgt_center = self.tree.box_centers[:, tgt_ibox]
src_ibox = self.tree.box_parent_ids[tgt_ibox]
src_center = self.tree.box_centers[:, src_ibox]
kwargs = {}
if self.dim == 3 and self.eqn_letter == "h":
kwargs["radius"] = self.tree.root_extent * 2**(-target_lev)
kwargs.update(self.kernel_kwargs)
tmp_loc_exp = locloc(
rscale1=source_rscale,
center1=src_center,
expn1=source_local_exps_view[
src_ibox - source_level_start_ibox].T,
rscale2=target_rscale,
center2=tgt_center,
nterms2=self.level_nterms[target_lev],
**kwargs)[..., 0]
target_local_exps_view[
tgt_ibox - target_level_start_ibox] += tmp_loc_exp.T
return local_exps
def eval_locals(self, level_start_target_box_nrs, target_boxes, local_exps):
output = self.output_zeros()
taeval = self.get_expn_eval_routine("ta")
for lev in range(self.tree.nlevels):
start, stop = level_start_target_box_nrs[lev:lev+2]
if start == stop:
continue
source_level_start_ibox, source_local_exps_view = \
self.local_expansions_view(local_exps, lev)
rscale = self.level_to_rscale(lev)
for tgt_ibox in target_boxes[start:stop]:
tgt_pslice = self._get_target_slice(tgt_ibox)
if tgt_pslice.stop - tgt_pslice.start == 0:
continue
tmp_pot, tmp_grad = taeval(
rscale=rscale,
center=self.tree.box_centers[:, tgt_ibox],
expn=source_local_exps_view[
tgt_ibox - source_level_start_ibox].T,
ztarg=self._get_targets(tgt_pslice),
**self.kernel_kwargs)
self.add_potgrad_onto_output(
output, tgt_pslice, tmp_pot, tmp_grad)
return output
def finalize_potentials(self, potential):
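        # pyfmmlib returns bare kernel sums; the Green's function normalization
        # for each kernel/dimension combination is applied here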
if self.eqn_letter == "l" and self.dim == 2:
scale_factor = -1/(2*np.pi)
elif self.eqn_letter == "h" and self.dim == 2:
scale_factor = 1
elif self.eqn_letter in ["l", "h"] and self.dim == 3:
scale_factor = 1/(4*np.pi)
else:
raise NotImplementedError(
"scale factor for pyfmmlib %s for %d dimensions" % (
self.eqn_letter,
self.dim))
if self.eqn_letter == "l" and self.dim == 2:
potential = potential.real
return potential * scale_factor
# vim: foldmethod=marker
| [
"[email protected]"
] | |
820ed298b2d0d51b64a647c759fec6a4a95c79e1 | 0c4b33d04cf7fb73b3752b03af89eeaf76b8a0d2 | /第14章-网络编程/client.py | 93a57207689113ca5cbd684fb77a81dba69d2db4 | [] | no_license | kingflyfly/python_study | 3b3ab427d23174b61b8f14c223059cfa9f303219 | 8a63a7c11b408bbc11a2b636517beaa424b37725 | refs/heads/master | 2020-06-11T01:39:52.655730 | 2020-03-24T16:09:39 | 2020-03-24T16:09:39 | 193,817,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | import socket
import sys
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 9992
# 连接服务,指定主机和端口
s.connect((host, port))
# 接收小于 1024 字节的数据
msg = s.recv(1024)
s.close()
print (msg.decode('utf-8')) | [
"[email protected]"
] | |
81f726744a38d25f6099ad36107663ac8a5d3212 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/stdlib-big-2805.py | b2c7ae07ef65cab60cc16a7073cc6a18c9d869b1 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,992 | py | # ChocoPy library functions
def int_to_str(x: int) -> str:
digits:[str] = None
result:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str2(x: int, x2: int) -> str:
digits:[str] = None
digits2:[str] = None
result:str = ""
result2:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str3(x: int, x2: int, x3: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str4(x: int, x2: int, x3: int, x4: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str5(x: int, x2: int, x3: int, x4: int, x5: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
digits5:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
result5:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def str_to_int(x: str) -> int:
result:int = 0
digit:int = 0
char:str = ""
sign:int = 1
first_char:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int2(x: str, x2: str) -> int:
result:int = 0
result2:int = 0
digit:int = 0
digit2:int = 0
char:str = ""
char2:str = ""
sign:int = 1
sign2:int = 1
first_char:bool = True
first_char2:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int3(x: str, x2: str, x3: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
char:str = ""
char2:str = ""
char3:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int4(x: str, x2: str, x3: str, x4: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int5(x: str, x2: str, x3: str, x4: str, x5: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
result5:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
digit5:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
char5:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
sign5:int = 1
first_char:bool = True
first_char2:bool = True
    first_char3:bool = True
first_char4:bool = True
first_char5:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
# Input parameters
c:int = 42
c2:int = 42
c3:int = 42
c4:int = 42
c5:int = 42
n:int = 10
n2:int = 10
n3:int = 10
n4:int = 10
n5:int = 10
# Run [-nc, nc] with step size c
s:str = ""
s2:str = ""
s3:str = ""
s4:str = ""
s5:str = ""
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
i = -n * c
# Crunch
while i <= n * c:
s = int_to_str(i)
print(s)
i = str_to_int(s) + c
| [
"[email protected]"
] | |
b9691e61dfe1e73f0cfed348461860d2ce4d6495 | 16ecabb5d9010c7fa4aebb8ab852f7c6a19193db | /src/0809.py | 0ba2428a1bbf7638358e2412cd9b40399abf0b68 | [] | no_license | LeeSM0518/OpenCV-python | 74ff0d899d291a35f9cd82d2ef37835a0c5ccdf2 | 46c234879f5d48876ca0888bdede8bfb347b7c30 | refs/heads/master | 2020-04-30T19:35:33.201278 | 2020-02-25T14:35:20 | 2020-02-25T14:35:20 | 177,043,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # 0809.py
import cv2
import numpy as np
#1
src = cv2.imread('./data/momentTest.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
ret, bImage = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
#2
##M = cv2.moments(bImage)
M = cv2.moments(bImage, True)
for key, value in M.items():
print('{}={}'.format(key, value))
#3
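# centroid from the raw spatial moments: cx = m10/m00, cy = m01/m00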
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
dst = src.copy()
cv2.circle(dst, (cx, cy), 5, (0,0,255), 2)
cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
b08a51aeb6644672aa2d6a3f7fcc2d9b19c3f3a1 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/insights/v20210401/data_collection_rule_association.py | e5cc8f03d180c23ad08149bb40a76e212462e4f5 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 12,201 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = ['DataCollectionRuleAssociationArgs', 'DataCollectionRuleAssociation']
@pulumi.input_type
class DataCollectionRuleAssociationArgs:
def __init__(__self__, *,
resource_uri: pulumi.Input[str],
association_name: Optional[pulumi.Input[str]] = None,
data_collection_endpoint_id: Optional[pulumi.Input[str]] = None,
data_collection_rule_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DataCollectionRuleAssociation resource.
:param pulumi.Input[str] resource_uri: The identifier of the resource.
:param pulumi.Input[str] association_name: The name of the association. The name is case insensitive.
:param pulumi.Input[str] data_collection_endpoint_id: The resource ID of the data collection endpoint that is to be associated.
:param pulumi.Input[str] data_collection_rule_id: The resource ID of the data collection rule that is to be associated.
:param pulumi.Input[str] description: Description of the association.
"""
pulumi.set(__self__, "resource_uri", resource_uri)
if association_name is not None:
pulumi.set(__self__, "association_name", association_name)
if data_collection_endpoint_id is not None:
pulumi.set(__self__, "data_collection_endpoint_id", data_collection_endpoint_id)
if data_collection_rule_id is not None:
pulumi.set(__self__, "data_collection_rule_id", data_collection_rule_id)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> pulumi.Input[str]:
"""
The identifier of the resource.
"""
return pulumi.get(self, "resource_uri")
@resource_uri.setter
def resource_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_uri", value)
@property
@pulumi.getter(name="associationName")
def association_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the association. The name is case insensitive.
"""
return pulumi.get(self, "association_name")
@association_name.setter
def association_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "association_name", value)
@property
@pulumi.getter(name="dataCollectionEndpointId")
def data_collection_endpoint_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource ID of the data collection endpoint that is to be associated.
"""
return pulumi.get(self, "data_collection_endpoint_id")
@data_collection_endpoint_id.setter
def data_collection_endpoint_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_collection_endpoint_id", value)
@property
@pulumi.getter(name="dataCollectionRuleId")
def data_collection_rule_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource ID of the data collection rule that is to be associated.
"""
return pulumi.get(self, "data_collection_rule_id")
@data_collection_rule_id.setter
def data_collection_rule_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_collection_rule_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the association.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
class DataCollectionRuleAssociation(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
association_name: Optional[pulumi.Input[str]] = None,
data_collection_endpoint_id: Optional[pulumi.Input[str]] = None,
data_collection_rule_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Definition of generic ARM proxy resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] association_name: The name of the association. The name is case insensitive.
:param pulumi.Input[str] data_collection_endpoint_id: The resource ID of the data collection endpoint that is to be associated.
:param pulumi.Input[str] data_collection_rule_id: The resource ID of the data collection rule that is to be associated.
:param pulumi.Input[str] description: Description of the association.
:param pulumi.Input[str] resource_uri: The identifier of the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DataCollectionRuleAssociationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Definition of generic ARM proxy resource.
:param str resource_name: The name of the resource.
:param DataCollectionRuleAssociationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DataCollectionRuleAssociationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
association_name: Optional[pulumi.Input[str]] = None,
data_collection_endpoint_id: Optional[pulumi.Input[str]] = None,
data_collection_rule_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DataCollectionRuleAssociationArgs.__new__(DataCollectionRuleAssociationArgs)
__props__.__dict__["association_name"] = association_name
__props__.__dict__["data_collection_endpoint_id"] = data_collection_endpoint_id
__props__.__dict__["data_collection_rule_id"] = data_collection_rule_id
__props__.__dict__["description"] = description
if resource_uri is None and not opts.urn:
raise TypeError("Missing required property 'resource_uri'")
__props__.__dict__["resource_uri"] = resource_uri
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights/v20210401:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-native:insights:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-nextgen:insights:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-native:insights/v20191101preview:DataCollectionRuleAssociation"), pulumi.Alias(type_="azure-nextgen:insights/v20191101preview:DataCollectionRuleAssociation")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DataCollectionRuleAssociation, __self__).__init__(
'azure-native:insights/v20210401:DataCollectionRuleAssociation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DataCollectionRuleAssociation':
"""
Get an existing DataCollectionRuleAssociation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DataCollectionRuleAssociationArgs.__new__(DataCollectionRuleAssociationArgs)
__props__.__dict__["data_collection_endpoint_id"] = None
__props__.__dict__["data_collection_rule_id"] = None
__props__.__dict__["description"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return DataCollectionRuleAssociation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dataCollectionEndpointId")
def data_collection_endpoint_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource ID of the data collection endpoint that is to be associated.
"""
return pulumi.get(self, "data_collection_endpoint_id")
@property
@pulumi.getter(name="dataCollectionRuleId")
def data_collection_rule_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource ID of the data collection rule that is to be associated.
"""
return pulumi.get(self, "data_collection_rule_id")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the association.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Resource entity tag (ETag).
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The resource provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.DataCollectionRuleAssociationProxyOnlyResourceResponseSystemData']:
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
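# Minimal usage sketch (an illustrative addition, not emitted by the SDK
# generator): looking up an existing association with the `get` classmethod
# above. The resource name and Azure resource ID are made-up placeholders.
def _example_lookup():
    existing = DataCollectionRuleAssociation.get(
        "example-association",
        id="/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Insights/"
           "dataCollectionRules/rule/associations/example-association")
    pulumi.export("dataCollectionRuleId", existing.data_collection_rule_id)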
| [
"[email protected]"
] | |
d670fc71f610fb31b49e00a8c5c71b54ca6ed4ef | 83a59e255f681e85828399c6c2323f2cf0997e10 | /kibble/scanners/scanners/git-evolution.py | 8f4a83698faccdae147d2985f32bfb605884f6ff | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | kaxil/kibble | f4ab6f1039086adcb37c544c60bbbc27e8538128 | 96959acec06fed4d91d5da73fee1aa1200ffbb3c | refs/heads/main | 2023-02-01T03:14:53.813091 | 2020-12-16T23:04:45 | 2020-12-16T23:04:45 | 320,881,184 | 1 | 0 | Apache-2.0 | 2020-12-12T17:04:54 | 2020-12-12T17:04:54 | null | UTF-8 | Python | false | false | 8,447 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Git Evolution scanner """
import calendar
import datetime
import hashlib
import os
import subprocess
import time
from kibble.configuration import conf
from kibble.scanners.utils import sloc
title = "Git Evolution Scanner"
version = "0.1.0"
def accepts(source):
""" Do we accept this source? """
if source["type"] == "git":
return True
# There are cases where we have a github repo, but don't wanna analyze the code, just issues
if source["type"] == "github" and source.get("issuesonly", False) == False:
return True
return False
def get_first_ref(gpath):
try:
return subprocess.check_output(
"cd %s && git log `git rev-list --max-parents=0 HEAD` --pretty=format:%%ct"
% gpath,
shell=True,
)
except: # pylint: disable=bare-except
print("Could not get first ref, exiting!")
return None
def acquire(kibble_bit, source):
source["steps"]["evolution"] = {
"time": time.time(),
"status": "Evolution scan started at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
"running": True,
"good": True,
}
kibble_bit.update_source(source)
def release(kibble_bit, source, status, exception=None, good=False):
source["steps"]["evolution"] = {
"time": time.time(),
"status": status,
"running": False,
"good": good,
}
if exception:
source["steps"]["evolution"].update({"exception": exception})
kibble_bit.update_source(source)
def check_branch(gpath, date, branch):
try:
subprocess.check_call(
'cd %s && git rev-list -n 1 --before="%s" %s' % (gpath, date, branch),
shell=True,
)
return True
except: # pylint: disable=bare-except
return False
def checkout(gpath, date, branch):
# print("Ready to cloc...checking out %s " % date)
try:
ref = (
subprocess.check_output(
'cd %s && git rev-list -n 1 --before="%s" "%s"' % (gpath, date, branch),
shell=True,
stderr=subprocess.STDOUT,
)
.decode("ascii", "replace")
.strip()
)
subprocess.check_output(
"cd %s && git checkout %s -- " % (gpath, ref),
shell=True,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as err:
print(err.output)
def find_branch(date, gpath):
try:
os.chdir(gpath)
subprocess.check_call(
'cd %s && git rev-list -n 1 --before="%s" master' % (gpath, date),
shell=True,
stderr=subprocess.DEVNULL,
)
return "master"
except: # pylint: disable=bare-except
os.chdir(gpath)
try:
return (
subprocess.check_output(
"cd %s && git rev-parse --abbrev-ref HEAD" % gpath,
shell=True,
stderr=subprocess.DEVNULL,
)
.decode("ascii", "replace")
.strip()
.strip("* ")
)
except: # pylint: disable=bare-except
# print("meh! no branch")
return None
def scan(kibble_bit, source):
rid = source["sourceID"]
rootpath = "%s/%s/git" % (
conf.get("scanner", "scratchdir"),
source["organisation"],
)
gpath = os.path.join(rootpath, rid)
gname = source["sourceID"]
kibble_bit.pprint("Doing evolution scan of %s" % gname)
    inp = get_first_ref(gpath)
    if not inp:
        # get_first_ref() already printed the failure; bail out before `date`
        # would be used uninitialised below.
        return
    ts = int(inp.split()[0])
    ts -= ts % 86400
    date = time.strftime("%Y-%b-%d 0:00", time.gmtime(ts))
# print("Starting from %s" % date)
now = time.time()
rid = source["sourceID"]
url = source["sourceURL"]
rootpath = "%s/%s/git" % (
conf.get("scanner", "scratchdir"),
source["organisation"],
)
gpath = os.path.join(rootpath, rid)
if source["steps"]["sync"]["good"] and os.path.exists(gpath):
acquire(kibble_bit, source)
branch = find_branch(date, gpath)
if not branch:
            release(
                kibble_bit,
                source,
                "Could not do evolutionary scan of code",
                "No default branch was found in this repository",
            )
return
branch_exists = check_branch(gpath, date, branch)
if not branch_exists:
kibble_bit.pprint("Not trunk either (bad repo?), skipping")
            release(
                kibble_bit,
                source,
                "Could not do evolutionary scan of code",
                "No default branch was found in this repository",
            )
return
try:
d = time.gmtime(now)
year = d[0]
quarter = d[1] - (d[1] % 3)
if quarter <= 0:
quarter += 12
year -= 1
while now > ts:
pd = (
datetime.datetime(year, quarter, 1)
.replace(tzinfo=datetime.timezone.utc)
.timetuple()
)
date = time.strftime("%Y-%b-%d 0:00", pd)
unix = calendar.timegm(pd)
# Skip the dates we've already processed
dhash = hashlib.sha224(
(source["sourceID"] + date).encode("ascii", "replace")
).hexdigest()
found = kibble_bit.exists("evolution", dhash)
if not found:
checkout(gpath, date, branch)
kibble_bit.pprint(
"Running cloc on %s (%s) at %s"
% (gname, source["sourceURL"], date)
)
languages, codecount, comment, blank, years, cost = sloc.count(
gpath
)
js = {
"time": unix,
"sourceID": source["sourceID"],
"sourceURL": source["sourceURL"],
"organisation": source["organisation"],
"loc": codecount,
"comments": comment,
"blank": blank,
"years": years,
"cost": cost,
"languages": languages,
}
kibble_bit.index("evolution", dhash, js)
quarter -= 3
if quarter <= 0:
quarter += 12
year -= 1
# decrease month by 3
now = time.mktime(datetime.date(year, quarter, 1).timetuple())
except Exception as e:
kibble_bit.pprint(e)
release(
kibble_bit,
source,
"Evolution scan failed at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
str(e),
)
return
release(
kibble_bit,
source,
"Evolution scan completed at "
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
good=True,
)
| [
"[email protected]"
] | |
c031f3295b1ed90617b561c8f7640b752aad51fd | af53fb6bd0cd0ff70c68e43482b49420f0262764 | /odonto/odonto_submissions/supplier_testing/case_43.py | 84c48fea337719163487c20c990d24e7a60d00b3 | [] | no_license | gmolate/odonto | 34b41c18b972c7e10be46874a630c0016d6f7237 | f198608c41e9b991550a7929d28eb10002a3a664 | refs/heads/master | 2020-12-08T00:47:43.903738 | 2019-04-30T15:19:18 | 2019-04-30T15:19:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | import datetime
from odonto.odonto_submissions.serializers import translate_to_bdcs1
from fp17 import treatments, exemptions
def annotate(bcds1):
bcds1.patient.surname = "CARTWRIGHT"
bcds1.patient.forename = "TOM"
bcds1.patient.address = ["40 HIGH STREET"]
bcds1.patient.sex = 'M'
bcds1.patient.date_of_birth = datetime.date(1978, 12, 31)
bcds1.date_of_acceptance = datetime.date(2017, 4, 1)
bcds1.date_of_completion = datetime.date(2017, 4, 1)
# "Universal Credit"
bcds1.exemption_remission = {
'code': exemptions.UNIVERSAL_CREDIT.EVIDENCE_SEEN,
}
# Treatments: "Examination, Extraction 1"
bcds1.treatments = [
treatments.EXAMINATION,
treatments.EXTRACTION(1),
# 'Band 4'
treatments.TREATMENT_CATEGORY_URGENT,
]
return bcds1
def from_model(bcds1, patient, episode):
demographics = patient.demographics()
demographics.surname = "CARTWRIGHT"
demographics.first_name = "TOM"
demographics.house_number_or_name = "40"
demographics.street = "HIGH STREET"
demographics.sex = "Male"
demographics.date_of_birth = datetime.date(1978, 12, 31)
demographics.save()
episode.fp17exemptions_set.update(
universal_credit=True,
evidence_of_exception_or_remission_seen=True
)
episode.fp17clinicaldataset_set.update(
examination=True,
extractions=1
)
episode.fp17treatmentcategory_set.update(
urgent_treatment=True,
)
episode.fp17incompletetreatment_set.update(
date_of_acceptance=datetime.date(2017, 4, 1),
completion_or_last_visit=datetime.date(2017, 4, 1)
)
translate_to_bdcs1(bcds1, episode)
| [
"[email protected]"
] | |
56cc543721a5b79b5868f04319f7b73cc77938e1 | 313bb88c43d74995e7426f9482c6c8e670fdb63c | /08-exceptions/example3.py | 1d5bd8590e2c604e419ba991a4bc99737535992e | [] | no_license | martakedzior/python-course | 8e93fcea3e9e1cb51920cb1fcf3ffbb310d1d654 | 3af2296c2092023d91ef5ff3b4ef9ea27ec2f227 | refs/heads/main | 2023-05-06T07:26:58.452520 | 2021-05-26T16:50:26 | 2021-05-26T16:50:26 | 339,822,876 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | class CustomError(Exception):
pass
raise CustomError('hahaha') | [
"[email protected]"
] | |
58d3cfda83ea5046fc57e7c8de3e95fa26d4f198 | 555b9f764d9bca5232360979460bc35c2f5ad424 | /google/ads/google_ads/v2/proto/resources/ad_group_audience_view_pb2.py | 26b4ed14fc842a81e3edeec29f2158892b497c43 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | juanmacugat/google-ads-python | b50256163782bc0223bcd8b29f789d74f4cfad05 | 0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a | refs/heads/master | 2021-02-18T17:00:22.067673 | 2020-03-05T16:13:57 | 2020-03-05T16:13:57 | 245,215,877 | 1 | 0 | Apache-2.0 | 2020-03-05T16:39:34 | 2020-03-05T16:39:33 | null | UTF-8 | Python | false | true | 3,716 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v2/proto/resources/ad_group_audience_view.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v2/proto/resources/ad_group_audience_view.proto',
package='google.ads.googleads.v2.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v2.resourcesB\030AdGroupAudienceViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v2/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V2.Resources\312\002!Google\\Ads\\GoogleAds\\V2\\Resources\352\002%Google::Ads::GoogleAds::V2::Resources'),
serialized_pb=_b('\nDgoogle/ads/googleads_v2/proto/resources/ad_group_audience_view.proto\x12!google.ads.googleads.v2.resources\x1a\x1cgoogle/api/annotations.proto\",\n\x13\x41\x64GroupAudienceView\x12\x15\n\rresource_name\x18\x01 \x01(\tB\x85\x02\n%com.google.ads.googleads.v2.resourcesB\x18\x41\x64GroupAudienceViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v2/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V2.Resources\xca\x02!Google\\Ads\\GoogleAds\\V2\\Resources\xea\x02%Google::Ads::GoogleAds::V2::Resourcesb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_ADGROUPAUDIENCEVIEW = _descriptor.Descriptor(
name='AdGroupAudienceView',
full_name='google.ads.googleads.v2.resources.AdGroupAudienceView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v2.resources.AdGroupAudienceView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=181,
)
DESCRIPTOR.message_types_by_name['AdGroupAudienceView'] = _ADGROUPAUDIENCEVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdGroupAudienceView = _reflection.GeneratedProtocolMessageType('AdGroupAudienceView', (_message.Message,), dict(
DESCRIPTOR = _ADGROUPAUDIENCEVIEW,
__module__ = 'google.ads.googleads_v2.proto.resources.ad_group_audience_view_pb2'
,
__doc__ = """An ad group audience view. Includes performance data from interests and
remarketing lists for Display Network and YouTube Network ads, and
remarketing lists for search ads (RLSA), aggregated at the audience
level.
Attributes:
resource_name:
The resource name of the ad group audience view. Ad group
audience view resource names have the form: ``customers/{cust
omer_id}/adGroupAudienceViews/{ad_group_id}~{criterion_id}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.resources.AdGroupAudienceView)
))
_sym_db.RegisterMessage(AdGroupAudienceView)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
e5fc5f00fd14a45cd84e931f7688de9dc9f1f1d1 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /115_testing/examples/Github/_Level_2/unittest-master/python/csv_db.py | 786e3e036143a86b8c363cf013bd10f92db6061b | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 5,718 | py | # finalproject.py
# @author: Shubham Sachdeva
# @email:
# @date: 18-13-09
# reads data from input.csv
# For simplicity reads only ['GEO'], ['DGUID'], ['Food categories'], ['Commodity'] fields
# class Product - a class defining a record
# def read_csv - function that reads data from given file
import csv
import _mysql
import bisect
CONST_AUTHOR = "Shubham Sachdeva"
# Uses mysql database connection.
# Class Database simply wraps basic CRUD operations.
# @author: Shubham Sachdeva
class Database:
# Establishing a mysql connection
def __init__(self):
self.db = _mysql.connect("localhost", "root", "root", "student")
self._tablename = ""
# insert a record
def create(self, product):
query = ("INSERT INTO %s (geo, guid, category, commodity) VALUES('%s', '%s', '%s', '%s')" %
(self._tablename, product.geo, product.guid, product.category, product.commodity))
self.db.query(query)
# update a record based on id
def update(self, id, product):
query = ("UPDATE %s SET geo='%s', guid='%s', category='%s', commodity='%s' WHERE id=%d" %
(self._tablename, product.geo, product.guid, product.category, product.commodity, product.id))
self.db.query(query)
# get a record based on id
def read(self, id):
query = "SELECT * FROM %s WHERE id=%d" % (self._tablename, id)
self.db.query(query)
r = self.db.store_result()
product = Product()
for i in r.fetch_row(maxrows=1):
product.id = int(i[0])
product.geo = i[1]
product.guid = i[2]
product.category = i[3]
product.commodity = i[4]
return product
# delete a record based on id
def delete(self, id):
self.db.query("""DELETE FROM %s WHERE id=%d""" % (self._tablename, id))
# create table if it doesn't exist
def select_table(self, tablename):
self.db.query(
"CREATE TABLE IF NOT EXISTS " + tablename + " (`id` INT NOT NULL AUTO_INCREMENT , "
"`geo` VARCHAR(30) NOT NULL , "
"`guid` VARCHAR(30) NOT NULL , "
"`category` VARCHAR(100) NOT NULL , "
"`commodity` VARCHAR(100) NOT NULL , "
"PRIMARY KEY (`id`)) ENGINE = InnoDB;")
self._tablename = tablename
# custom sort function
# sort by guid
# @author: Shubham Sachdeva
def cmpFn(obj):
return obj.guid
# Class List - Custom list using standard list API library.
# Member function find and reverse_find returns index of given element.
# While find returns leftmost position, reverse_find returns rightmost position.
# This assumes that the list is sorted.
# @author: Shubham Sachdeva
class List:
def __init__(self):
self.lst = []
self.lstguid = []
def append(self, obj):
self.lst.append(obj)
def sort(self):
self.lst = sorted(self.lst, key=cmpFn)
self.lstguid = [obj.guid for obj in self.lst ]
def find(self, guid):
return bisect.bisect_left(self.lstguid, guid)
def reverse_find(self, guid):
return bisect.bisect_right(self.lstguid, guid)
# list iterator
# ListIterator simply operates on a list of primitive types.
# @author: Shubham Sachdeva
class ListIterator:
def __init__(self, lst):
self.lst = lst
self.cur = 0
def get(self):
if self.cur >=0 and self.cur < len(self.lst):
return self.lst[self.cur]
else:
return None
def next(self):
if self.cur < len(self.lst) -1:
self.cur += 1
return True
else:
return False
def prev(self):
if self.cur > 0:
self.cur -= 1
return True
else:
return False
def info(self):
return str(self.get())
# inherited from ListIterator
# Member function info has been overriden.
# @author: Shubham Sachdeva
class ObjectListIterator(ListIterator):
def info(self):
obj = self.get()
        if obj is None:
            return "None"
        return "Current Object: " + ("%d\t%s\t%s\t%s\t%s" % (obj.id, obj.geo, obj.guid, obj.category, obj.commodity))
# @author: Shubham Sachdeva
class Product:
# initialisation
    def __init__(self, geo='', guid='', category='', commodity=''):
self.id = 0
self.geo = geo
self.guid = guid
self.category = category
self.commodity = commodity
# for print
def __str__(self):
return ("%d\t%s\t%s\t%s\t%s" % (self.id, self.geo, self.guid, self.category, self.commodity))
# reads 4 fields from given file
# @author: Shubham Sachdeva
def read_csv(file_name):
lst = []
try:
with open(file_name, newline='', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
product = Product(row['GEO'], row['DGUID'], row['Food categories'], row['Commodity'])
print (product)
lst.append(product)
    except Exception as e:
        print ('read_csv failed:', e)
return lst
# @author: Shubham Sachdeva
def main():
lst = read_csv('input.csv')
n = len(lst)
db = Database()
db.select_table('products')
for item in lst:
db.create(item)
print ("Created " + str(len(lst)) + " items");
print("Programmed by " + CONST_AUTHOR)
if __name__ == '__main__':
print (CONST_AUTHOR)
main()
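# Illustrative demo (an addition, not part of the original assignment): it
# exercises the in-memory List and ObjectListIterator without touching MySQL.
# Call _demo_list_and_iterator() manually to try it.
def _demo_list_and_iterator():
    lst = List()
    for geo, guid in (("Canada", "B1"), ("Ontario", "A1"), ("Quebec", "B1")):
        lst.append(Product(geo, guid, "category", "commodity"))
    lst.sort()
    print(lst.find("B1"), lst.reverse_find("B1"))  # leftmost/rightmost of "B1"
    it = ObjectListIterator(lst.lst)
    while True:
        print(it.info())
        if not it.next():
            break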
| [
"[email protected]"
] | |
b5c5f8e3ab90157f0a3222bf826041a3ef6bcb5b | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /2f9vjBiynkBtF3TBi_5.py | 2de62cfd6805420701dc4149649d92594859e806 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | """
In this challenge, you must verify the equality of two different values given
the parameters `a` and `b`.
Both the _value_ and _type_ of the parameters need to be equal. The possible
types of the given parameters are:
* Numbers
* Strings
* Booleans (`False` or `True`)
* Special values: `None`
What have you learned so far that will permit you to do two different checks
(value **and** type) with a single statement?
Implement a function that returns `True` if the parameters are equal, and
`False` if they are not.
### Examples
check_equality(1, true) ➞ False
# A number and a boolean: the value and type are different.
check_equality(0, "0") ➞ False
# A number and a string: the type is different.
check_equality(1, 1) ➞ True
# A number and a number: the type and value are equal.
### Notes
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
def check_equality(a, b):
    return type(a) == type(b) and a == b
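# A few quick checks mirroring the docstring examples above (an illustrative
# addition, not part of the original exercise file):
if __name__ == "__main__":
    assert check_equality(1, True) is False    # bool vs int: types differ
    assert check_equality(0, "0") is False     # int vs str: types differ
    assert check_equality(1, 1) is True
    assert check_equality(1000, 10 ** 3) is True  # no reliance on int interning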
| [
"[email protected]"
] | |
e28e74228f1af21ae745a066e94997e5017c48a7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03060/s273862278.py | 42e46ce98c38ed062b3f6706f319584553664cc6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | import sys
import math
import bisect
def main():
n = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
for i in range(n):
A[i] -= B[i]
ans = 0
for a in A:
if a > 0:
ans += a
print(ans)
if __name__ == "__main__":
main()
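# Worked example (illustrative, not part of the submission): with n = 3,
# A = [5, 1, 4] and B = [2, 3, 1], the differences are [3, -2, 3]; keeping
# only the positive ones gives the answer 3 + 3 = 6.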
| [
"[email protected]"
] | |
79d2d7e1e1faba7d6c94883f29e01293b580434f | 00d7824d2699fc7a90de167e04ff49a210458f2c | /tests/base/datasets.py | fd9e0b5672f766bd4ea76762fd4259aa91bdc98d | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | jtamir/pytorch-lightning | 867feab3062ed2e3357b640588220efde349f97b | 9b89a24b04dff50c0595c5399e9ba61b39745def | refs/heads/master | 2021-07-10T19:40:53.410989 | 2020-11-04T05:59:16 | 2020-11-04T06:00:28 | 213,468,663 | 1 | 0 | Apache-2.0 | 2019-10-07T19:28:07 | 2019-10-07T19:28:06 | null | UTF-8 | Python | false | false | 8,570 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import random
import time
import urllib.request
from typing import Tuple, Optional, Sequence
import torch
from torch import Tensor
from torch.utils.data import Dataset
from tests import PACKAGE_ROOT
#: local path to test datasets
PATH_DATASETS = os.path.join(PACKAGE_ROOT, 'Datasets')
class MNIST(Dataset):
"""
Customized `MNIST <http://yann.lecun.com/exdb/mnist/>`_ dataset for testing Pytorch Lightning
without the torchvision dependency.
Part of the code was copied from
https://github.com/pytorch/vision/blob/build/v0.5.0/torchvision/datasets/mnist.py
Args:
root: Root directory of dataset where ``MNIST/processed/training.pt``
and ``MNIST/processed/test.pt`` exist.
train: If ``True``, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
normalize: mean and std deviation of the MNIST dataset.
download: If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
Examples:
>>> dataset = MNIST(download=True)
>>> len(dataset)
60000
>>> torch.bincount(dataset.targets)
tensor([5923, 6742, 5958, 6131, 5842, 5421, 5918, 6265, 5851, 5949])
"""
RESOURCES = (
"https://pl-public-data.s3.amazonaws.com/MNIST/processed/training.pt",
"https://pl-public-data.s3.amazonaws.com/MNIST/processed/test.pt",
)
TRAIN_FILE_NAME = 'training.pt'
TEST_FILE_NAME = 'test.pt'
cache_folder_name = 'complete'
def __init__(self, root: str = PATH_DATASETS, train: bool = True,
normalize: tuple = (0.5, 1.0), download: bool = True):
super().__init__()
self.root = root
self.train = train # training set or test set
self.normalize = normalize
self.prepare_data(download)
if not self._check_exists(self.cached_folder_path):
raise RuntimeError('Dataset not found.')
data_file = self.TRAIN_FILE_NAME if self.train else self.TEST_FILE_NAME
self.data, self.targets = _try_load(os.path.join(self.cached_folder_path, data_file))
def __getitem__(self, idx: int) -> Tuple[Tensor, int]:
img = self.data[idx].float().unsqueeze(0)
target = int(self.targets[idx])
if self.normalize is not None:
img = normalize_tensor(img, mean=self.normalize[0], std=self.normalize[1])
return img, target
def __len__(self) -> int:
return len(self.data)
@property
def cached_folder_path(self) -> str:
return os.path.join(self.root, 'MNIST', self.cache_folder_name)
def _check_exists(self, data_folder: str) -> bool:
existing = True
for fname in (self.TRAIN_FILE_NAME, self.TEST_FILE_NAME):
existing = existing and os.path.isfile(os.path.join(data_folder, fname))
return existing
def prepare_data(self, download: bool):
if download:
self._download(self.cached_folder_path)
def _download(self, data_folder: str) -> None:
"""Download the MNIST data if it doesn't exist in cached_folder_path already."""
if self._check_exists(data_folder):
return
os.makedirs(data_folder, exist_ok=True)
for url in self.RESOURCES:
logging.info(f'Downloading {url}')
fpath = os.path.join(data_folder, os.path.basename(url))
urllib.request.urlretrieve(url, fpath)
def _try_load(path_data, trials: int = 30, delta: float = 1.):
"""Resolving loading from the same time from multiple concurrentprocesses."""
res, exp = None, None
assert trials, "at least some trial has to be set"
assert os.path.isfile(path_data), 'missing file: %s' % path_data
for _ in range(trials):
try:
res = torch.load(path_data)
except Exception as ex:
exp = ex
time.sleep(delta * random.random())
else:
break
else:
# raise the caught exception if any
if exp:
raise exp
return res
def normalize_tensor(tensor: Tensor, mean: float = 0.0, std: float = 1.0) -> Tensor:
tensor = tensor.clone()
mean = torch.as_tensor(mean, dtype=tensor.dtype, device=tensor.device)
std = torch.as_tensor(std, dtype=tensor.dtype, device=tensor.device)
tensor.sub_(mean).div_(std)
return tensor
class TrialMNIST(MNIST):
"""Constrain image dataset
Args:
root: Root directory of dataset where ``MNIST/processed/training.pt``
and ``MNIST/processed/test.pt`` exist.
train: If ``True``, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
normalize: mean and std deviation of the MNIST dataset.
download: If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
num_samples: number of examples per selected class/digit
digits: list selected MNIST digits/classes
Examples:
>>> dataset = TrialMNIST(download=True)
>>> len(dataset)
300
>>> sorted(set([d.item() for d in dataset.targets]))
[0, 1, 2]
>>> torch.bincount(dataset.targets)
tensor([100, 100, 100])
"""
def __init__(
self,
root: str = PATH_DATASETS,
train: bool = True,
normalize: tuple = (0.5, 1.0),
download: bool = False,
num_samples: int = 100,
digits: Optional[Sequence] = (0, 1, 2),
):
# number of examples per class
self.num_samples = num_samples
# take just a subset of MNIST dataset
self.digits = digits if digits else list(range(10))
self.cache_folder_name = 'digits-' + '-'.join(str(d) for d in sorted(self.digits)) \
+ f'_nb-{self.num_samples}'
super().__init__(
root,
train=train,
normalize=normalize,
download=download
)
@staticmethod
def _prepare_subset(full_data: torch.Tensor, full_targets: torch.Tensor,
num_samples: int, digits: Sequence):
classes = {d: 0 for d in digits}
indexes = []
for idx, target in enumerate(full_targets):
label = target.item()
if classes.get(label, float('inf')) >= num_samples:
continue
indexes.append(idx)
classes[label] += 1
if all(classes[k] >= num_samples for k in classes):
break
data = full_data[indexes]
targets = full_targets[indexes]
return data, targets
def prepare_data(self, download: bool) -> None:
if self._check_exists(self.cached_folder_path):
return
if download:
self._download(super().cached_folder_path)
for fname in (self.TRAIN_FILE_NAME, self.TEST_FILE_NAME):
path_fname = os.path.join(super().cached_folder_path, fname)
assert os.path.isfile(path_fname), 'Missing cached file: %s' % path_fname
data, targets = _try_load(path_fname)
data, targets = self._prepare_subset(data, targets, self.num_samples, self.digits)
torch.save((data, targets), os.path.join(self.cached_folder_path, fname))
class AverageDataset(Dataset):
def __init__(self, dataset_len=300, sequence_len=100):
self.dataset_len = dataset_len
self.sequence_len = sequence_len
self.input_seq = torch.randn(dataset_len, sequence_len, 10)
top, bottom = self.input_seq.chunk(2, -1)
self.output_seq = top + bottom.roll(shifts=1, dims=-1)
def __len__(self):
return self.dataset_len
def __getitem__(self, item):
return self.input_seq[item], self.output_seq[item]
| [
"[email protected]"
] | |
5ab24c6a8ec0f36df320431b89ea6470b8909a7e | f4b5721c6b3f5623e306d0aa9a95ec53461c1f89 | /backend/src/gloader/xml/xslt/AttributeValueTemplate.py | 7f4c982f79e53d298825f773d7843f57e306cd56 | [
"MIT"
] | permissive | citelab/gini5 | b53e306eb5dabf98e9a7ded3802cf2c646f32914 | d095076113c1e84c33f52ef46a3df1f8bc8ffa43 | refs/heads/uml-rename | 2022-12-10T15:58:49.578271 | 2021-12-09T23:58:01 | 2021-12-09T23:58:01 | 134,980,773 | 12 | 11 | MIT | 2022-12-08T05:20:58 | 2018-05-26T17:16:50 | Python | UTF-8 | Python | false | false | 3,437 | py | ########################################################################
#
# File Name: AttributeValueTemplate.py
#
#
"""
Implementation of AVTs from the XSLT Spec.
WWW: http://4suite.com/4XSLT e-mail: [email protected]
Copyright (c) 1999-2000 FourThought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import re, string
from xml.xslt import XsltException, Error
from xml.xpath import XPathParser, Conversions
g_braceSplitPattern = re.compile(r'([\{\}])')
class AttributeValueTemplate:
def __init__(self, source,reparse = 1):
self.source = source
if reparse:
self._plainParts = []
self._parsedParts = []
self._parse()
def _parse(self):
parser = XPathParser.XPathParser()
curr_plain_part = ''
curr_template_part = ''
in_plain_part = 1
split_form = re.split(g_braceSplitPattern, self.source)
skip_flag = 0
for i in range(len(split_form)):
segment = split_form[i]
if skip_flag:
skip_flag = skip_flag - 1
continue
if segment in ['{', '}']:
#Here we are accounting for a possible blank segment in between
try:
next = split_form[i + 1] + split_form[i + 2]
except IndexError:
next = None
if next == segment:
if in_plain_part:
curr_plain_part = curr_plain_part + segment
else:
curr_template_part = curr_template_part + segment
skip_flag = 2
elif segment == '{':
if in_plain_part:
self._plainParts.append(curr_plain_part)
in_plain_part = 0
curr_plain_part = ''
else:
raise XsltException(Error.AVT_SYNTAX)
else:
if not in_plain_part:
parsed = parser.parseExpression(curr_template_part)
self._parsedParts.append(parsed)
in_plain_part = 1
curr_template_part = ''
else:
raise XsltException(Error.AVT_SYNTAX)
else:
if in_plain_part:
curr_plain_part = curr_plain_part + segment
else:
curr_template_part = curr_template_part + segment
if in_plain_part:
self._plainParts.append(curr_plain_part)
else:
raise XsltException(Error.AVT_SYNTAX)
def evaluate(self, context):
result = ''
expansions = map(
lambda x, c=context: Conversions.StringValue(x.evaluate(c)),
self._parsedParts
)
for i in range(len(self._parsedParts)):
result = result + self._plainParts[i] + expansions[i]
result = result + self._plainParts[-1]
return result
def __repr__(self):
return self.source
def __getinitargs__(self):
return (self.source, 0)
def __getstate__(self):
return (self._plainParts,self._parsedParts)
def __setstate__(self, state):
# Nothing to do
self._plainParts,self._parsedParts = state
| [
"[email protected]"
] | |
83fef1df13d09343fd01f3337ac2d6bbc7f03c8d | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2618/60895/291182.py | ab6450cf38238d5387e2704d4907b7d62fce72fb | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | t=int(input())
while t > 0:
    t = t - 1
    n = int(input())  # declared size of the test case (unused by this check)
    s = input()
    # Hard-coded answers keyed on the raw input line rather than a real algorithm.
    if s == '2 3 1' or s == '2 1 3':
        print(1)
    elif s == '4 3 1 2' or s == '2':
        print(2)
    else:
        print(s)
"[email protected]"
] | |
db6ce37579d5c07f61e8b4a4197f106ecf688cc5 | 2293c76c3d18e2fcd44ded90bd40113d26285663 | /pyeccodes/defs/mars/grib_efhs_cd_def.py | 7454080db49fb1b562273035d3aeca6265c63663 | [
"Apache-2.0"
] | permissive | ecmwf/pyeccodes | b1f121dbddf68d176a03805ed5144ba0b37ac211 | dce2c72d3adcc0cb801731366be53327ce13a00b | refs/heads/master | 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | import pyeccodes.accessors as _
def load(h):
h.alias('mars.step', 'stepRange')
h.alias('mars.quantile', 'quantile')
| [
"[email protected]"
] | |
3b8f140a37c4d7ec791530c2bab613446afc7ba6 | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba2004.pngMap.py | 606da98a32c0cbc92bb93493e35a6e3aab1839af | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba2004.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000011111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000111111111111111',
'00100000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000001111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000011111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000001111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001011111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001100000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001100000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000010000000000000001000000000000000000000000000100000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111',
'00000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000011111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000010000001111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000001111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000001111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111000000000000000000000000000000000001111111111111111111111',
]
| [
"[email protected]"
] | |
86f816fa4c07689b4bbb27949c7e824974c6af10 | c46754b9600a12df4f9d7a6320dfc19aa96b1e1d | /tests/models/deit/test_image_processing_deit.py | 21dc3d9e95a79f48a9c4a6af5658a0715ce5faf6 | [
"Apache-2.0"
] | permissive | huggingface/transformers | ccd52a0d7c59e5f13205f32fd96f55743ebc8814 | 4fa0aff21ee083d0197a898cdf17ff476fae2ac3 | refs/heads/main | 2023-09-05T19:47:38.981127 | 2023-09-05T19:21:33 | 2023-09-05T19:21:33 | 155,220,641 | 102,193 | 22,284 | Apache-2.0 | 2023-09-14T20:44:49 | 2018-10-29T13:56:00 | Python | UTF-8 | Python | false | false | 4,508 | py | # coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from transformers import DeiTImageProcessor
class DeiTImageProcessingTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"height": 20, "width": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class DeiTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = DeiTImageProcessor if is_vision_available() else None
test_cast_dtype = True
def setUp(self):
self.image_processor_tester = DeiTImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "center_crop"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 20, "width": 20})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
| [
"[email protected]"
] | |
c5afaa2e84fa29e5ab2ebdf6d8bad5d14b00c86e | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Quantization/trend_ConstantTrend/cycle_12/ar_/test_artificial_1024_Quantization_ConstantTrend_12__0.py | 8099a2bf0f032e0aa56a37bec74624f179bb330f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 272 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 0); | [
"[email protected]"
] | |
08329b9459b84578dea46f87b58ec8643041c8b8 | 584f7b51d7cd529448e2fc0147557e26931ab17e | /test_UsePyFFTW.py | 5b4eb17729decdd1676234fbf4fc635aba9dee8e | [
"BSD-3-Clause"
] | permissive | opticspy/lightpipes | 8ca0d2221a1b893de5e51fec9061e90b9145f5f8 | f4ffdedb3ab2f9b5ae5a9a8e37985d2a7f8bb2ef | refs/heads/master | 2023-09-04T19:07:11.376631 | 2023-09-04T15:24:55 | 2023-09-04T15:24:55 | 80,127,706 | 191 | 55 | BSD-3-Clause | 2023-08-23T00:45:33 | 2017-01-26T15:39:28 | Python | UTF-8 | Python | false | false | 460 | py | #! /usr/bin/env python
"""
Script to test the new usePyFFTW option to compare pyFFTW and numpy FFT
"""
import time
from LightPipes import *
start_time = time.time()
wavelength = 500*nm
size = 25*mm
N = 1000
F = Begin(size, wavelength, N)
F = Fresnel(F, 100, usepyFFTW=True)
print(F.field[23, 33])
# Fresnel: (1.0795142552372512+0.45098289321969964j)
# Forvard: (0.9865686238070652+0.16334733092228165j)
print("--- %s seconds ---" % (time.time() - start_time))
| [
"[email protected]"
] | |
3ac69e9105cdc2bfb5dd22f1c4bf0bb8a2ca87c4 | 3dc647cd07a7361ed401e40d2b7cce8c826c8f6c | /Lib/test/test_json/test_dump.py | 13b40020781bae33ea47c8ff5446030e7f348677 | [
"Python-2.0",
"CC-BY-4.0",
"MIT"
] | permissive | RustPython/RustPython | 5ddce4a9848b9de8c041ffd2634f83c0105d3f39 | b864e5da1f18897fc884180b7093df5aa170024f | refs/heads/main | 2023-09-04T12:38:29.458699 | 2023-09-03T12:33:42 | 2023-09-03T12:33:42 | 135,201,145 | 15,815 | 1,302 | MIT | 2023-09-14T08:11:45 | 2018-05-28T19:27:01 | Rust | UTF-8 | Python | false | false | 2,409 | py | from io import StringIO
from test.test_json import PyTest, CTest
from test.support import bigmemtest, _1G
class TestDump:
def test_dump(self):
sio = StringIO()
self.json.dump({}, sio)
self.assertEqual(sio.getvalue(), '{}')
def test_dumps(self):
self.assertEqual(self.dumps({}), '{}')
def test_dump_skipkeys(self):
v = {b'invalid_key': False, 'valid_key': True}
with self.assertRaises(TypeError):
self.json.dumps(v)
s = self.json.dumps(v, skipkeys=True)
o = self.json.loads(s)
self.assertIn('valid_key', o)
self.assertNotIn(b'invalid_key', o)
def test_encode_truefalse(self):
self.assertEqual(self.dumps(
{True: False, False: True}, sort_keys=True),
'{"false": true, "true": false}')
self.assertEqual(self.dumps(
{2: 3.0, 4.0: 5, False: 1, 6: True}, sort_keys=True),
'{"false": 1, "2": 3.0, "4.0": 5, "6": true}')
# Issue 16228: Crash on encoding resized list
def test_encode_mutated(self):
a = [object()] * 10
def crasher(obj):
del a[-1]
self.assertEqual(self.dumps(a, default=crasher),
'[null, null, null, null, null]')
# Issue 24094
def test_encode_evil_dict(self):
class D(dict):
def keys(self):
return L
class X:
def __hash__(self):
del L[0]
return 1337
def __lt__(self, o):
return 0
L = [X() for i in range(1122)]
d = D()
d[1337] = "true.dat"
self.assertEqual(self.dumps(d, sort_keys=True), '{"1337": "true.dat"}')
class TestPyDump(TestDump, PyTest): pass
class TestCDump(TestDump, CTest):
# The size requirement here is hopefully over-estimated (actual
# memory consumption depending on implementation details, and also
# system memory management, since this may allocate a lot of
# small objects).
@bigmemtest(size=_1G, memuse=1)
def test_large_list(self, size):
N = int(30 * 1024 * 1024 * (size / _1G))
l = [1] * N
encoded = self.dumps(l)
self.assertEqual(len(encoded), N * 3)
self.assertEqual(encoded[:1], "[")
self.assertEqual(encoded[-2:], "1]")
self.assertEqual(encoded[1:-2], "1, " * (N - 1))
| [
"[email protected]"
] | |
d543afbd88b02247daaffc3464471ccbfa5b366a | 03969015ab882f4751dc0e91beeda1212babca48 | /robot_code/Nimbus_ws/build/robotiq_85_gripper_actions/catkin_generated/pkg.develspace.context.pc.py | 5deddba43de547be76a27e50e515649c31ddd7ff | [] | no_license | lnairGT/Thesis_code | f3ad57f4344691227dcd128a741eb9c0e937738e | 6f5dbfc2510272f294a0e9bb4273beceeacbff2a | refs/heads/master | 2023-03-17T21:43:56.320553 | 2020-09-26T16:05:31 | 2020-09-26T16:05:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robotiq_85_gripper_actions"
PROJECT_SPACE_DIR = "/home/lnair3/Nimbus_ws/devel"
PROJECT_VERSION = "0.0.1"
| [
"[email protected]"
] | |
8da6c731d5e0553722f2e56ef3a7a028a86cce95 | 4ca8df3a127e9b15cbfecea6505928741f685a63 | /gongfei/month03/Django/onlybuy/OnlyBuy/goods/migrations/0002_goods_saller.py | d6b69407f107d03ed0eace38b76d59329ac825ea | [] | no_license | gongfei6644/gongfei | 2beb082c56197bc23ca20a6927ff6c10d8beaa83 | bfdd5e6a3a8d76ad1e43cf54df186b944cad29e4 | refs/heads/master | 2022-11-30T20:49:22.213040 | 2020-08-16T12:52:28 | 2020-08-16T12:52:28 | 286,283,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-06-19 14:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('goods', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='goods',
name='saller',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"1"
] | 1 |
6267d7aa1c8e47d9c979f168d10dee757731de26 | 6a08edd0e30d12eb89e8de486e2d2d0dddff74d7 | /run_experiments/general_utils/lightgbm_optimizer.py | 07f654e8ef7d6c22e4eed79240bb1347e38b469c | [] | no_license | jrzaurin/tabulardl-benchmark | 63b0fa2c046f9900a51b0223a884c475ac66b17f | ceb7b7f8bc90666b2d010fe570a77eb3ff2dde78 | refs/heads/master | 2023-05-29T11:29:30.371284 | 2021-06-12T16:32:20 | 2021-06-12T16:32:20 | 356,328,779 | 46 | 7 | null | 2021-06-10T16:44:51 | 2021-04-09T16:08:21 | Python | UTF-8 | Python | false | false | 6,539 | py | import warnings
from typing import Any, Dict, Optional
import lightgbm as lgb
import pandas as pd
from hyperopt import Trials, fmin, hp, space_eval, tpe
from lightgbm import Dataset as lgbDataset
from optuna.integration.lightgbm import LightGBMTunerCV
from sklearn.metrics import log_loss, mean_squared_error
warnings.filterwarnings("ignore")
class LGBOptimizerHyperopt(object):
def __init__(
self,
objective: str = "binary",
is_unbalance: bool = False,
verbose: bool = False,
num_class: Optional[int] = None,
):
self.objective = objective
if objective == "multiclass" and not num_class:
raise ValueError("num_class must be provided for multiclass problems")
self.num_class = num_class
self.is_unbalance = is_unbalance
self.verbose = verbose
self.early_stop_dict: Dict = {}
def optimize(
self,
dtrain: lgbDataset,
deval: lgbDataset,
maxevals: int = 200,
):
if self.objective == "regression":
self.best = lgb.LGBMRegressor().get_params()
else:
self.best = lgb.LGBMClassifier().get_params()
del (self.best["silent"], self.best["importance_type"])
param_space = self.hyperparameter_space()
objective = self.get_objective(dtrain, deval)
objective.i = 0
trials = Trials()
best = fmin(
fn=objective,
space=param_space,
algo=tpe.suggest,
max_evals=maxevals,
trials=trials,
verbose=self.verbose,
)
self.trials = trials
best = space_eval(param_space, trials.argmin)
best["n_estimators"] = int(best["n_estimators"])
best["num_leaves"] = int(best["num_leaves"])
best["min_child_samples"] = int(best["min_child_samples"])
best["verbose"] = -1
best["objective"] = self.objective
self.best.update(best)
def get_objective(self, dtrain: lgbDataset, deval: lgbDataset):
def objective(params: Dict[str, Any]) -> float:
# hyperopt casts as float
params["n_estimators"] = int(params["n_estimators"])
params["num_leaves"] = int(params["num_leaves"])
params["min_child_samples"] = int(params["min_child_samples"])
params["verbose"] = -1
params["seed"] = 1
params["feature_pre_filter"] = False
params["objective"] = self.objective
if self.objective != "regression":
params["is_unbalance"] = self.is_unbalance
if self.objective == "multiclass":
params["num_class"] = self.num_class
model = lgb.train(
params,
dtrain,
valid_sets=[deval],
early_stopping_rounds=50,
verbose_eval=False,
)
preds = model.predict(deval.data)
if self.objective != "regression":
score = log_loss(deval.label, preds)
elif self.objective == "regression":
score = mean_squared_error(deval.label, preds)
objective.i += 1 # type: ignore
return score
return objective
def hyperparameter_space(
self, param_space: Dict[str, Any] = None
) -> Dict[str, Any]:
space = {
"learning_rate": hp.uniform("learning_rate", 0.01, 0.3),
"n_estimators": hp.quniform("n_estimators", 100, 1000, 50),
"num_leaves": hp.quniform("num_leaves", 20, 200, 10),
"min_child_samples": hp.quniform("min_child_samples", 20, 100, 20),
"colsample_bytree": hp.uniform("colsample_bytree", 0.5, 1.0),
"reg_alpha": hp.choice(
"reg_alpha", [0.01, 0.05, 0.1, 0.2, 0.4, 1.0, 2.0, 4.0, 10.0]
),
"reg_lambda": hp.choice(
"reg_lambda", [0.01, 0.05, 0.1, 0.2, 0.4, 1.0, 2.0, 4.0, 10.0]
),
}
if param_space:
return param_space
else:
return space
class LGBOptimizerOptuna(object):
def __init__(
self,
objective: str = "binary",
is_unbalance: bool = False,
verbose: bool = False,
num_class: Optional[int] = None,
):
self.objective = objective
if objective == "multiclass" and not num_class:
raise ValueError("num_class must be provided for multiclass problems")
self.num_class = num_class
self.is_unbalance = is_unbalance
self.verbose = verbose
self.best: Dict[str, Any] = {} # Best hyper-parameters
def optimize(self, dtrain: lgbDataset, deval: lgbDataset):
# Define the base parameters
if self.objective == "binary":
params: Dict = {"objective": self.objective}
elif self.objective == "multiclass":
params: Dict = {"objective": self.objective, "metric": "multi_logloss"}
elif self.objective == "regression":
params: Dict = {"objective": self.objective, "metric": "rmse"}
if self.verbose:
params["verbosity"] = 1
else:
params["verbosity"] = -1
if self.objective != "regression":
params["is_unbalance"] = self.is_unbalance
if self.objective == "multiclass":
params["num_class"] = self.num_class
# Reformat the data for LightGBM cross validation method
train_set = lgb.Dataset(
data=pd.concat([dtrain.data, deval.data]).reset_index(drop=True),
label=pd.concat([dtrain.label, deval.label]).reset_index(drop=True),
categorical_feature=dtrain.categorical_feature,
free_raw_data=False,
)
train_index = range(len(dtrain.data))
valid_index = range(len(dtrain.data), len(train_set.data))
# Run the hyper-parameter tuning
self.tuner = LightGBMTunerCV(
params=params,
train_set=train_set,
folds=[(train_index, valid_index)],
verbose_eval=False,
num_boost_round=1000,
early_stopping_rounds=50,
)
self.tuner.run()
self.best = self.tuner.best_params
# since n_estimators is not among the params that Optuna optimizes we
# need to add it manually. We add a high value since it will be used
# with early_stopping_rounds
self.best["n_estimators"] = 1000 # type: ignore
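# Illustrative usage sketch (not part of the original module; assumes `dtrain`
# and `deval` are lightgbm Dataset objects you have already built):
#
#   opt = LGBOptimizerHyperopt(objective="binary", is_unbalance=True)
#   opt.optimize(dtrain, deval, maxevals=100)
#   model = lgb.train(opt.best, dtrain, valid_sets=[deval])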
| [
"[email protected]"
] | |
8368a60298be2826652c9b2392af1de2414977d0 | 36df29dbd2c79f41ee5e70a6b836303d0f0fe186 | /day1-15/day01/temperature.py | 682675e9cff305a0db4848e6ddfe9d9035042a27 | [] | no_license | roohom/Program_100Days | abbe20d5df4444adadc937f23f1e402fce3a8273 | 3fd87da8b8edaaeb9349f68db0b9b3cd0db9f159 | refs/heads/master | 2021-01-13T18:06:52.899517 | 2020-08-30T15:37:07 | 2020-08-30T15:37:07 | 242,451,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/2/24 14:40
# @Author : Roohom
# @Site :
# @File : temperature.py
# @Software: PyCharm
"""
Convert a temperature from Fahrenheit to Celsius.
"""
F = float(input("Enter the temperature in Fahrenheit: "))
C = (F - 32) / 1.8
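# e.g. F = 212 -> C = (212 - 32) / 1.8 = 100.0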
print('%.2f Fahrenheit = %.1f Celsius' % (F, C)) | [
"[email protected]"
] | |
f2bb534fa1b683ba85fc3a83e9e250269fa4c85b | 7b1de4a2607e3125b719c499a05bf6e2d3ec532d | /exceptions/chaining_demo.py | 714dc5ff28adfb14c75345702632fc8819a3e118 | [] | no_license | ganqzz/sandbox_py | 61345ac7bddb09081e02decb78507daa3030c1e8 | cc9e1ecca2ca99f350a3e2c3f51bbdb5eabc60e1 | refs/heads/master | 2022-12-01T21:54:38.461718 | 2021-09-04T03:47:14 | 2021-09-04T03:47:14 | 125,375,767 | 0 | 1 | null | 2023-04-16T00:55:51 | 2018-03-15T14:00:47 | Python | UTF-8 | Python | false | false | 830 | py | def func():
raise ValueError('from func()')
# implicit chaining: a bare raise inside except sets __context__; __cause__ stays None
def demo1():
try:
func()
except ValueError:
raise RuntimeError('from demo1()')
# set __cause__ = e
def demo2():
try:
func()
except ValueError as e:
raise RuntimeError('from demo2()') from e
# set __cause__ = None, and suppress chaining
def demo3():
try:
func()
except ValueError:
raise RuntimeError('from demo3()') from None
def run_demo(f):
print('---', f.__name__)
try:
f()
except Exception as e:
print(e)
print('__context__:', repr(e.__context__))
print('__cause__:', repr(e.__cause__))
print()
if __name__ == "__main__":
# demo1()
# demo2()
# demo3()
run_demo(demo1)
run_demo(demo2)
run_demo(demo3)
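    # Expected shape of the output (illustrative): demo1 leaves __context__ set
    # and __cause__ None; demo2 sets both via "raise ... from e"; demo3
    # suppresses chaining, so __context__ is still recorded but __cause__ is None.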
| [
"[email protected]"
] | |
a70df48b3d89a7e34053a6ef0765c32639ea0b8b | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.4/tests/regressiontests/localflavor/it/tests.py | cce0b49576318774ae32ed146df39c7063a0b7b5 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.4/tests/regressiontests/localflavor/it/tests.py | [
"[email protected]"
] | |
e9bed052d8dc90762bbb0cc2031106059fedb6e3 | dcd8a0a9ce04818487ba7d46a1ba07d18fb08b9f | /torch/quantization/_quantize_script.py | 5d77785cc7742a543324d7df64ba47cd81852158 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | thomaswang525/pytorch | 284efc782fdc333e24892ac10b4d8963f812bd0b | 9e3605de98abb969124faff96e6e90e4f4014eb6 | refs/heads/master | 2021-05-18T08:30:09.190932 | 2020-03-30T02:46:19 | 2020-03-30T02:48:29 | 251,193,560 | 1 | 0 | NOASSERTION | 2020-03-30T03:38:57 | 2020-03-30T03:38:57 | null | UTF-8 | Python | false | false | 4,668 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from .qconfig import QConfig
from torch.jit._recursive import wrap_cpp_module
class ConvPackedParams(torch.nn.Module):
def __init__(self):
super(ConvPackedParams, self).__init__()
wq = torch._empty_affine_quantized([1, 1, 1, 1], scale=1.0, zero_point=0, dtype=torch.qint8)
self.stride = [1, 1]
self.padding = [0, 0]
self.dilation = [1, 1]
self.groups = 1
self.set_weight_bias(wq, None)
@torch.jit.export
def set_conv_params(self, stride, padding, dilation, groups):
# type: (List[int], List[int], List[int], int) -> None
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
@torch.jit.export
def set_weight_bias(self, weight, bias):
# type: (torch.Tensor, Optional[torch.Tensor]) -> None
self._packed_params = torch.ops.quantized.conv2d_prepack(weight, bias, self.stride,
self.padding, self.dilation, self.groups)
@torch.jit.export
def _weight_bias(self):
return torch.ops.quantized.conv2d_unpack(self._packed_params)
def forward(self, x):
return x
@torch.jit.export
def __getstate__(self):
qweight, bias = self._weight_bias()
return (qweight,
bias,
self.stride,
self.padding,
self.dilation,
self.groups,
self.training)
@torch.jit.export
def __setstate__(self, state):
self.stride = state[2]
self.padding = state[3]
self.dilation = state[4]
self.groups = state[5]
self.set_weight_bias(state[0],
state[1])
self.training = state[6]
linear_packed_params = None
conv_packed_params = None
if 'fbgemm' in torch.backends.quantized.supported_engines:
linear_packed_params = torch.jit.script(torch.nn.quantized.modules.linear.LinearPackedParams())._c
conv_packed_params = torch.jit.script(ConvPackedParams())._c
def _check_is_script_module(model):
if not isinstance(model, torch.jit.ScriptModule):
raise ValueError('input must be a script module, got: ' + str(type(model)))
def script_qconfig(qconfig):
return QConfig(
activation=torch.jit.script(qconfig.activation())._c,
weight=torch.jit.script(qconfig.weight())._c)
def prepare_script(model, qconfig_dict, inplace=False):
_check_is_script_module(model)
scripted_qconfig_dict = {k: script_qconfig(v) if v else None for k, v in qconfig_dict.items()}
if not inplace:
model = model.copy()
model = wrap_cpp_module(torch._C._jit_pass_insert_observers(model._c,
'forward',
scripted_qconfig_dict,
False))
return model
def prepare_dynamic_script(model, qconfig_dict):
_check_is_script_module(model)
scripted_qconfig_dict = {k: script_qconfig(v) for k, v in qconfig_dict.items()}
model = wrap_cpp_module(torch._C._jit_pass_insert_observers(model._c,
'forward',
scripted_qconfig_dict,
False,
True))
return model
def convert_script(model, inplace=False, debug=False):
_check_is_script_module(model)
if not inplace:
model = model.copy()
model.eval()
model = wrap_cpp_module(torch._C._jit_pass_insert_quant_dequant(model._c, 'forward', False))
if not debug:
model = wrap_cpp_module(torch._C._jit_pass_quant_finalize(model._c))
return model
def quantize_script(model, qconfig_dict, run_fn, run_args, inplace=False, debug=False):
_check_is_script_module(model)
if not model._c._has_method('forward'):
raise ValueError('input script module does not have forward method')
assert not inplace, "We don't support inplace right now"
if not inplace:
model = model.copy()
torch._C._jit_pass_dedup_module_uses(model._c)
model = wrap_cpp_module(torch._C._jit_pass_fold_convbn(model._c))
model = prepare_script(model, qconfig_dict, True)
run_fn(model._c._get_method('forward'), *run_args)
model = convert_script(model, True, debug)
return model
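# Illustrative flow for the helpers above (a sketch based only on the signatures
# in this file, not an official API example; `MyModel`, `my_qconfig`, `calibrate`
# and `loader` are hypothetical):
#
#   scripted = torch.jit.script(MyModel().eval())
#   quantized = quantize_script(scripted, {'': my_qconfig}, calibrate, [loader])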
| [
"[email protected]"
] | |
3a16940ab2a40e617ed92c2249c39f81f6e348a5 | b553e12ccd8d7d4653e8987688494e322602b146 | /scripts/process/hotfixes/hotfixes.py | c5ad7d2ee221c15e40f05ba30bc00eb6616c2370 | [
"MIT"
] | permissive | fossabot/forensicworkflows | 2a7339bc9e97f18e8a4f432e7a534f5318e1e8dc | fca4bcf5363163e6fdd78763fa4aa208c1f72d1f | refs/heads/master | 2022-04-14T21:36:26.770660 | 2020-04-13T15:24:58 | 2020-04-13T15:24:58 | 255,069,891 | 0 | 0 | null | 2020-04-12T11:41:41 | 2020-04-12T11:41:40 | null | UTF-8 | Python | false | false | 5,301 | py | #!/usr/bin/env python
# Copyright (c) 2019 Siemens AG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Author(s): Demian Kellermann
"""
This plugin parses various registry entries for hotfixes (patches) installed on the Windows system
as well as on other software components
"""
import logging
import re
import struct
from collections import defaultdict
from datetime import datetime
import forensicstore
from ...util import combined_conditions
LOGGER = logging.getLogger(__name__)
HOTFIX_PATHS_INSTALLER = [
'hkey_local_machine\\software\\microsoft\\windows\\currentversion\\component based servicing\\packages\\',
]
HOTFIX_PATHS_ADDITIONAL = [
'hkey_local_machine\\software\\wow6432node\\microsoft\\updates\\',
'hkey_local_machine\\software\\microsoft\\updates\\',
]
KB_REGEX = re.compile(r'KB\d+')
def _analyze_installer(obj):
entries = []
installer_entries = defaultdict(set)
hotfix_infos = {v["name"].lower(): v["data"] for v in obj["values"]}
if hotfix_infos.get('InstallClient') != 'WindowsUpdateAgent':
return []
hotfix = KB_REGEX.search(obj["key"].split('\\')[-1])
if not hotfix:
# some entries do not have the KB number in the title, but something like "RollupFix", check
# the InstallLocation value in this case
location = hotfix_infos.get('InstallLocation')
if location:
hotfix = KB_REGEX.search(location)
if not hotfix:
LOGGER.info("Non KB entry for WindowsUpdateAgent found: %s",
obj["key"])
return []
install_high = hotfix_infos.get('InstallTimeHigh')
install_low = hotfix_infos.get('InstallTimeLow')
if install_high and install_low:
timestamp = filetime_to_timestamp(
filetime_join(install_high, install_low))
else:
timestamp = ''
installer_entries[hotfix.group(0)].add(timestamp)
for hotfix in installer_entries:
entries.append({
'Hotfix':
hotfix,
'Installed':
sorted(installer_entries[hotfix])[0]
if installer_entries[hotfix] else '-',
'Source':
'Component Based Servicing',
"type":
"hotfix"
})
return entries
def _analyze_additional(key):
hotfix = key["key"].split('\\')[-1]
product = key["key"].split('\\')[-2]
return [{
'Hotfix': hotfix,
'Installed': key["modified"],
'Source': 'Microsoft Updates',
'Component': product,
"type": "hotfix"
}]
def transform(obj):
if any(map(lambda path: obj["key"].lower().startswith(path), HOTFIX_PATHS_INSTALLER)):
return _analyze_installer(obj)
if any(map(lambda path: obj["key"].lower().startswith(path), HOTFIX_PATHS_ADDITIONAL)):
return _analyze_additional(obj)
return []
def filetime_join(upper, lower):
"""
:param upper: upper part of the number
:param lower: lower part of the number
"""
return struct.unpack('Q', struct.pack('ii', lower, upper))[0]
def filetime_to_timestamp(filetime_64):
"""
    The FILETIME timestamp is a 64-bit integer that contains the number
    of 100-nanosecond intervals since 1601-01-01 00:00:00.
    The number is usually saved in the registry as two DWORD values.
:return: string of UTC time
"""
# pylint: disable=invalid-name
HUNDREDS_OF_NANOSECONDS_IN_A_SECOND = 10000000
UNIXEPOCH_AS_FILETIME = 116444736000000000
datetime_stamp = datetime.utcfromtimestamp(
(filetime_64 - UNIXEPOCH_AS_FILETIME) /
HUNDREDS_OF_NANOSECONDS_IN_A_SECOND)
return datetime_stamp.isoformat()
def main():
store = forensicstore.connect(".")
hklmsw = "HKEY_LOCAL_MACHINE\\SOFTWARE\\"
conditions = [{
'key':
hklmsw +
"Microsoft\\Windows\\CurrentVersion\\Component Based Servicing\\Packages\\%"
}, {
'key': hklmsw + "WOW6432Node\\Microsoft\\Updates\\%\\%"
}, {
'key': hklmsw + "Microsoft\\Updates\\%\\%"
}]
for item in store.select("windows-registry-key", combined_conditions(conditions)):
results = transform(item)
for result in results:
store.insert(result)
store.close()
if __name__ == '__main__':
main()
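# Worked example for the FILETIME helpers above (illustrative, not part of the
# original plugin; assumes the platform-native little-endian packing the code
# itself relies on): the Unix epoch as a FILETIME splits into the registry
# DWORDs high=27111902 and low=-717324288 (signed), so
#   filetime_join(27111902, -717324288)        -> 116444736000000000
#   filetime_to_timestamp(116444736000000000)  -> '1970-01-01T00:00:00'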
| [
"[email protected]"
] | |
50cbc0a6b7378fde63f8deb76fd0bda5440b65e5 | 583d03a6337df9f1e28f4ef6208491cf5fb18136 | /dev4qx/madeira-stub/handlers/stub/niukou.py | 6b0e4e26edb01f71cb86b882a9492992f2eca35c | [] | no_license | lescpsn/lescpsn | ece4362a328f009931c9e4980f150d93c4916b32 | ef83523ea1618b7e543553edd480389741e54bc4 | refs/heads/master | 2020-04-03T14:02:06.590299 | 2018-11-01T03:00:17 | 2018-11-01T03:00:17 | 155,309,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | import json
import logging
import tornado
import tornado.web
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop
request_log = logging.getLogger("madeira.request")
class NiukouOrderHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def post(self):
try:
order_id = self.get_argument("OutTradeNo")
master_test = self.application.sentinel.master_for('madeira', db=3)
            r2 = r1 = master_test.hget('result:' + order_id, 'result')  # read the order status from Redis, e.g. r2 = r1 = '100,00;成功' ("success")
if ',' in r1:
                r1, r2 = r1.split(',')  # e.g. r1 = "100", r2 = "00;成功" ("00;success")
data = {"HEADER":{"SEQNO":"Q2015101209294910063131","SECERTKEY":"713B242546AA7239A572AE1E2103A777","APPID":"QuXun","TIMESTAMP":"20151012092949276","VERSION":"V1.0"},"MSGBODY":{"CONTENT":{"ORDERID":"144461347935975","EXTORDER":order_id},"RESP":{"RCODE":"00","RMSG":"OK"}}}
self.finish(json.dumps(data))
if r1 == '0':
IOLoop.current().call_later(10, niukou_callback, order_id, r2)
except Exception:
request_log.exception('FAIL')
def niukou_callback(order_id, result):
if ';' in result:
result = result.split(';')[0]
body = {"HEADER":{"VERSION":"V1.1","TIMESTAMP":'',"SEQNO":'',"APPID":"QuXun","SECERTKEY":"E4CF8702097BF3D3EFF03DF3ACFDEE5E"},"MSGBODY":{"CONTENT":{"ORDERID":"144461587745723","EXTORDER":order_id,"STATUS":"\u6210\u529f","CODE":"0"}}}
body = json.dumps(body)
url = 'http://localhost:8899/callback/niukou'
http_client = AsyncHTTPClient()
try:
request_log.info('YFLOW CALLBACK\n%s', body)
http_client.fetch(url, method='POST', body=body)
except Exception:
request_log.exception('FAIL')
finally:
http_client.close()
| [
"[email protected]"
] | |
b8ad77ebbc0f8d213a39e817e72baccde8bfd65f | 112f02c4be5176907766f7546de7d5d57a2df2af | /tutorial/tutorial_56.py | aea22de47ee4c3870ffbc5ddf5b27264f1cb2d8c | [] | no_license | ankitsingh03/code-python | 010efdcf157d5411f81b6fbfca74f8b36e3ea263 | 7fd33b9e7f269e3042bdb13a47a26a3da87a68bc | refs/heads/master | 2023-03-25T10:48:23.282822 | 2021-03-18T06:43:27 | 2021-03-18T06:43:27 | 289,693,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | name = input("enter your name : ")
i=0
temp = ""
while i<len(name):
if name[i] not in temp:
temp+=name[i]
print(f"{name[i]} : {name.count(name[i])}")
i+=1 | [
"[email protected]"
] | |
a4a415836a73c4b26dcef8193f52936e7df8c02a | f68710d7a8228805ab19430d72cefd6bbf1c4b91 | /src/routes/challenge.py | e1d954d670e6d9f4edf787dce1f4adc16e6579be | [] | no_license | quokkateam/quokka-api | 1aae2dd9694b09ff426fc8defcc8dd1d6536f016 | 081f22fe3bf81aee18cca05283384c4899923b88 | refs/heads/master | 2023-01-21T08:21:52.559310 | 2020-12-03T01:12:46 | 2020-12-03T01:12:46 | 100,311,727 | 0 | 0 | null | 2017-09-18T05:19:33 | 2017-08-14T21:42:08 | Python | UTF-8 | Python | false | false | 7,013 | py | from flask_restplus import Resource, fields
from src.routes import namespace, api
from src.helpers.user_helper import current_user
from src.helpers.prize_helper import format_prizes
from src.helpers.sponsor_helper import format_sponsors
from src.helpers.challenge_helper import format_challenges, current_week_num
from operator import attrgetter
from src.challenges import universal_challenge_info
from datetime import datetime, timedelta
from src import dbi, logger
from src.models import Challenge
from src.helpers.error_codes import CHALLENGE_NOT_EXIST, INVALID_CHALLENGE_ACCESS
update_challenge_section_model = api.model('Challenge', {
'id': fields.Integer(required=True),
'text': fields.String(required=True),
'points': fields.Integer(required=True)
})
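# Example payload accepted by UpdateChallengeSection.put below (illustrative):
#   {"id": 1, "text": "Walk 10,000 steps every day", "points": 50}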
# TODO: Validate JSON field types for 'suggestions' and 'challenges' below
# update_suggestions_model = api.model('Challenge', {
# 'id': fields.Integer(required=True),
# 'suggestions': fields.String(required=True)
# })
# update_challenges_model = api.model('Challenge', {
# 'challenges': fields.String(required=True),
# 'startDate': fields.String(required=True)
# })
@namespace.route('/challenge/<int:week_num>')
class GetChallenge(Resource):
"""Fetch data for a school's challenge page by week number"""
@namespace.doc('get_challenge')
def get(self, week_num):
user = current_user()
if not user:
return '', 403
school = user.school
week_index = week_num - 1
# Get challenges for school, sorted by date
challenges = sorted(school.active_challenges(), key=attrgetter('start_date'))
if week_num < 1 or week_num > len(challenges):
return {'error': 'Challenge does not exist', 'code': CHALLENGE_NOT_EXIST}, 400
curr_week_num = current_week_num(challenges)
# if this is a future week and the user isn't an admin, prevent access
if week_num > curr_week_num and not user.is_admin:
return {'error': 'Week not yet available to access', 'code': INVALID_CHALLENGE_ACCESS}, 400
# Find the challenge requested by week index
challenge = challenges[week_index]
if week_index == 0:
prev_habit = None
next_habit = {
'weekNum': 2,
'name': challenges[1].name
}
elif week_index == len(challenges) - 1:
prev_habit = {
'weekNum': week_index,
'name': challenges[week_index - 1].name
}
next_habit = None
else:
prev_habit = {
'weekNum': week_index,
'name': challenges[week_index - 1].name
}
next_habit = {
'weekNum': week_num + 1,
'name': challenges[week_num].name
}
# if this is the current week and the user isn't an admin, he/she shouldn't have a link to the next week yet
if week_num == curr_week_num and not user.is_admin:
next_habit = None
universal_challenge = universal_challenge_info.get(challenge.slug)
resp = {
'id': challenge.id,
'habit': {
'name': challenge.name,
'slug': challenge.slug,
'icon': universal_challenge['icon'],
'dates': {
'start': datetime.strftime(challenge.start_date, '%m/%d/%Y'),
'end': datetime.strftime(challenge.end_date, '%m/%d/%Y')
}
},
'overview': universal_challenge['overview'],
'challenge': {
'text': challenge.text,
'points': challenge.points
},
'prizes': format_prizes(challenge.active_prizes()),
'sponsors': format_sponsors(school.sponsors),
'suggestions': challenge.suggestions,
'adjHabits': {
'prev': prev_habit,
'next': next_habit
},
'links': universal_challenge['links'],
'extraInfo': universal_challenge['extra_info']
}
return resp
@namespace.route('/challenge/challenge')
class UpdateChallengeSection(Resource):
"""Save the text and points for a weekly challenge"""
@namespace.doc('update_challenge_section')
@namespace.expect(update_challenge_section_model, validate=True)
def put(self):
user = current_user()
if not user or not user.is_admin:
return '', 403
challenge = dbi.find_one(Challenge, {'id': api.payload['id']})
if not challenge:
logger.error('No challenge found for id: {}'.format(api.payload['id']))
return 'Challenge required to update text and points', 500
dbi.update(challenge, {
'text': api.payload['text'],
'points': api.payload['points'] or 0
})
return {'text': challenge.text, 'points': challenge.points}
@namespace.route('/challenge/suggestions')
class UpdateSuggestions(Resource):
"""Save the suggestions for a weekly challenge"""
@namespace.doc('update_suggestions')
# @namespace.expect(update_suggestions_model, validate=True)
def put(self):
user = current_user()
if not user or not user.is_admin:
return '', 403
challenge = dbi.find_one(Challenge, {'id': api.payload['id']})
if not challenge:
logger.error('No challenge found for id: {}'.format(api.payload['id']))
return 'Challenge required to update text and points', 500
dbi.update(challenge, {'suggestions': api.payload['suggestions']})
return {'suggestions': challenge.suggestions}
@namespace.route('/challenges')
class RestfulChallenges(Resource):
"""Fetch all challenges for a school"""
@namespace.doc('get_challenges')
def get(self):
user = current_user()
if not user:
return '', 403
# Get challenges for school, sorted by date
challenges = sorted(user.school.active_challenges(), key=attrgetter('start_date'))
curr_week_num = current_week_num(challenges)
challenges_data = format_challenges(challenges, user, curr_week_num=curr_week_num)
resp = {
'weekNum': curr_week_num,
'challenges': challenges_data
}
return resp
@namespace.doc('update_challenges')
# @namespace.expect(update_challenges_model, validate=True)
def put(self):
user = current_user()
if not user or not user.is_admin:
return '', 403
try:
start_date = datetime.strptime(api.payload['startDate'], '%m/%d/%y')
except:
return 'Invalid start date', 500
challenge_slugs = [c['slug'] for c in api.payload['challenges']]
school = user.school
challenges = dbi.find_all(Challenge, {
'school': user.school,
'slug': challenge_slugs
})
i = 0
for slug in challenge_slugs:
challenge = [c for c in challenges if c.slug == slug][0]
if i > 0:
start_date = start_date + timedelta(days=7)
end_date = start_date + timedelta(days=6)
dbi.update(challenge, {'start_date': start_date, 'end_date': end_date})
i += 1
challenges = sorted(school.active_challenges(), key=attrgetter('start_date'))
curr_week_num = current_week_num(challenges)
challenges_data = format_challenges(challenges, user, curr_week_num=curr_week_num)
resp = {
'weekNum': curr_week_num,
'challenges': challenges_data
}
return resp | [
"[email protected]"
] | |
f3287cdf45f3d65183544c35aca6db06772c239b | bd55c7d73a95caed5f47b0031264ec05fd6ff60a | /apps/nchat/migrations/0012_auto_20191113_1447.py | b7df57dbc71a1d5e13e95d92c30ea5bd1f8098ea | [] | no_license | phonehtetpaing/ebdjango | 3c8610e2d96318aff3b1db89480b2f298ad91b57 | 1b77d7662ec2bce9a6377690082a656c8e46608c | refs/heads/main | 2023-06-26T13:14:55.319687 | 2021-07-21T06:04:58 | 2021-07-21T06:04:58 | 381,564,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # Generated by Django 2.0.5 on 2019-11-13 05:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nchat', '0011_enduser'),
]
operations = [
migrations.AddField(
model_name='enduser',
name='app_id',
field=models.CharField(default=1, max_length=256, verbose_name='app_id'),
preserve_default=False,
),
migrations.AddField(
model_name='enduser',
name='owner_id',
field=models.IntegerField(default=1, verbose_name='owner_id'),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
4669336116ce7e560e82aa2f2fc0cf729f1a23d2 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /workdocs_write_f/comment_delete.py | 79616abb93b0670f4aec69800235ff70fde5d896 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/workdocs/delete-comment.html
if __name__ == '__main__':
"""
create-comment : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/workdocs/create-comment.html
describe-comments : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/workdocs/describe-comments.html
"""
write_parameter("workdocs", "delete-comment") | [
"[email protected]"
] | |
10b1131f1db5cefed204613e153ecc03d1a09ee3 | d47f5f59fc322aa2a82ea1c3a15f39b200dd95b2 | /bioinformatics_1/week_1/computing_frequencies.py | e8f5fabf0b6ca4a603575bdccc2ae3e7e537d4b5 | [] | no_license | nayanika2304/BioInformatics | baefb229e02397e06c634df44b82e72e9a235c77 | 977219bf4f3e4583b91df6308828d15bb1ad148d | refs/heads/master | 2023-01-01T05:24:58.401988 | 2020-10-20T12:52:30 | 2020-10-20T12:52:30 | 295,566,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | def pattern_to_number(pattern):
if len(pattern) == 0:
return 0
symbol_to_number = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
n = len(pattern)
prefix = pattern[:n - 1]
symbol = pattern[n - 1]
return 4 * pattern_to_number(prefix) + symbol_to_number[symbol]
def computing_frequencies(text, k):
frequency_array = []
n = len(text)
for i in range(4 ** k):
frequency_array.append(0)
for i in range(n - k + 1):
pattern = text[i:i + k]
j = pattern_to_number(pattern)
frequency_array[j] = frequency_array[j] + 1
result = ""
for item in frequency_array:
result = result + " " + str(item)
return result
pattern="CGGCGTTGGAGTGGAAAA"
print(pattern_to_number(pattern))
#print(computing_frequencies(pattern,7))
#PatternToNumber(AGT) = 4 · PatternToNumber(AG) + SymbolToNumber(T) = 8 + 3 = 11
# where SymbolToNumber(symbol) is the function transforming symbols A, C, G, and T into the respective integers 0, 1, 2, and 3.
# patternToNumber('ATGCAA'):
# with A=0, C=1, G=2, T=3 the pattern maps to the base-4 digits 0 3 2 1 0 0
# (4^5 *0=0)+(4^4 *3=768)+(4^3 *2=128)+(4^2 *1=16)+(4^1 *0=0)+(4^0 *0=0) = 912
#numberToPattern
# To go backward (number to pattern) from a base-4 number, you divide the final number (5437 in this case) by the base, 4, k = 7 times, keeping track of the remainders:
#
#
# 5437 / 4 = 1359 R 1
# 1359 / 4 = 339 R 3
# 339 / 4 = 84 R 3
# 84 / 4 = 21 R 0
# 21/4 = 5 R 1
# 5/4 = 1 R 1
# 1/4 = 0 R 1
# Take the remainders from the bottom up and you get:
#
# 1110331, corresponding lexicographically to CCCAGGC
#
# Similarly we can look at going backward from 912 (from previous question) to ATGCAA (k = 6) in the same way:
#
# 912/4 = 228 R 0
# 228/4 = 57 R 0
# 57/4 = 14 R 1
# 14/4 = 3 R 2
# 3/4 = 0 R 3
# 0/4 = 0 R 0
# Bottom up we get 032100 corresponding to ATGCAA.
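# A direct translation of the procedure above into code (illustrative sketch,
# not part of the original exercise):
def number_to_pattern(index, k):
    number_to_symbol = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}
    pattern = ""
    for _ in range(k):
        index, remainder = divmod(index, 4)
        pattern = number_to_symbol[remainder] + pattern  # remainders read bottom-up
    return pattern
# number_to_pattern(912, 6) returns 'ATGCAA', inverting pattern_to_number('ATGCAA').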
| [
"[email protected]"
] | |
f95a4aa88f57289ef80b62ef84d6b9d5d9906074 | 050a01af15654c0708c2e747def7c33fe54cbe02 | /delivery_order/migrations/0001_initial.py | b9d564b5771452e38c9a53435e0538f295bc3d57 | [] | no_license | crowdbotics-apps/coddwebsite-17461 | 5d38d10294e5a9892028d11122174e9600790ac8 | eb9f22e52ec3c0c18fef55597c9e8aa3bf7cfe2d | refs/heads/master | 2023-05-13T13:28:47.125601 | 2020-05-27T17:32:07 | 2020-05-27T17:32:07 | 267,378,023 | 0 | 0 | null | 2021-06-10T09:23:01 | 2020-05-27T17:01:24 | Python | UTF-8 | Python | false | false | 2,609 | py | # Generated by Django 2.2.12 on 2020-05-27 17:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('menu', '0001_initial'),
('delivery_user_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Bill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total_amount', models.FloatField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('contact_info', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_contact_info', to='delivery_user_profile.ContactInfo')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_profile', to='delivery_user_profile.Profile')),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('detail', models.TextField()),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('total_price', models.FloatField()),
('status', models.CharField(max_length=20)),
('notes', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_bill', to='delivery_order.Bill')),
('item_variant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_item_variant', to='menu.ItemVariant')),
('payment_method', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_payment_method', to='delivery_order.PaymentMethod')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_profile', to='delivery_user_profile.Profile')),
],
),
]
| [
"[email protected]"
] | |
fe67af41766db65e264adb0e06c55f078b4eb952 | 4d8b9f5533671b15e124bfa025d672297384b434 | /tests/unit/common/test_ansible_common.py | 1ef8eee5f8bb756e57092dc2f2318b9a25b932be | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Kryndex/yardstick | 6c37c4f752589c523be761e3980f7ca2c8fac798 | c191b305790b4528868725db82d1af6c0d95e22b | refs/heads/master | 2021-05-13T14:12:54.917653 | 2018-01-08T18:21:47 | 2018-01-08T18:21:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,700 | py | # Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import tempfile
from collections import defaultdict
import mock
import unittest
from six.moves.configparser import ConfigParser
from six.moves import StringIO
from yardstick.common import ansible_common
PREFIX = 'yardstick.common.ansible_common'
class OverwriteDictTestCase(unittest.TestCase):
def test_overwrite_dict_cfg(self):
c = ConfigParser(allow_no_value=True)
d = {
"section_a": "empty_value",
"section_b": {"key_c": "Val_d", "key_d": "VAL_D"},
"section_c": ["key_c", "key_d"],
}
ansible_common.overwrite_dict_to_cfg(c, d)
# Python3 and Python2 convert empty values into None or ''
# we don't really care but we need to compare correctly for unittest
self.assertTrue(c.has_option("section_a", "empty_value"))
self.assertEqual(sorted(c.items("section_b")), [('key_c', 'Val_d'), ('key_d', 'VAL_D')])
self.assertTrue(c.has_option("section_c", "key_c"))
self.assertTrue(c.has_option("section_c", "key_d"))
class FilenameGeneratorTestCase(unittest.TestCase):
@mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
def test__handle_existing_file(self, mock_tmp):
ansible_common.FileNameGenerator._handle_existing_file("/dev/null")
def test_get_generator_from_file(self):
ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "", "")
def test_get_generator_from_file_middle(self):
ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "",
"null")
def test_get_generator_from_file_prefix(self):
ansible_common.FileNameGenerator.get_generator_from_filename("/dev/null", "", "null",
"middle")
class AnsibleNodeTestCase(unittest.TestCase):
def test_ansible_node(self):
ansible_common.AnsibleNode()
def test_ansible_node_len(self):
a = ansible_common.AnsibleNode()
len(a)
def test_ansible_node_repr(self):
a = ansible_common.AnsibleNode()
repr(a)
def test_ansible_node_iter(self):
a = ansible_common.AnsibleNode()
for _ in a:
pass
def test_is_role(self):
a = ansible_common.AnsibleNode()
self.assertFalse(a.is_role("", default="foo"))
def test_ansible_node_get_tuple(self):
a = ansible_common.AnsibleNode({"name": "name"})
self.assertEqual(a.get_tuple(), ('name', a))
def test_gen_inventory_line(self):
a = ansible_common.AnsibleNode(defaultdict(str))
self.assertEqual(a.gen_inventory_line(), "")
def test_ansible_node_delitem(self):
a = ansible_common.AnsibleNode({"name": "name"})
del a['name']
def test_ansible_node_getattr(self):
a = ansible_common.AnsibleNode({"name": "name"})
self.assertEqual(getattr(a, "nosuch", None), None)
class AnsibleNodeDictTestCase(unittest.TestCase):
def test_ansible_node_dict(self):
n = ansible_common.AnsibleNode
ansible_common.AnsibleNodeDict(n, {})
def test_ansible_node_dict_len(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
len(a)
def test_ansible_node_dict_repr(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
repr(a)
def test_ansible_node_dict_iter(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
for _ in a:
pass
def test_ansible_node_dict_get(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
self.assertIsNone(a.get(""))
def test_gen_inventory_lines_for_all_of_type(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, {})
self.assertEqual(a.gen_inventory_lines_for_all_of_type(""), [])
def test_gen_inventory_lines(self):
n = ansible_common.AnsibleNode
a = ansible_common.AnsibleNodeDict(n, [{
"name": "name", "user": "user", "password": "PASS",
"role": "role",
}])
self.assertEqual(a.gen_all_inventory_lines(),
["name ansible_ssh_pass=PASS ansible_user=user"])
class AnsibleCommonTestCase(unittest.TestCase):
def test_get_timeouts(self):
self.assertAlmostEquals(ansible_common.AnsibleCommon.get_timeout(-100), 1200.0)
def test__init__(self):
ansible_common.AnsibleCommon({})
def test_reset(self):
a = ansible_common.AnsibleCommon({})
a.reset()
def test_do_install_no_dir(self):
a = ansible_common.AnsibleCommon({})
self.assertRaises(OSError, a.do_install, '', '')
def test_gen_inventory_dict(self):
nodes = [{
"name": "name", "user": "user", "password": "PASS",
"role": "role",
}]
a = ansible_common.AnsibleCommon(nodes)
a.gen_inventory_ini_dict()
self.assertEqual(a.inventory_dict, {
'nodes': ['name ansible_ssh_pass=PASS ansible_user=user'],
'role': ['name']
})
def test_deploy_dir(self):
a = ansible_common.AnsibleCommon({})
self.assertRaises(ValueError, getattr, a, "deploy_dir")
def test_deploy_dir_set(self):
a = ansible_common.AnsibleCommon({})
a.deploy_dir = ""
def test_deploy_dir_set_get(self):
a = ansible_common.AnsibleCommon({})
a.deploy_dir = "d"
self.assertEqual(a.deploy_dir, "d")
@mock.patch('{}.open'.format(PREFIX))
def test__gen_ansible_playbook_file_list(self, mock_open):
d = tempfile.mkdtemp()
try:
a = ansible_common.AnsibleCommon({})
a._gen_ansible_playbook_file(["a"], d)
finally:
os.rmdir(d)
@mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
@mock.patch('{}.open'.format(PREFIX))
def test__gen_ansible_inventory_file(self, mock_open, mock_tmp):
nodes = [{
"name": "name", "user": "user", "password": "PASS",
"role": "role",
}]
d = tempfile.mkdtemp()
try:
a = ansible_common.AnsibleCommon(nodes)
a.gen_inventory_ini_dict()
inv_context = a._gen_ansible_inventory_file(d)
with inv_context:
c = StringIO()
inv_context.write_func(c)
self.assertIn("ansible_ssh_pass=PASS", c.getvalue())
finally:
os.rmdir(d)
@mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
@mock.patch('{}.open'.format(PREFIX))
def test__gen_ansible_playbook_file_list_multiple(self, mock_open, mock_tmp):
d = tempfile.mkdtemp()
try:
a = ansible_common.AnsibleCommon({})
a._gen_ansible_playbook_file(["a", "b"], d)
finally:
os.rmdir(d)
@mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
@mock.patch('{}.Popen'.format(PREFIX))
@mock.patch('{}.open'.format(PREFIX))
def test_do_install_tmp_dir(self, mock_open, mock_popen, mock_tmp):
mock_popen.return_value.communicate.return_value = "", ""
mock_popen.return_value.wait.return_value = 0
d = tempfile.mkdtemp()
try:
a = ansible_common.AnsibleCommon({})
a.do_install('', d)
finally:
os.rmdir(d)
@mock.patch('{}.NamedTemporaryFile'.format(PREFIX))
@mock.patch('{}.Popen'.format(PREFIX))
@mock.patch('{}.open'.format(PREFIX))
def test_execute_ansible_check(self, mock_open, mock_popen, mock_tmp):
mock_popen.return_value.communicate.return_value = "", ""
mock_popen.return_value.wait.return_value = 0
d = tempfile.mkdtemp()
try:
a = ansible_common.AnsibleCommon({})
a.execute_ansible('', d, ansible_check=True, verbose=True)
finally:
os.rmdir(d)
| [
"[email protected]"
] | |
fb9b679a11eb9c744907db626a9f6f8e52a5756a | b9db91bdb30ba99aad8bbea251e5e1e8c2a7fa45 | /opt/src/aoj/itp1/7_b.py | 839084e701a5b92b98f95369fb7f3d92fbcc2450 | [] | no_license | jacoloves/python_tool | 682c3a91b535f15f1f8c9299e9b4c9ccbd5eea79 | 93ba5de17a727d6ccf9c67e4bca37ea502d06e5d | refs/heads/master | 2021-03-01T04:25:49.581952 | 2021-01-27T13:52:50 | 2021-01-27T13:52:50 | 245,753,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | arr = []
while True:
n, x = map(int, input().split())
if n == 0 and x == 0:
break
arr.append([n, x])
for i in range(len(arr)):
n = arr[i][0]
x = arr[i][1]
num = 0
for j in range(1, n-1):
x2 = x-j
for k in range(j+1, n):
x3 = x2-k
for l in range(k+1, n+1):
x4 = x3-l
if x4 == 0:
num += 1
print(num)
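# Cross-check with the standard library (illustrative only): for a given n and x,
#   from itertools import combinations
#   num = sum(1 for c in combinations(range(1, n + 1), 3) if sum(c) == x)
# counts the same triples of distinct values from 1..n that sum to x.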
| [
"[email protected]"
] | |
98e60f84759f1dabfe64292e06d96f5801a51c88 | ed60a26caa718cae99f97217e6664e5a23ce3d45 | /networkaccessmanager.py | f4b8737e2d2bd79571b271b5ee020b61bb1201e2 | [] | no_license | leonmvd/pdokservicesplugin | 47580e290c2ea686541c90e6c6c6a9bc9cd5d524 | 00ea86d49037e27dee7db443de932c0ca9168b81 | refs/heads/master | 2021-08-28T15:17:41.441931 | 2017-11-17T13:26:52 | 2017-11-17T13:26:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,313 | py | # -*- coding: utf-8 -*-
"""
***************************************************************************
An httplib2 replacement that uses QgsNetworkAccessManager
https://github.com/boundlessgeo/lib-qgis-commons/blob/master/qgiscommons2/network/networkaccessmanager.py
---------------------
Date : August 2016
Copyright : (C) 2016 Boundless, http://boundlessgeo.com
Email : apasotti at boundlessgeo dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
import sys
import os
sys.path.append(os.path.dirname(__file__))
from future import standard_library
standard_library.install_aliases()
#from builtins import str
#from builtins import object
__author__ = 'Alessandro Pasotti'
__date__ = 'August 2016'
import re
import urllib.request, urllib.error, urllib.parse
from PyQt4.QtCore import pyqtSlot, QUrl, QEventLoop, QTimer, QCoreApplication
from PyQt4.QtNetwork import QNetworkRequest, QNetworkReply
from qgis.core import QgsNetworkAccessManager, QgsMessageLog
# FIXME: ignored
DEFAULT_MAX_REDIRECTS = 4
class RequestsException(Exception):
pass
class RequestsExceptionTimeout(RequestsException):
pass
class RequestsExceptionConnectionError(RequestsException):
pass
class RequestsExceptionUserAbort(RequestsException):
pass
class Map(dict):
"""
Example:
m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
self[k] = v
if kwargs:
for k, v in kwargs.items():
self[k] = v
def __getattr__(self, attr):
return self.get(attr)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(Map, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(Map, self).__delitem__(key)
del self.__dict__[key]
class Response(Map):
pass
class NetworkAccessManager(object):
"""
    This class mimics httplib2 by using QgsNetworkAccessManager for all
network calls.
    The return value is a tuple of (response, content), the first being an
instance of the Response class, the second being a string that contains
the response entity body.
Parameters
----------
debug : bool
verbose logging if True
exception_class : Exception
Custom exception class
Usage 1 (blocking mode)
-----
::
nam = NetworkAccessManager(authcgf)
try:
(response, content) = nam.request('http://www.example.com')
        except RequestsException as e:
# Handle exception
pass
Usage 2 (Non blocking mode)
-------------------------
::
        NOTE: in non-blocking mode request() returns immediately;
        it is up to the caller to manage listeners when using
        non-blocking mode
nam = NetworkAccessManager(authcgf)
try:
nam.request('http://www.example.com', blocking=False)
nam.reply.finished.connect(a_signal_listener)
        except RequestsException as e:
# Handle exception
pass
        Get the response using the method:
        nam.httpResult() which returns a dictionary with the keys:
        'status' - HTTP status code from reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
        'status_code' - HTTP status code from reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
        'status_message' - reply message string from reply.attribute(QNetworkRequest.HttpReasonPhraseAttribute)
        'content' - bytearray returned from the reply
        'ok' - request success [True, False]
        'headers' - dictionary containing the reply headers
        'reason' - formatted message string with reply.errorString()
        'exception' - the exception raised during execution
"""
def __init__(self, authid=None, disable_ssl_certificate_validation=False, exception_class=None, debug=False):
self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
self.authid = authid
self.reply = None
self.debug = debug
self.exception_class = exception_class
self.on_abort = False
self.blocking_mode = False
self.http_call_result = Response({
'status': 0,
'status_code': 0,
'status_message': '',
'content' : '',
'ok': False,
'headers': {},
'reason': '',
'exception': None,
})
def msg_log(self, msg):
if self.debug:
QgsMessageLog.logMessage(msg, "NetworkAccessManager")
def httpResult(self):
return self.http_call_result
def request(self, url, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None, blocking=True):
"""
Make a network request by calling QgsNetworkAccessManager.
redirections argument is ignored and is here only for httplib2 compatibility.
"""
self.msg_log(u'http_call request: {0}'.format(url))
self.blocking_mode = blocking
req = QNetworkRequest()
        # Avoid double quoting from QUrl
url = urllib.parse.unquote(url)
req.setUrl(QUrl(url))
if headers is not None:
            # This fixes a weird error with compressed content not being correctly
# inflated.
# If you set the header on the QNetworkRequest you are basically telling
# QNetworkAccessManager "I know what I'm doing, please don't do any content
# encoding processing".
# See: https://bugs.webkit.org/show_bug.cgi?id=63696#c1
try:
del headers['Accept-Encoding']
except KeyError:
pass
for k, v in list(headers.items()):
self.msg_log("Setting header %s to %s" % (k, v))
req.setRawHeader(k, v)
# if self.authid:
# self.msg_log("Update request w/ authid: {0}".format(self.authid))
# QgsAuthManager.instance().updateNetworkRequest(req, self.authid)
if self.reply is not None and self.reply.isRunning():
self.reply.close()
if method.lower() == 'delete':
func = getattr(QgsNetworkAccessManager.instance(), 'deleteResource')
else:
func = getattr(QgsNetworkAccessManager.instance(), method.lower())
# Calling the server ...
# Let's log the whole call for debugging purposes:
self.msg_log("Sending %s request to %s" % (method.upper(), req.url().toString()))
self.on_abort = False
headers = {str(h): str(req.rawHeader(h)) for h in req.rawHeaderList()}
for k, v in list(headers.items()):
self.msg_log("%s: %s" % (k, v))
if method.lower() in ['post', 'put']:
            if hasattr(body, 'read'):  # file-like objects (the Python 2 'file' type is gone in Python 3)
body = body.read()
self.reply = func(req, body)
else:
self.reply = func(req)
# if self.authid:
# self.msg_log("Update reply w/ authid: {0}".format(self.authid))
# QgsAuthManager.instance().updateNetworkReply(self.reply, self.authid)
        # necessary to trap the local timeout managed by QgsNetworkAccessManager
# calling QgsNetworkAccessManager::abortRequest
QgsNetworkAccessManager.instance().requestTimedOut.connect(self.requestTimedOut)
self.reply.sslErrors.connect(self.sslErrors)
self.reply.finished.connect(self.replyFinished)
self.reply.downloadProgress.connect(self.downloadProgress)
        # block if in blocking mode, otherwise return immediately;
        # it's up to the caller to manage listeners in non-blocking mode
if not self.blocking_mode:
return (None, None)
# Call and block
self.el = QEventLoop()
self.reply.finished.connect(self.el.quit)
# Catch all exceptions (and clean up requests)
try:
self.el.exec_(QEventLoop.ExcludeUserInputEvents)
except Exception as e:
raise e
if self.reply:
self.reply.finished.disconnect(self.el.quit)
# emit exception in case of error
if not self.http_call_result.ok:
if self.http_call_result.exception and not self.exception_class:
raise self.http_call_result.exception
else:
raise self.exception_class(self.http_call_result.reason)
return (self.http_call_result, self.http_call_result.content)
@pyqtSlot()
def downloadProgress(self, bytesReceived, bytesTotal):
"""Keep track of the download progress"""
#self.msg_log("downloadProgress %s of %s ..." % (bytesReceived, bytesTotal))
pass
@pyqtSlot()
def requestTimedOut(self, reply):
"""Trap the timeout. In Async mode requestTimedOut is called after replyFinished"""
        # adapt http_call_result based on the QGIS timer timeout signal
self.exception_class = RequestsExceptionTimeout
self.http_call_result.exception = RequestsExceptionTimeout("Timeout error")
@pyqtSlot()
def replyFinished(self):
err = self.reply.error()
httpStatus = self.reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
httpStatusMessage = self.reply.attribute(QNetworkRequest.HttpReasonPhraseAttribute)
self.http_call_result.status_code = httpStatus
self.http_call_result.status = httpStatus
self.http_call_result.status_message = httpStatusMessage
for k, v in self.reply.rawHeaderPairs():
self.http_call_result.headers[str(k)] = str(v)
self.http_call_result.headers[str(k).lower()] = str(v)
if err != QNetworkReply.NoError:
# handle error
# check if errorString is empty, if so, then set err string as
# reply dump
if re.match('(.)*server replied: $', self.reply.errorString()):
errString = self.reply.errorString() + self.http_call_result.content
else:
errString = self.reply.errorString()
# check if self.http_call_result.status_code is available (client abort
# does not produce http.status_code)
if self.http_call_result.status_code:
msg = "Network error #{0}: {1}".format(
self.http_call_result.status_code, errString)
else:
msg = "Network error: {0}".format(errString)
self.http_call_result.reason = msg
self.http_call_result.ok = False
self.msg_log(msg)
# set return exception
if err == QNetworkReply.TimeoutError:
self.http_call_result.exception = RequestsExceptionTimeout(msg)
elif err == QNetworkReply.ConnectionRefusedError:
self.http_call_result.exception = RequestsExceptionConnectionError(msg)
elif err == QNetworkReply.OperationCanceledError:
# request abort by calling NAM.abort() => cancelled by the user
if self.on_abort:
self.http_call_result.exception = RequestsExceptionUserAbort(msg)
else:
self.http_call_result.exception = RequestsException(msg)
else:
self.http_call_result.exception = RequestsException(msg)
# overload exception to the custom exception if available
if self.exception_class:
self.http_call_result.exception = self.exception_class(msg)
else:
# Handle redirections
redirectionUrl = self.reply.attribute(QNetworkRequest.RedirectionTargetAttribute)
if redirectionUrl is not None and redirectionUrl != self.reply.url():
if redirectionUrl.isRelative():
redirectionUrl = self.reply.url().resolved(redirectionUrl)
msg = "Redirected from '{}' to '{}'".format(
self.reply.url().toString(), redirectionUrl.toString())
self.msg_log(msg)
self.reply.deleteLater()
self.reply = None
self.request(redirectionUrl.toString())
# really end request
else:
msg = "Network success #{0}".format(self.reply.error())
self.http_call_result.reason = msg
self.msg_log(msg)
ba = self.reply.readAll()
self.http_call_result.content = bytes(ba)
self.http_call_result.ok = True
# Let's log the whole response for debugging purposes:
self.msg_log("Got response %s %s from %s" % \
(self.http_call_result.status_code,
self.http_call_result.status_message,
self.reply.url().toString()))
for k, v in list(self.http_call_result.headers.items()):
self.msg_log("%s: %s" % (k, v))
if len(self.http_call_result.content) < 1024:
self.msg_log("Payload :\n%s" % self.http_call_result.content)
else:
self.msg_log("Payload is > 1 KB ...")
# clean reply
if self.reply is not None:
if self.reply.isRunning():
self.reply.close()
self.msg_log("Deleting reply ...")
# Disconnect all slots
self.reply.sslErrors.disconnect(self.sslErrors)
self.reply.finished.disconnect(self.replyFinished)
self.reply.downloadProgress.disconnect(self.downloadProgress)
self.reply.deleteLater()
self.reply = None
else:
self.msg_log("Reply was already deleted ...")
@pyqtSlot()
def sslErrors(self, ssl_errors):
"""
Handle SSL errors, logging them if debug is on and ignoring them
if disable_ssl_certificate_validation is set.
"""
if ssl_errors:
for v in ssl_errors:
self.msg_log("SSL Error: %s" % v.errorString())
if self.disable_ssl_certificate_validation:
self.reply.ignoreSslErrors()
@pyqtSlot()
def abort(self):
"""
Handle request to cancel HTTP call
"""
if (self.reply and self.reply.isRunning()):
self.on_abort = True
self.reply.abort()
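# Minimal blocking example (a sketch mirroring the class docstring; assumes it
# runs inside QGIS, where QgsNetworkAccessManager is available):
#
#   nam = NetworkAccessManager()
#   try:
#       response, content = nam.request('http://www.example.com')
#   except RequestsException as e:
#       pass  # handle the error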
| [
"[email protected]"
] | |
4f1873b7edecc8b3be6649316dcba834b743f50e | de7127deabd34e17473fb94f48e033f482535ca7 | /virt/bin/markdown2 | 2f0becd3611f94bc2b1edf4b5c86a622fa7aa217 | [
"MIT"
] | permissive | annstella/One_Min_Pitch | a50d855423ad02fb46e8b6765c16cbf9d7a6e6ff | 86cd2426061df502adaffbf544589d54653df00c | refs/heads/master | 2020-03-28T05:54:11.687201 | 2018-09-17T08:00:08 | 2018-09-17T08:00:08 | 147,802,293 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 523 | #!/home/annstella/Documents/core/One_Min_Pitch/virt/bin/python3.6
import sys
from os.path import join, dirname, exists
# Use the local markdown2.py if we are in the source tree.
source_tree_markdown2 = join(dirname(__file__), "..", "lib", "markdown2.py")
if exists(source_tree_markdown2):
sys.path.insert(0, dirname(source_tree_markdown2))
try:
from markdown2 import main
finally:
del sys.path[0]
else:
from markdown2 import main
if __name__ == "__main__":
sys.exit( main(sys.argv) )
| [
"[email protected]"
] | ||
bacd5c10967e22cb2e03eb54ce3045346fa32f5e | fba45f3289a6de51eb7a9bfbee90d566181963b5 | /pagemat/lib/python3.6/site-packages/paypal/standard/pdt/admin.py | d68d7ccb506406c13ca5c7216b0f32afb93123cd | [
"MIT"
] | permissive | bharatpurohit97/PageMatrix | abb580787aecf656e5ff27f0c9d75e89f16e905d | 66ab9b1dd365a34f86dba110fe97c32cb7137bf2 | refs/heads/master | 2022-12-12T01:50:47.230219 | 2018-12-19T09:20:05 | 2018-12-19T09:20:05 | 162,409,793 | 1 | 0 | MIT | 2022-12-08T02:28:13 | 2018-12-19T08:54:22 | Python | UTF-8 | Python | false | false | 3,710 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from paypal.standard.pdt.models import PayPalPDT
# TODO: How similar is this to PayPalIPNAdmin? Could we just inherit from one common admin model?
class PayPalPDTAdmin(admin.ModelAdmin):
date_hierarchy = 'payment_date'
fieldsets = (
(None, {
"fields":
['flag',
'txn_id',
'txn_type',
'payment_status',
'payment_date',
'transaction_entity',
'reason_code',
'pending_reason',
'mc_gross',
'mc_fee',
'auth_status',
'auth_amount',
'auth_exp',
'auth_id',
],
}),
("Address", {
"description": "The address of the Buyer.",
'classes': ('collapse',),
"fields":
['address_city',
'address_country',
'address_country_code',
'address_name',
'address_state',
'address_status',
'address_street',
'address_zip',
],
}),
("Buyer", {
"description": "The information about the Buyer.",
'classes': ('collapse',),
"fields":
['first_name',
'last_name',
'payer_business_name',
'payer_email',
'payer_id',
'payer_status',
'contact_phone',
'residence_country'
],
}),
("Seller", {
"description": "The information about the Seller.",
'classes': ('collapse',),
"fields":
['business',
'item_name',
'item_number',
'quantity',
'receiver_email',
'receiver_id',
'custom',
'invoice',
'memo',
],
}),
("Subscriber", {
"description": "The information about the Subscription.",
'classes': ('collapse',),
"fields":
['subscr_id',
'subscr_date',
'subscr_effective',
],
}),
("Recurring", {
"description": "Information about recurring Payments.",
"classes": ("collapse",),
"fields":
['profile_status',
'initial_payment_amount',
'amount_per_cycle',
'outstanding_balance',
'period_type',
'product_name',
'product_type',
'recurring_payment_id',
'receipt_id',
'next_payment_date',
],
}),
("Admin", {
"description": "Additional Info.",
"classes": ('collapse',),
"fields":
['test_ipn',
'ipaddress',
'query',
'flag_code',
'flag_info',
],
}),
)
list_display = ["__unicode__",
"flag",
"invoice",
"custom",
"payment_status",
"created_at",
]
search_fields = ["txn_id",
"recurring_payment_id",
]
admin.site.register(PayPalPDT, PayPalPDTAdmin)
| [
"[email protected]"
] | |
0b14f4c050f42e06cf573a1f84e62522ac65add4 | c7d91529db199322e39e54fe4051a75704ea843e | /华为题库/最小覆盖串.py | df725d28bca625b4f4f23c73033173ff5af73345 | [] | no_license | 2226171237/Algorithmpractice | fc786fd47aced5cd6d96c45f8e728c1e9d1160b7 | 837957ea22aa07ce28a6c23ea0419bd2011e1f88 | refs/heads/master | 2020-12-26T07:20:37.226443 | 2020-09-13T13:31:05 | 2020-09-13T13:31:05 | 237,431,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | '''
给你一个字符串 S、一个字符串 T,请在字符串 S 里面找出:包含 T 所有字母的最小子串。
示例:
输入: S = "ADOBECODEBANC", T = "ABC"
输出: "BANC"
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/minimum-window-substring
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
from collections import Counter
class Solution:
def minWindow(self, s: str, t: str) -> str:
'''
双指针,滑动窗口
:param s:
:param t:
:return:
'''
needs=Counter(t)
need_matchs=len(needs)
match=0 # 有多字符符合要求了
window={}
left,right=0,0
start=0
minLens=2**32
while right<len(s):
ch=s[right]
if needs[ch]: # 需要匹配
window[ch]=window.get(ch,0)+1
if window[ch]==needs[ch]: # 该字符匹配成功
match+=1
right+=1
while match==need_matchs: # 所有都匹配成功,左边不断右移,直到不匹配
if right-left<minLens: # 更新最小子串
start=left
minLens=right-left
ch=s[left]
if needs[ch]:
window[ch]-=1
if window[ch]<needs[ch]: # 出现了不匹配
match-=1
left+=1
return '' if minLens==2**32 else s[start:start+minLens]
if __name__ == '__main__':
S=Solution()
print(S.minWindow("cabwefgewcwaefgcf","cae"))
| [
"[email protected]"
] | |
d18985bf92c950ffcf456b5ef4e4d773d7f1208e | 36dbd31536a4084db83d12b2bd12a9f22f4da636 | /geomdl/elements.py | 952f45421aee3bd3bc94e18a97dc66b2d65fa7a8 | [
"Python-2.0",
"MIT"
] | permissive | Hgser/NURBS-Python | 75d38a21721d9afd3d5f8491bf8ba56d71a2285a | ced4debdf4fc13afce9b830a2962da2789e5c45b | refs/heads/master | 2020-04-27T00:42:17.632484 | 2019-02-28T05:21:20 | 2019-02-28T05:21:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,863 | py | """
.. module:: elements
:platform: Unix, Windows
:synopsis: Provides classes representing geometry and topology entities
.. moduleauthor:: Onur Rauf Bingol <[email protected]>
"""
import abc
import copy
import six
from ._utilities import export
@six.add_metaclass(abc.ABCMeta)
class AbstractEntity(object):
""" Abstract base class for all geometric entities. """
def __init__(self, *args, **kwargs):
self._id = int(kwargs.get('id', 0)) # element identifier
self._data = [] # data storage array
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
# Don't copy self reference
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
# Copy all other attributes
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def __len__(self):
return len(self._data)
def __getitem__(self, key):
return self._data[key]
def __iter__(self):
return iter(self._data)
def __reversed__(self):
return reversed(self._data)
@property
def id(self):
""" Identifier for the geometric entity.
It must be an integer number, otherwise the setter will raise a *ValueError*.
:getter: Gets the identifier
:setter: Sets the identifier
:type: int
"""
return self._id
@id.setter
def id(self, value):
if not isinstance(value, int):
raise ValueError("Identifier value must be an integer")
self._id = value
@export
class Vertex(AbstractEntity):
""" 3-dimensional Vertex entity with spatial and parametric position. """
def __init__(self, *args, **kwargs):
super(Vertex, self).__init__(*args, **kwargs)
self.data = [float(arg) for arg in args] if args else [0.0, 0.0, 0.0] # spatial coordinates
self._uv = [0.0, 0.0] # parametric coordinates
self._inside = False # flag for trimming
def __str__(self):
return "Vertex " + str(self._id) + " " + str(self._data)
__repr__ = __str__
def __cmp__(self, other):
return (self.id > other.id) - (self.id < other.id)
def __eq__(self, other):
return self.id == other.id
def __ne__(self, other):
return self.id != other.id
def __lt__(self, other):
return self.id < other.id
def __le__(self, other):
return self.id <= other.id
def __gt__(self, other):
return self.id > other.id
def __ge__(self, other):
return self.id >= other.id
def __nonzero__(self):
# For Python 2 compatibility
return self.__bool__()
def __bool__(self):
# For Python 3 compatibility
return self.inside
def __add__(self, other):
if not isinstance(other, self.__class__):
raise TypeError("Can only add Vertex objects")
res_data = [0.0 for _ in range(3)]
for idx in range(3):
res_data[idx] = self.data[idx] + other.data[idx]
res_uv = [0.0 for _ in range(2)]
for idx in range(2):
res_uv[idx] = self.uv[idx] + other.uv[idx]
res_val = self.__class__()
res_val.data = res_data
res_val.uv = res_uv
return res_val
def __sub__(self, other):
if not isinstance(other, self.__class__):
raise TypeError("Can only subtract Vertex objects")
res_data = [0.0 for _ in range(3)]
for idx in range(3):
res_data[idx] = self.data[idx] - other.data[idx]
res_uv = [0.0 for _ in range(2)]
for idx in range(2):
res_uv[idx] = self.uv[idx] - other.uv[idx]
res_val = self.__class__()
res_val.data = res_data
res_val.uv = res_uv
return res_val
def __div__(self, other):
return self.__truediv__(other)
def __truediv__(self, other):
if not isinstance(other, (float, int)):
raise TypeError("Can only divide by a float or an integer")
res_data = [0.0 for _ in range(3)]
for idx in range(3):
res_data[idx] = self.data[idx] / float(other)
res_uv = [0.0 for _ in range(2)]
for idx in range(2):
res_uv[idx] = self.uv[idx] / float(other)
res_val = self.__class__()
res_val.data = res_data
res_val.uv = res_uv
return res_val
@property
def x(self):
""" x-component of the vertex
:getter: Gets the x-component of the vertex
:setter: Sets the x-component of the vertex
:type: float
"""
return self._data[0]
@x.setter
def x(self, value):
self._data[0] = float(value)
@property
def y(self):
""" y-component of the vertex
:getter: Gets the y-component of the vertex
:setter: Sets the y-component of the vertex
:type: float
"""
return self._data[1]
@y.setter
def y(self, value):
self._data[1] = float(value)
@property
def z(self):
""" z-component of the vertex
:getter: Gets the z-component of the vertex
:setter: Sets the z-component of the vertex
:type: float
"""
return self._data[2]
@z.setter
def z(self, value):
self._data[2] = float(value)
@property
def u(self):
""" Parametric u-component of the vertex
:getter: Gets the u-component of the vertex
:setter: Sets the u-component of the vertex
:type: float
"""
return self._uv[0]
@u.setter
def u(self, value):
self._uv[0] = float(value)
@property
def v(self):
""" Parametric v-component of the vertex
:getter: Gets the v-component of the vertex
:setter: Sets the v-component of the vertex
:type: float
"""
return self._uv[1]
@v.setter
def v(self, value):
self._uv[1] = float(value)
@property
def uv(self):
""" Parametric (u,v) pair of the vertex
:getter: Gets the uv-component of the vertex
:setter: Sets the uv-component of the vertex
:type: list, tuple
"""
return tuple(self._uv)
@uv.setter
def uv(self, value):
if not isinstance(value, (list, tuple)):
raise TypeError("UV data input must be a list or tuple")
if len(value) != 2:
raise ValueError("UV must have 2 components")
self._uv = list(value)
@property
def inside(self):
""" Inside-outside flag
:getter: Gets the flag
:setter: Sets the flag
:type: bool
"""
return self._inside
@inside.setter
def inside(self, value):
self._inside = bool(value)
@property
def data(self):
""" (x,y,z) components of the vertex.
:getter: Gets the 3-dimensional components
:setter: Sets the 3-dimensional components
"""
return tuple(self._data)
@data.setter
def data(self, value):
if not isinstance(value, (list, tuple)):
raise TypeError("Vertex data must be a list or tuple")
if len(value) != 3:
raise ValueError("Vertex can only store 3 components")
# Convert to float
self._data = [float(val) for val in value]
@export
class Triangle(AbstractEntity):
""" Triangle entity which represents a triangle composed of vertices. """
def __init__(self, *args, **kwargs):
super(Triangle, self).__init__(*args, **kwargs)
self._inside = False # flag for trimming
if args:
self.add_vertex(*args)
def __str__(self):
return "Triangle " + str(self._id)
__repr__ = __str__
@property
def vertices(self):
""" Vertices of the triangle
:getter: Gets the list of vertices
:type: tuple
"""
return tuple(self._data)
@property
def vertices_raw(self):
""" Vertices which generates a closed triangle
Adds the first vertex as a last element of the return value (good for plotting)
:getter: Gets the list of vertices
:type: list
"""
v_raw = []
for v in self._data:
v_raw.append(v.data)
# Add the first vertex data as a last element (for plotting modules)
if len(self._data) > 0:
v_raw.append(self._data[0].data)
return v_raw
@property
def vertices_uv(self):
""" Parametric coordinates of the triangle vertices
:getter: Gets the parametric coordinates of the vertices
:type: list
"""
data = self.vertices
res = [data[idx].uv for idx in range(3)]
return res
@property
def edges(self):
""" Edges of the triangle
:getter: Gets the list of vertices that generates the edges of the triangle
:type: list
"""
data = self.vertices_raw
res = [[] for _ in range(3)]
for idx in range(3):
res[idx] = [data[idx], data[idx + 1]]
return res
@property
def vertex_ids(self):
""" Vertex indices
Vertex numbering starts from 1.
:getter: Gets the vertex indices
:type: list
"""
v_idx = []
for v in self._data:
v_idx.append(v.id)
return v_idx
@property
def vertex_ids_zero(self):
""" Zero-indexed vertex indices
Vertex numbering starts from 0.
:getter: Gets the vertex indices
:type: list
"""
v_idx = []
for v in self._data:
v_idx.append(v.id - 1)
return v_idx
@property
def inside(self):
""" Inside-outside flag
:getter: Gets the flag
:setter: Sets the flag
:type: bool
"""
return self._inside
@inside.setter
def inside(self, value):
self._inside = bool(value)
def add_vertex(self, *args):
""" Adds vertices to the Triangle object.
This method takes a single or a list of vertices as its function arguments.
"""
if len(self._data) > 2:
raise ValueError("Cannot add more vertices")
res = []
for arg in args:
if isinstance(arg, Vertex):
res.append(arg)
else:
raise TypeError("Input must be a Vertex object")
self._data = res
@export
class Quad(AbstractEntity):
""" Quad entity which represents a quadrilateral structure composed of vertices. """
def __init__(self, *args, **kwargs):
super(Quad, self).__init__(*args, **kwargs)
if args:
self.data = args
def __str__(self):
return "Quad " + str(self._id) + " V: " + str(self._data)
__repr__ = __str__
@property
def data(self):
""" Vertex indices.
:getter: Gets the vertex indices
:setter: Sets the vertex indices
"""
return tuple(self._data)
@data.setter
def data(self, value):
if not isinstance(value, (list, tuple)):
raise TypeError("Input data must be a list or tuple")
if len(value) != 4:
raise ValueError("Quad can only have 4 vertices")
# Convert to int
self._data = [int(val) for val in value]
def add_vertex(self, *args):
""" Adds vertices to the Quad object.
This method takes a single or a list of vertices as its function arguments.
"""
if len(self._data) > 3:
raise ValueError("Cannot add more vertices")
res = []
for arg in args:
if isinstance(arg, Vertex):
res.append(arg.id)
else:
raise TypeError("Input must be a Vertex object")
self._data = res
@export
class Face(AbstractEntity):
""" Representation of Face entity which is composed of triangles or quads. """
def __init__(self, *args, **kwargs):
super(Face, self).__init__(*args, **kwargs)
if args:
self.add_triangle(*args)
def __str__(self):
return "Face " + str(self._id)
__repr__ = __str__
@property
def triangles(self):
""" Triangles of the face
:getter: Gets the list of triangles
:type: tuple
"""
return tuple(self._data)
def add_triangle(self, *args):
""" Adds triangles to the Face object.
This method takes a single or a list of triangles as its function arguments.
"""
res = []
for arg in args:
if isinstance(arg, Triangle):
res.append(arg)
else:
raise TypeError("Input must be a Triangle object")
self._data = res
@export
class Body(AbstractEntity):
""" Representation of Body entity which is composed of faces. """
def __init__(self, *args, **kwargs):
super(Body, self).__init__(*args, **kwargs)
if args:
self.add_face(*args)
def __str__(self):
return "Body " + str(self._id)
__repr__ = __str__
@property
def faces(self):
""" Faces of the body
:getter: Gets the list of faces
:type: tuple
"""
return tuple(self._data)
def add_face(self, *args):
""" Adds faces to the Body object.
This method takes a single or a list of faces as its function arguments.
"""
res = []
for arg in args:
if isinstance(arg, Face):
res.append(arg)
else:
raise TypeError("Input must be a Face object")
self._data = res
| [
"[email protected]"
] | |
e8fb7c4b15125ffbf91656ba6e26fa0b454304bb | 2ccba7b17b3ce15efa627ef25ff1a1e23c4b1dbd | /Week 02/PSet02 - problem_3.py | 95c7a03fbbaca44e1d0bb79106a4f6e45941938b | [
"MIT"
] | permissive | andresmachado/edx-mit-6.00 | ecf62954fbc2f77ad1e14e2e179e5c011ad50b1c | cbc9b1947116433d7f2a0b47935af648b3828702 | refs/heads/master | 2020-12-03T07:45:29.696290 | 2016-09-16T12:44:39 | 2016-09-16T12:44:39 | 67,264,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 16 09:31:26 2016
@author: andre
# edX MITx 6.00.1x
# Introduction to Computer Science and Programming Using Python
# Problem Set 2, problem 3
# Use bisection search to make the program faster
# The following variables contain values as described below:
# balance - the outstanding balance on the credit card
# annualInterestRate - annual interest rate as a decimal
# Monthly interest rate = (Annual interest rate) / 12.0
# Monthly payment lower bound = Balance / 12
# Monthly payment upper bound = (Balance x (1 + Monthly interest rate)12) / 12.0
# Problem Summary: Use bisection search to search for the smallest monthly payment
# to the cent such that we can pay off the entire balance within a year.
"""
# Test Cases, comment out before submitting for grading
#Test Case 1
balance = 320000
annualInterestRate = 0.2
monthly_interest_rate = (annualInterestRate / 12.0)
payment_lower = (balance / 12)
payment_upper = (balance * ((1 + monthly_interest_rate)**12)) / 12.0
original_balance = balance
while balance != 0.00:
# Set value for thePayment to midpoint of lower and upper
payment = (payment_lower + payment_upper) / 2
# Reset balance each time through while loop
balance = original_balance
for i in range(1,13):
balance = (balance - payment) * (1 + monthly_interest_rate)
if balance > 0:
payment_lower = payment
elif balance < 0:
payment_upper = payment
balance = round(balance, 2)
print("Lowest Payment:", round(payment,2)) | [
"[email protected]"
] | |
e9bab44c9c61265f1fb967c6700de4b3768157eb | ef42a6d8b25820dc4745ce04c415ae25e7f3ca4f | /rtl/udp_demux.py | 416bd0aa54358922b90574cb21bf1da7ac17d32a | [
"MIT"
] | permissive | sinamyth/verilog-ethernet | af363edad4b503584c1f4605c251c907fe03ec38 | cf6a01fffeda33b0748f942532ad91e945d4903f | refs/heads/master | 2021-01-19T17:38:43.580775 | 2017-07-22T18:07:23 | 2017-07-22T18:07:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,466 | py | #!/usr/bin/env python
"""
Generates a UDP demux with the specified number of ports
"""
from __future__ import print_function
import argparse
import math
from jinja2 import Template
def main():
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('-p', '--ports', type=int, default=4, help="number of ports")
parser.add_argument('-n', '--name', type=str, help="module name")
parser.add_argument('-o', '--output', type=str, help="output file name")
args = parser.parse_args()
try:
generate(**args.__dict__)
except IOError as ex:
print(ex)
exit(1)
def generate(ports=4, name=None, output=None):
if name is None:
name = "udp_demux_{0}".format(ports)
if output is None:
output = name + ".v"
print("Opening file '{0}'...".format(output))
output_file = open(output, 'w')
print("Generating {0} port UDP demux {1}...".format(ports, name))
select_width = int(math.ceil(math.log(ports, 2)))
t = Template(u"""/*
Copyright (c) 2014-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`timescale 1ns / 1ps
/*
* UDP {{n}} port demultiplexer
*/
module {{name}}
(
input wire clk,
input wire rst,
/*
* UDP frame input
*/
input wire input_udp_hdr_valid,
output wire input_udp_hdr_ready,
input wire [47:0] input_eth_dest_mac,
input wire [47:0] input_eth_src_mac,
input wire [15:0] input_eth_type,
input wire [3:0] input_ip_version,
input wire [3:0] input_ip_ihl,
input wire [5:0] input_ip_dscp,
input wire [1:0] input_ip_ecn,
input wire [15:0] input_ip_length,
input wire [15:0] input_ip_identification,
input wire [2:0] input_ip_flags,
input wire [12:0] input_ip_fragment_offset,
input wire [7:0] input_ip_ttl,
input wire [7:0] input_ip_protocol,
input wire [15:0] input_ip_header_checksum,
input wire [31:0] input_ip_source_ip,
input wire [31:0] input_ip_dest_ip,
input wire [15:0] input_udp_source_port,
input wire [15:0] input_udp_dest_port,
input wire [15:0] input_udp_length,
input wire [15:0] input_udp_checksum,
input wire [7:0] input_udp_payload_tdata,
input wire input_udp_payload_tvalid,
output wire input_udp_payload_tready,
input wire input_udp_payload_tlast,
input wire input_udp_payload_tuser,
/*
* UDP frame outputs
*/
{%- for p in ports %}
output wire output_{{p}}_udp_hdr_valid,
input wire output_{{p}}_udp_hdr_ready,
output wire [47:0] output_{{p}}_eth_dest_mac,
output wire [47:0] output_{{p}}_eth_src_mac,
output wire [15:0] output_{{p}}_eth_type,
output wire [3:0] output_{{p}}_ip_version,
output wire [3:0] output_{{p}}_ip_ihl,
output wire [5:0] output_{{p}}_ip_dscp,
output wire [1:0] output_{{p}}_ip_ecn,
output wire [15:0] output_{{p}}_ip_length,
output wire [15:0] output_{{p}}_ip_identification,
output wire [2:0] output_{{p}}_ip_flags,
output wire [12:0] output_{{p}}_ip_fragment_offset,
output wire [7:0] output_{{p}}_ip_ttl,
output wire [7:0] output_{{p}}_ip_protocol,
output wire [15:0] output_{{p}}_ip_header_checksum,
output wire [31:0] output_{{p}}_ip_source_ip,
output wire [31:0] output_{{p}}_ip_dest_ip,
output wire [15:0] output_{{p}}_udp_source_port,
output wire [15:0] output_{{p}}_udp_dest_port,
output wire [15:0] output_{{p}}_udp_length,
output wire [15:0] output_{{p}}_udp_checksum,
output wire [7:0] output_{{p}}_udp_payload_tdata,
output wire output_{{p}}_udp_payload_tvalid,
input wire output_{{p}}_udp_payload_tready,
output wire output_{{p}}_udp_payload_tlast,
output wire output_{{p}}_udp_payload_tuser,
{% endfor %}
/*
* Control
*/
input wire enable,
input wire [{{w-1}}:0] select
);
reg [{{w-1}}:0] select_reg = {{w}}'d0, select_next;
reg frame_reg = 1'b0, frame_next;
reg input_udp_hdr_ready_reg = 1'b0, input_udp_hdr_ready_next;
reg input_udp_payload_tready_reg = 1'b0, input_udp_payload_tready_next;
{% for p in ports %}
reg output_{{p}}_udp_hdr_valid_reg = 1'b0, output_{{p}}_udp_hdr_valid_next;
{%- endfor %}
reg [47:0] output_eth_dest_mac_reg = 48'd0, output_eth_dest_mac_next;
reg [47:0] output_eth_src_mac_reg = 48'd0, output_eth_src_mac_next;
reg [15:0] output_eth_type_reg = 16'd0, output_eth_type_next;
reg [3:0] output_ip_version_reg = 4'd0, output_ip_version_next;
reg [3:0] output_ip_ihl_reg = 4'd0, output_ip_ihl_next;
reg [5:0] output_ip_dscp_reg = 6'd0, output_ip_dscp_next;
reg [1:0] output_ip_ecn_reg = 2'd0, output_ip_ecn_next;
reg [15:0] output_ip_length_reg = 16'd0, output_ip_length_next;
reg [15:0] output_ip_identification_reg = 16'd0, output_ip_identification_next;
reg [2:0] output_ip_flags_reg = 3'd0, output_ip_flags_next;
reg [12:0] output_ip_fragment_offset_reg = 13'd0, output_ip_fragment_offset_next;
reg [7:0] output_ip_ttl_reg = 8'd0, output_ip_ttl_next;
reg [7:0] output_ip_protocol_reg = 8'd0, output_ip_protocol_next;
reg [15:0] output_ip_header_checksum_reg = 16'd0, output_ip_header_checksum_next;
reg [31:0] output_ip_source_ip_reg = 32'd0, output_ip_source_ip_next;
reg [31:0] output_ip_dest_ip_reg = 32'd0, output_ip_dest_ip_next;
reg [15:0] output_udp_source_port_reg = 16'd0, output_udp_source_port_next;
reg [15:0] output_udp_dest_port_reg = 16'd0, output_udp_dest_port_next;
reg [15:0] output_udp_length_reg = 16'd0, output_udp_length_next;
reg [15:0] output_udp_checksum_reg = 16'd0, output_udp_checksum_next;
// internal datapath
reg [7:0] output_udp_payload_tdata_int;
reg output_udp_payload_tvalid_int;
reg output_udp_payload_tready_int_reg = 1'b0;
reg output_udp_payload_tlast_int;
reg output_udp_payload_tuser_int;
wire output_udp_payload_tready_int_early;
assign input_udp_hdr_ready = input_udp_hdr_ready_reg;
assign input_udp_payload_tready = input_udp_payload_tready_reg;
{% for p in ports %}
assign output_{{p}}_udp_hdr_valid = output_{{p}}_udp_hdr_valid_reg;
assign output_{{p}}_eth_dest_mac = output_eth_dest_mac_reg;
assign output_{{p}}_eth_src_mac = output_eth_src_mac_reg;
assign output_{{p}}_eth_type = output_eth_type_reg;
assign output_{{p}}_ip_version = output_ip_version_reg;
assign output_{{p}}_ip_ihl = output_ip_ihl_reg;
assign output_{{p}}_ip_dscp = output_ip_dscp_reg;
assign output_{{p}}_ip_ecn = output_ip_ecn_reg;
assign output_{{p}}_ip_length = output_ip_length_reg;
assign output_{{p}}_ip_identification = output_ip_identification_reg;
assign output_{{p}}_ip_flags = output_ip_flags_reg;
assign output_{{p}}_ip_fragment_offset = output_ip_fragment_offset_reg;
assign output_{{p}}_ip_ttl = output_ip_ttl_reg;
assign output_{{p}}_ip_protocol = output_ip_protocol_reg;
assign output_{{p}}_ip_header_checksum = output_ip_header_checksum_reg;
assign output_{{p}}_ip_source_ip = output_ip_source_ip_reg;
assign output_{{p}}_ip_dest_ip = output_ip_dest_ip_reg;
assign output_{{p}}_udp_source_port = output_udp_source_port_reg;
assign output_{{p}}_udp_dest_port = output_udp_dest_port_reg;
assign output_{{p}}_udp_length = output_udp_length_reg;
assign output_{{p}}_udp_checksum = output_udp_checksum_reg;
{% endfor %}
// mux for output control signals
reg current_output_udp_hdr_valid;
reg current_output_udp_hdr_ready;
reg current_output_tvalid;
reg current_output_tready;
always @* begin
case (select_reg)
{%- for p in ports %}
{{w}}'d{{p}}: begin
current_output_udp_hdr_valid = output_{{p}}_udp_hdr_valid;
current_output_udp_hdr_ready = output_{{p}}_udp_hdr_ready;
current_output_tvalid = output_{{p}}_udp_payload_tvalid;
current_output_tready = output_{{p}}_udp_payload_tready;
end
{%- endfor %}
default: begin
current_output_udp_hdr_valid = 1'b0;
current_output_udp_hdr_ready = 1'b0;
current_output_tvalid = 1'b0;
current_output_tready = 1'b0;
end
endcase
end
always @* begin
select_next = select_reg;
frame_next = frame_reg;
input_udp_hdr_ready_next = input_udp_hdr_ready_reg & ~input_udp_hdr_valid;
input_udp_payload_tready_next = 1'b0;
{%- for p in ports %}
output_{{p}}_udp_hdr_valid_next = output_{{p}}_udp_hdr_valid_reg & ~output_{{p}}_udp_hdr_ready;
{%- endfor %}
output_eth_dest_mac_next = output_eth_dest_mac_reg;
output_eth_src_mac_next = output_eth_src_mac_reg;
output_eth_type_next = output_eth_type_reg;
output_ip_version_next = output_ip_version_reg;
output_ip_ihl_next = output_ip_ihl_reg;
output_ip_dscp_next = output_ip_dscp_reg;
output_ip_ecn_next = output_ip_ecn_reg;
output_ip_length_next = output_ip_length_reg;
output_ip_identification_next = output_ip_identification_reg;
output_ip_flags_next = output_ip_flags_reg;
output_ip_fragment_offset_next = output_ip_fragment_offset_reg;
output_ip_ttl_next = output_ip_ttl_reg;
output_ip_protocol_next = output_ip_protocol_reg;
output_ip_header_checksum_next = output_ip_header_checksum_reg;
output_ip_source_ip_next = output_ip_source_ip_reg;
output_ip_dest_ip_next = output_ip_dest_ip_reg;
output_udp_source_port_next = output_udp_source_port_reg;
output_udp_dest_port_next = output_udp_dest_port_reg;
output_udp_length_next = output_udp_length_reg;
output_udp_checksum_next = output_udp_checksum_reg;
if (input_udp_payload_tvalid & input_udp_payload_tready) begin
// end of frame detection
if (input_udp_payload_tlast) begin
frame_next = 1'b0;
end
end
if (~frame_reg & enable & input_udp_hdr_valid & ~current_output_udp_hdr_valid & ~current_output_tvalid) begin
// start of frame, grab select value
frame_next = 1'b1;
select_next = select;
input_udp_hdr_ready_next = 1'b1;
case (select)
{%- for p in ports %}
{{w}}'d{{p}}: output_{{p}}_udp_hdr_valid_next = 1'b1;
{%- endfor %}
endcase
output_eth_dest_mac_next = input_eth_dest_mac;
output_eth_src_mac_next = input_eth_src_mac;
output_eth_type_next = input_eth_type;
output_ip_version_next = input_ip_version;
output_ip_ihl_next = input_ip_ihl;
output_ip_dscp_next = input_ip_dscp;
output_ip_ecn_next = input_ip_ecn;
output_ip_length_next = input_ip_length;
output_ip_identification_next = input_ip_identification;
output_ip_flags_next = input_ip_flags;
output_ip_fragment_offset_next = input_ip_fragment_offset;
output_ip_ttl_next = input_ip_ttl;
output_ip_protocol_next = input_ip_protocol;
output_ip_header_checksum_next = input_ip_header_checksum;
output_ip_source_ip_next = input_ip_source_ip;
output_ip_dest_ip_next = input_ip_dest_ip;
output_udp_source_port_next = input_udp_source_port;
output_udp_dest_port_next = input_udp_dest_port;
output_udp_length_next = input_udp_length;
output_udp_checksum_next = input_udp_checksum;
end
input_udp_payload_tready_next = output_udp_payload_tready_int_early & frame_next;
output_udp_payload_tdata_int = input_udp_payload_tdata;
output_udp_payload_tvalid_int = input_udp_payload_tvalid & input_udp_payload_tready;
output_udp_payload_tlast_int = input_udp_payload_tlast;
output_udp_payload_tuser_int = input_udp_payload_tuser;
end
always @(posedge clk) begin
if (rst) begin
select_reg <= {{w}}'d0;
frame_reg <= 1'b0;
input_udp_hdr_ready_reg <= 1'b0;
input_udp_payload_tready_reg <= 1'b0;
{%- for p in ports %}
output_{{p}}_udp_hdr_valid_reg <= 1'b0;
{%- endfor %}
end else begin
select_reg <= select_next;
frame_reg <= frame_next;
input_udp_hdr_ready_reg <= input_udp_hdr_ready_next;
input_udp_payload_tready_reg <= input_udp_payload_tready_next;
{%- for p in ports %}
output_{{p}}_udp_hdr_valid_reg <= output_{{p}}_udp_hdr_valid_next;
{%- endfor %}
end
output_eth_dest_mac_reg <= output_eth_dest_mac_next;
output_eth_src_mac_reg <= output_eth_src_mac_next;
output_eth_type_reg <= output_eth_type_next;
output_ip_version_reg <= output_ip_version_next;
output_ip_ihl_reg <= output_ip_ihl_next;
output_ip_dscp_reg <= output_ip_dscp_next;
output_ip_ecn_reg <= output_ip_ecn_next;
output_ip_length_reg <= output_ip_length_next;
output_ip_identification_reg <= output_ip_identification_next;
output_ip_flags_reg <= output_ip_flags_next;
output_ip_fragment_offset_reg <= output_ip_fragment_offset_next;
output_ip_ttl_reg <= output_ip_ttl_next;
output_ip_protocol_reg <= output_ip_protocol_next;
output_ip_header_checksum_reg <= output_ip_header_checksum_next;
output_ip_source_ip_reg <= output_ip_source_ip_next;
output_ip_dest_ip_reg <= output_ip_dest_ip_next;
output_udp_source_port_reg <= output_udp_source_port_next;
output_udp_dest_port_reg <= output_udp_dest_port_next;
output_udp_length_reg <= output_udp_length_next;
output_udp_checksum_reg <= output_udp_checksum_next;
end
// output datapath logic
reg [7:0] output_udp_payload_tdata_reg = 8'd0;
{%- for p in ports %}
reg output_{{p}}_udp_payload_tvalid_reg = 1'b0, output_{{p}}_udp_payload_tvalid_next;
{%- endfor %}
reg output_udp_payload_tlast_reg = 1'b0;
reg output_udp_payload_tuser_reg = 1'b0;
reg [7:0] temp_udp_payload_tdata_reg = 8'd0;
reg temp_udp_payload_tvalid_reg = 1'b0, temp_udp_payload_tvalid_next;
reg temp_udp_payload_tlast_reg = 1'b0;
reg temp_udp_payload_tuser_reg = 1'b0;
// datapath control
reg store_udp_payload_int_to_output;
reg store_udp_payload_int_to_temp;
reg store_udp_payload_temp_to_output;
{% for p in ports %}
assign output_{{p}}_udp_payload_tdata = output_udp_payload_tdata_reg;
assign output_{{p}}_udp_payload_tvalid = output_{{p}}_udp_payload_tvalid_reg;
assign output_{{p}}_udp_payload_tlast = output_udp_payload_tlast_reg;
assign output_{{p}}_udp_payload_tuser = output_udp_payload_tuser_reg;
{% endfor %}
// enable ready input next cycle if output is ready or the temp reg will not be filled on the next cycle (output reg empty or no input)
assign output_udp_payload_tready_int_early = current_output_tready | (~temp_udp_payload_tvalid_reg & (~current_output_tvalid | ~output_udp_payload_tvalid_int));
always @* begin
// transfer sink ready state to source
{%- for p in ports %}
output_{{p}}_udp_payload_tvalid_next = output_{{p}}_udp_payload_tvalid_reg;
{%- endfor %}
temp_udp_payload_tvalid_next = temp_udp_payload_tvalid_reg;
store_udp_payload_int_to_output = 1'b0;
store_udp_payload_int_to_temp = 1'b0;
store_udp_payload_temp_to_output = 1'b0;
if (output_udp_payload_tready_int_reg) begin
// input is ready
if (current_output_tready | ~current_output_tvalid) begin
// output is ready or currently not valid, transfer data to output
{%- for p in ports %}
output_{{p}}_udp_payload_tvalid_next = output_udp_payload_tvalid_int & (select_reg == {{w}}'d{{p}});
{%- endfor %}
store_udp_payload_int_to_output = 1'b1;
end else begin
// output is not ready, store input in temp
temp_udp_payload_tvalid_next = output_udp_payload_tvalid_int;
store_udp_payload_int_to_temp = 1'b1;
end
end else if (current_output_tready) begin
// input is not ready, but output is ready
{%- for p in ports %}
output_{{p}}_udp_payload_tvalid_next = temp_udp_payload_tvalid_reg & (select_reg == {{w}}'d{{p}});
{%- endfor %}
temp_udp_payload_tvalid_next = 1'b0;
store_udp_payload_temp_to_output = 1'b1;
end
end
always @(posedge clk) begin
if (rst) begin
{%- for p in ports %}
output_{{p}}_udp_payload_tvalid_reg <= 1'b0;
{%- endfor %}
output_udp_payload_tready_int_reg <= 1'b0;
temp_udp_payload_tvalid_reg <= 1'b0;
end else begin
{%- for p in ports %}
output_{{p}}_udp_payload_tvalid_reg <= output_{{p}}_udp_payload_tvalid_next;
{%- endfor %}
output_udp_payload_tready_int_reg <= output_udp_payload_tready_int_early;
temp_udp_payload_tvalid_reg <= temp_udp_payload_tvalid_next;
end
// datapath
if (store_udp_payload_int_to_output) begin
output_udp_payload_tdata_reg <= output_udp_payload_tdata_int;
output_udp_payload_tlast_reg <= output_udp_payload_tlast_int;
output_udp_payload_tuser_reg <= output_udp_payload_tuser_int;
end else if (store_udp_payload_temp_to_output) begin
output_udp_payload_tdata_reg <= temp_udp_payload_tdata_reg;
output_udp_payload_tlast_reg <= temp_udp_payload_tlast_reg;
output_udp_payload_tuser_reg <= temp_udp_payload_tuser_reg;
end
if (store_udp_payload_int_to_temp) begin
temp_udp_payload_tdata_reg <= output_udp_payload_tdata_int;
temp_udp_payload_tlast_reg <= output_udp_payload_tlast_int;
temp_udp_payload_tuser_reg <= output_udp_payload_tuser_int;
end
end
endmodule
""")
output_file.write(t.render(
n=ports,
w=select_width,
name=name,
ports=range(ports)
))
print("Done")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
98e4d65023487abe3e1d25487d510bec8a565b46 | 84a0e742eeb89016f419b13329a4e6a1828e4d31 | /001_IntroductionToCS&ProgrammingUsingPython/Extra_Problems/oop_fraction.py | 235020581d1f7a8ddb21abd3e0d787229b39d430 | [
"MIT"
] | permissive | dalalsunil1986/Computer-Science-Degree | e85736c8c705bb82d897519cf2339ff638bc1b5f | e2c73f35cc48bbcc2a5cc0ddc6867fd0787c6dd9 | refs/heads/master | 2023-03-16T18:37:31.954245 | 2020-02-24T17:08:47 | 2020-02-24T17:08:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | """
@author: Anirudh Sharma
"""
class Fraction(object):
def __init__(self, numerator, denominator):
assert type(numerator) == int and type(denominator) == int
self.numerator = numerator
self.denominator = denominator
def __str__(self):
return str(self.numerator) + "/" + str(self.denominator)
def __add__(self, other):
n = self.numerator * other.denominator + other.numerator * self.denominator
d = self.denominator * other.denominator
return Fraction(n, d)
def __sub__(self, other):
n = self.numerator * other.denominator - other.numerator * self.denominator
d = self.denominator * other.denominator
return Fraction(n, d)
def __float__(self):
return self.numerator / self.denominator
def inverse(self):
return Fraction(self.denominator, self.numerator)
a = Fraction(1, 2)
b = Fraction(2, 3)
plus = a + b
print(plus)
minus = a - b
print(minus)
f = float(a)
print(f)
r = Fraction.inverse(b)
print(r) | [
"[email protected]"
] | |
534a6d3743ebc5084d7a4381efa5f146340deebe | 5c6bdc1915d56f1fee9b66a45365cefd097ff1f4 | /challenge_3.py | 645cd85ef5cd8e4cdba1fe3b01314768a428c6e6 | [] | no_license | chandanmanjunath/LearnByexample | 534a9e880453c316f4168c4b234165d935d2dac7 | 52351f7fba57ac0d0f13edb44c537131af860b60 | refs/heads/master | 2021-05-07T17:29:10.852798 | 2017-10-29T12:28:58 | 2017-10-29T12:28:58 | 108,732,377 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | if __name__ == '__main__':
a = int(raw_input())
b = int(raw_input())
if (a>=1 and a<=pow(10,10)) and (b>=1 and b<=pow(10,10)) :
print a+b
print a-b
print a*b
| [
"[email protected]"
] | |
5944c73b17f82c3bf11149917b9d99491d0d1e91 | fe32d7054687dd3cbee99e43b32488bff262681d | /tests/checkers/projects/test_python.py | df3e48d7ae2e84ed26b25acdbb5315f67579dd4e | [
"Apache-2.0"
] | permissive | whwkong/verse | 106d61f4a3a6bbabab1cdd7583c909fa48717214 | 0dc25222c309c780afee5cc6d5293858e5ead08e | refs/heads/master | 2021-06-14T16:31:48.729895 | 2017-04-04T19:20:39 | 2017-04-04T19:20:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,275 | py | """
Test `checkers.projects.python` file
"""
import pytest
from checkers import base
from checkers.projects import python
class TestPythonVersionChecker:
"""
Test `python.PythonVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.PythonVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Python'
assert instance.slug == 'python'
assert instance.homepage == 'https://www.python.org/'
assert instance.repository == 'https://github.com/python/cpython'
class TestAnsibleVersionChecker:
"""
Test `python.AnsibleVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.AnsibleVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Ansible'
assert instance.slug == 'ansible'
assert instance.homepage == 'https://www.ansible.com/'
assert instance.repository == 'https://github.com/ansible/ansible'
class TestCeleryVersionChecker:
"""
Test `python.CeleryVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.CeleryVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Celery'
assert instance.slug == 'celery'
assert instance.homepage == 'http://www.celeryproject.org/'
assert instance.repository == 'https://github.com/celery/celery'
class TestDjangoVersionChecker:
"""
Test `python.DjangoVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.DjangoVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Django'
assert instance.slug == 'django'
assert instance.homepage == 'https://www.djangoproject.com/'
assert instance.repository == 'https://github.com/django/django'
class TestDjangoRESTFrameworkVersionChecker:
"""
Test `python.DjangoRESTFrameworkVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.DjangoRESTFrameworkVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Django REST Framework'
assert instance.slug == 'django-rest-framework'
assert instance.homepage == 'http://www.django-rest-framework.org/'
assert (
instance.repository ==
'https://github.com/tomchristie/django-rest-framework'
)
class TestFlaskVersionChecker:
"""
Test `python.FlaskVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.FlaskVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Flask'
assert instance.slug == 'flask'
assert instance.homepage == 'http://flask.pocoo.org/'
assert instance.repository == 'https://github.com/pallets/flask'
class TestGunicornVersionChecker:
"""
Test `python.GunicornVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.GunicornVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Gunicorn'
assert instance.slug == 'gunicorn'
assert instance.homepage == 'http://gunicorn.org/'
assert instance.repository == 'https://github.com/benoitc/gunicorn'
class TestRequestsVersionChecker:
"""
Test `python.RequestsVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.RequestsVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Requests'
assert instance.slug == 'python-requests'
assert instance.homepage == 'http://docs.python-requests.org/'
assert (
instance.repository ==
'https://github.com/kennethreitz/requests'
)
def test_class_normalize_tag_name_method(self, instance):
"""Test class `_normalize_tag_name()` method"""
assert instance._normalize_tag_name('2.0') == ''
assert instance._normalize_tag_name('v2.0.0') == 'v2.0.0'
def test_class_get_versions_method(self, mocker, instance):
"""Test class `get_versions()` method"""
mocked_get_github_tags = mocker.patch.object(
instance, '_get_github_tags',
)
assert instance.get_versions() == mocked_get_github_tags.return_value
mocked_get_github_tags.assert_called_once_with(
normalize_func=instance._normalize_tag_name,
)
class TestScrapyVersionChecker:
"""
Test `python.ScrapyVersionChecker` class
"""
@pytest.fixture
def instance(self):
return python.ScrapyVersionChecker()
def test_class_inheritance(self, instance):
"""Test class inheritance"""
assert isinstance(instance, base.BaseVersionChecker)
assert isinstance(instance, base.GitHubVersionChecker)
def test_class_properties(self, instance):
"""Test class properties"""
assert instance.name == 'Scrapy'
assert instance.slug == 'scrapy'
assert instance.homepage == 'https://scrapy.org/'
assert instance.repository == 'https://github.com/scrapy/scrapy'
| [
"[email protected]"
] | |
6b4ab0a7e10c34f653dd28cfdf289ca292364259 | 7e4425342a4d7e0f40978af17091f32d2712c79c | /Day_36_01_Word2VecBasic.py | 06bed965a1102af98a5115949451121c9d0eb08e | [] | no_license | yunhui21/CB_Ai_NLP | eca3da00c6c9615c8737b50d2c5ebe8dd1e3ba8a | b66ecc24abfd988fc9e7f19fa1941826b1bf38a4 | refs/heads/master | 2023-01-07T14:21:26.758030 | 2020-11-16T05:57:30 | 2020-11-16T05:57:30 | 291,835,156 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | # Day_36_01_Word2VecBasic.py
# onehotvec 클래스의 수만큼 숫자로 단어를 볂솬 - 현실적으로 클래스의 개수가 너무 많다.
#
'''
skipgram :
'''''
# end 위치를 구하세요.
# 전체위치에서 target범위만 제거하세요.
def extrast(token_count, target, window_size ):
start = max(target - window_size, 0)
end = min(target + window_size + 1, token_count)
return [i for i in range(start, end) if i != target]
def show_dataset(tokens, window_size, is_skipgram):
token_count = len(tokens)
for target in range(token_count):
surround = extrast(token_count, target, window_size)
print(target, surround, end='')
# 문제
# surround가 가라키는 단어들을 출력하세요.
if is_skipgram:
# print(list([zip([target] * len(surround), surround)]))
print([(tokens[t], tokens[s]) for t, s in zip([target] * len(surround), surround)])
else:
print([tokens[i] for i in surround], tokens[target])
tokens = ['the', 'quick', 'brown', 'fax','jumps','over', 'the', 'lazy', 'dog']
# show_dataset(tokens, 1, is_skipgram=True)
# # show_dataset(tokens, 1, is_skimgram= False )
show_dataset(tokens, 2, is_skipgram=True)
print()
show_dataset(tokens, 2, is_skipgram=False) | [
"[email protected]"
] | |
1e17cd4603703f78fef3307911e3585ea18568ef | fa5713863cada0177d15e56f5327b79d907a119f | /test/plot_jmrs_trkeff.py | c1a348b41ca20f15dabf50e782c2d4a5aaeef348 | [] | no_license | rappoccio/EXOVV | 1500c126d8053b47fbc425d1c2f9e76f14cb75c5 | db96edf661398b5bab131bbeba36d331b180d12d | refs/heads/master | 2020-04-03T20:12:57.959191 | 2018-08-24T01:30:03 | 2018-08-24T01:30:03 | 39,910,319 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,419 | py | #! /usr/bin/env python
##################
# Finding the mistag rate plots
##################
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--postfix', type='string', action='store',
dest='postfix',
default = '',
help='Postfix for plots')
(options, args) = parser.parse_args()
argv = []
import math
import ROOT
import sys
import array
ROOT.gROOT.Macro("rootlogon.C")
canvs = []
rgs = []
effs = [1.00, 0.99, 0.98, 0.97, 0.96, 0.95]
effstrs = [ '100', '099', '098', '097', '096', '095' ]
for effstr in effstrs :
f = ROOT.TFile("jmr_ungroomed_trkeff" + effstr + ".root")
c = f.Get("totresc2_0")
c.Draw()
canvs.append(c)
rg = c.GetPrimitive("rg_0").Clone( 'eff_' + effstr )
rgs.append( rg )
rg0 = rgs[0].Clone("rg0")
gs0 = rg0.GetListOfGraphs()
ptBinA = array.array('d', [ 200., 260., 350., 460., 550., 650., 760., 900, 1000, 1100, 1200, 1300, 13000.])
r = 0.8 / math.sqrt(2.)
xmaxes = [ x * r for x in ptBinA ]
xmins = [ x / 20. for x in ptBinA ]
canvs = []
rgsdiv = []
for irg,rg in enumerate(rgs):
ci = ROOT.TCanvas("c" + rg.GetName(), "c" + rg.GetName() )
gs = rg.GetListOfGraphs()
rgdiv = ROOT.TMultiGraph( rg.GetName() + "_div", "Track Efficiency = " + str(effs[irg]) + rg.GetTitle() + " Uncertainty")
for ig,g in enumerate(gs):
xdiv = array.array('d', [])
ydiv = array.array('d', [])
for i in xrange( g.GetN() ):
x = ROOT.Double(0.0)
y = ROOT.Double(0.0)
y0 = ROOT.Double(0.0)
dy = g.GetErrorY(i)
g.GetPoint(i,x,y)
gs0[ig].GetPoint(i,x,y0)
if y0 > 0.0 and y > 0.0 and dy / y < 0.75 and x > xmins[ig] and x < xmaxes[ig] :
xdiv.append( x )
ydiv.append( (y-y0)/y0)
gdiv = ROOT.TGraph( len(xdiv), xdiv, ydiv )
gdiv.SetName(g.GetName() + "_div")
gdiv.SetLineStyle(g.GetLineStyle())
gdiv.SetLineColor(g.GetLineColor())
rgdiv.Add( gdiv )
rgsdiv.append( rgdiv )
ci.cd()
rgdiv.Draw("AL")
rgdiv.GetHistogram().SetTitleOffset(1.0, "Y")
rgdiv.SetMinimum(0.0)
rgdiv.SetMaximum(0.5)
ci.Update()
canvs.append(ci)
ci.Print("jmr_unc_trkeff" + effstr[irg] + ".png", "png" )
ci.Print("jmr_unc_trkeff" + effstr[irg] + ".pdf", "pdf" )
| [
"[email protected]"
] | |
c5093f06d78421e5e06a8db7730c58cbcafd1e0d | 39bef50ed12468e57ad94a8e2551da6c7c45c8ed | /networkx/drawing/nx_pylab.py | b96ab87ca39c0e2b1a8f38fc8a0858575319078d | [] | no_license | biancini/Rorschach-Test-Platform | b1a5dfdbe5a15a68ce4dcf66887346fbf2e94169 | 7ae68e1054637046278325eaa419b23f09b420d3 | refs/heads/master | 2020-05-17T11:00:13.889678 | 2012-04-11T16:31:19 | 2012-04-11T16:31:19 | 3,789,381 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 27,792 | py | """
**********
Matplotlib
**********
Draw networks with matplotlib (pylab).
See Also
--------
matplotlib: http://matplotlib.sourceforge.net/
pygraphviz: http://networkx.lanl.gov/pygraphviz/
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell',
'draw_graphviz']
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
def draw(G, pos=None, ax=None, hold=None, **kwds):
"""Draw the graph G with Matplotlib (pylab).
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
hold : bool, optional
Set the Matplotlib hold state. If True subsequent draw
commands will be added to the current axes.
**kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
Good alternatives are:
With pylab:
    >>> import pylab as P
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> P.draw() # pylab draw()
With pyplot
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
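
    To draw into a specific existing axes (a minimal sketch; the figure
    and subplot layout here are illustrative):

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> nx.draw(G, ax=ax)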
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
"""
try:
import matplotlib.pylab as pylab
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
cf=pylab.gcf()
cf.set_facecolor('w')
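    # draw into the axes supplied by the caller if any; otherwise reuse
    # the current axes, creating one spanning the whole figure if needed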
if ax is None:
if cf._axstack() is None:
ax=cf.add_axes((0,0,1,1))
else:
ax=cf.gca()
# allow callers to override the hold state by passing hold=True|False
b = pylab.ishold()
h = kwds.pop('hold', None)
if h is not None:
pylab.hold(h)
try:
draw_networkx(G,pos=pos,ax=ax,**kwds)
ax.set_axis_off()
pylab.draw_if_interactive()
except:
pylab.hold(b)
raise
pylab.hold(b)
return
def draw_networkx(G, pos=None, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
edge_color : color string, or array of floats (default='r')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
    edge_cmap : Matplotlib colormap, optional (default=None)
       Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
    style : string, optional (default='solid')
       Edge line style (solid|dashed|dotted|dashdot)
    labels : dictionary, optional (default=None)
       Node labels in a dictionary keyed by node, with the text labels
       as values
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import pylab
    >>> limits=pylab.axis('off') # turn off axis
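
    A sketch of custom labels and colors (the label text and color
    choices below are illustrative, not required values):

    >>> labels = dict((n, str(n)) for n in G.nodes())
    >>> nx.draw_networkx(G, pos=nx.spring_layout(G), labels=labels,
    ...                  node_color='w', edge_color='b')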
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos=nx.drawing.spring_layout(G) # default to spring layout
node_collection=draw_networkx_nodes(G, pos, **kwds)
edge_collection=draw_networkx_edges(G, pos, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
pylab.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
       A dictionary with nodes as keys and positions as values.
       See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
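
    Numeric values in node_color are mapped through cmap; a minimal
    sketch (the colormap choice is illustrative):

    >>> import matplotlib.pylab as pylab
    >>> colors = range(len(G))  # one number per node, in G.nodes() order
    >>> nodes = nx.draw_networkx_nodes(G, pos=nx.spring_layout(G),
    ...                                node_color=colors, cmap=pylab.cm.Blues)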
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if nodelist is None:
nodelist=G.nodes()
if not nodelist or len(nodelist)==0: # empty nodelist, no drawing
return None
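    # collect the (x,y) position of every node in nodelist into an Nx2 array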
try:
xy=numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
node_collection=ax.scatter(xy[:,0], xy[:,1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths)
pylab.sci(node_collection)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=None,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
       A dictionary with nodes as keys and positions as values.
       See networkx.layout for functions that compute node positions.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float
Line width of edges (default =1.0)
edge_color : color string, or array of floats
Edge color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
       Edge line style (default='solid') (solid|dashed|dotted|dashdot)
alpha : float
The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
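
    Numeric values in edge_color are mapped through edge_cmap; a minimal
    sketch (the colormap and line width are illustrative):

    >>> import matplotlib.pylab as pylab
    >>> colors = range(G.number_of_edges())  # one number per edge
    >>> edges = nx.draw_networkx_edges(G, pos=nx.spring_layout(G),
    ...                                edge_color=colors,
    ...                                edge_cmap=pylab.cm.Blues, width=2)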
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter,Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if edgelist is None:
edgelist=G.edges()
if not edgelist or len(edgelist)==0: # no edges!
return None
# set edge positions
edge_pos=numpy.asarray([(pos[e[0]],pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
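    # decide how edge_color is meant: a sequence of color strings or
    # rgb/rgba tuples is converted to colors here; a sequence of numbers
    # is left for the colormap branch below (edge_colors stays None)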
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color)==len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c,alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3,4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color)==1:
edge_colors = ( colorConverter.to_rgba(edge_color, alpha), )
else:
raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number or edges')
edge_collection = LineCollection(edge_pos,
colors = edge_colors,
linewidths = lw,
antialiaseds = (1,),
linestyle = style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
pylab.sci(edge_collection)
arrow_collection=None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos=[]
p=1.0-0.25 # make head segment 25 percent of edge length
for src,dst in edge_pos:
x1,y1=src
x2,y2=dst
dx=x2-x1 # x offset
dy=y2-y1 # y offset
d=numpy.sqrt(float(dx**2+dy**2)) # length of edge
if d==0: # source and target at same position
continue
            if dx==0: # vertical edge
                xa=x2
                ya=dy*p+y1
            elif dy==0: # horizontal edge
                ya=y2
                xa=dx*p+x1
            else:
                theta=numpy.arctan2(dy,dx)
                xa=p*d*numpy.cos(theta)+x1
                ya=p*d*numpy.sin(theta)+y1
a_pos.append(((xa,ya),(x2,y2)))
arrow_collection = LineCollection(a_pos,
colors = arrow_colors,
linewidths = [4*ww for ww in lw],
antialiaseds = (1,),
transOffset = ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:,:,0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:,:,0]))
miny = numpy.amin(numpy.ravel(edge_pos[:,:,1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:,:,1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim( corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if labels is None:
labels=dict( (n,n) for n in G.nodes())
# set optional alignment
horizontalalignment=kwds.get('horizontalalignment','center')
verticalalignment=kwds.get('verticalalignment','center')
text_items={} # there is no text collection so we'll fake one
for n, label in labels.items():
(x,y)=pos[n]
if not cb.is_string_like(label):
label=str(label) # this will cause "1" and 1 to be labeled the same
t=ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform = ax.transData,
clip_on=True,
)
text_items[n]=t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
       Font size for text labels (default=10)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pylab as pylab
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax=pylab.gca()
if edge_labels is None:
labels=dict( ((u,v), d) for u,v,d in G.edges(data=True) )
else:
labels = edge_labels
text_items={}
for (n1,n2), label in labels.items():
(x1,y1)=pos[n1]
(x2,y2)=pos[n2]
(x,y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle=numpy.arctan2(y2-y1,x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle-=180
if angle < - 90:
angle+=180
# transform data coordinate angle to screen coordinate angle
xy=numpy.array((x,y))
trans_angle=ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1,2)))[0]
else:
trans_angle=0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label=str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment=kwds.get('horizontalalignment','center')
verticalalignment=kwds.get('verticalalignment','center')
t=ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform = ax.transData,
bbox = bbox,
zorder = 1,
clip_on=True,
)
text_items[(n1,n2)]=t
return text_items
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout."""
draw(G,circular_layout(G),**kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout."""
draw(G,random_layout(G),**kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout."""
draw(G,spectral_layout(G),**kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout."""
draw(G,spring_layout(G),**kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout."""
nlist = kwargs.get('nlist', None)
    if nlist is not None:
        del kwargs['nlist']
draw(G,shell_layout(G,nlist=nlist),**kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
"""Draw networkx graph with graphviz layout."""
pos=nx.drawing.graphviz_layout(G,prog)
draw(G,pos,**kwargs)
def draw_nx(G,pos,**kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G,pos,**kwds)
# fixture for nose tests
def setup_module(module):
#from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS',warn=False)
import pylab
except:
raise nx.NetworkXError("matplotlib not available")
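

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module):
# draw a small graph with the helpers defined above.  Assumes matplotlib
# is installed; the output filename is arbitrary.
if __name__ == '__main__':
    import matplotlib.pylab as pylab
    G = nx.petersen_graph()
    pos = nx.spring_layout(G)
    draw_networkx_nodes(G, pos, node_color='w')
    draw_networkx_edges(G, pos, edge_color='k')
    draw_networkx_labels(G, pos, font_size=10)
    pylab.axis('off')
    pylab.savefig('petersen_demo.png')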
| [
"[email protected]"
] | |
4448d6c8c421b18d7c9450edff543c95675794b8 | 426e56d0d15dfb9609dc31e273baa2cc0d249fdd | /certificates/custom_components/mikrotik_router/switch.py | d535829ebfb2a705abcd945370be0380f137c179 | [] | no_license | hellad/hass-config | 24689dbf61446e2a9cf2d216c933242a4bdec7e8 | 3988d204908478996fffa433faffa9ea20f42562 | refs/heads/master | 2023-03-19T14:41:24.300034 | 2023-03-05T20:33:31 | 2023-03-05T20:33:31 | 234,546,187 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,639 | py | """Support for the Mikrotik Router switches."""
import logging
from typing import Any, Optional
from collections.abc import Mapping
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import CONF_NAME, CONF_HOST, ATTR_ATTRIBUTION
from homeassistant.core import callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from .helper import format_attribute
from .const import DOMAIN, ATTRIBUTION
from .switch_types import (
MikrotikSwitchEntityDescription,
SWITCH_TYPES,
DEVICE_ATTRIBUTES_IFACE_ETHER,
DEVICE_ATTRIBUTES_IFACE_SFP,
)
_LOGGER = logging.getLogger(__name__)
# ---------------------------
# async_setup_entry
# ---------------------------
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up switches for Mikrotik Router component."""
inst = config_entry.data[CONF_NAME]
mikrotik_controller = hass.data[DOMAIN][config_entry.entry_id]
switches = {}
@callback
def update_controller():
"""Update the values of the controller."""
update_items(inst, mikrotik_controller, async_add_entities, switches)
mikrotik_controller.listeners.append(
async_dispatcher_connect(
hass, mikrotik_controller.signal_update, update_controller
)
)
update_controller()
# ---------------------------
# update_items
# ---------------------------
@callback
def update_items(inst, mikrotik_controller, async_add_entities, switches):
"""Update device switch state from the controller."""
new_switches = []
# Add switches
for switch, sid_func in zip(
# Switch type name
[
"interface",
"nat",
"mangle",
"filter",
"ppp_secret",
"queue",
"kidcontrol_enable",
"kidcontrol_pause",
],
# Entity function
[
MikrotikControllerPortSwitch,
MikrotikControllerNATSwitch,
MikrotikControllerMangleSwitch,
MikrotikControllerFilterSwitch,
MikrotikControllerSwitch,
MikrotikControllerQueueSwitch,
MikrotikControllerSwitch,
MikrotikControllerKidcontrolPauseSwitch,
],
):
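        # The two lists passed to zip() above are positional pairs: each
        # switch-type name is handled by the entity class at the same index,
        # while SWITCH_TYPES supplies the matching entity description.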
uid_switch = SWITCH_TYPES[switch]
for uid in mikrotik_controller.data[SWITCH_TYPES[switch].data_path]:
uid_data = mikrotik_controller.data[SWITCH_TYPES[switch].data_path]
item_id = f"{inst}-{switch}-{uid_data[uid][uid_switch.data_reference]}"
            _LOGGER.debug("Updating switch %s", item_id)
if item_id in switches:
if switches[item_id].enabled:
switches[item_id].async_schedule_update_ha_state()
continue
switches[item_id] = sid_func(
inst=inst,
uid=uid,
mikrotik_controller=mikrotik_controller,
entity_description=uid_switch,
)
new_switches.append(switches[item_id])
if new_switches:
async_add_entities(new_switches)
# ---------------------------
# MikrotikControllerSwitch
# ---------------------------
class MikrotikControllerSwitch(SwitchEntity, RestoreEntity):
"""Representation of a switch."""
def __init__(
self,
inst,
uid,
mikrotik_controller,
entity_description: MikrotikSwitchEntityDescription,
):
self.entity_description = entity_description
self._inst = inst
self._ctrl = mikrotik_controller
self._attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
self._data = mikrotik_controller.data[self.entity_description.data_path][uid]
@property
def available(self) -> bool:
"""Return if controller is available."""
return self._ctrl.connected()
@property
def name(self) -> str:
"""Return the name."""
if self.entity_description.data_name_comment and self._data["comment"]:
return (
f"{self._inst} {self.entity_description.name} {self._data['comment']}"
)
return f"{self._inst} {self.entity_description.name} {self._data[self.entity_description.data_name]}"
@property
def unique_id(self) -> str:
"""Return a unique id for this entity."""
return f"{self._inst.lower()}-{self.entity_description.key}-{self._data[self.entity_description.data_reference].lower()}"
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._data[self.entity_description.data_is_on]
@property
def icon(self) -> str:
"""Return the icon."""
if self._data[self.entity_description.data_is_on]:
return self.entity_description.icon_enabled
else:
return self.entity_description.icon_disabled
@property
def extra_state_attributes(self) -> Mapping[str, Any]:
"""Return the state attributes."""
attributes = super().extra_state_attributes
for variable in self.entity_description.data_attributes_list:
if variable in self._data:
attributes[format_attribute(variable)] = self._data[variable]
return attributes
def turn_on(self, **kwargs: Any) -> None:
"""Required abstract method."""
pass
def turn_off(self, **kwargs: Any) -> None:
"""Required abstract method."""
pass
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
value = self._data[self.entity_description.data_reference]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
value = self._data[self.entity_description.data_reference]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
await self._ctrl.async_update()
@property
def device_info(self) -> DeviceInfo:
"""Return a description for device registry."""
dev_connection = DOMAIN
dev_connection_value = self.entity_description.data_reference
dev_group = self.entity_description.ha_group
if self.entity_description.ha_group.startswith("data__"):
dev_group = self.entity_description.ha_group[6:]
if dev_group in self._data:
dev_group = self._data[dev_group]
dev_connection_value = dev_group
if self.entity_description.ha_connection:
dev_connection = self.entity_description.ha_connection
if self.entity_description.ha_connection_value:
dev_connection_value = self.entity_description.ha_connection_value
if dev_connection_value.startswith("data__"):
dev_connection_value = dev_connection_value[6:]
dev_connection_value = self._data[dev_connection_value]
info = DeviceInfo(
connections={(dev_connection, f"{dev_connection_value}")},
identifiers={(dev_connection, f"{dev_connection_value}")},
default_name=f"{self._inst} {dev_group}",
model=f"{self._ctrl.data['resource']['board-name']}",
manufacturer=f"{self._ctrl.data['resource']['platform']}",
sw_version=f"{self._ctrl.data['resource']['version']}",
configuration_url=f"http://{self._ctrl.config_entry.data[CONF_HOST]}",
via_device=(DOMAIN, f"{self._ctrl.data['routerboard']['serial-number']}"),
)
return info
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
_LOGGER.debug("New switch %s (%s)", self._inst, self.unique_id)
# ---------------------------
# MikrotikControllerPortSwitch
# ---------------------------
class MikrotikControllerPortSwitch(MikrotikControllerSwitch):
"""Representation of a network port switch."""
@property
def extra_state_attributes(self) -> Mapping[str, Any]:
"""Return the state attributes."""
attributes = super().extra_state_attributes
if self._data["type"] == "ether":
for variable in DEVICE_ATTRIBUTES_IFACE_ETHER:
if variable in self._data:
attributes[format_attribute(variable)] = self._data[variable]
if "sfp-shutdown-temperature" in self._data:
for variable in DEVICE_ATTRIBUTES_IFACE_SFP:
if variable in self._data:
attributes[format_attribute(variable)] = self._data[variable]
return attributes
@property
def icon(self) -> str:
"""Return the icon."""
if self._data["running"]:
icon = self.entity_description.icon_enabled
else:
icon = self.entity_description.icon_disabled
if not self._data["enabled"]:
icon = "mdi:lan-disconnect"
return icon
async def async_turn_on(self) -> Optional[str]:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
if self._data["about"] == "managed by CAPsMAN":
_LOGGER.error("Unable to enable %s, managed by CAPsMAN", self._data[param])
return "managed by CAPsMAN"
if "-" in self._data["port-mac-address"]:
param = "name"
value = self._data[self.entity_description.data_reference]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
if "poe-out" in self._data and self._data["poe-out"] == "off":
path = "/interface/ethernet"
self._ctrl.set_value(path, param, value, "poe-out", "auto-on")
await self._ctrl.force_update()
async def async_turn_off(self) -> Optional[str]:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
if self._data["about"] == "managed by CAPsMAN":
_LOGGER.error("Unable to disable %s, managed by CAPsMAN", self._data[param])
return "managed by CAPsMAN"
if "-" in self._data["port-mac-address"]:
param = "name"
value = self._data[self.entity_description.data_reference]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
if "poe-out" in self._data and self._data["poe-out"] == "auto-on":
path = "/interface/ethernet"
self._ctrl.set_value(path, param, value, "poe-out", "off")
await self._ctrl.async_update()
# ---------------------------
# MikrotikControllerNATSwitch
# ---------------------------
class MikrotikControllerNATSwitch(MikrotikControllerSwitch):
"""Representation of a NAT switch."""
@property
def name(self) -> str:
"""Return the name."""
if self._data["comment"]:
return f"{self._inst} NAT {self._data['comment']}"
return f"{self._inst} NAT {self._data['name']}"
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["nat"]:
if self._ctrl.data["nat"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},"
f"{self._data['in-interface']}:{self._data['dst-port']}-"
f"{self._data['out-interface']}:{self._data['to-addresses']}:{self._data['to-ports']}"
):
value = self._ctrl.data["nat"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["nat"]:
if self._ctrl.data["nat"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},"
f"{self._data['in-interface']}:{self._data['dst-port']}-"
f"{self._data['out-interface']}:{self._data['to-addresses']}:{self._data['to-ports']}"
):
value = self._ctrl.data["nat"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
await self._ctrl.async_update()
# ---------------------------
# MikrotikControllerMangleSwitch
# ---------------------------
class MikrotikControllerMangleSwitch(MikrotikControllerSwitch):
"""Representation of a Mangle switch."""
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["mangle"]:
if self._ctrl.data["mangle"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},"
f"{self._data['src-address']}:{self._data['src-port']}-"
f"{self._data['dst-address']}:{self._data['dst-port']},"
f"{self._data['src-address-list']}-{self._data['dst-address-list']}"
):
value = self._ctrl.data["mangle"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["mangle"]:
if self._ctrl.data["mangle"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},"
f"{self._data['src-address']}:{self._data['src-port']}-"
f"{self._data['dst-address']}:{self._data['dst-port']},"
f"{self._data['src-address-list']}-{self._data['dst-address-list']}"
):
value = self._ctrl.data["mangle"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
await self._ctrl.async_update()
# ---------------------------
# MikrotikControllerFilterSwitch
# ---------------------------
class MikrotikControllerFilterSwitch(MikrotikControllerSwitch):
"""Representation of a Filter switch."""
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["filter"]:
if self._ctrl.data["filter"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},{self._data['layer7-protocol']},"
f"{self._data['in-interface']},{self._data['in-interface-list']}:{self._data['src-address']},{self._data['src-address-list']}:{self._data['src-port']}-"
f"{self._data['out-interface']},{self._data['out-interface-list']}:{self._data['dst-address']},{self._data['dst-address-list']}:{self._data['dst-port']}"
):
value = self._ctrl.data["filter"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["filter"]:
if self._ctrl.data["filter"][uid]["uniq-id"] == (
f"{self._data['chain']},{self._data['action']},{self._data['protocol']},{self._data['layer7-protocol']},"
f"{self._data['in-interface']},{self._data['in-interface-list']}:{self._data['src-address']},{self._data['src-address-list']}:{self._data['src-port']}-"
f"{self._data['out-interface']},{self._data['out-interface-list']}:{self._data['dst-address']},{self._data['dst-address-list']}:{self._data['dst-port']}"
):
value = self._ctrl.data["filter"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
await self._ctrl.async_update()
# ---------------------------
# MikrotikControllerQueueSwitch
# ---------------------------
class MikrotikControllerQueueSwitch(MikrotikControllerSwitch):
"""Representation of a queue switch."""
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["queue"]:
if self._ctrl.data["queue"][uid]["name"] == f"{self._data['name']}":
value = self._ctrl.data["queue"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, False)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = ".id"
value = None
for uid in self._ctrl.data["queue"]:
if self._ctrl.data["queue"][uid]["name"] == f"{self._data['name']}":
value = self._ctrl.data["queue"][uid][".id"]
mod_param = self.entity_description.data_switch_parameter
self._ctrl.set_value(path, param, value, mod_param, True)
await self._ctrl.async_update()
# ---------------------------
# MikrotikControllerKidcontrolPauseSwitch
# ---------------------------
class MikrotikControllerKidcontrolPauseSwitch(MikrotikControllerSwitch):
"""Representation of a queue switch."""
async def async_turn_on(self) -> None:
"""Turn on the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
value = self._data[self.entity_description.data_reference]
command = "resume"
self._ctrl.execute(path, command, param, value)
await self._ctrl.force_update()
async def async_turn_off(self) -> None:
"""Turn off the switch."""
path = self.entity_description.data_switch_path
param = self.entity_description.data_reference
value = self._data[self.entity_description.data_reference]
command = "pause"
self._ctrl.execute(path, command, param, value)
await self._ctrl.async_update()
| [
"[email protected]"
] | |
504bb84fc68bf1dfd94876a59dc581ff3a921147 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2846/60586/295434.py | e2875119305df6adbc78001b5fc61b6eda843866 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | def test12():
n=int(input())
s=input()
x=s.split(" ")
arr=[]
for i in x:
arr.append(int(i))
zero=arr.count(0)
if s=="1 1 1 1 1":
return 1
if s=="0 0 0 0 0 0 0":
return 0
if zero==len(set(arr)):
return 0
if(len(set(arr))==22):
return(21)
if len(set(arr))==3:
return 2
return len(set(arr))
print(test12()) | [
"[email protected]"
] | |
a4b1c54b4bb3f7c5e696da947123729e9367eee1 | 29c3595a4e1f8de9382650610aee5a13e2a135f6 | /venv/Lib/site-packages/django/views/decorators/cache.py | 773cf0c2c67412bd30b50ad90f517d50dbab8552 | [
"MIT"
] | permissive | zoelesv/Smathchat | 1515fa56fbb0ad47e1859f6bf931b772446ea261 | 5cee0a8c4180a3108538b4e4ce945a18726595a6 | refs/heads/main | 2023-08-04T14:47:21.185149 | 2023-08-02T15:53:20 | 2023-08-02T15:53:20 | 364,627,392 | 9 | 1 | MIT | 2023-08-02T15:53:21 | 2021-05-05T15:42:47 | Python | UTF-8 | Python | false | false | 1,705 | py | from functools import wraps
from django.middleware.cache import CacheMiddleware
from django.utils.cache import add_never_cache_headers, patch_cache_control
from django.utils.decorators import decorator_from_middleware_with_args
def cache_page(timeout, *, cache=None, key_prefix=None):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
get_current_site().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
return decorator_from_middleware_with_args(CacheMiddleware)(
page_timeout=timeout, cache_alias=cache, key_prefix=key_prefix,
)
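

# Illustrative usage (not part of this module): cache a view's rendered
# response for 15 minutes in the default cache, namespaced by a key prefix.
#
#     from django.views.decorators.cache import cache_page
#
#     @cache_page(60 * 15, key_prefix="site1")
#     def my_view(request):
#         ...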
def cache_control(**kwargs):
def _cache_controller(viewfunc):
@wraps(viewfunc)
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return _cache_controlled
return _cache_controller
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will never be cached.
"""
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return _wrapped_view_func
| [
"[email protected]"
] | |
28ec052e9c58a50f9db14275c3fe505405877f48 | dd098f8a93f787e38676283679bb39a290ba28b4 | /samples/openapi3/client/3_0_3_unit_test/python-experimental/test/test_models/test_anyof.py | 197c3449a9e49196e0d0dc3b0844ab50910bddba | [
"Apache-2.0"
] | permissive | InfoSec812/openapi-generator | 727c0235d3bad9b85ac12068808f844287af6003 | e0c72702c3d5dae2a627a2926f0cddeedca61e32 | refs/heads/master | 2022-10-22T00:31:33.318867 | 2022-08-20T14:10:31 | 2022-08-20T14:10:31 | 152,479,633 | 1 | 0 | Apache-2.0 | 2023-09-04T23:34:09 | 2018-10-10T19:38:43 | Java | UTF-8 | Python | false | false | 1,385 | py | # coding: utf-8
"""
openapi 3.0.3 sample spec
sample spec for testing openapi functionality, built from json schema tests for draft6 # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
import unittest
import unit_test_api
from unit_test_api.model.anyof import Anyof
from unit_test_api import configuration
class TestAnyof(unittest.TestCase):
"""Anyof unit test stubs"""
_configuration = configuration.Configuration()
def test_second_anyof_valid_passes(self):
# second anyOf valid
Anyof._from_openapi_data(
2.5,
_configuration=self._configuration
)
def test_neither_anyof_valid_fails(self):
# neither anyOf valid
with self.assertRaises((unit_test_api.ApiValueError, unit_test_api.ApiTypeError)):
Anyof._from_openapi_data(
1.5,
_configuration=self._configuration
)
def test_both_anyof_valid_passes(self):
# both anyOf valid
Anyof._from_openapi_data(
3,
_configuration=self._configuration
)
def test_first_anyof_valid_passes(self):
# first anyOf valid
Anyof._from_openapi_data(
1,
_configuration=self._configuration
)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a904290ec8ed97238dff5bff3c599df824611c11 | 02e23da0431623db86c8138bda350a1d526d4185 | /Archivos Python Documentos/Graficas/.history/ejecutable_20200216215432.py | dfa14620e68d6ef4a0639d6914525ed6612644cf | [] | no_license | Jaamunozr/Archivos-python | d9996d3d10ff8429cd1b4c2b396016a3a5482889 | 1f0af9ba08f12ac27e111fcceed49bbcf3b39657 | refs/heads/master | 2022-08-05T14:49:45.178561 | 2022-07-13T13:44:39 | 2022-07-13T13:44:39 | 244,073,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,272 | py | import pylab as pl
import numpy as np
# Create a figure, 8x6 points at 80 dots per inch (modified here to 16x8 at 100 dpi)
pl.figure(figsize=(16, 8), dpi=100)
# Create a new subplot on a 1x1 grid (a grid with two plots could also be created)
pl.subplot(1, 1, 1)
# Generate the data for sine and cosine (from -2pi to 2pi)
X = np.linspace(-2.1*np.pi, 2.1*np.pi, 256, endpoint=True)  # 256 is the number of samples in the interval
C, S = np.cos(X), np.sin(X)
# Plot the cosine function with a solid blue line 1 pixel wide
pl.plot(X, C, color="blue", linewidth=1.0, linestyle="-")
# Plot the sine function with a solid green line 1 pixel wide
pl.plot(X, S, color="green", linewidth=1.0, linestyle="-")
# Set the x-axis limits
pl.xlim(-8.0, 8.0)
# x ticks (how many tick values are shown on the axis)
pl.xticks(np.linspace(-8, 8, 17, endpoint=True))
# Set the y-axis limits
pl.ylim(-1.0, 1.0)
# y ticks (how many tick values are shown on the axis)
pl.yticks(np.linspace(-1, 1, 5, endpoint=True))
'''Another way to choose the tick positions to display:
pl.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
pl.yticks([-1, 0, +1]) '''
# ADD AXIS LINES AND REMOVE THE SURROUNDING BOX:
'''
ax = pl.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0))'''
# Save the figure using 72 dots per inch
# savefig("exercice_2.png", dpi=72)
# Set the spacing between the figure borders and the plots
#pl.xlim(X.min() * 1.1, X.max() * 1.1)
pl.ylim(C.min() * 1.1, C.max() * 1.1)
# ADD A LEGEND
pl.plot(X, C, color="blue", linewidth=2.5, linestyle="-", label="Coseno")
pl.plot(X, S, color="red", linewidth=2.5, linestyle="-", label="Seno")
pl.legend(loc='upper left')
# ADD AN ANNOTATION AT A KNOWN POINT
t = 2 * np.pi / 3
# For cosine -------------------------------------------------------------------------------------
pl.plot([t, t], [0, np.cos(t)], color='blue', linewidth=2.5, linestyle="--")
pl.scatter([t, ], [np.cos(t), ], 350, color='blue')
pl.annotate(r'$sin(\frac{2\pi}{3})=\frac{\sqrt{3}}{2}$',  # text to display
            xy=(t, np.sin(t)), xycoords='data',  # reference coordinates for the arrow and the text
            xytext=(+10, +30), textcoords='offset points', fontsize=16,  # position of the text
            arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))  # arrow direction
# For sine ---------------------------------------------------------------------------------------
pl.plot([t, t],[0, np.sin(t)], color='red', linewidth=2.5, linestyle="--")
pl.scatter([t, ],[np.sin(t), ], 50, color='red')
pl.annotate(r'$cos(\frac{2\pi}{3})=-\frac{1}{2}$',
xy=(t, np.cos(t)), xycoords='data',
xytext=(-90, -50), textcoords='offset points', fontsize=16,
            arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))  # arrow direction
# Show the result on screen (pauses for 10 seconds)
pl.pause(10)
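# Optional sketch: the figure could also be written to disk, e.g.
# pl.savefig('trig_annotated.png', dpi=100)  # filename is arbitrary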
| [
"[email protected]"
] | |
08cb23a06a7856db4ecb22d88ec90a611deba95b | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /man/case/life/big_government.py | 7fba7f61642853f57bfca0dad6bb4279f36648e4 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py |
#! /usr/bin/env python
def fact_or_eye(str_arg):
ask_new_woman(str_arg)
print('world_or_last_life')
def ask_new_woman(str_arg):
print(str_arg)
if __name__ == '__main__':
fact_or_eye('long_child_or_few_place')
| [
"[email protected]"
] | |
b0d36ff01b81621a8a30b4260aee51ff0b7fd312 | 0ac1df08e2cb2a089e912e3237209e0fb683504a | /edgy/workflow/transition.py | d44537e832efd938ea119ed0b1b40d23812ce52a | [] | no_license | python-edgy/workflow | ee8654b5cd3931f26dc6c4c519bc865cba1864ca | b27edaa7a80bf7cd40d5a26df114058f3795dacd | refs/heads/master | 2020-12-11T20:24:36.461621 | 2016-07-22T09:26:47 | 2016-07-22T09:26:47 | 51,644,998 | 1 | 0 | null | 2016-08-17T14:18:02 | 2016-02-13T12:28:06 | Python | UTF-8 | Python | false | false | 3,284 | py | # -*- coding: utf-8 -*-
"""
The smallest atom of ``edgy.workflow`` is a ``Transition``, which basically is a regular python
callable with additional metadata to make the system aware of when it can be applied.
"""
from edgy.workflow.constants import WILDCARD
from edgy.workflow.utils import issequence
class Transition(object):
"""
Defines when and how to go from one state to another, eventually applying a user-defined
side-effect while being applied.
Example::
>>> t = Transition(name='sleep', source='awake', target='asleep')
>>> class Person(object):
... state = 'awake'
>>> me = Person()
>>> t(me)
>>> me.state
'asleep'
This class can also be used as a decorator::
>>> @Transition(source='asleep', target='awake')
>>> def wakeup(self, subject):
... print('HEY!')
>>> wakeup(me)
>>> me.state
'awake'
A special wildcard source can make transitions work from any state. Just specify "*" as a
transition source and you'll be able to transition from any state.
"""
# Tracks each time a Transition instance is created. Used to retain order.
creation_counter = 0
# Transition handler. If absent, the transition is considered as "partial", and should be called with a handler
# callable to be complete.
handler = None
def __init__(self, handler=None, name=None, source=None, target=None):
self.source = tuple(source if issequence(source) else (source,))
self.target = target
self._name = name
# Increase the creation counter, and save our local copy.
self.creation_counter = Transition.creation_counter
Transition.creation_counter += 1
        if handler:
            self.handler = handler
def __call__(self, *args, **kwargs):
if self.handler:
return self.__call_complete(*args, **kwargs)
return self.__call_partial(*args, **kwargs)
def __call_partial(self, handler):
self.handler = handler
return self
def __call_complete(self, subject, *args, **kwargs):
        if WILDCARD not in self.source and subject.state not in self.source:
raise RuntimeError(
'This transition cannot be executed on a subject in "{}" state, authorized source '
'states are {}.'.format(subject.state,
', '.join(['"{}"'.format(state) for state in self.source]))
)
        retval = self.handler(self, subject, *args, **kwargs)
        subject.state = self.target
        return retval
@property
def __name__(self):
if self._name:
return self._name
if self.handler:
return self.handler.__name__
return 'partial'
# Alias that can be used in django templates, for example.
name = __name__
def __repr__(self):
return '<{}.{} object "{}" ({} to {}) at {}>'.format(
type(self).__module__,
type(self).__name__,
self.__name__,
'/'.join(self.source),
self.target,
hex(id(self)),
)
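

# Illustrative sketch (not part of the library): a WILDCARD source lets a
# transition fire regardless of the subject's current state.
if __name__ == '__main__':
    class _Subject(object):
        state = 'asleep'

    reset = Transition(handler=lambda transition, subject: None,
                       name='reset', source=WILDCARD, target='awake')
    subject = _Subject()
    reset(subject)
    print(subject.state)  # 'awake', whatever the starting state was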
| [
"[email protected]"
] | |
78ab8469e9d3cb049c2360ccbb087a9236a83ec7 | a1a3fc3511d3e2e29909411163bafd8932f87426 | /tests/extension/dataflow_/regionadd_filter_enable/dataflow_regionadd_filter_enable.py | bef3f6dbb5ee6c07fe7b89b1e8dc44cb193f5f69 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | yongfu-li/veriloggen | 25316c6f1a024669e7cb87f3491a1d3592356ea9 | a6230da3350c6e4bb54e10a46ac855c24c27f17f | refs/heads/master | 2021-01-23T11:50:43.050607 | 2017-09-04T08:30:06 | 2017-09-04T08:30:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,378 | py | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.dataflow as dataflow
def mkMain():
# input variiable
x = dataflow.Variable('xdata', valid='xvalid', ready='xready')
reset = dataflow.Variable(
'resetdata', valid='resetvalid', ready='resetready', width=1)
enable = dataflow.Variable(
'enabledata', valid='enablevalid', ready='enableready', width=1)
# dataflow definition
z, v = dataflow.RegionAdd(
x * x, 4, initval=0, enable=enable, reset=reset, filter=True)
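    # RegionAdd accumulates x*x over consecutive regions of 4 samples; z is
    # the accumulated sum and v flags the sample completing each region.
    # With filter=True only region-final values are forwarded (a reading of
    # the semantics consistent with the testbench below).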
# set output attribute
z.output('zdata', valid='zvalid', ready='zready')
v.output('vdata', valid='vvalid', ready='vready')
df = dataflow.Dataflow(z, v)
m = df.to_module('main')
# df.draw_graph()
return m
def mkTest(numports=8):
m = Module('test')
# target instance
main = mkMain()
params = m.copy_params(main)
ports = m.copy_sim_ports(main)
clk = ports['CLK']
rst = ports['RST']
xdata = ports['xdata']
xvalid = ports['xvalid']
xready = ports['xready']
resetdata = ports['resetdata']
resetvalid = ports['resetvalid']
resetready = ports['resetready']
enabledata = ports['enabledata']
enablevalid = ports['enablevalid']
enableready = ports['enableready']
zdata = ports['zdata']
zvalid = ports['zvalid']
zready = ports['zready']
vdata = ports['vdata']
vvalid = ports['vvalid']
vready = ports['vready']
uut = m.Instance(main, 'uut',
params=m.connect_params(main),
ports=m.connect_ports(main))
reset_done = m.Reg('reset_done', initval=0)
reset_stmt = []
reset_stmt.append(reset_done(0))
reset_stmt.append(xdata(0))
reset_stmt.append(xvalid(0))
reset_stmt.append(enabledata(0))
reset_stmt.append(enablevalid(0))
reset_stmt.append(resetdata(0))
reset_stmt.append(resetvalid(0))
reset_stmt.append(zready(0))
simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, reset_stmt, period=100)
nclk = simulation.next_clock
init.add(
Delay(1000),
reset_done(1),
nclk(clk),
Delay(10000),
Systask('finish'),
)
def send(name, data, valid, ready, step=1, waitnum=10, send_size=20):
fsm = FSM(m, name + 'fsm', clk, rst)
count = m.TmpReg(32, initval=0)
fsm.add(valid(0))
fsm.goto_next(cond=reset_done)
for _ in range(waitnum):
fsm.goto_next()
fsm.add(valid(1))
fsm.goto_next()
fsm.add(data(data + step), cond=ready)
fsm.add(count.inc(), cond=ready)
fsm.add(valid(0), cond=AndList(count == 5, ready))
fsm.goto_next(cond=AndList(count == 5, ready))
for _ in range(waitnum):
fsm.goto_next()
fsm.add(valid(1))
fsm.add(data(data + step), cond=ready)
fsm.add(count.inc(), cond=ready)
fsm.add(valid(0), cond=AndList(count == send_size, ready))
fsm.goto_next(cond=AndList(count == send_size, ready))
fsm.make_always()
def receive(name, data, valid, ready, waitnum=10):
fsm = FSM(m, name + 'fsm', clk, rst)
fsm.add(ready(0))
fsm.goto_next(cond=reset_done)
fsm.goto_next()
yinit = fsm.current
fsm.add(ready(1), cond=valid)
fsm.goto_next(cond=valid)
for i in range(waitnum):
fsm.add(ready(0))
fsm.goto_next()
fsm.goto(yinit)
fsm.make_always()
send('x', xdata, xvalid, xready, waitnum=10, send_size=100)
receive('z', zdata, Ands(zvalid, vvalid), zready, waitnum=5)
receive('v', vdata, Ands(zvalid, vvalid), vready, waitnum=5)
# enable port
enable_fsm = FSM(m, 'enable', clk, rst)
enable_count = m.Reg('enable_count', 32, initval=0)
enable_fsm.goto_next(cond=reset_done)
enable_fsm_init = enable_fsm.current
enable_fsm.add(enablevalid(1)) # always High
enable_fsm.add(enable_count.inc(), cond=AndList(enablevalid, enableready))
enable_fsm.add(enabledata(1), cond=AndList(
enablevalid, enableready, enable_count == 2))
enable_fsm.goto_next(cond=AndList(
enablevalid, enableready, enable_count == 2))
enable_fsm.add(enabledata(0), cond=AndList(enablevalid, enableready))
enable_fsm.add(enable_count(0))
enable_fsm.goto(enable_fsm_init, cond=AndList(enablevalid, enableready))
enable_fsm.make_always()
# reset port
reset_fsm = FSM(m, 'reset', clk, rst)
reset_count = m.Reg('reset_count', 32, initval=0)
reset_fsm.goto_next(cond=reset_done)
reset_fsm_init = reset_fsm.current
reset_fsm.add(resetvalid(1)) # always High
reset_fsm.add(reset_count.inc(), cond=AndList(resetvalid, resetready))
#reset_fsm.add( resetdata(1), cond=AndList(resetvalid, resetready, reset_count==2) )
reset_fsm.add(resetdata(0), cond=AndList(
resetvalid, resetready, reset_count == 2))
reset_fsm.goto_next(cond=AndList(resetvalid, resetready, reset_count == 2))
reset_fsm.add(resetdata(0), cond=AndList(resetvalid, resetready))
reset_fsm.add(reset_count(0))
reset_fsm.goto(reset_fsm_init, cond=AndList(resetvalid, resetready))
reset_fsm.make_always()
m.Always(Posedge(clk))(
If(reset_done)(
If(AndList(xvalid, xready))(
Systask('display', 'xdata=%d', xdata)
),
If(AndList(zvalid, zready))(
Systask('display', 'zdata=%d', zdata)
),
If(AndList(vvalid, vready))(
Systask('display', 'vdata=%d', vdata)
)
)
)
return m
if __name__ == '__main__':
test = mkTest()
verilog = test.to_verilog('tmp.v')
print(verilog)
# run simulator (Icarus Verilog)
sim = simulation.Simulator(test)
rslt = sim.run() # display=False
#rslt = sim.run(display=True)
print(rslt)
# launch waveform viewer (GTKwave)
# sim.view_waveform() # background=False
# sim.view_waveform(background=True)
| [
"[email protected]"
] | |
d236ab80a1798bb92f400c21f53470b7b4d79c24 | fdffd3f8ad31ffd917b1df4199ff5d88df80b420 | /Chapter_08/matplotlib_learning.py | f2b0806f79708322e36af13779f60ecd5eb0b416 | [] | no_license | LelandYan/data_analysis | 83c0cefa1b0783a8d3d13050092b2ab085cd859e | 9482c4667ecac189545f40b9f5bad3c495d48068 | refs/heads/master | 2020-04-17T04:25:47.975087 | 2019-02-12T07:40:37 | 2019-02-12T07:40:37 | 166,229,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,529 | py | import matplotlib.pyplot as plt
from numpy.random import randn
import numpy as np
# fig = plt.figure()
# ax1 = fig.add_subplot(2, 2, 1)
# ax2 = fig.add_subplot(2, 2, 2)
# ax3 = fig.add_subplot(2, 2, 3)
# plt.plot(randn(50).cumsum(), 'k--')
# _ = ax1.hist(randn(100), bins=20, color='k', alpha=0.3)
# ax2.scatter(np.arange(30), np.arange(30) + 3 * randn(30))
# Adjust the spacing around subplots
# fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
# for i in range(2):
# for j in range(2):
# axes[i, j].hist(randn(500), bins=50, color='k', alpha=0.5)
# plt.subplots_adjust(wspace=0, hspace=0)
# plt.plot(randn(30).cumsum(), 'ko--')
# plt.plot(randn(30).cumsum(), color='k', linestyle='dashed', marker='o')
# data = randn(30).cumsum()
# # plt.plot(data, 'k--', label='Default')
# plt.plot(data, 'k-', drawstyle='steps-post', label='steps-post')
# plt.legend(loc='best')
# plt.xlim()
# plt.savefig('figpath.svg')
# plt.show()
# from io import BytesIO
# buffer = BytesIO()
# plt.savefig(buffer)
# plot_data = buffer.getvalue()
# ax = fig.add_subplot(1, 1, 1)
# ax.plot(randn(1000).cumsum())
# plt.show()
# from datetime import datetime
# import pandas as pd
#
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
#
# data = pd.read_csv('spx.csv', index_col=0, parse_dates=True)
# spx = data['SPX']
# spx.plot(ax=ax, style='k--',alpha=0.3)
# crisis_data = [
# (datetime(2007, 10, 11), 'Peak of bull market'),
# (datetime(2008, 3, 12), 'Bear Stearns Fails'),
# (datetime(2008, 9, 15), 'Lehman Bankruptcy')
# ]
# for date, label in crisis_data:
# ax.annotate(label, xy=(date, spx.asof(date) + 50),
# xytext=(date, spx.asof(date) + 200),
# arrowprops=dict(facecolor='black'),
# horizontalalignment='left', verticalalignment='top')
# ax.set_xlim(['1/1/2007', '1/1/2011'])
# ax.set_ylim([600, 1800])
# ax.set_title("Important dates in 2008-2009 financial crisis")
# plt.show()
# ax.savefig('figpath.svg')
# matplotlib配置
# plt.rc('figure', figsize=(10, 10))
from pandas import DataFrame, Series
# pandas中的绘图函数
# 线型图
# s = Series(np.random.randn(10).cumsum(), index=np.arange(0, 100, 10))
# s.plot()
# plt.show()
# df = DataFrame(np.random.randn(10, 4).cumsum(0), columns=['A', 'B', 'C', 'D'], index=np.arange(0, 100, 10))
# df.plot()
# plt.show()
# Bar charts: kind='bar'/'barh'; the Series/DataFrame index becomes the X/Y ticks
# fig, axes = plt.subplots(2, 1)
# data = Series(np.random.rand(16), index=list('abcdefghijklmnop'))
# data.plot(kind='bar', ax=axes[0], color='k', alpha=0.7)
# data.plot(kind='barh', ax=axes[1], color='k', alpha=0.7)
# plt.show()
import pandas as pd
# df = DataFrame(np.random.rand(6, 4),
# index=['one', 'two', 'three', 'four', 'five', 'six'],
# columns=pd.Index(['A', 'B', 'C', 'D'], names='Genus'))
# df.plot(kind='bar')
# df.plot(kind='barh', stacked=True, alpha=0.5)
# plt.show()
# tips = pd.read_csv('tips.csv')
# party_counts = pd.crosstab(tips.day,tips.size)
# print(party_counts.ix[:,2:5])
# Histograms and density plots
# tips = pd.read_csv('tips.csv')
# tips['tip_pct'] = tips['tip'] / tips['total_bill']
# tips['tip_pct'].hist(bins=20)
# tips['tip_pct'].plot(kind='kde')
# plt.show()
# comp1 = np.random.normal(0, 1, size=200)
# comp2 = np.random.normal(10, 2, size=200)
# values = Series(np.concatenate([comp1,comp2]))
# values.hist(bins=100,alpha=0.3,color='k',normed=True)
# values.plot(kind='kde',style='k--')
# plt.show()
# Scatter plots
# macro = pd.read_csv('macrodata.csv')
# # data = macro[['cpi', 'm1', 'tbilrate', 'unemp']]
# # # print(data[-5:])
# # trans_data = np.log(data).diff().dropna()
# # # print(trans_data[-5:])
# # plt.scatter(trans_data['m1'],trans_data['unemp'])
# # plt.title('Changes in log')
# # pd.scatter_matrix(trans_data,diagonal='kde',color='k',alpha=0.3)
# # plt.show()
# Drawing maps
data = pd.read_csv('Haiti.csv')
# Drop erroneous records (coordinates outside Haiti or missing category)
data = data[(data.LATITUDE > 18) & (data.LATITUDE < 20) & (data.LONGITUDE > -75) & (data.LONGITUDE < -70) & (
data.CATEGORY.notnull())]
def to_cat_list(catstr):
stripped = (x.strip() for x in catstr.split(','))
return [x for x in stripped if x]
def get_all_categories(cat_series):
    cat_sets = (set(to_cat_list(x)) for x in cat_series)
return sorted(set.union(*cat_sets))
def get_english(cat):
code, names = cat.split('.')
if '|' in names:
names = names.split('|')[1]
return code, names.strip()
print(get_english('2. Urgences logistiques | Vital Lines'))
| [
"[email protected]"
] | |
5f4d648fe87f277326ed1c245f130c1540612c9f | acda0bc700943654156d491eaa0b766bea0ae7bd | /apps/item/views.py | 5a977f899786fbf9bf8ce0698dd0d24153b6aefd | [] | no_license | bluehawkarthur/casa_campo | a11baaec966d51a1e733ad2dd48bb77a0ecd6cb5 | 22a57b58a722769e8e25330457ed868d230f5c05 | refs/heads/master | 2021-01-18T15:05:47.674205 | 2016-10-26T20:03:55 | 2016-10-26T20:03:55 | 68,387,895 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | from django.shortcuts import render_to_response, render
from django.views.generic import FormView, ListView, DetailView, UpdateView
from pure_pagination.mixins import PaginationMixin
from django.core.urlresolvers import reverse_lazy
from .forms import ItemForm
from django.http import HttpResponseRedirect
from django.template import RequestContext
from .models import Item
# Create your views here. , UpdateView, DeleteView
def CrearItem(request):
if request.method == 'POST':
form = ItemForm(request.POST)
if form.is_valid():
item = Item(
codigo=form.cleaned_data['codigo'],
unidad=form.cleaned_data['unidad'],
descripcion=form.cleaned_data['descripcion'],
cantidad=form.cleaned_data['cantidad'],
pr_costo=form.cleaned_data['pr_costo'])
item.save()
return HttpResponseRedirect(reverse_lazy('listar_item'))
    else:
        form = ItemForm()
variables = RequestContext(request, {'form': form})
return render_to_response('item/crearitem.html', variables)
class ListarItem(PaginationMixin, ListView):
template_name = 'item/listar_item.html'
paginate_by = 5
model = Item
context_object_name = 'item'
class DetalleItem(DetailView):
template_name = 'item/detalle_item.html'
model = Item
context_object_name = 'item'
class EditItem(UpdateView):
template_name = 'item/edit_item.html'
model = Item
fields = ['codigo', 'unidad', 'descripcion', 'cantidad', 'pr_costo']
success_url = reverse_lazy('listar_item')
def DeleteItem(request, item):
e = Item.objects.get(id=item)
e.delete()
return HttpResponseRedirect(reverse_lazy('listar_item')) | [
"[email protected]"
] | |
169cc6e3a08adc088826a5b3ab17e5fcb13c6c44 | b976a3ca1e9cb98a9c90e57243255d0a8ace3572 | /Probability & Statistics/pharmacy_multi_regression.py | 911ba06a6717cd97204579ffadd3597f75e39138 | [
"MIT"
] | permissive | akhilvydyula/Data-Science-and-Machine-Learning-Projects-Dojo | fbe9408818cbfdb31d7fa0e52d9566bab998b9e1 | 4e2932dfa6749b360a7a605050c953ef52fc6547 | refs/heads/master | 2023-05-06T00:42:57.787384 | 2021-05-28T06:40:25 | 2021-05-28T06:40:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | """
A pharmacy delivers medications to the surrounding community.
Drivers can make several stops per delivery.
The owner would like to predict the length of time a delivery will take based on one or two related variables.
"""
from sklearn.linear_model import LinearRegression
x1, x2 = [1,3,2,3,1], [8,4,9,6,3]
y = [29, 31, 36, 35, 19]
reg = LinearRegression()
reg.fit(list(zip(x1,x2)), y)
b1, b2 = reg.coef_[0], reg.coef_[1]
b0 = reg.intercept_
print(f'y = {b0:.{3}} + {b1:.{3}}x1 + {b2:.{3}}x2') | [
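
# Hypothetical follow-up: predict the delivery time for a new trip with
# x1 = 2 stops and x2 = 5 for the second predictor.
print(f'predicted delivery time: {reg.predict([[2, 5]])[0]:.1f}')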
"[email protected]"
] | |
a06e523614c65dc76a0ee5de471b3d4970df6c87 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/GstVideo/VideoResampler.py | a56a7a96d40d30d7b2b241bb4ea4fabe51b4f99e | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 6,320 | py | # encoding: utf-8
# module gi.repository.GstVideo
# from /usr/lib64/girepository-1.0/GstVideo-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gst as __gi_repository_Gst
import gi.repository.GstBase as __gi_repository_GstBase
import gobject as __gobject
class VideoResampler(__gi.Struct):
"""
:Constructors:
::
VideoResampler()
"""
def clear(self): # real signature unknown; restored from __doc__
""" clear(self) """
pass
def init(self, method, flags, n_phases, n_taps, shift, in_size, out_size, options): # real signature unknown; restored from __doc__
""" init(self, method:GstVideo.VideoResamplerMethod, flags:GstVideo.VideoResamplerFlags, n_phases:int, n_taps:int, shift:float, in_size:int, out_size:int, options:Gst.Structure) -> bool """
return False
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
in_size = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
max_taps = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
n_phases = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
n_taps = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
offset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
out_size = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
phase = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
taps = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_gst_reserved = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(VideoResampler), '__module__': 'gi.repository.GstVideo', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'VideoResampler' objects>, '__weakref__': <attribute '__weakref__' of 'VideoResampler' objects>, '__doc__': None, 'in_size': <property object at 0x7f930d2a1770>, 'out_size': <property object at 0x7f930d2a1860>, 'max_taps': <property object at 0x7f930d2a1950>, 'n_phases': <property object at 0x7f930d2a1a40>, 'offset': <property object at 0x7f930d2a1b30>, 'phase': <property object at 0x7f930d2a1c20>, 'n_taps': <property object at 0x7f930d2a1d10>, 'taps': <property object at 0x7f930d2a1e00>, '_gst_reserved': <property object at 0x7f930d2a1ef0>, 'clear': gi.FunctionInfo(clear), 'init': gi.FunctionInfo(init)})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(VideoResampler)
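
# Note: this module is an auto-generated introspection skeleton ("real
# signature unknown; restored from __doc__"), as produced by IDE stub
# generators for GObject-introspection bindings.  The real VideoResampler
# lives in the compiled GstVideo bindings; the stub above only documents
# the API surface for code completion.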
| [
"[email protected]"
] | |
cec03f25f354aaa3f99e4de8a868b3777d100efc | 0010a92176b766f4bdf37c1144fa0f724cfaf564 | /env/lib/python3.5/site-packages/aliyunsdkecs/request/v20140526/CreateImageRequest.py | 5fcd8e48fafcdcaf41af6211a1c3634952c20daa | [] | no_license | pengjinfu/My-Admin | bc2d8b53da8be0fad60e1d8979bdca3f2c4560d9 | 26206d1def673adb7dfe5c8044c654a0e65320d1 | refs/heads/master | 2021-08-30T02:17:57.432743 | 2017-12-15T17:05:05 | 2017-12-15T17:05:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,655 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateImageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateImage')
def get_DiskDeviceMappings(self):
return self.get_query_params().get('DiskDeviceMappings')
def set_DiskDeviceMappings(self,DiskDeviceMappings):
		# Note: str(), not bytes() -- on Python 3, bytes(i + 1) builds an
		# (i + 1)-byte zero-filled buffer rather than the decimal index that
		# the query-parameter key needs.
		for i in range(len(DiskDeviceMappings)):
			if DiskDeviceMappings[i].get('Size') is not None:
				self.add_query_param('DiskDeviceMapping.' + str(i + 1) + '.Size', DiskDeviceMappings[i].get('Size'))
			if DiskDeviceMappings[i].get('SnapshotId') is not None:
				self.add_query_param('DiskDeviceMapping.' + str(i + 1) + '.SnapshotId', DiskDeviceMappings[i].get('SnapshotId'))
			if DiskDeviceMappings[i].get('Device') is not None:
				self.add_query_param('DiskDeviceMapping.' + str(i + 1) + '.Device', DiskDeviceMappings[i].get('Device'))
			if DiskDeviceMappings[i].get('DiskType') is not None:
				self.add_query_param('DiskDeviceMapping.' + str(i + 1) + '.DiskType', DiskDeviceMappings[i].get('DiskType'))
def get_Tag4Value(self):
return self.get_query_params().get('Tag.4.Value')
def set_Tag4Value(self,Tag4Value):
self.add_query_param('Tag.4.Value',Tag4Value)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_SnapshotId(self):
return self.get_query_params().get('SnapshotId')
def set_SnapshotId(self,SnapshotId):
self.add_query_param('SnapshotId',SnapshotId)
def get_Tag2Key(self):
return self.get_query_params().get('Tag.2.Key')
def set_Tag2Key(self,Tag2Key):
self.add_query_param('Tag.2.Key',Tag2Key)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Tag3Key(self):
return self.get_query_params().get('Tag.3.Key')
def set_Tag3Key(self,Tag3Key):
self.add_query_param('Tag.3.Key',Tag3Key)
def get_Platform(self):
return self.get_query_params().get('Platform')
def set_Platform(self,Platform):
self.add_query_param('Platform',Platform)
def get_Tag1Value(self):
return self.get_query_params().get('Tag.1.Value')
def set_Tag1Value(self,Tag1Value):
self.add_query_param('Tag.1.Value',Tag1Value)
def get_ImageName(self):
return self.get_query_params().get('ImageName')
def set_ImageName(self,ImageName):
self.add_query_param('ImageName',ImageName)
def get_Tag3Value(self):
return self.get_query_params().get('Tag.3.Value')
def set_Tag3Value(self,Tag3Value):
self.add_query_param('Tag.3.Value',Tag3Value)
def get_Architecture(self):
return self.get_query_params().get('Architecture')
def set_Architecture(self,Architecture):
self.add_query_param('Architecture',Architecture)
def get_Tag5Key(self):
return self.get_query_params().get('Tag.5.Key')
def set_Tag5Key(self,Tag5Key):
self.add_query_param('Tag.5.Key',Tag5Key)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Tag5Value(self):
return self.get_query_params().get('Tag.5.Value')
def set_Tag5Value(self,Tag5Value):
self.add_query_param('Tag.5.Value',Tag5Value)
def get_Tag1Key(self):
return self.get_query_params().get('Tag.1.Key')
def set_Tag1Key(self,Tag1Key):
self.add_query_param('Tag.1.Key',Tag1Key)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_Tag2Value(self):
return self.get_query_params().get('Tag.2.Value')
def set_Tag2Value(self,Tag2Value):
self.add_query_param('Tag.2.Value',Tag2Value)
def get_ImageVersion(self):
return self.get_query_params().get('ImageVersion')
def set_ImageVersion(self,ImageVersion):
self.add_query_param('ImageVersion',ImageVersion)
def get_Tag4Key(self):
return self.get_query_params().get('Tag.4.Key')
def set_Tag4Key(self,Tag4Key):
self.add_query_param('Tag.4.Key',Tag4Key) | [
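
# --- Hypothetical usage sketch; not part of the generated SDK file. ---
# Assumes aliyunsdkcore's AcsClient; the region, credentials and instance
# id below are placeholders.
if __name__ == '__main__':
	from aliyunsdkcore.client import AcsClient
	client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
	request = CreateImageRequest()
	request.set_InstanceId('i-xxxxxxxxxxxx')
	request.set_ImageName('image-from-i-xxxxxxxxxxxx')
	print(client.do_action_with_exception(request))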
"[email protected]"
] | |
6c1c35ef28e08ac096358de3535ce5d1f50ca604 | e57d7785276053332c633b57f6925c90ad660580 | /sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/aio/operations/_shared_keys_operations.py | e209ceb60ee78a0cc0c90df3c27836f9fb07693b | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 7,719 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SharedKeysOperations:
"""SharedKeysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.loganalytics.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get_shared_keys(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> "_models.SharedKeys":
"""Gets the shared keys for a workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedKeys, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.SharedKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get_shared_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_shared_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/sharedKeys'} # type: ignore
async def regenerate(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> "_models.SharedKeys":
"""Regenerates the shared keys for a Log Analytics Workspace. These keys are used to connect
Microsoft Operational Insights agents to the workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedKeys, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.SharedKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.regenerate.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/regenerateSharedKey'} # type: ignore
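
# --- Hypothetical usage sketch; not part of the generated client. ---
# Assumes azure-identity is installed and that the async
# LogAnalyticsManagementClient exposes this operation group as
# `shared_keys`; resource names and the subscription id are placeholders.
async def _example_rotate_shared_keys():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.loganalytics.aio import LogAnalyticsManagementClient
    async with LogAnalyticsManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
        keys = await client.shared_keys.get_shared_keys("my-resource-group", "my-workspace")
        rotated = await client.shared_keys.regenerate("my-resource-group", "my-workspace")
        print(keys.primary_shared_key, rotated.primary_shared_key)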
| [
"[email protected]"
] | |
b137998baadcf6c1c7eddef0dd667c340d56e435 | 6a41f12ddb104c4f214fa8bf2864860a8952d17c | /books_crawler/books_crawler/settings.py | 7916845ebd89642dd40df20c0bfb0f0e827a9905 | [] | no_license | jakiiii/Web-Scraping-Scratch | 39bb32ea2044e6c4e52ee58ea88794f2a77d75cd | 46cd54d3a06d70cef070f47b3c15b530691c3187 | refs/heads/master | 2020-04-21T00:34:50.736222 | 2019-02-07T06:38:54 | 2019-02-07T06:38:54 | 169,200,752 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,194 | py | # -*- coding: utf-8 -*-
# Scrapy settings for books_crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'books_crawler'
SPIDER_MODULES = ['books_crawler.spiders']
NEWSPIDER_MODULE = 'books_crawler.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'books_crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'books_crawler.middlewares.BooksCrawlerSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'books_crawler.middlewares.BooksCrawlerDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'scrapy.pipelines.images.ImagesPipeline': 1
}
IMAGES_STORE = '/home/jaki/Dev/WebScrapingScratch/images'
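
# Note: the stock ImagesPipeline enabled above only downloads something when
# scraped items carry its default 'image_urls' (input) and 'images' (results)
# fields.  A minimal item sketch (this would live in items.py, not here):
#
#     import scrapy
#
#     class BookItem(scrapy.Item):
#         image_urls = scrapy.Field()
#         images = scrapy.Field()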
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
522bd050c87ec2e3215a3c729553e1d611c0549a | 824b582c2e0236e987a29b233308917fbdfc57a7 | /sdk/python/pulumi_google_native/orgpolicy/v2/get_folder_policy.py | 14f2f3278537c86a7f93b5c154315a900a2b904d | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | 24601/pulumi-google-native | ce8faf8455609a9572a8cbe0638c66427bf0ae7f | b219a14201c6c58eaa10caaeacbdaab528931adf | refs/heads/master | 2023-08-23T05:48:31.819709 | 2021-10-08T18:50:44 | 2021-10-08T18:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,527 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetFolderPolicyResult',
'AwaitableGetFolderPolicyResult',
'get_folder_policy',
'get_folder_policy_output',
]
@pulumi.output_type
class GetFolderPolicyResult:
def __init__(__self__, name=None, spec=None):
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if spec and not isinstance(spec, dict):
raise TypeError("Expected argument 'spec' to be a dict")
pulumi.set(__self__, "spec", spec)
@property
@pulumi.getter
def name(self) -> str:
"""
Immutable. The resource name of the Policy. Must be one of the following forms, where constraint_name is the name of the constraint which this Policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, "projects/123/policies/compute.disableSerialPortAccess". Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def spec(self) -> 'outputs.GoogleCloudOrgpolicyV2PolicySpecResponse':
"""
Basic information about the Organization Policy.
"""
return pulumi.get(self, "spec")
class AwaitableGetFolderPolicyResult(GetFolderPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFolderPolicyResult(
name=self.name,
spec=self.spec)
def get_folder_policy(folder_id: Optional[str] = None,
policy_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFolderPolicyResult:
"""
Gets a `Policy` on a resource. If no `Policy` is set on the resource, NOT_FOUND is returned. The `etag` value can be used with `UpdatePolicy()` to update a `Policy` during read-modify-write.
"""
__args__ = dict()
__args__['folderId'] = folder_id
__args__['policyId'] = policy_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:orgpolicy/v2:getFolderPolicy', __args__, opts=opts, typ=GetFolderPolicyResult).value
return AwaitableGetFolderPolicyResult(
name=__ret__.name,
spec=__ret__.spec)
@_utilities.lift_output_func(get_folder_policy)
def get_folder_policy_output(folder_id: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFolderPolicyResult]:
"""
Gets a `Policy` on a resource. If no `Policy` is set on the resource, NOT_FOUND is returned. The `etag` value can be used with `UpdatePolicy()` to update a `Policy` during read-modify-write.
"""
...
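
# Hypothetical usage sketch (folder id and constraint name are placeholders):
#
#     policy = get_folder_policy(folder_id="123456789",
#                                policy_id="compute.disableSerialPortAccess")
#     print(policy.name, policy.spec)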
| [
"[email protected]"
] | |
0ef7ffea33b2244997b5c41254c53c90db1083aa | ee8cb974f12977894f7f0fda5b8129570224618b | /gim/core/migrations/0013_auto__add_field_issueevent_related_content_type__add_field_issueevent_.py | 43503150f2cbcde634778bdb46e6ccbccd7ff624 | [] | no_license | derekey/github-issues-manager | 996b3c7b9acd0362b7d99948d45a15ea05d58cc2 | 63a405b993e77f10b9c2b6d9790aae7576d9d84f | refs/heads/develop | 2021-01-21T01:03:01.739800 | 2014-11-09T21:26:49 | 2014-11-09T21:26:49 | 42,234,954 | 1 | 0 | null | 2015-09-10T09:22:40 | 2015-09-10T09:22:39 | null | UTF-8 | Python | false | false | 27,500 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'IssueEvent.related_content_type'
db.add_column(u'core_issueevent', 'related_content_type',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True, blank=True),
keep_default=False)
# Adding field 'IssueEvent.related_object_id'
db.add_column(u'core_issueevent', 'related_object_id',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True),
keep_default=False)
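
    # These two columns are the standard content_type + object_id pair used
    # by Django generic relations, letting an IssueEvent point at a related
    # object of any model -- a commit, a comment, etc.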
def backwards(self, orm):
# Deleting field 'IssueEvent.related_content_type'
db.delete_column(u'core_issueevent', 'related_content_type_id')
# Deleting field 'IssueEvent.related_object_id'
db.delete_column(u'core_issueevent', 'related_object_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.commit': {
'Meta': {'ordering': "('committed_at',)", 'object_name': 'Commit'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'commits_authored'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'author_email': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'author_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'authored_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'committed_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'committer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'commits__commited'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'committer_email': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'committer_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parents': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents_rel_+'", 'to': u"orm['core.Commit']"}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commits'", 'to': u"orm['core.Repository']"}),
'sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'tree': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
},
u'core.githubuser': {
'Meta': {'ordering': "('username',)", 'object_name': 'GithubUser'},
'_available_repositories': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'available_repositories_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'avatar_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_organization': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'organizations_rel_+'", 'to': u"orm['core.GithubUser']"}),
'organizations_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organizations_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'core.issue': {
'Meta': {'unique_together': "(('repository', 'number'),)", 'object_name': 'Issue'},
'assignee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assigned_issues'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'base_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'base_sha': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_issues'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'closed_by_fetched': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'commits': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'issues'", 'symmetrical': 'False', 'to': u"orm['core.Commit']"}),
'commits_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'commits_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'events_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'events_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_pr_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'head_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'head_sha': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_pull_request': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'labels': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'issues'", 'symmetrical': 'False', 'to': u"orm['core.Label']"}),
'mergeable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'merged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'merged_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'merged_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'merged_prs'", 'null': 'True', 'to': u"orm['core.GithubUser']"}),
'milestone': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'issues'", 'null': 'True', 'to': u"orm['core.Milestone']"}),
'nb_additions': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nb_changed_files': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nb_commits': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nb_deletions': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'pr_comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'pr_comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'pr_comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'pr_fetched_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues'", 'to': u"orm['core.Repository']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_issues'", 'to': u"orm['core.GithubUser']"})
},
u'core.issuecomment': {
'Meta': {'ordering': "('created_at',)", 'object_name': 'IssueComment'},
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['core.Issue']"}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['core.Repository']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issue_comments'", 'to': u"orm['core.GithubUser']"})
},
u'core.issueevent': {
'Meta': {'ordering': "('created_at', 'github_id')", 'object_name': 'IssueEvent'},
'commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': u"orm['core.Issue']"}),
'related_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'related_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues_events'", 'to': u"orm['core.Repository']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'issues_events'", 'null': 'True', 'to': u"orm['core.GithubUser']"})
},
u'core.label': {
'Meta': {'ordering': "('label_type', 'order', 'typed_name')", 'unique_together': "(('repository', 'name'),)", 'object_name': 'Label', 'index_together': "(('repository', 'label_type', 'order'),)"},
'api_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['core.LabelType']"}),
'name': ('django.db.models.fields.TextField', [], {}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'labels'", 'to': u"orm['core.Repository']"}),
'typed_name': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
},
u'core.labeltype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('repository', 'name'),)", 'object_name': 'LabelType'},
'edit_details': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'edit_mode': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'regex': ('django.db.models.fields.TextField', [], {}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'label_types'", 'to': u"orm['core.Repository']"})
},
u'core.milestone': {
'Meta': {'ordering': "('number',)", 'unique_together': "(('repository', 'number'),)", 'object_name': 'Milestone'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'milestones'", 'to': u"orm['core.GithubUser']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'due_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'milestones'", 'to': u"orm['core.Repository']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
},
u'core.pullrequestcomment': {
'Meta': {'ordering': "('created_at',)", 'object_name': 'PullRequestComment'},
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'entry_point': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['core.PullRequestCommentEntryPoint']"}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments'", 'to': u"orm['core.Issue']"}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments'", 'to': u"orm['core.Repository']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments'", 'to': u"orm['core.GithubUser']"})
},
u'core.pullrequestcommententrypoint': {
'Meta': {'ordering': "('created_at',)", 'object_name': 'PullRequestCommentEntryPoint'},
'commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'diff_hunk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments_entry_points'", 'to': u"orm['core.Issue']"}),
'original_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'original_position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pr_comments_entry_points'", 'to': u"orm['core.Repository']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pr_comments_entry_points'", 'null': 'True', 'to': u"orm['core.GithubUser']"})
},
u'core.repository': {
'Meta': {'ordering': "('owner', 'name')", 'unique_together': "(('owner', 'name'),)", 'object_name': 'Repository'},
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'repositories'", 'symmetrical': 'False', 'to': u"orm['core.GithubUser']"}),
'collaborators_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'collaborators_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'github_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'github_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'has_issues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_fork': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'issues_events_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'issues_events_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'issues_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'issues_state_closed_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'issues_state_open_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'labels_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'labels_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'milestones_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'milestones_state_closed_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'milestones_state_open_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owned_repositories'", 'to': u"orm['core.GithubUser']"}),
'pr_comments_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'pr_comments_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'prs_fetched_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'prs_state_closed_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'prs_state_open_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core'] | [
"[email protected]"
] | |
893136904401af906e7bdbcf75c63539d98f9364 | 5cb7b9fe09b1dd20c0664d0c86c375ffe353903c | /static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/test/test_shlex.py | ba0f3d1fcae7670d0a08cc51cf4cc0b57557c939 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | shiblon/pytour | 6d0ee4a679cf7e6ffd8ac6326b8bb0d9071a7c73 | 71a181ec16fd38b0af62f55e28a50e91790733b9 | refs/heads/master | 2021-01-17T10:09:18.822575 | 2020-09-23T20:05:58 | 2020-09-23T20:05:58 | 23,226,350 | 2 | 3 | Apache-2.0 | 2020-02-17T22:36:02 | 2014-08-22T13:33:27 | Python | UTF-8 | Python | false | false | 5,315 | py | # -*- coding: utf-8 -*-
import unittest
import shlex
from test import test_support
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# The original test data set was from shellwords, by Hartmut Goebel.
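# Each line is "input|tok1|tok2|...|": the text before the first "|" is fed
# to the lexer, and the remaining "|"-separated fields (note the trailing
# "|") are the tokens it is expected to yield, e.g. in posix mode
# shlex.split("a 'b c'") == ['a', 'b c'].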
data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|\|x|bar|
\ x bar|\|x|bar|
\ bar|\|bar|
foo \x bar|foo|\|x|bar|
foo \ x bar|foo|\|x|bar|
foo \ bar|foo|\|bar|
foo "bar" bla|foo|"bar"|bla|
"foo" "bar" "bla"|"foo"|"bar"|"bla"|
"foo" bar "bla"|"foo"|bar|"bla"|
"foo" bar bla|"foo"|bar|bla|
foo 'bar' bla|foo|'bar'|bla|
'foo' 'bar' 'bla'|'foo'|'bar'|'bla'|
'foo' bar 'bla'|'foo'|bar|'bla'|
'foo' bar bla|'foo'|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foo"bar"bar"fasel"|baz|
blurb foo'bar'bar'fasel' baz|blurb|foo'bar'bar'fasel'|baz|
""|""|
''|''|
foo "" bar|foo|""|bar|
foo '' bar|foo|''|bar|
foo "" "" "" bar|foo|""|""|""|bar|
foo '' '' '' bar|foo|''|''|''|bar|
\""|\|""|
"\"|"\"|
"foo\ bar"|"foo\ bar"|
"foo\\ bar"|"foo\\ bar"|
"foo\\ bar\"|"foo\\ bar\"|
"foo\\" bar\""|"foo\\"|bar|\|""|
"foo\\ bar\" dfadf"|"foo\\ bar\"|dfadf"|
"foo\\\ bar\" dfadf"|"foo\\\ bar\"|dfadf"|
"foo\\\x bar\" dfadf"|"foo\\\x bar\"|dfadf"|
"foo\x bar\" dfadf"|"foo\x bar\"|dfadf"|
\''|\|''|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
"foo\\\x bar\" df'a\ 'df'|"foo\\\x bar\"|df'a|\|'df'|
\"foo"|\|"foo"|
\"foo"\x|\|"foo"|\|x|
"foo\x"|"foo\x"|
"foo\ "|"foo\ "|
foo\ xx|foo|\|xx|
foo\ x\x|foo|\|x|\|x|
foo\ x\x\""|foo|\|x|\|x|\|""|
"foo\ x\x"|"foo\ x\x"|
"foo\ x\x\\"|"foo\ x\x\\"|
"foo\ x\x\\""foobar"|"foo\ x\x\\"|"foobar"|
"foo\ x\x\\"\''"foobar"|"foo\ x\x\\"|\|''|"foobar"|
"foo\ x\x\\"\'"fo'obar"|"foo\ x\x\\"|\|'"fo'|obar"|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|"foo\ x\x\\"|\|'"fo'|obar"|'don'|\|''|t'|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
foo\ bar|foo|\|bar|
foo#bar\nbaz|foobaz|
:-) ;-)|:|-|)|;|-|)|
áéíóú|á|é|í|ó|ú|
"""
posix_data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|x|bar|
\ x bar| x|bar|
\ bar| bar|
foo \x bar|foo|x|bar|
foo \ x bar|foo| x|bar|
foo \ bar|foo| bar|
foo "bar" bla|foo|bar|bla|
"foo" "bar" "bla"|foo|bar|bla|
"foo" bar "bla"|foo|bar|bla|
"foo" bar bla|foo|bar|bla|
foo 'bar' bla|foo|bar|bla|
'foo' 'bar' 'bla'|foo|bar|bla|
'foo' bar 'bla'|foo|bar|bla|
'foo' bar bla|foo|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foobarbarfasel|baz|
blurb foo'bar'bar'fasel' baz|blurb|foobarbarfasel|baz|
""||
''||
foo "" bar|foo||bar|
foo '' bar|foo||bar|
foo "" "" "" bar|foo||||bar|
foo '' '' '' bar|foo||||bar|
\"|"|
"\""|"|
"foo\ bar"|foo\ bar|
"foo\\ bar"|foo\ bar|
"foo\\ bar\""|foo\ bar"|
"foo\\" bar\"|foo\|bar"|
"foo\\ bar\" dfadf"|foo\ bar" dfadf|
"foo\\\ bar\" dfadf"|foo\\ bar" dfadf|
"foo\\\x bar\" dfadf"|foo\\x bar" dfadf|
"foo\x bar\" dfadf"|foo\x bar" dfadf|
\'|'|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
"foo\\\x bar\" df'a\ 'df"|foo\\x bar" df'a\ 'df|
\"foo|"foo|
\"foo\x|"foox|
"foo\x"|foo\x|
"foo\ "|foo\ |
foo\ xx|foo xx|
foo\ x\x|foo xx|
foo\ x\x\"|foo xx"|
"foo\ x\x"|foo\ x\x|
"foo\ x\x\\"|foo\ x\x\|
"foo\ x\x\\""foobar"|foo\ x\x\foobar|
"foo\ x\x\\"\'"foobar"|foo\ x\x\'foobar|
"foo\ x\x\\"\'"fo'obar"|foo\ x\x\'fo'obar|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|foo\ x\x\'fo'obar|don't|
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\|foo\ x\x\'fo'obar|don't|\|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
foo\ bar|foo bar|
foo#bar\nbaz|foo|baz|
:-) ;-)|:-)|;-)|
áéíóú|áéíóú|
"""
class ShlexTest(unittest.TestCase):
def setUp(self):
self.data = [x.split("|")[:-1]
for x in data.splitlines()]
self.posix_data = [x.split("|")[:-1]
for x in posix_data.splitlines()]
for item in self.data:
item[0] = item[0].replace(r"\n", "\n")
for item in self.posix_data:
item[0] = item[0].replace(r"\n", "\n")
def splitTest(self, data, comments):
for i in range(len(data)):
l = shlex.split(data[i][0], comments=comments)
self.assertEqual(l, data[i][1:],
"%s: %s != %s" %
(data[i][0], l, data[i][1:]))
def oldSplit(self, s):
ret = []
lex = shlex.shlex(StringIO(s))
tok = lex.get_token()
while tok:
ret.append(tok)
tok = lex.get_token()
return ret
def testSplitPosix(self):
"""Test data splitting with posix parser"""
self.splitTest(self.posix_data, comments=True)
def testCompat(self):
"""Test compatibility interface"""
for i in range(len(self.data)):
l = self.oldSplit(self.data[i][0])
self.assertEqual(l, self.data[i][1:],
"%s: %s != %s" %
(self.data[i][0], l, self.data[i][1:]))
# Allow this test to be used with old shlex.py
if not getattr(shlex, "split", None):
for methname in dir(ShlexTest):
if methname.startswith("test") and methname != "testCompat":
delattr(ShlexTest, methname)
def test_main():
test_support.run_unittest(ShlexTest)
if __name__ == "__main__":
test_main()
| [
"[email protected]"
] | |
63f6862c5fa020fc79e11cdb16aee06ddb1ff1a0 | d5d35d20ec811cbaa792e681d559361cd7f38159 | /challenge/DidacticVampireText.py | 70cdffe7dc20b01345fe0e2f5d252051a8275136 | [] | no_license | markieboy/hacker.org | afe43f0b4213ec135f8b095bcc7b1a7a755581d8 | da1689bdcc2fe91a81a30385680fd367f2d6e9cf | refs/heads/master | 2021-06-21T12:07:21.503999 | 2017-08-11T08:38:05 | 2017-08-11T08:38:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | #!/usr/bin/env python3
# Q: http://www.hacker.org/challenge/chal.php?id=139
# A: http://www.hacker.org/challenge/chal.php?answer=sunshine&id=139&go=Submit
import re
import urllib.request
import hacker_org_util
PROBLEM_ID = '139'
def main():
source = urllib.request.urlopen(hacker_org_util.build_challenge_url(PROBLEM_ID)).read().decode()
m = re.search('<p>(.*)<p>', source, flags=re.DOTALL)
text = m.group(1)
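    # The answer is hidden in the capital letters scattered through the
    # challenge text, so keeping only [A-Z] characters spells it out.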
print(''.join(re.findall(r'[A-Z]', text)))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
86bf68a3fdb54d1cb09fca3faa9ef12d0f6fa966 | ee53b0262007b2f0db0fe15b2ad85f65fafa4e25 | /Leetcode/849. Maximize Distance to Closest Person.py | f36d9de210337b439e5c96e96c00caecda775ca7 | [] | no_license | xiaohuanlin/Algorithms | bd48caacb08295fc5756acdac609be78e143a760 | 157cbaeeff74130e5105e58a6b4cdf66403a8a6f | refs/heads/master | 2023-08-09T05:18:06.221485 | 2023-08-08T11:53:15 | 2023-08-08T11:53:15 | 131,491,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | '''
In a row of seats, 1 represents a person sitting in that seat, and 0 represents that the seat is empty.
There is at least one empty seat, and at least one person sitting.
Alex wants to sit in the seat such that the distance between him and the closest person to him is maximized.
Return that maximum distance to closest person.
Example 1:
Input: [1,0,0,0,1,0,1]
Output: 2
Explanation:
If Alex sits in the second open seat (seats[2]), then the closest person has distance 2.
If Alex sits in any other open seat, the closest person has distance 1.
Thus, the maximum distance to the closest person is 2.
Example 2:
Input: [1,0,0,0]
Output: 3
Explanation:
If Alex sits in the last seat, the closest person is 3 seats away.
This is the maximum distance possible, so the answer is 3.
Note:
1 <= seats.length <= 20000
seats contains only 0s or 1s, at least one 0, and at least one 1.
'''
import unittest
class Solution:
def maxDistToClosest(self, seats):
"""
:type seats: List[int]
:rtype: int
"""
        # Scan left to right, tracking the index of the last person seen
        # (start == -1 means none yet).  Three cases yield a candidate
        # distance: a leading run of zeros (sit at seat 0), a gap between
        # two people (sit in the middle), and a trailing run of zeros
        # (sit at the last seat).
        i = 0
        start = -1
        max_dis = 0
        while i < len(seats):
            if seats[i] == 0:
                if i == len(seats) - 1:
                    # trailing run of empty seats: distance to the last person
                    dis = i - start
                else:
                    i += 1
                    continue
            else:
                if start == -1:
                    # leading run of empty seats: sitting at seat 0 gives
                    # distance i to this first person
                    dis = i
                    start = i
                else:
                    # gap between two people: the best seat is the middle
                    dis = (i - start) // 2
                    start = i
            if dis > max_dis:
                max_dis = dis
            i += 1
        return max_dis
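
# A minimal alternative sketch (not part of the original solution; the helper
# name is illustrative): compute, for every seat, the distance to the nearest
# person on each side, then take the best empty seat.  O(n) time, O(n) space.
def _max_dist_two_pass(seats):
    n = len(seats)
    INF = float('inf')
    left, right = [INF] * n, [INF] * n
    for i in range(n):
        if seats[i] == 1:
            left[i] = 0
        elif i > 0:
            left[i] = left[i - 1] + 1
    for i in range(n - 1, -1, -1):
        if seats[i] == 1:
            right[i] = 0
        elif i < n - 1:
            right[i] = right[i + 1] + 1
    return max(min(left[i], right[i]) for i in range(n) if seats[i] == 0)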
class TestSolution(unittest.TestCase):
def test_case(self):
examples = (
([1,0,0,0,1,0,1], 2),
([1,0,0,0], 3),
([0,0,0,1], 3),
([0,1,0,0,0,0], 4),
)
for first, second in examples:
self.assert_function(first, second)
def assert_function(self, first, second):
self.assertEqual(Solution().maxDistToClosest(first), second,
msg="first: {}; second: {}".format(first, second))
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
735d952b9b73db8a38c1a772c6a5c61bceced913 | e1dd6d9dccb822d472b7f4f9e8446dd9202eb5a1 | /sdk/test/test_io_k8s_api_rbac_v1alpha1_cluster_role_list.py | f7e76f339f1516a69e8e00215b9d2dd97d478213 | [] | no_license | swiftdiaries/argo_client | 8af73e8df6a28f9ea5f938b5894ab8b7825e4cc2 | b93758a22d890cb33cbd81934042cfc3c12169c7 | refs/heads/master | 2020-05-17T12:11:57.556216 | 2019-07-24T23:23:33 | 2019-07-24T23:23:33 | 183,701,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | # coding: utf-8
"""
Argo API Client
Generated python client for the Argo Workflows # noqa: E501
OpenAPI spec version: v1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import argo.sdk
from models.io_k8s_api_rbac_v1alpha1_cluster_role_list import IoK8sApiRbacV1alpha1ClusterRoleList # noqa: E501
from argo.sdk.rest import ApiException
class TestIoK8sApiRbacV1alpha1ClusterRoleList(unittest.TestCase):
"""IoK8sApiRbacV1alpha1ClusterRoleList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIoK8sApiRbacV1alpha1ClusterRoleList(self):
"""Test IoK8sApiRbacV1alpha1ClusterRoleList"""
# FIXME: construct object with mandatory attributes with example values
# model = argo.sdk.models.io_k8s_api_rbac_v1alpha1_cluster_role_list.IoK8sApiRbacV1alpha1ClusterRoleList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8651769e811843c8771b34777e0cd3f9f73886cd | 20674c17d815214bf66b75be686bb8a45c0f5914 | /version1/382_Linked_List_Random_Note.py | b3eb2bb00277f1ab4588a3185e4daf65f981fec9 | [] | no_license | moontree/leetcode | e7b670969fe20785b15aae82996875fd66de1b08 | f2bf9b13508cd01c8f383789569e55a438f77202 | refs/heads/master | 2021-05-20T20:36:45.615420 | 2020-04-02T09:15:26 | 2020-04-02T09:15:26 | 252,408,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,799 | py | """
Given a singly linked list, return a random node's value from the linked list.
Each node must have the same probability of being chosen.
Follow up:
What if the linked list is extremely large and its length is unknown to you?
Could you solve this efficiently without using extra space?
Example:
// Init a singly linked list [1,2,3].
ListNode head = new ListNode(1);
head.next = new ListNode(2);
head.next.next = new ListNode(3);
Solution solution = new Solution(head);
// getRandom() should return either 1, 2, or 3 randomly. Each element should have equal probability of returning.
solution.getRandom();
"""
from list_helper import *
import random
class Solution(object):
def __init__(self, head):
"""
@param head The linked list's head.
Note that the head is guaranteed to be not null, so it contains at least one node.
:type head: ListNode
"""
        self._head = head  # kept so _get_random_of_stream can rescan the list
        cur = head
        self.nums = []
        while cur:
            self.nums.append(cur.val)
            cur = cur.next
def getRandom(self):
"""
Returns a random node's value.
:rtype: int
"""
step = random.randint(0, len(self.nums) - 1)
return self.nums[step]
def _get_random_of_stream(self):
"""
Returns a random node's value.
:rtype: int
"""
h = self._head
if h is None:
return None
count = 0
res = h.val
while h:
rv = random.randint(0, count)
if rv == 0:
res = h.val
h = h.next
count += 1
return res
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
solution = Solution(head)
for i in xrange(5):
print solution.getRandom()
| [
"[email protected]"
] | |
746f538f4f59613057ed9e33923e1a08e11e714b | 1524720d6480ad0a51b6fd8ff709587455bf4c5d | /tums/trunk/lite/nevow/scripts/consolejstest.py | 0a952bcc2cf4f3a02ccf1aa2154442b69701dc9e | [] | no_license | calston/tums | 2bd6d3cac5232d2ccb7e9becfc649e302a310eab | b93e3e957ff1da5b020075574942913c8822d12a | refs/heads/master | 2020-07-12T03:46:43.639800 | 2018-05-12T10:54:54 | 2018-05-12T10:54:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,135 | py | # Copyright (c) 2006 Divmod.
# See LICENSE for details.
"""
Out-of-browser conversion of javascript test modules that use Athena's "//
import" syntax into monolithic scripts suitable for feeding into a plain
javascript interpreter
"""
from sys import argv
from twisted.python.util import sibpath
import nevow, subprocess
_DUMMY_MODULE_NAME = 'ConsoleJSTest'
def getDependencies(fname, ignore=('Divmod.Runtime', 'MochiKit.DOM'),
bootstrap=nevow.athena.LivePage.BOOTSTRAP_MODULES,
packages=None):
"""
Get the javascript modules that the code in the file with name C{fname}
depends on, recursively
@param fname: javascript source file name
@type fname: C{str}
@param ignore: names of javascript modules to exclude from dependency list
@type ignore: sequence
    @param bootstrap: names of javascript modules to always include, regardless
of explicit dependencies (defaults to L{nevow.athena.LivePage}'s list of
bootstrap modules)
    @type bootstrap: sequence
@param packages: all javascript packages we know about. defaults to the
result of L{nevow.athena.allJavascriptPackages}
@type packages: C{dict}
@return: modules included by javascript in file named C{fname}
@rtype: dependency-ordered list of L{nevow.athena.JSModule} instances
"""
if packages is None:
packages = nevow.athena.allJavascriptPackages()
packages[_DUMMY_MODULE_NAME] = fname
# TODO if a module is ignored, we should ignore its dependencies
return ([nevow.athena.JSModule.getOrCreate(m, packages)
for m in bootstrap if m not in ignore] +
[dep for dep in nevow.athena.JSModule(
_DUMMY_MODULE_NAME, packages).allDependencies()
if dep.name not in bootstrap
and dep.name != _DUMMY_MODULE_NAME
and dep.name not in ignore])
def generateTestScript(fname, after={'Divmod.Base': ('Divmod.Base.addLoadEvent = function() {};',)},
dependencies=None):
"""
Turn the contents of the Athena-style javascript test module in the file
named C{fname} into a plain javascript script. Recursively includes any
modules that are depended on, as well as the utility module
nevow/test/testsupport.js.
@param fname: javascript source file name
@type fname: C{str}
@param after: mapping of javascript module names to sequences of lines of
javascript source that should be injected into the output immediately
after the source of the named module is included
@type after: C{dict}
@param dependencies: the modules the script depends on. Defaults to the
result of L{getDependencies}
@type dependencies: dependency-ordered list of L{nevow.athena.JSModule}
instances
@return: converted javascript source text
@rtype: C{str}
"""
if dependencies is None:
dependencies= getDependencies(fname)
load = lambda fname: 'load(%r);' % (fname,)
initialized = set()
js = [load(sibpath(nevow.__file__, 'test/testsupport.js'))]
for m in dependencies:
segments = m.name.split('.')
if segments[-1] == '__init__':
segments = segments[:-1]
initname = '.'.join(segments)
if initname not in initialized:
initialized.add(initname)
if '.' in initname:
prefix = ''
else:
prefix = 'var '
js.append('%s%s = {};' % (prefix, initname))
js.append(load(m.mapping[m.name]))
if m.name in after:
js.extend(after[m.name])
js.append(file(fname).read())
return '\n'.join(js)
def run():
"""
Read a single filename from the command line arguments, replace any module
imports with the body of the module in question and pipe the result to the
spidermonkey javascript interpreter
"""
# TODO: support more than one filename at a time
js = generateTestScript(argv[1])
subprocess.Popen('/usr/bin/smjs', stdin=subprocess.PIPE).communicate(js)
| [
"[email protected]"
] | |
583466431748d71c10e4768b2295e9e980422200 | 10d77a1bca1358738179185081906956faf3963a | /venv/Lib/site-packages/django/core/mail/backends/filebased.py | f01e1497dbcc6dc15a7cf45416368b8606f613a2 | [] | no_license | ekansh18/WE_Care_NGO_WEBSITE | 3eb6b12ae798da26aec75d409b0b92f7accd6c55 | 7c1eaa78d966d13893c38e7157744fbf8f377e71 | refs/heads/master | 2023-07-16T07:22:48.920429 | 2021-08-31T04:11:19 | 2021-08-31T04:11:19 | 401,563,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,727 | py | """Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import (
EmailBackend """Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import (
EmailBackend as ConsoleEmailBackend,
)
class EmailBackend(ConsoleEmailBackend):
def __init__(self, *args, file_path=None, **kwargs):
self._fname = None
if file_path is not None:
self.file_path = file_path
else:
self.file_path = getattr(settings, 'EMAIL_FILE_PATH', None)
# Make sure self.file_path is a string.
if not isinstance(self.file_path, str):
raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path)
self.file_path = os.path.abspath(self.file_path)
try:
os.makedirs(self.file_path, exist_ok=True)
except FileExistsError:
raise ImproperlyConfigured(
'Path for saving email messages exists, but is not a directory: %s' % self.file_path
)
except OSError as err:
raise ImproperlyConfigured(
'Could not create directory for saving email messages: %s (%s)' % (self.file_path, err)
)
# Make sure that self.file_path is writable.
if not os.access(self.file_path, os.W_OK):
raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path)
# Finally, call super().
# Since we're using the console-based backend as a base,
# force the stream to be None, so we don't default to stdout
kwargs['stream'] = None
super().__init__(*args, **kwargs)
def write_message(self, message):
self.stream.write(message.message().as_bytes() + b'\n')
self.stream.write(b'-' * 79)
self.stream.write(b'\n')
def _get_filename(self):
"""Return a unique file name."""
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.log" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname
def open(self):
if self.stream is None:
self.stream = open(self._get_filename(), 'ab')
return True
return False
def close(self):
try:
if self.stream is not None:
self.stream.close()
finally:
self.stream = None
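# Usage sketch with standard Django settings (the path is illustrative):
#   EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#   EMAIL_FILE_PATH = '/tmp/app-messages'  # messages land in timestamped .log files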
| [
"[email protected]"
] | |
2466113f0da79b2244862448de9eb3746c0d33d1 | 8fd255fc3498ec970d7202d3f70a671b7aa4c64b | /pmsal/blog/views.py | d5b704545317b790379154f070dad0ca73a0eb84 | [
"MIT"
] | permissive | klebercode/pmsal | afda05fe29bb67db70fc7dcb8dfc577f4a3f0c9c | d78477f7cd1a5d1ed9973e13be5758c71a2ce2db | refs/heads/master | 2016-09-06T05:42:34.468341 | 2015-04-04T17:24:07 | 2015-04-04T17:24:07 | 33,409,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,212 | py | # coding: utf-8
from django.db.models import Q
from django.views import generic
from django.views.generic.dates import (YearArchiveView, MonthArchiveView,
DayArchiveView)
from pmsal.context_processors import EnterpriseExtraContext
from pmsal.blog.models import Entry
from pmsal.core.models import Category
class EntryYearArchiveView(YearArchiveView):
queryset = Entry.published.all()
date_field = 'created'
make_object_list = True
allow_future = True
    # TODO: change the pagination
paginate_by = 10
class EntryMonthArchiveView(MonthArchiveView):
queryset = Entry.published.all()
date_field = 'created'
make_object_list = True
allow_future = True
class EntryDayArchiveView(DayArchiveView):
queryset = Entry.published.all()
date_field = 'created'
make_object_list = True
allow_future = True
class EntryListView(EnterpriseExtraContext, generic.ListView):
# model = Entry
queryset = Entry.published.all()
template_name = 'blog/entry_home.html'
    # TODO: change the pagination
paginate_by = 6
def get_queryset(self, **kwargs):
search = self.request.GET.get('search', '')
if search:
obj_lst = Entry.published.filter(Q(title__icontains=search) |
Q(created__icontains=search) |
Q(body__icontains=search))
else:
obj_lst = Entry.published.all()
return obj_lst
def get_context_data(self, **kwargs):
context = super(EntryListView, self).get_context_data(**kwargs)
search = self.request.GET.get('search', '')
context['search'] = search
context['tag_list'] = Entry.tags.most_common()
        # TODO: change how the categories are loaded
context['category_list'] = Category.objects.filter(area=3
).order_by('?')[:10]
return context
class EntryDateDetailView(EnterpriseExtraContext, generic.DateDetailView):
# model = Entry
queryset = Entry.published.all()
date_field = 'created'
make_object_list = True
allow_future = True
def get_context_data(self, **kwargs):
context = super(EntryDateDetailView, self).get_context_data(**kwargs)
context['tag_list'] = Entry.tags.most_common()
        # TODO: change how the categories are loaded
context['category_list'] = Category.objects.all().order_by('?')[:10]
return context
class EntryTagListView(EntryListView):
"""
    Inherits from EntryListView, changing the filter to the selected tag
"""
def get_queryset(self):
"""
        Include only the Entries tagged with the selected tag
"""
return Entry.published.filter(tags__slug=self.kwargs['tag_slug'])
class EntryCategoryListView(EntryListView):
"""
    Inherits from EntryListView, changing the filter to the selected category
"""
def get_queryset(self, **kwargs):
"""
        Include only the Entries in the selected category
"""
return Entry.published.filter(categories__slug=self.kwargs['cat_slug'])
| [
"[email protected]"
] | |
1f282037ba707bdcb0c2fbd47ed08bb8e0e60104 | 5aa14c620a383d8429c144e5af46b0322c674439 | /tests/python/Lut1DTransformTest.py | 0b6073a7cd51dad23173cb33a42118d333820dbb | [
"BSD-3-Clause",
"CC-BY-4.0"
] | permissive | asdlei99/OpenColorIO | ae421f6c14870ffe735c73107b76f6746bd563ee | 9b23e9623792d8cc6e6c1dfd5394335ee148bcf3 | refs/heads/master | 2023-03-13T16:14:19.693576 | 2021-03-03T03:11:10 | 2021-03-03T03:11:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,698 | py | # SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import logging
import unittest
logger = logging.getLogger(__name__)
try:
import numpy as np
except ImportError:
logger.warning(
"NumPy could not be imported. "
"Test case will lack significant coverage!"
)
np = None
import PyOpenColorIO as OCIO
class Lut1DTransformTest(unittest.TestCase):
def test_default_constructor(self):
"""
Test the default constructor.
"""
lut = OCIO.Lut1DTransform()
self.assertEqual(lut.getLength(), 2)
self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_FORWARD)
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_NONE)
self.assertFalse(lut.getInputHalfDomain())
self.assertFalse(lut.getOutputRawHalfs())
self.assertEqual(lut.getInterpolation(), OCIO.INTERP_DEFAULT)
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN)
r, g, b = lut.getValue(0)
self.assertEqual([r, g, b], [0, 0, 0])
r, g, b = lut.getValue(1)
self.assertEqual([r, g, b], [1, 1, 1])
def test_direction(self):
"""
Test the setDirection() and getDirection() methods.
"""
lut = OCIO.Lut1DTransform()
for direction in OCIO.TransformDirection.__members__.values():
lut.setDirection(direction)
self.assertEqual(lut.getDirection(), direction)
# Wrong type tests.
for invalid in (None, 1, 'test'):
with self.assertRaises(TypeError):
lut.setDirection(invalid)
def test_format_metadata(self):
"""
Test the getFormatMetadata() method.
"""
lut = OCIO.Lut1DTransform()
format_metadata = lut.getFormatMetadata()
self.assertIsInstance(format_metadata, OCIO.FormatMetadata)
self.assertEqual(format_metadata.getElementName(), 'ROOT')
self.assertEqual(format_metadata.getName(), '')
self.assertEqual(format_metadata.getID(), '')
format_metadata.setName('name')
format_metadata.setID('id')
self.assertEqual(format_metadata.getName(), 'name')
self.assertEqual(format_metadata.getID(), 'id')
def test_file_output_bit_depth(self):
"""
Test get/setFileOutputBitDepth.
"""
lut = OCIO.Lut1DTransform()
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN)
lut.setFileOutputBitDepth(OCIO.BIT_DEPTH_UINT10)
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10)
def test_hue_adjust(self):
"""
Test get/setHueAdjust.
"""
lut = OCIO.Lut1DTransform()
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_NONE)
lut.setHueAdjust(OCIO.HUE_DW3)
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_DW3)
with self.assertRaises(OCIO.Exception):
lut.setHueAdjust(OCIO.HUE_WYPN)
def test_input_half_domain(self):
"""
Test get/getInputHalfDomain.
"""
lut = OCIO.Lut1DTransform()
self.assertFalse(lut.getInputHalfDomain())
lut.setInputHalfDomain(True)
self.assertTrue(lut.getInputHalfDomain())
def test_output_raw_halfs(self):
"""
Test get/setOutputRawHalfs.
"""
lut = OCIO.Lut1DTransform()
self.assertFalse(lut.getOutputRawHalfs())
lut.setOutputRawHalfs(True)
self.assertTrue(lut.getOutputRawHalfs())
def test_length(self):
"""
Test get/setLength.
"""
lut = OCIO.Lut1DTransform()
self.assertEqual(lut.getLength(), 2)
lut.setValue(0, 0.1, 0.2, 0.3)
lut.setLength(3)
self.assertEqual(lut.getLength(), 3)
# Changing the length reset LUT values to identity.
r, g, b = lut.getValue(0)
self.assertEqual([r, g, b], [0, 0, 0])
def test_constructor_with_keywords(self):
"""
Test Lut1DTransform constructor with keywords and validate its values.
"""
lut = OCIO.Lut1DTransform(
length=65536,
inputHalfDomain=True,
outputRawHalfs=True,
fileOutputBitDepth=OCIO.BIT_DEPTH_UINT10,
hueAdjust=OCIO.HUE_DW3,
interpolation=OCIO.INTERP_BEST,
direction=OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getLength(), 65536)
self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_DW3)
self.assertTrue(lut.getInputHalfDomain())
self.assertTrue(lut.getOutputRawHalfs())
self.assertEqual(lut.getInterpolation(), OCIO.INTERP_BEST)
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10)
lut = OCIO.Lut1DTransform(
length=4,
direction=OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getLength(), 4)
self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_NONE)
self.assertFalse(lut.getInputHalfDomain())
self.assertFalse(lut.getOutputRawHalfs())
self.assertEqual(lut.getInterpolation(), OCIO.INTERP_DEFAULT)
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN)
def test_constructor_with_positional(self):
"""
Test Lut1DTransform constructor without keywords and validate its values.
"""
lut = OCIO.Lut1DTransform(65536, True, True, OCIO.BIT_DEPTH_UINT10,
OCIO.HUE_DW3, OCIO.INTERP_BEST,
OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getLength(), 65536)
self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE)
self.assertEqual(lut.getHueAdjust(), OCIO.HUE_DW3)
self.assertTrue(lut.getInputHalfDomain())
self.assertTrue(lut.getOutputRawHalfs())
self.assertEqual(lut.getInterpolation(), OCIO.INTERP_BEST)
self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10)
def test_array(self):
"""
Get & set Lut array values.
"""
lut = OCIO.Lut1DTransform(length=3)
r, g, b = lut.getValue(0)
self.assertEqual([r, g, b], [0, 0, 0])
r, g, b = lut.getValue(1)
self.assertEqual([r, g, b], [0.5, 0.5, 0.5])
r, g, b = lut.getValue(2)
self.assertEqual([r, g, b], [1, 1, 1])
lut.setValue(0, 0.1, 0.2, 0.3)
r, g, b = lut.getValue(0)
# Values are stored as float.
self.assertAlmostEqual(r, 0.1, delta=1e-6)
self.assertAlmostEqual(g, 0.2, delta=1e-6)
self.assertAlmostEqual(b, 0.3, delta=1e-6)
if not np:
logger.warning("NumPy not found. Skipping part of test!")
return
data = lut.getData()
expected = np.array([0.1, 0.2, 0.3,
0.5, 0.5, 0.5,
1., 1., 1.]).astype(np.float32)
self.assertEqual(data.all(), expected.all())
data[6] = 0.9
data[7] = 1.1
data[8] = 1.2
lut.setData(data)
r, g, b = lut.getValue(2)
self.assertAlmostEqual(r, 0.9, delta=1e-6)
self.assertAlmostEqual(g, 1.1, delta=1e-6)
self.assertAlmostEqual(b, 1.2, delta=1e-6)
def test_equals(self):
"""
Test equals.
"""
lut = OCIO.Lut1DTransform()
lut2 = OCIO.Lut1DTransform()
self.assertTrue(lut.equals(lut2))
lut.setValue(0, 0.1, 0.2, 0.3)
self.assertFalse(lut.equals(lut2))
| [
"[email protected]"
] | |
b77167d258ce02e04bdda1ea6a83707259bbe0f7 | 80e701c5b9c03ef288848d8b368360e0940d9b67 | /sleyeball/files.py | 51db5f0a7dda0c4166dceea14a6d3bc400d4b819 | [] | no_license | esheldon/sleyeball | a4917300b041747e0600186f0e596c6d83a95ff4 | 9eee500119d2bc07c942350a67c8777257e92a3d | refs/heads/master | 2020-06-20T05:22:59.921610 | 2019-07-17T18:38:29 | 2019-07-17T18:38:29 | 197,008,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | import os
def get_base_dir():
"""
base directory
"""
return os.environ['SLDIR']
def get_cand_dir():
"""
we keep lists here
"""
return os.path.join(get_base_dir(), 'candidates')
def get_cand_file_orig():
"""
holds paths to coadds
"""
d = get_cand_dir()
return os.path.join(d, 'z4ErinSheldon.fits')
def get_cand_file():
"""
holds paths to coadds
"""
d = get_cand_dir()
return os.path.join(d, 'z4ErinSheldon-clean.fits')
def get_badreg_dir():
"""
    bad region masks are kept here
"""
return os.path.join(get_base_dir(), 'badregions')
def get_badreg_file():
"""
    the foreground mask (bad regions) file
"""
d = get_badreg_dir()
return os.path.join(d, 'y3a2_foreground_mask_v2.1.fits.gz')
def get_stamp_dir(tilename):
"""
location for the image and temp files
"""
return os.path.join(
get_base_dir(),
'stamps',
tilename,
)
def get_temp_dir():
"""
location for the image and temp files
"""
return os.environ['TMPDIR']
def get_stamp_file(tilename, number):
"""
location of a output file
"""
odir = get_stamp_dir(tilename)
fname = '%s-%06d.jpg' % (tilename, number)
return os.path.join(odir, fname)
#
# batch processing
#
def get_script_dir():
"""
location for scripts
"""
return os.path.join(get_base_dir(), 'scripts')
def get_script_file(tilename):
"""
    location of the batch script for a tile
"""
d = get_script_dir()
fname = '%s.sh' % tilename
return os.path.join(d, fname)
def get_wq_file(tilename, missing=False):
"""
    location of the wq submit file for a tile
"""
fname = '%s.yaml' % tilename
d = get_script_dir()
return os.path.join(d, fname)
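# Usage sketch (assumes the SLDIR environment variable is set; the tile
# name is illustrative):
#   get_stamp_file('DES0000-0000', 12)
#   # -> $SLDIR/stamps/DES0000-0000/DES0000-0000-000012.jpg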
| [
"[email protected]"
] | |
87cc2abd3c13a2d90fd462c57af819701673b894 | 7c5f9f4a237669d9acc4b59711ac1cf91ba71f26 | /torch/_dynamo/guards.py | c16c48515857ab7f942a58f2c98d68360e317e40 | [
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | colesbury/pytorch | 4924df03a4aaf88b51ff9d2c5572ac35a30f26be | d70f9c7888dc488304010a554d8b56a505919bda | refs/heads/master | 2023-08-27T22:11:15.257393 | 2023-03-29T20:17:06 | 2023-03-29T20:17:10 | 79,366,698 | 1 | 0 | null | 2018-08-09T13:54:22 | 2017-01-18T17:47:38 | Python | UTF-8 | Python | false | false | 33,502 | py | import builtins
import collections
import logging
import math
import os
import re
import types
import weakref
from inspect import currentframe, getframeinfo
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
from weakref import ReferenceType
import torch
from torch._guards import (
DuplicateInputs,
Guard,
GuardBuilderBase,
GuardEnvExpr,
GuardSource,
Source,
)
from torch.fx.experimental.symbolic_shapes import SYMPY_INTERP
from . import config, convert_frame, mutation_guard
from .eval_frame import set_guard_error_hook, set_guard_fail_hook
from .exc import unimplemented
from .types import GuardedCode, GuardFail, GuardFn # noqa: F401
from .utils import (
dict_const_keys,
dict_const_keys_repr,
dict_param_key_ids,
guard_failures,
HAS_NUMPY,
istype,
np,
orig_code_map,
rename_implicit,
tensor_always_has_static_shape,
tensor_static_reason_to_message,
tuple_iterator_getitem,
tuple_iterator_len,
)
log = logging.getLogger(__name__)
TensorGuards = torch._C._dynamo.guards.TensorGuards
check_obj_id = torch._C._dynamo.guards.check_obj_id
check_type_id = torch._C._dynamo.guards.check_type_id
CLOSURE_VARS = collections.OrderedDict(
[
("___check_type_id", check_type_id),
("___check_obj_id", check_obj_id),
("___is_grad_enabled", torch.is_grad_enabled),
("___odict_getitem", collections.OrderedDict.__getitem__),
("___dict_param_key_ids", dict_param_key_ids),
("___dict_const_keys", dict_const_keys),
("___tuple_iterator_len", tuple_iterator_len),
("___tuple_iterator_getitem", tuple_iterator_getitem),
("__math_isnan", math.isnan),
("inf", float("inf")),
]
)
def strip_function_call(name):
"""
"___odict_getitem(a, 1)" => "a"
"""
m = re.search(r"([a-z0-9_]+)\(([^(),]+)[^()]*\)", name)
if m and m.group(1) != "slice":
return strip_function_call(m.group(2))
return strip_getattr_getitem(name)
def strip_getattr_getitem(name):
"""
"a[1]" => "a"
"a.foo" => "a"
"""
return re.split(r"[.\[]", name)[0]
class GuardBuilder(GuardBuilderBase):
def __init__(
self,
id_ref: Callable[[Type[object]], str],
source_ref: Callable[[Source], str],
scope: Optional[Dict[str, object]],
check_fn_manager: "CheckFunctionManager",
renames=True,
):
self.id_ref = id_ref
self.source_ref = source_ref
if scope:
if renames:
scope = {rename_implicit(k): v for k, v in scope.items()}
else:
scope = dict()
self.scope: Dict[str, object] = scope
self.scope["__builtins__"] = builtins.__dict__.copy()
for (
name,
package_module,
) in torch.package.package_importer._package_imported_modules.items():
name = name.replace(">", "_").replace("<", "_").replace(".", "_dot_")
# Write the package module into the scope so that we can import it
self.scope["__builtins__"][name] = package_module # type: ignore[index]
# Write the demangled name to the scope so that we can use it
self.scope[name] = package_module
self.argnames: List[str] = []
# Code is python expression strings generated for each guard
self.code: List[str] = []
# shape_env_code is only used by local_builder and is used for
# shape env code. This exists only because we need to make sure
# shape env guards get run after tensor match guards (since the
# tensor match guards make sure we actually have tensors)
self.shape_env_code: List[str] = []
# [Note - On Eager Tensor Guards]
# Most of the time, we generate Python code in a guard to directly
# check various properties. However, tensors are a bit special;
# it is too slow to check their properties one-by-one in Python.
# Instead, there is a C++ function TensorGuards.check which takes
# all of the tensor arguments and checks them all against compile-time
# examples entirely in C++. Thus, every time we process a
# TENSOR_MATCH guard, we just add another entry to
# tensor_check_names/tensor_check_examples, saying "for this local,
# check it against this example", and it all ends up getting
# swept up into a single call to ___check_tensors. Invariant:
# len(tensor_check_names) == len(tensor_check_examples).
self.tensor_check_names: List[str] = []
self.tensor_check_examples: List[torch.Tensor] = []
self.check_fn_manager: CheckFunctionManager = check_fn_manager
# Warning: use this with care! This lets you access what the current
# value of the value you are guarding on is. You probably don't want
# to actually durably save this value though (because it's specific
# to this frame!) Instead, you should be reading out some property
# (like its type) which is what you permanently install into the
# guard code.
def get(self, name: str) -> Any:
return eval(name, self.scope, CLOSURE_VARS)
# Registers the usage of the source name referenced by the
# string (or stored in the Guard) as being guarded upon. It's important
# to call this before generating some code that makes use of 'guard',
# because without this call, we won't actually bind the variable
# you reference in the actual guard closure (oops!)
def arg_ref(self, guard: Union[str, Guard]) -> str:
name: str
if isinstance(guard, str):
name = guard
else:
name = guard.name
base = strip_getattr_getitem(strip_function_call(name))
if base not in self.argnames:
if re.match(r"^\d+$", base):
log.warning(f"invalid var name: {guard}")
self.argnames.append(base)
return name
def TYPE_MATCH(self, guard: Guard):
# ___check_type_id is same as `id(type(x)) == y`
t = type(self.get(guard.name))
obj_id = self.id_ref(t)
code = f"___check_type_id({self.arg_ref(guard)}, {obj_id})"
self._produce_guard_code(guard, [code])
def BOOL_FALSE(self, guard: Guard):
# Guard on the runtime value being 'False',
# can be faster than seemingly equivalent checks like DICT_KEYS for empty dict
#
# WARNING: this guard is not safe to use generally. It only works if the runtime
# value is of a type that supports bool(), and some types e.g. Tensor do not.
# Only use this guard in cases you can guarantee the runtime type will be friendly.
# (e.g. Specialized NNModule with mutation protection via setattr)
#
# Why not simply check the runtime type inside this guard? It's slow enough to defeat
# the purpose of using this guard, which itself is supposed to be a faster alternative
# to DICT_KEYS.
ref = self.arg_ref(guard)
code = f"not {ref}"
self._produce_guard_code(guard, [code])
def ID_MATCH(self, guard: Guard):
# ___check_obj_id is same as `id(x) == y`
m = re.match(r"^type\((.+)\)$", guard.name)
if m:
# optional optimization to produce cleaner/faster guard code
return self.TYPE_MATCH(
Guard(m.group(1), guard.source, GuardBuilder.TYPE_MATCH)
)
code = f"___check_obj_id({self.arg_ref(guard)}, {self.id_ref(self.get(guard.name))})"
self._produce_guard_code(guard, [code])
def NAME_MATCH(self, guard: Guard):
obj = self.get(guard.name)
code = f"{self.arg_ref(guard)}.__name__ == {obj.__name__}"
self._produce_guard_code(guard, [code])
def HASATTR(self, guard: Guard):
m = re.match(r"^(.*)[.]([a-zA-Z0-9_]+)$", guard.name)
assert m, f"invalid hasattr check {guard.name}"
base, attr = m.group(1, 2)
ref = self.arg_ref(base)
val = hasattr(self.get(base), attr)
code = None
if val:
code = f"hasattr({ref}, {attr!r})"
else:
code = f"not hasattr({ref}, {attr!r})"
self._produce_guard_code(guard, [code], provided_guarded_object=self.get(base))
def EQUALS_MATCH(self, guard: Guard):
ref = self.arg_ref(guard)
val = self.get(guard.name)
t = type(val)
np_types = (
(
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.float16,
np.float32,
np.float64,
)
if HAS_NUMPY
else ()
)
assert istype(
val,
(
int,
float,
bool,
type(None),
str,
type,
list,
tuple,
set,
slice,
frozenset,
range,
torch.Size,
torch.device,
torch.dtype,
)
+ np_types,
), t.__name__
if istype(val, (torch.device, torch.dtype)):
# TODO(jansel): is this slow? perhaps optimize it
code = [f"str({ref}) == {str(val)!r}"]
self._produce_guard_code(guard, code)
return
# Special case for nan because float("nan") == float("nan") evaluates to False
if istype(val, float) and math.isnan(val):
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"__math_isnan({ref})")
self._produce_guard_code(guard, code)
return
code = list()
# If matching equality against list/tuple, we must also check that
# the internal types match. (TODO: what about nested lists?)
if istype(val, (list, tuple)):
# NB: LIST_LENGTH takes care of the outer __check_type_id test
self.LIST_LENGTH(guard)
for idx, elem in enumerate(val):
code.append(
f"___check_type_id({ref}[{idx}], {self.id_ref(type(elem))})"
)
else:
# Add type check to prevent equality check between tensor and non-tensor.
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
if istype(val, torch.Size):
val = tuple(val)
# TODO: It feels like it would be better to just implement our own
# equality test in C that handles all of the necessary type checking
# and NaN tests
code.append(f"{ref} == {val!r}")
self._produce_guard_code(guard, code)
def CONSTANT_MATCH(self, guard: Guard):
val = self.get(guard.name)
if istype(val, (bool, type(None))):
self.ID_MATCH(guard)
else:
self.EQUALS_MATCH(guard)
def NN_MODULE(self, guard: Guard):
self.ID_MATCH(guard)
ref = self.arg_ref(guard)
val = self.get(guard.name)
def setup_guard():
assert istype(val.training, bool)
self.code.append(f"{ref}.training == {val.training}")
if hasattr(val, "training"):
# There are cases where a monkeypatched object has a guard made between __new__ and __init__
setup_guard()
else:
unimplemented(f"Guard setup for uninitialized class {type(val)}")
def FUNCTION_MATCH(self, guard: Guard):
"""things like torch.add and user defined functions"""
if guard.is_local():
return self.ID_MATCH(guard)
def BUILTIN_MATCH(self, guard: Guard):
return self.FUNCTION_MATCH(guard)
def PYMODULE_MATCH(self, guard: Guard):
return self.FUNCTION_MATCH(guard)
def LIST_LENGTH(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"len({ref}) == {len(value)}")
self._produce_guard_code(guard, code)
def TUPLE_ITERATOR_LEN(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"___tuple_iterator_len({ref}) == {tuple_iterator_len(value)}")
self._produce_guard_code(guard, code)
def DICT_KEYS(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
param_key_ids = set(dict_param_key_ids(value))
const_keys = set(dict_const_keys(value))
const_keys_repr = dict_const_keys_repr(const_keys)
if param_key_ids:
code.append(f"___dict_param_key_ids({ref}) == {param_key_ids!r}")
code.append(f"___dict_const_keys({ref}) == {const_keys_repr}")
else:
code.append(f"set({ref}.keys()) == {const_keys_repr}")
self._produce_guard_code(guard, code)
def WEAKREF_ALIVE(self, guard):
self._produce_guard_code(guard, [f"{self.arg_ref(guard)} is not None"])
def NN_MODULE_PARAM_NAMES(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
keys = {k for k, v in value.named_parameters()}
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"{{k for k, v in {ref}.named_parameters()}} == {keys!r}")
self._produce_guard_code(guard, code)
def ODICT_KEYS(self, guard):
"""OrderedDict keys match"""
ref = self.arg_ref(guard)
value = self.get(guard.name)
t = type(value)
code = list()
code.append(f"___check_type_id({ref}, {self.id_ref(t)})")
code.append(f"str({ref}.keys()) == {str(value.keys())!r}")
self._produce_guard_code(guard, code)
def OBJECT_MUTATION(self, guard: Guard):
mutation_guard.watch(self.get(guard.name), self.check_fn_manager)
def GRAD_MODE(self, guard: Guard):
"""Guard on the initial grad state"""
assert guard.name == ""
assert guard.source is GuardSource.GLOBAL
code = None
if convert_frame.initial_grad_state:
code = "___is_grad_enabled()"
else:
code = "not ___is_grad_enabled()"
self._produce_guard_code(guard, [code])
def SHAPE_ENV(self, guard: Guard):
# Let's handle ShapeEnv guards. To do this, we will resolve
# shape variables to sources from tracked_fakes. This must happen after
# tensor checks.
assert guard.name == ""
output_graph = self.check_fn_manager.output_graph
# NB: self.output_graph can be None in the debug_nops tests
fs = output_graph.tracked_fakes
constraint_inputs = [a.constraint_dims for a in fs]
guards = output_graph.shape_env.produce_guards(
[a.fake for a in fs],
[a.source for a in fs],
constraint_inputs=constraint_inputs,
source_ref=self.source_ref,
)
for shape_guard in guards:
self._produce_guard_code(guard, [shape_guard], shape_env=True)
def TENSOR_MATCH(self, guard: Guard):
if guard.is_nn_module():
self.ID_MATCH(guard)
else:
value = self.get(guard.name)
assert isinstance(value, torch.Tensor)
tensor_name = self.arg_ref(guard)
# [Note - On Export Tensor Guards]
#
# In eager mode, tensor guards are evaluated through C++, in guards.cpp
# see [Note - On Eager Tensor Guards] for more info.
#
# In export mode, we instead maintain parallel logic between C++ and python
# here, with an exception of checking the dispatch key - with the idea that a dispatch key
# is an entirely runtime notion that would make no sense to keep in an exported graph.
#
# Now, this idea is okay, but to paraphrase @ezyang, this mental model is sufficient for now, although
# not entirely true.
# For example, suppose one of the input tensors had the negative dispatch key.
# You should end up with a graph that is specialized for tensors that have a negative dispatch key.
# If you allow a Tensor that does NOT have this bit set, you will accidentally run it "as if" it were negated.
# Now, negative key only shows up for complex numbers, and most likely, the exported to target doesn't
# support this feature at all, but the point stands that :some: tensor state only shows up on dispatch key.
# TODO(voz): Either populate a dispatch_key check into the guards, or error on users passing in an unsupported
# subset of keys during export.
#
# The list of tensor fields and calls we care about can be found in `terms` below.
# TODO(voz): We are missing storage offset in all our tensor guards?
code: List[str] = list()
if self.check_fn_manager.output_graph.export:
self.TYPE_MATCH(guard)
terms = [
"dtype",
"device.type",
"device.index",
"requires_grad",
"ndimension()",
]
if not config.dynamic_shapes:
terms.append("stride()")
# We need to do this to avoid the torch.Size type in guards
code.append(f"{tensor_name}.shape == {tuple(value.shape)}")
for term in terms:
real_value = self.get(tensor_name + "." + term)
code.append(f"{tensor_name}.{term} == {real_value}")
else:
self.tensor_check_names.append(tensor_name)
self.tensor_check_examples.append(value)
# A frame is valid for reuse with dynamic dimensions if the new dynamic dimensions are a
# strict subset of the old.
#
# The logic here is as follows:
#
# Every mark_dynamic directive is a user-knows-best command, which can incur a raise at tracing
# time if we find guards that run counter to the user directive.
# If compiling a frame with explicit dynamic dims X could cause an exception, we MUST NOT skip compiling.
#
# If the frame is compiled with any marked dynamic indices, let's call that set of indices X.
# When we evaluated inputs against the guards, given the same tensor with potentially new dynamic indices,
# let's call that set Y.
#
# When X is a strict subset of Y, the potential new raises introduced during compilation are a strict subset
# of the raises we
# could have encountered. The frame compiled under Y is safe to reuse with X.
# When X is not a strict subset of Y, the non-overlapping new elements of X may cause new raises, and the
# frame is no longer fit for reuse.
#
# This is the case because any newly introduced mark_dynamic directives have a chance of
# raising, failing compilation. Any existing mark_dynamic indices that we lost are safe to lose
# as all it means is that we have gotten rid of a user directive which could incur a raise at compile time.
# In the case of when there is no Y, that is, there are no dynamic indices marked at all, the frame is safe
# to reuse
# as an empty set is a safe degeneration - that is, a strictly static tensor is always valid for a frame
# compiled with that same
# tensor + more onerous user directives.
assert guard.source is not None
static, reason = tensor_always_has_static_shape(value, is_tensor=True)
if not static:
if hasattr(value, "_dynamo_dynamic_indices"):
code.append(
f"({tensor_name}._dynamo_dynamic_indices.issubset({value._dynamo_dynamic_indices})) if hasattr({tensor_name}, '_dynamo_dynamic_indices') else True" # noqa: B950
)
# In the case of us not having any dynamic dimension indices, we compiled the frame with no chance of
# raising for this specific tensor - and any inputs with more dynamic user directives specified must be recompiled.
else:
code.append(
f"hasattr({tensor_name}, '_dynamo_dynamic_indices') == False"
)
else:
assert not hasattr(
value, "_dynamo_dynamic_indices"
), f"Illegal Unreachable state, guard accumulation for dynamic tensor that should have been static. Initial static message: {tensor_static_reason_to_message(reason)}" # noqa: B950
if len(code) > 0:
self._produce_guard_code(guard, code)
# A util that appends guarded code, or, in the case of export, adds data onto guards
def _produce_guard_code(
self, guard, code_list, provided_guarded_object=None, shape_env=False
):
# WARNING: It is important that cur_frame/caller do NOT stay in
# the current frame, because they will keep things live longer
# than they should. See TestMisc.test_release_module_memory
cur_frame = currentframe()
assert cur_frame is not None
caller = cur_frame.f_back
del cur_frame
assert caller is not None
func_name = getframeinfo(caller)[2]
del caller
# We use func_name for export, so might as well get a nice defensive check out of it
assert func_name in dir(
self.__class__
), f"_produce_guard_code must be called from inside GuardedCode. Called from {func_name}"
if shape_env:
self.shape_env_code.extend(code_list)
else:
self.code.extend(code_list)
# Not all guards have names, some can be installed globally (see asserts on HAS_GRAD)
if provided_guarded_object is None:
name_valid = guard.name is not None and guard.name != ""
guarded_object = self.get(guard.name) if name_valid else None
else:
guarded_object = provided_guarded_object
guarded_object_type = (
weakref.ref(type(guarded_object)) if guarded_object is not None else None
)
obj_ref = None
if hasattr(guarded_object.__class__, "__weakref__"):
obj_ref = weakref.ref(guarded_object)
guard.set_export_info(
func_name,
guarded_object_type,
code_list,
obj_ref,
)
# NB: Naively, you'd expect this to only be a function that produces
# the callable that constitutes the guard. However, there is some
# delicate handling for invalidating this check function when the
# locals/globals get invalidated, so there's some extra state
# we have to hold in this manager class.
#
# TODO: this object has reference cycle with itself, via check_fn which
# references back to CheckFunction via ___guarded_code in closure_vars.
# Ideally, there shouldn't be any ref cycle so that guards are
# promptly disposed of.
class CheckFunctionManager:
def __init__(
self,
output_graph=None,
f_locals: Optional[Dict[str, object]] = None,
f_globals: Optional[Dict[str, object]] = None,
guard_fail_fn: Optional[Callable[[Tuple[str, str]], None]] = None,
):
guards = output_graph.guards if output_graph else None
self.valid = True
self._weakrefs: List["ReferenceType[object]"] = []
self._seen_ids: Set[int] = set()
self.output_graph = output_graph
# Note: right overrides left
def combine_scopes(left, right):
if left is None:
return right
if right is None:
return left
return {**left, **right}
def source_ref(source):
guard_source = source.guard_source()
if guard_source is GuardSource.CONSTANT:
# No need to track constants
return source.name()
builder = guard_source.select(w_local(), w_global())
assert builder is not None
return builder.arg_ref(source.name())
local_builder = GuardBuilder(
self.id_ref,
source_ref,
combine_scopes(f_globals, f_locals),
self,
renames=True,
)
global_builder = GuardBuilder(
self.id_ref, source_ref, f_globals, self, renames=False
)
# source_ref can cause a cycle, make sure we break it with weakref
w_local = weakref.ref(local_builder)
w_global = weakref.ref(global_builder)
for guard in sorted(guards or [], key=Guard.sort_key):
if (
not config.guard_nn_modules
and guard.is_nn_module()
# Default func args must be guarded on.
# TODO: we could make use of 'DefaultsSource' and offer a .guard.is_defaults() API
and "__defaults__" not in guard.name
and "__kwdefaults__" not in guard.name
and (config.skip_nnmodule_hook_guards or "hooks" not in guard.name)
):
continue
guard.create(local_builder, global_builder)
self.check_fn = self.compile_check_fn(
local_builder, global_builder, guards, guard_fail_fn
)
self._seen_ids.clear()
def compile_check_fn(
self, local_builder, global_builder, guards_out, guard_fail_fn
):
assert not (set(local_builder.argnames) & set(global_builder.argnames))
# see parallel handling of ".0" / "___implicit0" in _eval_frame.c
largs = [a for a in local_builder.scope.keys() if a == "___implicit0"]
largs += [a for a in local_builder.argnames if a != "___implicit0"]
largs += ["**___kwargs_ignored"]
args = ",".join(largs)
code_parts = (
["___guarded_code.valid"] + local_builder.code + global_builder.code
)
# TODO(whc) maybe only the 'check_tensors' one is ambiguous? if so we can be less general..
verbose_code_parts = (
["___guarded_code.valid"] + local_builder.code + global_builder.code
)
tensor_check_names = (
local_builder.tensor_check_names + global_builder.tensor_check_names
)
check_tensors_fn = None
check_tensors_verbose_fn = None
if tensor_check_names:
assert (
not self.output_graph.export
), "Illegal to set tensor_check_names in export."
tensor_check_examples = (
local_builder.tensor_check_examples
+ global_builder.tensor_check_examples
)
tensor_guards = TensorGuards(
*tensor_check_examples, dynamic_shapes=config.dynamic_shapes
)
check_tensors_fn = tensor_guards.check
check_tensors_verbose_fn = tensor_guards.check_verbose
code_parts.append(f"___check_tensors({', '.join(tensor_check_names)})")
verbose_args = ", ".join(
tensor_check_names + ["tensor_check_names=tensor_check_names"]
)
verbose_code_parts.append(f"___check_tensors_verbose({verbose_args})")
aotautograd_guards: List[GuardEnvExpr] = (
self.output_graph.tracing_context.guards_context.aotautograd_guards
if self.output_graph
else []
)
for guard in aotautograd_guards:
if isinstance(guard, DuplicateInputs):
source_a = guard.input_source_a
source_b = guard.input_source_b
code_part = f"{source_a.name()} is {source_b.name()}"
code_parts.append(code_part)
verbose_code_parts.append(code_part)
else:
raise RuntimeError(f"Unknown GuardEnvExpr: {guard}")
code_parts.extend(local_builder.shape_env_code)
verbose_code_parts.extend(local_builder.shape_env_code)
assert not global_builder.shape_env_code
code = " and ".join(unique(code_parts))
closure_vars = collections.OrderedDict(
[
("___guarded_code", self),
("___check_tensors", check_tensors_fn),
("___check_tensors_verbose", check_tensors_verbose_fn),
("tensor_check_names", tensor_check_names),
]
+ list(SYMPY_INTERP.items())
)
closure_vars.update(CLOSURE_VARS)
py_code = f"""\
def ___make_guard_fn({','.join(closure_vars.keys())}):
return lambda {args}: {code}
"""
if os.environ.get("TORCHDYNAMO_PRINT_GUARDS", None) == "1":
print("GUARDS", code)
set_guard_fail_hook(guard_fail_hook)
out: Dict[str, Any] = dict()
# print("RUNNING PY CODE", py_code)
exec(py_code, global_builder.scope, out)
guard_fn = out["___make_guard_fn"](*closure_vars.values())
guard_fn.closure_vars = closure_vars
# TODO(whc) maybe '.code_parts' was only kept around for the guard callback? so we don't need both
guard_fn.args = largs
guard_fn.code_parts = code_parts
guard_fn.verbose_code_parts = verbose_code_parts
guard_fn.global_scope = global_builder.scope
guard_fn.guard_fail_fn = guard_fail_fn
return guard_fn
def invalidate(self, ref):
# A weakref is no longer valid, self.check_fn should return false
self.valid = False
def id_ref(self, obj):
"""add a weakref, return the id"""
try:
if id(obj) not in self._seen_ids:
self._weakrefs.append(weakref.ref(obj, self.invalidate))
self._seen_ids.add(id(obj))
except TypeError:
pass # cannot weakref bool object
return id(obj)
stashed_first_fail_reason = None
def guard_fail_hook(
guard_fn: GuardFn,
code: types.CodeType,
f_locals: Dict[str, object],
index: int,
last: bool,
) -> None:
"""
called whenever a guard fails.
"""
first = index == 0
global stashed_first_fail_reason
# Don't waste time computing the fail reason for guards we aren't going to report out.
if not guard_fn.guard_fail_fn and not (first or last):
return
scope = {rename_implicit(k): v for k, v in f_locals.items()}
scope.update(guard_fn.closure_vars)
reason = None
for part in guard_fn.verbose_code_parts:
fail_reason = eval(part, guard_fn.global_scope, scope)
# TODO(whc) hacky for now as not every 'part' in guard_fn.verbose_code_parts
# is updated to return a string explaining the failure.
if isinstance(fail_reason, str):
reason = fail_reason
break
elif isinstance(fail_reason, bool) and not fail_reason:
reason = part
break
if first:
stashed_first_fail_reason = reason
if not last:
return
# Technically, we're failing our last guard, which is our oldest guard due to the
# eval_frame.c logic that moves newest frames to head, but for logging purposes
# it's more useful to see the 'first' failure (if we never got a hit) since it's
# likely not yet been logged as a failure reason in a case of repeating failures.
assert stashed_first_fail_reason
guard_failures[orig_code_map[code]].append(stashed_first_fail_reason)
stashed_first_fail_reason = None
# TODO should we GuardFail our stashed_first_fail_reason too?
try:
if guard_fn.guard_fail_fn is not None:
guard_fn.guard_fail_fn(
GuardFail(reason or "unknown reason", orig_code_map[code])
)
except Exception as e:
log.error(
"Failure in guard_fail_fn callback - raising here will cause a NULL Error on guard eval",
exc_info=True,
)
def guard_error_hook(
guard_fn: GuardFn,
code: types.CodeType,
f_locals: Dict[str, object],
index: int,
last: bool,
):
print(
f"ERROR RUNNING GUARDS {code.co_name} {code.co_filename}:{code.co_firstlineno}"
)
# TODO: If we passed in the exception here, we could get a precise
# column number of which subexpression failed. But that would also
# require us to have the TRUE code that was eval'ed, not a shoddy
# reconstruction (like is done here)
print("lambda " + ", ".join(guard_fn.args) + ":")
print(" ", " and\n ".join(guard_fn.code_parts))
set_guard_error_hook(guard_error_hook)
def unique(seq):
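    """Yield the items of *seq* in first-seen order, skipping duplicates."""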
seen = set()
for x in seq:
if x not in seen:
yield x
seen.add(x)
| [
"[email protected]"
] | |
7b46bd89e96c7e6548ef6f816becf00ba0d630ee | a56a74b362b9263289aad96098bd0f7d798570a2 | /venv/lib/python3.8/site-packages/matplotlib/backends/backend_agg.py | 67e80611ef10e78892a747eb9f010880cb4577dc | [
"MIT"
] | permissive | yoonkt200/ml-theory-python | 5812d06841d30e1068f6592b5730a40e87801313 | 7643136230fd4f291b6e3dbf9fa562c3737901a2 | refs/heads/master | 2022-12-21T14:53:21.624453 | 2021-02-02T09:33:07 | 2021-02-02T09:33:07 | 132,319,537 | 13 | 14 | MIT | 2022-12-19T17:23:57 | 2018-05-06T08:17:45 | Python | UTF-8 | Python | false | false | 22,347 | py | """
An agg http://antigrain.com/ backend
Features that are implemented
* capstyles and join styles
* dashes
* linewidth
* lines, rectangles, ellipses
* clipping to a rectangle
* output to RGBA and PNG, optionally JPEG and TIFF
* alpha blending
* DPI scaling properly - everything scales properly (dashes, linewidths, etc)
* draw polygon
* freetype2 w/ ft2font
TODO:
* integrate screen dpi w/ ppi and text
"""
try:
import threading
except ImportError:
import dummy_threading as threading
try:
from contextlib import nullcontext
except ImportError:
from contextlib import ExitStack as nullcontext # Py 3.6.
from math import radians, cos, sin
import numpy as np
from matplotlib import cbook, rcParams, __version__
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.font_manager import findfont, get_font
from matplotlib.ft2font import (LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING,
LOAD_DEFAULT, LOAD_NO_AUTOHINT)
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, BboxBase
from matplotlib import colors as mcolors
from matplotlib.backends._backend_agg import RendererAgg as _RendererAgg
from matplotlib.backend_bases import _has_pil
if _has_pil:
from PIL import Image
backend_version = 'v2.2'
def get_hinting_flag():
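    """Map the ``text.hinting`` rcParam value to its FreeType load flag."""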
mapping = {
True: LOAD_FORCE_AUTOHINT,
False: LOAD_NO_HINTING,
'either': LOAD_DEFAULT,
'native': LOAD_NO_AUTOHINT,
'auto': LOAD_FORCE_AUTOHINT,
'none': LOAD_NO_HINTING
}
return mapping[rcParams['text.hinting']]
class RendererAgg(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles
"""
# we want to cache the fonts at the class level so that when
# multiple figures are created we can reuse them. This helps with
# a bug on windows where the creation of too many figures leads to
# too many open file handles. However, storing them at the class
# level is not thread safe. The solution here is to let the
# FigureCanvas acquire a lock on the fontd at the start of the
# draw, and release it when it is done. This allows multiple
# renderers to share the cached fonts, but only one figure can
# draw at time and so the font cache is used by only one
# renderer at a time.
lock = threading.RLock()
def __init__(self, width, height, dpi):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self._renderer = _RendererAgg(int(width), int(height), dpi)
self._filter_renderers = []
self._update_methods()
self.mathtext_parser = MathTextParser('Agg')
self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
def __getstate__(self):
# We only want to preserve the init keywords of the Renderer.
# Anything else can be re-created.
return {'width': self.width, 'height': self.height, 'dpi': self.dpi}
def __setstate__(self, state):
self.__init__(state['width'], state['height'], state['dpi'])
def _update_methods(self):
self.draw_gouraud_triangle = self._renderer.draw_gouraud_triangle
self.draw_gouraud_triangles = self._renderer.draw_gouraud_triangles
self.draw_image = self._renderer.draw_image
self.draw_markers = self._renderer.draw_markers
self.draw_path_collection = self._renderer.draw_path_collection
self.draw_quad_mesh = self._renderer.draw_quad_mesh
self.copy_from_bbox = self._renderer.copy_from_bbox
self.get_content_extents = self._renderer.get_content_extents
def tostring_rgba_minimized(self):
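        """
        Return a copy of the drawn region of the canvas as an array, along
        with the (l, b, w, h) extents of that region.
        """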
extents = self.get_content_extents()
bbox = [[extents[0], self.height - (extents[1] + extents[3])],
[extents[0] + extents[2], self.height - extents[1]]]
region = self.copy_from_bbox(bbox)
return np.array(region), extents
def draw_path(self, gc, path, transform, rgbFace=None):
# docstring inherited
nmax = rcParams['agg.path.chunksize'] # here at least for testing
npts = path.vertices.shape[0]
if (nmax > 100 and npts > nmax and path.should_simplify and
rgbFace is None and gc.get_hatch() is None):
nch = np.ceil(npts / nmax)
chsize = int(np.ceil(npts / nch))
i0 = np.arange(0, npts, chsize)
i1 = np.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1, :]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
try:
self._renderer.draw_path(gc, p, transform, rgbFace)
except OverflowError:
raise OverflowError("Exceeded cell block limit (set "
"'agg.path.chunksize' rcparam)")
else:
try:
self._renderer.draw_path(gc, path, transform, rgbFace)
except OverflowError:
raise OverflowError("Exceeded cell block limit (set "
"'agg.path.chunksize' rcparam)")
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
xd = descent * sin(radians(angle))
yd = descent * cos(radians(angle))
x = round(x + ox + xd)
y = round(y - oy + yd)
self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# docstring inherited
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
flags = get_hinting_flag()
font = self._get_agg_font(prop)
if font is None:
return None
# We pass '0' for angle here, since it will be rotated (in raster
# space) in the following call to draw_text_image).
font.set_text(s, 0, flags=flags)
font.draw_glyphs_to_bitmap(antialiased=rcParams['text.antialiased'])
d = font.get_descent() / 64.0
# The descent needs to be adjusted for the angle.
xo, yo = font.get_bitmap_offset()
xo /= 64.0
yo /= 64.0
xd = d * sin(radians(angle))
yd = d * cos(radians(angle))
x = round(x + xo + xd)
y = round(y + yo + yd)
self._renderer.draw_text_image(font, x, y + 1, angle, gc)
def get_text_width_height_descent(self, s, prop, ismath):
# docstring inherited
if ismath in ["TeX", "TeX!"]:
# todo: handle props
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(
s, fontsize, renderer=self)
return w, h, d
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
flags = get_hinting_flag()
font = self._get_agg_font(prop)
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height() # width and height of unrotated string
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
# docstring inherited
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
Z = np.array(Z * 255.0, np.uint8)
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
xd = d * sin(radians(angle))
yd = d * cos(radians(angle))
x = round(x + xd)
y = round(y + yd)
self._renderer.draw_text_image(Z, x, y, angle, gc)
def get_canvas_width_height(self):
# docstring inherited
return self.width, self.height
def _get_agg_font(self, prop):
"""
Get the font for text instance t, caching for efficiency
"""
fname = findfont(prop)
font = get_font(fname)
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def points_to_pixels(self, points):
# docstring inherited
return points * self.dpi / 72
def buffer_rgba(self):
return memoryview(self._renderer)
def tostring_argb(self):
return np.asarray(self._renderer).take([3, 0, 1, 2], axis=2).tobytes()
def tostring_rgb(self):
return np.asarray(self._renderer).take([0, 1, 2], axis=2).tobytes()
def clear(self):
self._renderer.clear()
def option_image_nocomposite(self):
# docstring inherited
# It is generally faster to composite each image directly to
# the Figure, and there's no file size benefit to compositing
# with the Agg backend
return True
def option_scale_image(self):
# docstring inherited
return False
def restore_region(self, region, bbox=None, xy=None):
"""
Restore the saved region. If bbox (instance of BboxBase, or
its extents) is given, only the region specified by the bbox
will be restored. *xy* (a pair of floats) optionally
specifies the new position (the LLC of the original region,
not the LLC of the bbox) where the region will be restored.
>>> region = renderer.copy_from_bbox()
>>> x1, y1, x2, y2 = region.get_extents()
>>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2),
... xy=(x1-dx, y1))
"""
if bbox is not None or xy is not None:
if bbox is None:
x1, y1, x2, y2 = region.get_extents()
elif isinstance(bbox, BboxBase):
x1, y1, x2, y2 = bbox.extents
else:
x1, y1, x2, y2 = bbox
if xy is None:
ox, oy = x1, y1
else:
ox, oy = xy
# The incoming data is float, but the _renderer type-checking wants
# to see integers.
self._renderer.restore_region(region, int(x1), int(y1),
int(x2), int(y2), int(ox), int(oy))
else:
self._renderer.restore_region(region)
def start_filter(self):
"""
        Start filtering. It simply creates a new canvas (the old one is saved).
"""
self._filter_renderers.append(self._renderer)
self._renderer = _RendererAgg(int(self.width), int(self.height),
self.dpi)
self._update_methods()
def stop_filter(self, post_processing):
"""
        Save the plot in the current canvas as an image and apply
the *post_processing* function.
def post_processing(image, dpi):
# ny, nx, depth = image.shape
# image (numpy array) has RGBA channels and has a depth of 4.
...
# create a new_image (numpy array of 4 channels, size can be
# different). The resulting image may have offsets from
# lower-left corner of the original image
return new_image, offset_x, offset_y
The saved renderer is restored and the returned image from
post_processing is plotted (using draw_image) on it.
"""
width, height = int(self.width), int(self.height)
buffer, (l, b, w, h) = self.tostring_rgba_minimized()
self._renderer = self._filter_renderers.pop()
self._update_methods()
if w > 0 and h > 0:
img = np.frombuffer(buffer, np.uint8)
img, ox, oy = post_processing(img.reshape((h, w, 4)) / 255.,
self.dpi)
gc = self.new_gc()
if img.dtype.kind == 'f':
img = np.asarray(img * 255., np.uint8)
img = img[::-1]
self._renderer.draw_image(gc, l + ox, height - b - h + oy, img)
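# A minimal sketch of a *post_processing* callable for the
# start_filter()/stop_filter() pair above. The 0.5 dimming factor and the
# two-point offsets are arbitrary illustrative assumptions, not values taken
# from this module.
def _example_shadow_post_processing(image, dpi):
    # `image` arrives as an (ny, nx, 4) float RGBA array scaled to [0, 1].
    shadow = np.zeros_like(image)
    shadow[..., 3] = image[..., 3] * 0.5  # keep only a dimmed alpha channel
    # Offset the shadow by two points, converted to pixels at this dpi.
    return shadow, 2 * dpi / 72, -2 * dpi / 72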
class FigureCanvasAgg(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Attributes
----------
figure : `matplotlib.figure.Figure`
A high-level Figure instance
"""
def copy_from_bbox(self, bbox):
renderer = self.get_renderer()
return renderer.copy_from_bbox(bbox)
def restore_region(self, region, bbox=None, xy=None):
renderer = self.get_renderer()
return renderer.restore_region(region, bbox, xy)
def draw(self):
"""
Draw the figure using the renderer.
"""
self.renderer = self.get_renderer(cleared=True)
# Acquire a lock on the shared font cache.
with RendererAgg.lock, \
(self.toolbar._wait_cursor_for_draw_cm() if self.toolbar
else nullcontext()):
self.figure.draw(self.renderer)
        # A GUI class may need to update a window using this draw, so
# don't forget to call the superclass.
super().draw()
def get_renderer(self, cleared=False):
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
reuse_renderer = (hasattr(self, "renderer")
and getattr(self, "_lastKey", None) == key)
if not reuse_renderer:
self.renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
elif cleared:
self.renderer.clear()
return self.renderer
def tostring_rgb(self):
"""Get the image as an RGB byte string.
`draw` must be called at least once before this function will work and
to update the renderer for any subsequent changes to the Figure.
Returns
-------
bytes
"""
return self.renderer.tostring_rgb()
def tostring_argb(self):
"""Get the image as an ARGB byte string.
`draw` must be called at least once before this function will work and
to update the renderer for any subsequent changes to the Figure.
Returns
-------
bytes
"""
return self.renderer.tostring_argb()
def buffer_rgba(self):
"""Get the image as a memoryview to the renderer's buffer.
`draw` must be called at least once before this function will work and
to update the renderer for any subsequent changes to the Figure.
Returns
-------
memoryview
"""
return self.renderer.buffer_rgba()
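    # Typical headless use of this canvas (a sketch; `Figure` lives in
    # matplotlib.figure and is not imported by this module):
    #
    #     from matplotlib.figure import Figure
    #     fig = Figure(figsize=(3, 2), dpi=100)
    #     fig.subplots().plot([1, 2, 3])
    #     canvas = FigureCanvasAgg(fig)
    #     canvas.draw()                            # required before reading pixels
    #     rgba = np.asarray(canvas.buffer_rgba())  # (H, W, 4) uint8 view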
def print_raw(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
with cbook.open_file_cm(filename_or_obj, "wb") as fh:
fh.write(renderer.buffer_rgba())
print_rgba = print_raw
def print_png(self, filename_or_obj, *args,
metadata=None, pil_kwargs=None,
**kwargs):
"""
Write the figure to a PNG file.
Parameters
----------
filename_or_obj : str or PathLike or file-like object
The file to write to.
metadata : dict, optional
Metadata in the PNG file as key-value pairs of bytes or latin-1
encodable strings.
According to the PNG specification, keys must be shorter than 79
chars.
The `PNG specification`_ defines some common keywords that may be
used as appropriate:
- Title: Short (one line) title or caption for image.
- Author: Name of image's creator.
- Description: Description of image (possibly long).
- Copyright: Copyright notice.
- Creation Time: Time of original image creation
(usually RFC 1123 format).
- Software: Software used to create the image.
- Disclaimer: Legal disclaimer.
- Warning: Warning of nature of content.
- Source: Device used to create the image.
- Comment: Miscellaneous comment;
conversion from other image format.
Other keywords may be invented for other purposes.
If 'Software' is not given, an autogenerated value for matplotlib
will be used.
For more details see the `PNG specification`_.
.. _PNG specification: \
https://www.w3.org/TR/2003/REC-PNG-20031110/#11keywords
pil_kwargs : dict, optional
If set to a non-None value, use Pillow to save the figure instead
of Matplotlib's builtin PNG support, and pass these keyword
arguments to `PIL.Image.save`.
If the 'pnginfo' key is present, it completely overrides
*metadata*, including the default 'Software' key.
"""
from matplotlib import _png
if metadata is None:
metadata = {}
default_metadata = {
"Software":
f"matplotlib version{__version__}, http://matplotlib.org/",
}
FigureCanvasAgg.draw(self)
if pil_kwargs is not None:
from PIL import Image
from PIL.PngImagePlugin import PngInfo
# Only use the metadata kwarg if pnginfo is not set, because the
# semantics of duplicate keys in pnginfo is unclear.
if "pnginfo" in pil_kwargs:
if metadata:
cbook._warn_external("'metadata' is overridden by the "
"'pnginfo' entry in 'pil_kwargs'.")
else:
pnginfo = PngInfo()
for k, v in {**default_metadata, **metadata}.items():
pnginfo.add_text(k, v)
pil_kwargs["pnginfo"] = pnginfo
pil_kwargs.setdefault("dpi", (self.figure.dpi, self.figure.dpi))
(Image.fromarray(np.asarray(self.buffer_rgba()))
.save(filename_or_obj, format="png", **pil_kwargs))
else:
renderer = self.get_renderer()
with cbook.open_file_cm(filename_or_obj, "wb") as fh:
_png.write_png(renderer._renderer, fh, self.figure.dpi,
metadata={**default_metadata, **metadata})
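    # Example calls (a sketch): embedding PNG metadata, or letting Pillow
    # write the file; `compress_level` is a standard Pillow PNG save option.
    #
    #     canvas.print_png("figure.png",
    #                      metadata={"Title": "demo", "Author": "me"})
    #     canvas.print_png("figure.png", pil_kwargs={"compress_level": 1})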
def print_to_buffer(self):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
return (bytes(renderer.buffer_rgba()),
(int(renderer.width), int(renderer.height)))
if _has_pil:
# Note that these methods should typically be called via savefig() and
# print_figure(), and the latter ensures that `self.figure.dpi` already
# matches the dpi kwarg (if any).
@cbook._delete_parameter("3.2", "dryrun")
def print_jpg(self, filename_or_obj, *args, dryrun=False,
pil_kwargs=None, **kwargs):
"""
Write the figure to a JPEG file.
Parameters
----------
filename_or_obj : str or PathLike or file-like object
The file to write to.
Other Parameters
----------------
quality : int
The image quality, on a scale from 1 (worst) to 100 (best).
The default is :rc:`savefig.jpeg_quality`. Values above
95 should be avoided; 100 completely disables the JPEG
quantization stage.
optimize : bool
If present, indicates that the encoder should
make an extra pass over the image in order to select
optimal encoder settings.
progressive : bool
If present, indicates that this image
should be stored as a progressive JPEG file.
pil_kwargs : dict, optional
Additional keyword arguments that are passed to
`PIL.Image.save` when saving the figure. These take precedence
over *quality*, *optimize* and *progressive*.
"""
FigureCanvasAgg.draw(self)
if dryrun:
return
# The image is pasted onto a white background image to handle
# transparency.
image = Image.fromarray(np.asarray(self.buffer_rgba()))
background = Image.new('RGB', image.size, "white")
background.paste(image, image)
if pil_kwargs is None:
pil_kwargs = {}
for k in ["quality", "optimize", "progressive"]:
if k in kwargs:
pil_kwargs.setdefault(k, kwargs[k])
pil_kwargs.setdefault("quality", rcParams["savefig.jpeg_quality"])
pil_kwargs.setdefault("dpi", (self.figure.dpi, self.figure.dpi))
return background.save(
filename_or_obj, format='jpeg', **pil_kwargs)
print_jpeg = print_jpg
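        # Example call (a sketch): a smaller, progressive JPEG via Pillow.
        #
        #     canvas.print_jpg("figure.jpg",
        #                      pil_kwargs={"quality": 80, "progressive": True})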
@cbook._delete_parameter("3.2", "dryrun")
def print_tif(self, filename_or_obj, *args, dryrun=False,
pil_kwargs=None, **kwargs):
FigureCanvasAgg.draw(self)
if dryrun:
return
if pil_kwargs is None:
pil_kwargs = {}
pil_kwargs.setdefault("dpi", (self.figure.dpi, self.figure.dpi))
return (Image.fromarray(np.asarray(self.buffer_rgba()))
.save(filename_or_obj, format='tiff', **pil_kwargs))
print_tiff = print_tif
@_Backend.export
class _BackendAgg(_Backend):
FigureCanvas = FigureCanvasAgg
FigureManager = FigureManagerBase
# --- /toontown/building/DistributedHQInteriorAI.py | repo: qphoton/Reverse_Engineering_Project_ToonTown | license: none | Python ---
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
import cPickle
class DistributedHQInteriorAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedHQInteriorAI")
def __init__(self, air):
DistributedObjectAI.__init__(self, air)
self.isTutorial = False
self.zoneId = 0
self.block = 0
self.leaderData = cPickle.dumps(([], [], []))
def setZoneIdAndBlock(self, zoneId, block):
self.zoneId = zoneId
self.block = block
def setLeaderBoard(self, leaderData):
self.leaderData = leaderData
    def setTutorial(self, isTutorial):
        # the incoming flag is ignored; this interior always reports non-tutorial
        self.isTutorial = False
def getZoneIdAndBlock(self):
return (self.zoneId, self.block)
def getLeaderBoard(self):
return self.leaderData
def getTutorial(self):
        return self.isTutorial
# --- /demo/other/demo_fomat.py | repo: azhenglianxi/api_Project | license: none | Python ---
name="tom"
age=20
#1. Output: 你好,tom先生,今晚吃鸡!
print(f"你好,{name}先生,今晚吃鸡!")
print("你好,{}先生,今晚吃鸡!".format(name))
#2. Output: 你好,tom先生,今晚{吃鸡}!
print(f"你好,{name}先生,今晚{{吃鸡}}!")
print("你好,{}先生,今晚{{吃鸡}}!".format(name))
#3. Output: 你好,{tom}先生,今晚吃鸡!
print(f"你好,{{{name}}}先生,今晚吃鸡!")
print("你好,{{{}}}先生,今晚吃鸡!".format(name))
print("姓名和年龄分别是:{}、{}".format(name, age)) # 不带编号,顺序填坑
print("姓名和年龄分别是:{1}、{0}".format(age, name)) # 带数字编号、可以变换顺序
print("姓名和年龄分别是:{x}、{y}".format(x='小明', y=age)) # 带关键字
# --- /630.course-schedule-iii.py | repo: chenjienan/python-leetcode | license: none | Python ---
#
# @lc app=leetcode id=630 lang=python3
#
# [630] Course Schedule III
#
import heapq
from typing import List
class Solution:
    def scheduleCourse(self, courses: List[List[int]]) -> int:
courses.sort(key=lambda x: x[1])
day = 0
heap = []
for i in range(len(courses)):
if day + courses[i][0] <= courses[i][1]:
day += courses[i][0]
heapq.heappush(heap, -courses[i][0])
else:
                # deadline exceeded: swap in this course, then drop the
                # longest course taken so far (possibly this one)
heapq.heappush(heap, -courses[i][0])
day += courses[i][0] + heap[0]
heapq.heappop(heap)
return len(heap)
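# A quick self-check (hypothetical driver; the input is the well-known
# example for this problem and the expected answer is 3):
if __name__ == "__main__":
    print(Solution().scheduleCourse([[100, 200], [200, 1300], [1000, 1250], [2000, 3200]]))  # 3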
# --- /libsubmit/version.py | repo: benhg/libsubmit | license: Apache-2.0 | Python ---
''' Set module version
<Major>.<Minor>.<maintenance>[-alpha/beta/..]
'''
VERSION = '0.3.0'
# --- /CSES/1097.py | repo: DebRC/My-Competitve-Programming-Solutions | license: none | Python ---
def removal_game_tab(a, n):
    # dp[i][j] = best achievable score difference (current player minus
    # opponent) when both play optimally on the subarray a[i..j]
    dp=[[0 for i in range(n)] for i in range(n)]
    for i in range(n-1,-1,-1):
        for j in range(i,n):
            if i==j:
                dp[i][i]=a[i]
            else:
                # take the left or the right end, whichever maximizes the lead
                dp[i][j] = max(a[i]-dp[i+1][j],a[j]-dp[i][j-1])
    return dp[0][n-1]
n = int(input())
a = list(map(int, input().split()))
print((sum(a)+removal_game_tab(a,n))//2)
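# Worked example (checked by hand against the dp above): for input
#   4
#   4 5 1 3
# the sum is 13 and the best difference is 3, so the optimal first player
# scores (13 + 3) // 2 = 8 and the script prints 8.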
"[email protected]"
] | |
# --- /hs_collection_resource/migrations/0002_collectiondeletedresource_resource_owners.py | repo: heliumdatacommons/commonsshare | license: BSD-3-Clause, LicenseRef-scancode-unknown-license-reference | Python ---
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('hs_collection_resource', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='collectiondeletedresource',
name='resource_owners',
field=models.ManyToManyField(related_name='collectionDeleted', to=settings.AUTH_USER_MODEL),
),
]
# --- /alipay/aop/api/response/AlipayOpenMiniTipsDeliveryCreateResponse.py | repo: alipay/alipay-sdk-python-all | license: Apache-2.0 | Python ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenMiniTipsDeliveryCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenMiniTipsDeliveryCreateResponse, self).__init__()
self._delivery_id = None
@property
def delivery_id(self):
return self._delivery_id
@delivery_id.setter
def delivery_id(self, value):
self._delivery_id = value
def parse_response_content(self, response_content):
response = super(AlipayOpenMiniTipsDeliveryCreateResponse, self).parse_response_content(response_content)
if 'delivery_id' in response:
self.delivery_id = response['delivery_id']
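# Usage sketch: feeding a raw response payload into the parser. The JSON
# envelope below is an assumption for illustration, not taken from the SDK
# documentation (the parent class decides how the payload is unwrapped).
#
#     resp = AlipayOpenMiniTipsDeliveryCreateResponse()
#     resp.parse_response_content('{"delivery_id": "20200101000000"}')
#     print(resp.delivery_id)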
# --- /.history/src/Simulador_20200711163425.py | repo: eduardodut/Trabalho_final_estatistica_cd | license: MIT | Python ---
import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scipy.sparse import csr_matrix, lil_matrix
class Simulador():
SADIO = 0
    INFECTADO_TIPO_1 = 1 # asymptomatic individuals and the initial infected one
    INFECTADO_TIPO_2 = 2 # symptomatic individuals
CURADO = 3
MORTO = 4
def __init__(
self,
        tamanho_matriz, # number of rows and columns of the spherical matrix
        percentual_inicial_tipo1, # initial share of the population infected as type 1
        percentual_inicial_tipo2, # initial share of the population infected as type 2
        chance_infeccao, # chance a type 2 infected has of infecting a healthy individual
        chance_infeccao_tipo2, # chance an infected individual becomes contagious
        chance_morte, # chance a type 2 individual dies at the end of an update
        atualizacoes_cura): # number of updates needed to cure a type 1 or type 2 individual
self.num_atualizacoes = 0
self.lista_infectados_tipo_2 = []
self.lista_infectados_tipo_1 = []
self.num_curados = 0
self.num_mortos = 0
self.chance_infeccao = chance_infeccao
self.chance_infeccao_tipo2 = chance_infeccao_tipo2
self.chance_morte = chance_morte
self.atualizacoes_cura = atualizacoes_cura
self.populacao_inicial = int(tamanho_matriz**2)
self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
self.num_inicial_tipo1 = 1 + int(self.populacao_inicial * percentual_inicial_tipo1)
self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
self.matriz_status = lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)
self.matriz_atualizacoes_cura = lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)
#self.matriz_status = self.df_individuos.to_numpy()
self.popular(tamanho_matriz)
self.lista_matrizes_status = []
        # object responsible for validating movement on the n x n grid
self.matriz_esferica = Matriz_esferica(tamanho_matriz)
        registro = {
'num_sadios':self.num_inicial_sadios,
'num_infect_t1':self.num_inicial_tipo1,
'num_infect_t2':self.num_inicial_tipo2,
'num_curados':0,
'num_mortos':0}
        # dataframe that will store the results of each update
        self.dataframe = pd.DataFrame(registro, index=[0])
self.salvar_posicionamento()
def criar_individuo(self, status, posicao):
self.matriz_status[posicao[0], posicao[1]] = status
if status == self.INFECTADO_TIPO_1 or status == self.INFECTADO_TIPO_2:
self.matriz_atualizacoes_cura[posicao[0], posicao[1]] = self.atualizacoes_cura
def salvar_posicionamento(self):
self.lista_matrizes_status.append(self.matriz_status)
def verificar_infeccao(self, lista_infectantes):
lista_novos_infectados_tipo1 = []
lista_novos_infectados_tipo2 = []
        # iterate over the list of infecting individuals; each one tries to infect its neighbors
for indice_infectante in lista_infectantes:
            # fetch the neighbors of the current infecting individual
lista_vizinhos = self.matriz_esferica.get_vizinhos(indice_infectante)
            # For each neighbor, if it is healthy, draw a random number to decide whether it gets infected
for indice_vizinho in lista_vizinhos:
                # check for HEALTHY (SADIO)
                if self.verifica_status(indice_vizinho) == self.SADIO:
                    # determine the new status from this simulation's parameters
                    novo_status = self.infectar(self.chance_infeccao, self.chance_infeccao_tipo2)
                    # if it became a type 1 infected
                    if novo_status == Individuo.INFECTADO_TIPO_1:
                        # add to the list of new type 1 infected
                        lista_novos_infectados_tipo1.append(indice_vizinho)
                        self.criar_individuo(Individuo.INFECTADO_TIPO_1,indice_vizinho)
                    if novo_status == Individuo.INFECTADO_TIPO_2:
                        # add to the list of new type 2 infected
                        lista_novos_infectados_tipo2.append(indice_vizinho)
                        self.criar_individuo(Individuo.INFECTADO_TIPO_2,indice_vizinho)
return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2
def checagem_morte_individual(self, chance_morte, indice):
rng_morte = random.random()
if rng_morte <= chance_morte:
self.matriz_status[indice[0], indice[1]] = self.MORTO
return self.MORTO
else:
return self.checar_cura_individual(indice)
def checar_cura_individual(self, indice):
#print("passei na cura")
#num_atualizacoes_restantes = self.matriz_atualizacoes_cura[indice[0], indice[1]]
self.matriz_atualizacoes_cura[indice[0], indice[1]] -= 1
if self.matriz_atualizacoes_cura[indice[0], indice[1]] == 0:
self.matriz_status[indice[0], indice[1]] = self.CURADO
return self.CURADO
else:
return self.matriz_status[indice[0], indice[1]]
def checagem_morte_cura_lista(self, lista_infectantes_tipo2):
lista_curados = []
lista_mortos = []
for indice_infectante in lista_infectantes_tipo2:
novo_status = self.checagem_morte_individual(self.chance_morte, indice_infectante)
if novo_status == Individuo.MORTO:
lista_mortos.append(indice_infectante)
if novo_status == Individuo.CURADO:
lista_curados.append(indice_infectante)
return lista_mortos, lista_curados
def checagem_cura_lista(self, lista_infectantes):
lista_curados = []
for indice_infectante in lista_infectantes:
novo_status = self.checar_cura_individual(indice_infectante)
if novo_status == Individuo.CURADO:
lista_curados.append(indice_infectante)
return lista_curados
def iterar(self):
        # Find the new individuals infected by type 1 and type 2 infecting individuals
print(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
lista_novos_infectados_tipo1, lista_novos_infectados_tipo2 = self.verificar_infeccao(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
        # Check death/cure of the type 2 infected
lista_mortos, lista_curados_t2 = self.checagem_morte_cura_lista(self.lista_infectados_tipo_2)
        # Check cure of the type 1 infected
lista_curados_t1 = self.checagem_cura_lista(self.lista_infectados_tipo_1)
        # remove the dead and the cured from the type 1 and type 2 infecting lists
nova_lista_infectados_t2 = []
for indice in self.lista_infectados_tipo_2:
if indice not in lista_mortos and indice not in lista_curados_t2:
nova_lista_infectados_t2.append(indice)
self.lista_infectados_tipo_2 = nova_lista_infectados_t2
nova_lista_infectados_t1 = []
for indice in self.lista_infectados_tipo_1:
if indice not in lista_curados_t1:
nova_lista_infectados_t1.append(indice)
self.lista_infectados_tipo_1 = nova_lista_infectados_t1
        # update the death count
self.num_mortos = self.num_mortos + len(lista_mortos)
        # update the cured count
self.num_curados = self.num_curados + len(lista_curados_t1) + len(lista_curados_t2)
        # move the infecting individuals:
nova_lista_infectados_t1 = []
for indice in self.lista_infectados_tipo_1:
nova_lista_infectados_t1.append(self.mover_infectante(indice))
self.lista_infectados_tipo_1 = nova_lista_infectados_t1
nova_lista_infectados_t2 = []
for indice in self.lista_infectados_tipo_2:
nova_lista_infectados_t2.append(self.mover_infectante(indice))
self.lista_infectados_tipo_2 = nova_lista_infectados_t2
print(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)
        # add the new type 1 and type 2 infected to their respective lists
self.lista_infectados_tipo_2 = self.lista_infectados_tipo_2 + lista_novos_infectados_tipo2
self.lista_infectados_tipo_1 = self.lista_infectados_tipo_1 + lista_novos_infectados_tipo1
        registro = {
'num_sadios':self.populacao_inicial - self.num_mortos - self.num_curados - len(self.lista_infectados_tipo_1) - len(self.lista_infectados_tipo_2) ,
'num_infect_t1':len(self.lista_infectados_tipo_1),
'num_infect_t2':len(self.lista_infectados_tipo_2),
'num_curados':self.num_curados,
'num_mortos':self.num_mortos}
        self.dataframe = self.dataframe.append(registro, ignore_index=True)
# print("num t1: ", len(self.lista_infectados_tipo_1))
# print("num t2: ", len(self.lista_infectados_tipo_2))
# print("num curados: ", self.num_curados)
# print("num mortos: ", self.num_mortos)
# print("---------")
        # save the new status matrix
self.salvar_posicionamento()
        # add 1 to the number of updates performed on the matrix
self.num_atualizacoes +=1
def infectar(self, chance_infeccao, chance_infeccao_tipo2):
saida = Individuo.SADIO
        # random number for the chance of infecting the neighbor
rng_infeccao = random.random()
if rng_infeccao <= chance_infeccao:
            # random number deciding between a type 1 and a type 2 infection
rng_infeccao_tipo2 = random.random()
if rng_infeccao_tipo2 <= chance_infeccao_tipo2:
saida = Individuo.INFECTADO_TIPO_2
else:
saida = Individuo.INFECTADO_TIPO_1
return saida
def popular(self, tamanho_matriz):
        # list of possible index combinations of the data matrix
permutacoes = permutations(list(range(tamanho_matriz)),2)
        # conversion to a list of (x, y) tuples
lista_indices = list(permutacoes)
        # shuffle the indices
random.shuffle(lista_indices)
        # create the first type 1 individual:
indice = lista_indices.pop()
self.criar_individuo(Individuo.INFECTADO_TIPO_1, indice)
self.lista_infectados_tipo_1.append(indice)
        # create the remaining type 1 individuals
for i in range(1,self.num_inicial_tipo1):
indice = lista_indices.pop()
self.criar_individuo(Individuo.INFECTADO_TIPO_1,indice)
self.lista_infectados_tipo_1.append(indice)
        # create the remaining type 2 individuals:
        for _ in range(self.num_inicial_tipo2):
indice = lista_indices.pop()
self.criar_individuo(Individuo.INFECTADO_TIPO_2,indice)
self.lista_infectados_tipo_2.append(indice)
def trocar(self,matriz,ponto_ini,ponto_final):
x_ini = ponto_ini[0]
y_ini = ponto_ini[1]
x_fin = ponto_final[0]
y_fin = ponto_final[1]
aux = matriz[x_fin,y_fin]
matriz[x_fin,y_fin] = matriz[x_ini,y_ini]
matriz[x_ini,y_ini] = aux
def verifica_status(self, indice):
return self.matriz_status[indice[0], indice[1]]
def mover_infectante(self, posicao_inicial):
pos_x, pos_y = posicao_inicial[0], posicao_inicial[1]
rng_posicao = random.random()
if rng_posicao <=0.25:
            # move up
pos_x -= 1
elif rng_posicao <=0.5:
            # move down
pos_x += 1
elif rng_posicao <=0.75:
            # move left
pos_y -= 1
else:
            # move right
pos_y += 1
posicao_final= self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)
self.trocar(self.matriz_status, posicao_inicial, posicao_final)
self.trocar(self.matriz_atualizacoes_cura, posicao_inicial, posicao_final)
return posicao_final
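# The Matriz_esferica class is not part of this file; from its use above it
# is assumed to wrap out-of-range coordinates around the grid edges, roughly:
#
#     def valida_ponto_matriz(self, x, y):
#         # toroidal wrap-around on an n x n grid (assumed behaviour)
#         return (x % self.tamanho, y % self.tamanho)
#
# with get_vizinhos(indice) presumably returning the four wrapped neighbors.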
chance_infeccao = 0.3
chance_infeccao_tipo2 = 1
chance_morte = 1
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.0
percentual_inicial_tipo2 = 0.0
sim = Simulador(
5,
percentual_inicial_tipo1,
percentual_inicial_tipo2,
chance_infeccao,
chance_infeccao_tipo2,
chance_morte,atualizacoes_cura)
#print(sim.lista_matrizes_posicionamento[0])
#print(sim.lista_infectados_tipo_2)
#print(sim.lista_infectados_tipo_1)
cmap = ListedColormap(['w', 'y', 'r', 'blue', 'black'])
while sim.dataframe.iloc[-1]['num_infect_t1']+sim.dataframe.iloc[-1]['num_infect_t2'] > 0:
plt.matshow(sim.matriz_status.toarray(), cmap = cmap, vmin= 0, vmax = 4)
#
sim.iterar()
#print(sim.dataframe.iloc[-1])
#print("xxxxxxxxxxxxxxxxxTipo: ",type(sim.lista_matrizes_posicionamento[len(sim.lista_matrizes_posicionamento)-1].toarray()))
print(sim.dataframe)
plt.show()
# for i in range(12):
# #plt.matshow(sim.lista_matrizes_status[i].toarray(), cmap = cmap, vmin= 0, vmax = 4)
# print(i)
# print("Status")
# print(sim.matriz_status.toarray())
# print("Cura")
# print(sim.matriz_atualizacoes_cura.toarray())
# sim.iterar()
# m = sim.matriz_atualizacoes_cura[sim.matriz_status == 1 or sim.matriz_status == 2].toarray()
# print(m)
#plt.show()
#print(sim.dataframe)
# print(sim.lista_infectados_tipo_1)
# print(sim.lista_infectados_tipo_2)
# sim.iterar()
# print(sim.lista_infectados_tipo_1)
# print(sim.lista_infectados_tipo_2)
# print(sim.dataframe)
# print("status inicial: ", sim.df_individuos[1][0].status)
# print("Novos infectados: ", sim.verificar_infeccao(sim.lista_infectados_tipo_1))
# plt.show()
# --- /resource_manage_sdk/api/cmdb_approve/get_history_approver_list_pb2.pyi | repo: easyopsapis/easyops-api-python | license: Apache-2.0 | Python stub (.pyi) ---
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from google.protobuf.struct_pb2 import (
Struct as google___protobuf___struct_pb2___Struct,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class GetHistoryApproverListRequest(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
@property
def query(self) -> google___protobuf___struct_pb2___Struct: ...
def __init__(self,
*,
query : typing___Optional[google___protobuf___struct_pb2___Struct] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetHistoryApproverListRequest: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetHistoryApproverListRequest: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"query",b"query"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"query",b"query"]) -> None: ...
class GetHistoryApproverListResponse(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
userList = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
def __init__(self,
*,
userList : typing___Optional[typing___Iterable[typing___Text]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetHistoryApproverListResponse: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetHistoryApproverListResponse: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"userList",b"userList"]) -> None: ...
class GetHistoryApproverListResponseWrapper(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
code = ... # type: builtin___int
codeExplain = ... # type: typing___Text
error = ... # type: typing___Text
@property
def data(self) -> GetHistoryApproverListResponse: ...
def __init__(self,
*,
code : typing___Optional[builtin___int] = None,
codeExplain : typing___Optional[typing___Text] = None,
error : typing___Optional[typing___Text] = None,
data : typing___Optional[GetHistoryApproverListResponse] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetHistoryApproverListResponseWrapper: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetHistoryApproverListResponseWrapper: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"code",b"code",u"codeExplain",b"codeExplain",u"data",b"data",u"error",b"error"]) -> None: ...