| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1–132 |
469e579b0a396a30e46ed93bc267b76bed2218c9 | b088d5dc4321f9f145c7bceb20a0b9479b374c65 | /level1&2/42883.py | 8169482655042d081bd9380cf7217e0935b0e85c | []
| no_license | heojungeun/codingtestPractice | 55bfc2b13791f5cb3133b0815991a0c696f8482c | 65d668bf6df82967f89d4ec4eb3a1e11de603729 | refs/heads/master | 2022-09-17T00:34:05.887237 | 2020-05-30T06:45:30 | 2020-05-30T06:45:30 | 261,093,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | py |
def solution(number, k):
    # import itertools
    # dig = []
    # for i in range(0, len(number)):
    #     dig.append(i)
    # dig = list(itertools.combinations(dig, k))
    # lenn = len(number)
    # arr = []
    # for x in dig:
    #     tmp = ''
    #     for i in range(lenn):
    #         if i in x:
    #             continue
    #         tmp += number[i]
    #     arr.append(int(tmp))
    # answer = str(max(arr))
    st = []
    for x in number:
        if k == 0 or not st:
            st.append(x)
        else:
            if st[-1] < x:
                tmp = reversed(st)
                for e in tmp:
                    if e < x:
                        st.pop()
                        k -= 1
                        if k == 0 or not st:
                            st.append(x)
                            break
                    else:
                        st.append(x)
                        break
            else:
                st.append(x)
    while k > 0:
        st.pop()
        k -= 1
    answer = "".join(st)
    return answer


def standardsolution(number, k):
    st = []
    for i, num in enumerate(number):
        while st and k > 0 and st[-1] < num:
            st.pop()
            k -= 1
        if k == 0:
            st += number[i:]
            break
        st.append(num)
    st = st[:-k] if k > 0 else st
    return "".join(st)
n = "12"
nk = 1
print(solution(n,nk)) | [
"[email protected]"
]
| |
33864e4c4e10988ab56cdf4ba1f15fbbd344f0e0 | d2f50124ff3bec70b9b3139ecb063b06e526781d | /despachos_mercancias/migrations/0012_auto_20170113_1639.py | 17429c15b3153edf6a6c8081aaad3e0199999d20 | []
| no_license | odecsarrollo/odecopack-componentes | e8d993f089bf53bbf3c53d1265e70ac5c06b59b8 | b583a115fb30205d358d97644c38d66636b573ff | refs/heads/master | 2022-12-12T00:33:02.874268 | 2020-08-13T18:45:01 | 2020-08-13T18:45:01 | 189,262,705 | 0 | 0 | null | 2022-12-08T11:23:46 | 2019-05-29T16:37:21 | Python | UTF-8 | Python | false | false | 618 | py |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-13 21:39
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('despachos_mercancias', '0011_enviotransportadoratcc_ciudad'),
    ]

    operations = [
        migrations.AlterField(
            model_name='enviotransportadoratcc',
            name='ciudad',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='mis_envios_tcc', to='geografia_colombia.Ciudad'),
        ),
    ]
| [
"[email protected]"
]
| |
6d9ad99200ba4e3cdf7b88a7da2787de0df12c8b | afde521f50b6be4be9e5c3071ed6459419fb5edb | /env/lib/python3.6/site-packages/pyecharts/charts/scatter3D.py | 35198ac9849fd58ec18641bff9956994438195d7 | []
| no_license | guhongcheng/myblog | ddef4aa0888dedfb70933b34bfd0c5da5bb5d5cd | b11f5ee26125b9551b1f27814b96a845dd4e6a76 | refs/heads/master | 2022-12-18T20:26:46.596014 | 2018-07-26T02:46:07 | 2018-07-26T02:46:07 | 134,683,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,712 | py |
# coding=utf-8
from pyecharts.chart import Chart
import pyecharts.constants as constants


class Scatter3D(Chart):
    """
    <<< 3D scatter chart >>>
    """

    def __init__(self, title="", subtitle="", **kwargs):
        kwargs["renderer"] = constants.CANVAS_RENDERER
        super(Scatter3D, self).__init__(title, subtitle, **kwargs)
        self._js_dependencies.add("echartsgl")

    def add(self, *args, **kwargs):
        self.__add(*args, **kwargs)

    def __add(self, name, data, grid3d_opacity=1, **kwargs):
        """
        :param name:
            Series name, used in the tooltip display and for legend filtering.
        :param data:
            Data items: each row is one data item, each column belongs to one
            dimension.
        :param grid3d_opacity:
            Opacity of the points in the 3D Cartesian grid; defaults to 1,
            fully opaque.
        :param kwargs:
        """
        kwargs.update(
            xaxis3d_type="value", yaxis3d_type="value", zaxis3d_type="value"
        )
        chart = self._get_all_options(**kwargs)
        self._option.get("legend")[0].get("data").append(name)
        self._option.update(
            xAxis3D=chart["xaxis3D"],
            yAxis3D=chart["yaxis3D"],
            zAxis3D=chart["zaxis3D"],
            grid3D=chart["grid3D"],
        )
        self._option.get("series").append(
            {
                "type": "scatter3D",
                "name": name,
                "data": data,
                "label": chart["label"],
                "itemStyle": {"opacity": grid3d_opacity},
            }
        )
        self._config_components(**kwargs)
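
# Minimal usage sketch (an added illustration, not part of the original
# file; it assumes the pyecharts 0.x API that this class targets, where
# Chart subclasses provide a render() method):
#
#     chart = Scatter3D("3D scatter demo")
#     data = [[1, 2, 3], [4, 5, 6]]   # rows of [x, y, z] values
#     chart.add("demo series", data, grid3d_opacity=0.9)
#     chart.render()                  # writes render.html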
| [
"[email protected]"
]
| |
dba87a9b580d39b7e8694daed7b9a5cb06a8db56 | 998a180e5c974d89c9ad33532d4fd33298c806a4 | /chapter1_arrays_and_strings/palindrome_permutation_1_4.py | 9fff45e7b07a41cdbf04a5422ddc172fcfa0d501 | []
| no_license | Taycode/cracking-the-coding-interview-solutions | c542a047a37b5af406469ba3f912b4bbdc326b05 | 0c2dcc4d4558dc4766b5ddcce470a60986eb39a6 | refs/heads/master | 2023-02-08T16:31:59.683541 | 2020-12-27T16:59:12 | 2020-12-27T16:59:12 | 324,807,557 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 957 | py |
"""
Given a string, write a function to check if it is a permutation of a
palindrome. A palindrome is a word or phrase that is the same forwards and
backwards. A permutation is a rearrangement of letters. The palindrome does
not need to be limited to just dictionary words.

EXAMPLE
Input: Tact Coa
Output: True (permutations: "taco cat", "atco eta", etc.)
"""


def palindrome_permutation(string):
    """
    :param string: string
    :return: boolean
    """
    the_dict = {}
    string = string.replace(' ', '')
    string = string.lower()
    for _ in string:
        if _ not in the_dict.keys():
            the_dict[_] = 1
        else:
            the_dict[_] += 1

    values = list(the_dict.values())
    length = len(string)
    if length % 2 == 0:
        for _ in values:
            if _ % 2 != 0:
                return False
        else:
            return True
    else:
        count = 0
        for _ in values:
            if _ % 2 != 0:
                count += 1
        if count > 1:
            return False
        else:
            return True


print(palindrome_permutation('Tact Coa'))
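
# The same check in compact form (an added illustration, not part of the
# CtCI solution above; it relies only on the standard library):
# from collections import Counter
# def palindrome_permutation_compact(s):
#     counts = Counter(s.replace(' ', '').lower())
#     return sum(v % 2 for v in counts.values()) <= 1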
| [
"[email protected]"
]
| |
ce37c43e76430750154401851a00fca84140d317 | abd9537f8b90a990e195ded5f9fafdcc108d2a48 | /swea/d4/1486/1486_shelf_powerset.py | 487056a865e5b4c70509b2d17d0851b107ba7e2c | []
| no_license | ohdnf/algorithms | 127171744631406c1d08cc2583aa569a094fa2cd | 6f286753dab827facc436af4f2130f11dad2d44f | refs/heads/master | 2023-08-09T11:19:56.445351 | 2021-08-31T13:11:46 | 2021-08-31T13:11:46 | 236,180,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py |
import sys
sys.stdin = open('input.txt')

t = int(input())
for test_case in range(1, t+1):
    n, b = map(int, input().split())    # number of clerks, shelf height
    clerks = list(map(int, input().split()))
    # clerks.sort(reverse=True)
    heights = list()
    for i in range(1 << n):
        tmp = 0
        for j in range(n+1):
            if i & (1 << j):
                tmp += clerks[j]
        if tmp >= b:
            heights.append(tmp - b)
    heights.sort()
    print('#{} {}'.format(test_case, heights[0]))
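
# Added note on the technique: each i in range(1 << n) encodes one subset of
# the n clerks, where bit j of i tells whether clerks[j] is stacked. The
# bound range(n+1) tests one extra bit that is never set for i < 2**n, so
# range(n) would suffice. The answer is the smallest overshoot tmp - b over
# all subsets whose combined height reaches b.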
| [
"[email protected]"
]
| |
7f2df471b94bb54376e154486267ebd828d91fe3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_deepens.py | 5ee1f3387e1e9e28ab6fb75803b9751b7df84712 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
# class header
class _DEEPENS():

    def __init__(self,):
        self.name = "DEEPENS"
        self.definitions = 'deepen'
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['deepen']
| [
"[email protected]"
]
| |
809cb39be1c498b2dc3381f28cb369e0fa000dd1 | d404fb72dee51f8c2791bf21cc5d9ee91d2d6a45 | /ch03_if/0118_grade.py | 994ed4ab4c6a92af05a7cb316a7605ce19cac7b7 | []
| no_license | kangwonlee/18pf_welcome_template | 6c5c997e7aac08d8a7d94d4a146037c2d3b4a813 | 9279559c7cde37a18b8e1d5e596f161087493218 | refs/heads/master | 2021-04-12T07:52:29.577562 | 2018-03-18T21:29:28 | 2018-03-18T21:29:28 | 125,769,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py |
score = int(input("Enter the score: "))

if score >= 90:
    print("Grade A")
elif score >= 80:
    print("Grade B")
elif score >= 70:
    print("Grade C")
elif score >= 60:
    print("Grade D")
else:
    print("Grade F")
| [
"[email protected]"
]
| |
12e01a17bd141b1e82d95006b641e5bb0343d272 | 484c462c29e3c2f8ac280b79c11db6982c6a8ca6 | /neurolab-0.2.3/neurolab/__init__.py | 1c9445506114073da6f75ac4c16ca8634f996e27 | []
| no_license | thelma1944/Python_Stuff | b5fa53bf008bb5e865204201b144fe20e7f87565 | 077131a2c9f247396dca86fdf18933d38ae8d501 | refs/heads/master | 2021-06-05T12:25:35.779070 | 2020-10-03T18:20:16 | 2020-10-03T18:20:16 | 16,077,931 | 0 | 1 | null | 2021-03-26T00:30:14 | 2014-01-20T17:36:16 | Python | UTF-8 | Python | false | false | 1,754 | py | # -*- coding: utf-8 -*-
"""
Neurolab is a simple and powerful Neural Network Library for Python.
Contains basic neural networks, training algorithms and a flexible framework
to create and explore other neural network types.

:Features:

    - Pure python + numpy
    - API like Neural Network Toolbox (NNT) from MATLAB
    - Interface to use training algorithms from scipy.optimize
    - Flexible network configurations and learning algorithms. You may
      change: training, error, initialization and activation functions
    - Unlimited number of neural layers and number of neurons in layers
    - Variety of supported types of Artificial Neural Network and learning
      algorithms
:Example:
>>> import numpy as np
>>> import neurolab as nl
>>> # Create train samples
>>> input = np.random.uniform(-0.5, 0.5, (10, 2))
>>> target = (input[:, 0] + input[:, 1]).reshape(10, 1)
>>> # Create network with 2 inputs, 5 neurons in input layer and 1 in output layer
>>> net = nl.net.newff([[-0.5, 0.5], [-0.5, 0.5]], [5, 1])
>>> # Train process
>>> err = net.train(input, target, show=15)
Epoch: 15; Error: 0.150308402918;
Epoch: 30; Error: 0.072265865089;
Epoch: 45; Error: 0.016931355131;
The goal of learning is reached
>>> # Test
>>> net.sim([[0.2, 0.1]]) # 0.2 + 0.1
array([[ 0.28757596]])
:Links:
- `Home Page <http://code.google.com/p/neurolab/>`_
- `PyPI Page <http://pypi.python.org/pypi/neurolab>`_
- `Documentation <http://packages.python.org/neurolab/>`_
- `Examples <http://packages.python.org/neurolab/example.html>`_
"""
import net
from tool import load
__version__ = '0.2.3'
# Development Status :: 1 - Planning, 2 - Pre-Alpha, 3 - Alpha,
# 4 - Beta, 5 - Production/Stable
__status__ = '4 - Beta'
| [
"[email protected]"
]
| |
d49b8dd94000b3cb7de0d0de656972db01f76896 | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba0150.pngMap.py | 12eae37b66b5a10110de9f226bbdc8418a2818d0 | []
| no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba0150.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000011111110000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000011111100000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111110000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000001011111111111111000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011011111111111111111111110000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000010111111111111111111111111111111100000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111100000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111110000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111110000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111100000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111110000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000101111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111100000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111011111000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000001011111111111111111111111111101111100000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111100011000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111001101100000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000010111111111111111111111111111100000010000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111100010000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111110000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111010100000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111000000000000000000000000000',
'00000000000000000000000000000000000000000011111000000000000000000000000000101111111111111111111110011100000000000000000000000000',
'00000000000000000000000000000000000000000011111110000000000000000000000000001111111111111111111111011100000000000000000000000000',
'00000000000000000000000000000000000001011111111111000000000000000000000011111111111111111111111111001000000000000000000000000000',
'00000000000000000000000000000000000011111111111111000000000000000000000011111111111111111111111111011100000000000000000000000000',
'00000000000000000000000000000000000011111111111110000000000000000000000011111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000011111111111110000000000000000000001011111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000011111111111111000000000000000000000111111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000011111111111111110000000000000000001111111111111111111111111111111110000000000000000000000000',
'00000000000000000000000000000000000000111111111111111111111111111100111111111111111111111111111111111100000000000000000000000000',
'00000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111111110000000000000000000000000',
'00000000000000000000000000000000000000000011111011111111111111111111111111111111111111111111111111111100001100000000000000000000',
'00000000000000000000000000000000000000000000011011111111111111111111111111111111111111111111111111111110011000000000000000000000',
'00000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111111100000000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111000000000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111100000000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111110000000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111000000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111100000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111110000000000000000000',
'00000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111111000000000000000000',
]
| [
"[email protected]"
]
| |
14575f006fa799ab2f3698289711bf9ad024a62a | 86813bf514f3e0257f92207f40a68443f08ee44b | /0338 比特位计数/0338 比特位计数.py | 96ff96f4ec3d4463f1b79f1a90552912d3e21da3 | []
| no_license | Aurora-yuan/Leetcode_Python3 | 4ce56679b48862c87addc8cd870cdd525c9d926c | 720bb530850febc2aa67a56a7a0b3a85ab37f415 | refs/heads/master | 2021-07-12T13:23:19.399155 | 2020-10-21T03:14:36 | 2020-10-21T03:14:36 | 212,998,500 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py |
#label: math/dynamic programming difficulty: medium
from typing import List

"""
Approach 1:
The naive way: convert each number to binary and count the 1 bits.
"""
class Solution:
    def countBits(self, num: int) -> List[int]:
        res = list()
        for i in range(num+1):
            res.append(bin(i).count('1'))
        return res

"""
Approach 2:
A result noted in "Coding Interviews" (剑指Offer): if a number i is ANDed
with i - 1, the rightmost 1 in i's binary representation becomes 0.
Apply dynamic programming: if we already know cnt, the number of 1 bits of
i & (i - 1), then by the result above i contains cnt + 1 one bits.
This yields the transition: dp[i] = dp[i & (i - 1)] + 1
"""
class Solution(object):
    def countBits(self, num):
        """
        :type num: int
        :rtype: List[int]
        """
        dp = [0 for i in range(num + 1)]
        for i in range(1, num + 1):
            dp[i] = dp[i & (i - 1)] + 1
        return dp
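
# Illustrative check (added note, not part of the original solution):
# Solution().countBits(5) -> [0, 1, 1, 2, 1, 2]; e.g.
# dp[5] = dp[5 & 4] + 1 = dp[4] + 1 = (dp[4 & 3] + 1) + 1 = 2,
# matching bin(5) == '0b101'.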
| [
"[email protected]"
]
| |
0eefce36ea159a3ee01e3a8648d44a932052a570 | 679e31fe16e92e1d0bc3448c25845103f19a622f | /web_flask/3-python_route.py | 96ec3a910bab88bd161ba28e53c1573167ff9a05 | []
| no_license | Gikaro/AirBnB_clone_v2 | ab7d63ce3e942253ded54d30d68c631eb055308c | 5744e747f2fdb722d7e6843bd1e4a67abf9c8243 | refs/heads/master | 2023-03-20T01:34:49.172584 | 2020-09-02T23:22:39 | 2020-09-02T23:22:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py |
#!/usr/bin/python3
"""WebFlask module"""
from flask import Flask
from os import environ

app = Flask(__name__)


@app.route('/', strict_slashes=False)
def hello_route():
    """Display Hello HBNB"""
    return 'Hello HBNB!'


@app.route('/hbnb', strict_slashes=False)
def hbnb_route():
    """Display HBNB"""
    return 'HBNB'


@app.route('/c/<text>', strict_slashes=False)
def c_route(text):
    """Display text"""
    real_text = text.replace('_', ' ')
    return 'C {}'.format(real_text)


@app.route('/python/', strict_slashes=False)
@app.route('/python/<text>', strict_slashes=False)
def python_route(text='is cool'):
    """Display text"""
    real_text = text.replace('_', ' ')
    return 'Python {}'.format(real_text)


if __name__ == '__main__':
    environ['FLASK_APP'] = __file__
    app.run(host='0.0.0.0', port=5000)
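
# Example requests handled by the routes above (added illustration):
#   GET /              -> "Hello HBNB!"
#   GET /hbnb          -> "HBNB"
#   GET /c/is_fun      -> "C is fun"
#   GET /python/       -> "Python is cool"
#   GET /python/rocks  -> "Python rocks"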
| [
"[email protected]"
]
| |
1e80f050379c3620ecae456eef6480ff547b77d4 | 13f5c66af02a64aa8c5d988e9560b82bcf058fd0 | /learning_sql/views.py | 8cec81152d26fe1d92d386c49f799cf9269d320b | []
| no_license | heitorchang/reading-list | a1090b969d0f16cbc7c0e371671e85dca0bde201 | 3dcfd68cb02179e75216ff459fda693ec1fb8684 | refs/heads/master | 2023-04-27T03:04:28.122341 | 2023-04-21T14:04:20 | 2023-04-21T14:04:20 | 67,825,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py |
# p. 249

# cnx is assumed to be a database connection object defined elsewhere in
# this module.

def create_totals_vw():
    cursor = cnx.cursor()
    cursor.execute("""
    CREATE VIEW customer_totals_vw
    (cust_id,
     cust_type_cd,
     cust_name,
     num_accounts,
     tot_deposits
    )
    AS
    SELECT cst.cust_id, cst.cust_type_cd,
      CASE
        WHEN cst.cust_type_cd = 'B' THEN
          (SELECT bus.name FROM business AS bus WHERE bus.cust_id = cst.cust_id)
        ELSE
          (SELECT concat(ind.fname, ' ', ind.lname)
           FROM individual AS ind
           WHERE ind.cust_id = cst.cust_id)
      END AS cust_name,
      SUM(CASE WHEN act.status = 'ACTIVE' THEN 1 ELSE 0 END) AS tot_active_accounts,
      SUM(CASE WHEN act.status = 'ACTIVE' THEN act.avail_balance ELSE 0 END) AS tot_balance
    FROM customer AS cst INNER JOIN account AS act
      ON act.cust_id = cst.cust_id
    GROUP BY cst.cust_id, cst.cust_type_cd;""")
    cursor.close()


def create_totals_tbl():
    cursor = cnx.cursor()
    # NOTE: creating this table freezes data; new data will not be reflected
    cursor.execute("""
    CREATE TABLE customer_totals
    AS
    SELECT * FROM customer_totals_vw;""")
    cursor.close()
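
# Usage sketch (added illustration; it assumes cnx is an open DB-API
# connection, e.g. from mysql.connector.connect(...)):
#
#     create_totals_vw()     # define the view once
#     create_totals_tbl()    # snapshot it into a table (data is frozen)
#     cur = cnx.cursor()
#     cur.execute("SELECT cust_name, tot_balance FROM customer_totals")
#     print(cur.fetchall())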
| [
"[email protected]"
]
| |
799f27d7bd6278066b4a0c11259c76d826704d80 | 48e9d0e84238daf0de290551e3588e9ff3f49549 | /calculadora.py | 43fadb96364fb7c2a09a91ee806895c89e916e0c | []
| no_license | celord/PythonGreencore | 9606af569738703b66d80bce6e423c9a313fa539 | 259aadcc346203f8092f6c6d286e3fca2e9fc550 | refs/heads/master | 2020-05-30T23:18:15.542876 | 2019-06-19T14:39:59 | 2019-06-19T14:39:59 | 190,014,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py |
def Menu():
    print("""*****************
    Calculator
    ************
    Menu
    1) Addition
    2) Subtraction
    3) Multiplication
    4) Division
    """)


def Calculadora():
    """Function to perform arithmetic operations"""
    Menu()
    opc = int(input("Select an option \n"))
    while (opc > 0 and opc < 5):
        x = int(input("Enter a number\n"))
        y = int(input("Enter another number\n"))
        if (opc == 1):
            print("The sum is: ", x + y)
            opc = int(input("Select an option"))
        elif (opc == 2):
            print("The difference is:", x - y)
            opc = int(input("Select an option"))
        elif (opc == 3):
            print("The product is:", x * y)
            opc = int(input("Select an option"))
        elif (opc == 4):
            try:
                print("The quotient is: ", x / y)
                opc = int(input("Select an option"))
            except ZeroDivisionError:
                print("Division by zero is not allowed")
                opc = int(input("Select an option"))


Calculadora()
| [
"[email protected]"
]
| |
34e03ed4d72971d6ba7816fbfd77917897ceb6db | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_103/ch81_2020_04_08_14_08_30_221505.py | 3d1850c8883fa9998cbf766d0212133a37e9b36c | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py |
def interseccao_valores(dicio1, dicio2):
    lista = []
    for valor in dicio1.values():
        for valor2 in dicio2.values():
            if valor == valor2:
                lista.append(valor)
    return lista
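
# Added note: the function returns the values present in both dicts, with
# repeats when a value occurs several times; e.g.
# interseccao_valores({'a': 1, 'b': 2}, {'x': 2, 'y': 3}) -> [2]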
| [
"[email protected]"
]
| |
cc8073a006724d4c3a463c9da8af11bbef0e2d5c | 19136335b7e88324546fdfed45b4d0b22042202c | /rplugin/python3/deoplete/filter/converter_truncate_menu.py | 90a331d0f918b01825f96575468fc8be3376b89e | [
"MIT"
]
| permissive | nholik/deoplete.nvim | 3074fa3cdd5a8a2df5f300d0ac74fedde6555fdf | 614cd3ddf1f352c977f3405e809d967093571117 | refs/heads/master | 2020-05-27T18:05:59.540419 | 2019-05-26T22:26:41 | 2019-05-26T22:26:41 | 188,736,112 | 0 | 0 | NOASSERTION | 2019-05-26T22:06:01 | 2019-05-26T22:06:01 | null | UTF-8 | Python | false | false | 1,034 | py |
# ============================================================================
# FILE: converter_truncate_menu.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================

from deoplete.base.filter import Base
from deoplete.util import truncate_skipping


class Filter(Base):

    def __init__(self, vim):
        super().__init__(vim)

        self.name = 'converter_truncate_menu'
        self.description = 'truncate menu converter'

    def filter(self, context):
        max_width = context['max_menu_width']
        if not context['candidates'] or 'menu' not in context[
                'candidates'][0] or max_width <= 0:
            return context['candidates']

        footer_width = max_width / 3
        for candidate in context['candidates']:
            candidate['menu'] = truncate_skipping(
                candidate.get('menu', ''),
                max_width, '..', footer_width)

        return context['candidates']
| [
"[email protected]"
]
| |
e923e894f298de0501742da399ba096237404c13 | 6aa9fdff566a2ca384ed1b1db6933a415581bc22 | /backend/isasatec_23315/wsgi.py | 0442c9c343131871e14516b7d974bda02284aece | []
| no_license | crowdbotics-apps/isasatec-23315 | ae10c4ecde97b30cde72a627d65354b666ddb32c | 6a0a3bfddbba71ac7ee6256ffd1de0f7b3e565e7 | refs/heads/master | 2023-01-28T16:38:25.528446 | 2020-12-11T15:56:50 | 2020-12-11T15:56:50 | 320,553,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for isasatec_23315 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "isasatec_23315.settings")
application = get_wsgi_application()
| [
"[email protected]"
]
| |
74bc608d4b97f1b7c0df621bcabed6cd361d7dbc | dd0b0df88a08a4f4ab249c76cf0ea82482ff37bb | /sfepy/terms/terms.py | 41157ad7b967a814fae63a5692dccc27c5c414df | [
"BSD-3-Clause"
]
| permissive | mfkiwl/sfepy | 43e3a2cbed240b8ef387a8ab9037c1f6fd19e0fe | 67275845da49e772b2f8faaa48df165893f2be16 | refs/heads/master | 2021-01-18T07:59:19.289053 | 2013-10-31T09:54:55 | 2013-10-31T09:54:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,287 | py |
import re
from copy import copy

import numpy as nm

from sfepy.base.base import (as_float_or_complex, get_default, assert_,
                             Container, Struct, basestr, goptions)
from sfepy.base.compat import in1d

# Used for imports in term files.
from sfepy.terms.extmods import terms

from sfepy.linalg import split_range

_match_args = re.compile('^([^\(\}]*)\((.*)\)$').match
_match_virtual = re.compile('^virtual$').match
_match_state = re.compile('^state(_[_a-zA-Z0-9]+)?$').match
_match_parameter = re.compile('^parameter(_[_a-zA-Z0-9]+)?$').match
_match_material = re.compile('^material(_[_a-zA-Z0-9]+)?$').match
_match_material_opt = re.compile('^opt_material(_[_a-zA-Z0-9]+)?$').match
_match_material_root = re.compile('(.+)\.(.*)').match

def get_arg_kinds(arg_types):
    """
    Translate `arg_types` of a Term to a canonical form.

    Parameters
    ----------
    arg_types : tuple of strings
        The term argument types, as given in the `arg_types` attribute.

    Returns
    -------
    arg_kinds : list of strings
        The argument kinds - one of 'virtual_variable', 'state_variable',
        'parameter_variable', 'opt_material', 'user'.
    """
    arg_kinds = []
    for ii, arg_type in enumerate(arg_types):
        if _match_virtual(arg_type):
            arg_kinds.append('virtual_variable')

        elif _match_state(arg_type):
            arg_kinds.append('state_variable')

        elif _match_parameter(arg_type):
            arg_kinds.append('parameter_variable')

        elif _match_material(arg_type):
            arg_kinds.append('material')

        elif _match_material_opt(arg_type):
            arg_kinds.append('opt_material')
            if ii > 0:
                msg = 'opt_material at position %d, must be at 0!' % ii
                raise ValueError(msg)

        else:
            arg_kinds.append('user')

    return arg_kinds
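
# Illustrative example (added note, not part of the original source):
# get_arg_kinds(('opt_material', 'virtual', 'state')) returns
# ['opt_material', 'virtual_variable', 'state_variable'].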
def get_shape_kind(integration):
    """
    Get data shape kind for given integration type.
    """
    if integration == 'surface':
        shape_kind = 'surface'

    elif integration in ('volume', 'surface_extra'):
        shape_kind = 'volume'

    elif integration == 'point':
        shape_kind = 'point'

    else:
        raise NotImplementedError('unsupported term integration! (%s)'
                                  % integration)

    return shape_kind

def split_complex_args(args):
    """
    Split complex arguments to real and imaginary parts.

    Returns
    -------
    newargs : dictionary
        Dictionary with lists corresponding to `args` such that each
        argument of numpy.complex128 data type is split to its real and
        imaginary part. The output depends on the number of complex
        arguments in 'args':

        - 0: list (key 'r') identical to input one
        - 1: two lists with keys 'r', 'i' corresponding to real
          and imaginary parts
        - 2: output dictionary contains four lists:

          - 'r' - real(arg1), real(arg2)
          - 'i' - imag(arg1), imag(arg2)
          - 'ri' - real(arg1), imag(arg2)
          - 'ir' - imag(arg1), real(arg2)
    """
    newargs = {}
    cai = []

    for ii, arg in enumerate(args):
        if isinstance(arg, nm.ndarray) and (arg.dtype == nm.complex128):
            cai.append(ii)

    if len(cai) > 0:
        newargs['r'] = list(args[:])
        newargs['i'] = list(args[:])

        arg1 = cai[0]
        newargs['r'][arg1] = args[arg1].real.copy()
        newargs['i'][arg1] = args[arg1].imag.copy()

        if len(cai) == 2:
            arg2 = cai[1]

            newargs['r'][arg2] = args[arg2].real.copy()
            newargs['i'][arg2] = args[arg2].imag.copy()

            newargs['ri'] = list(args[:])
            newargs['ir'] = list(args[:])

            newargs['ri'][arg1] = newargs['r'][arg1]
            newargs['ri'][arg2] = newargs['i'][arg2]
            newargs['ir'][arg1] = newargs['i'][arg1]
            newargs['ir'][arg2] = newargs['r'][arg2]

        elif len(cai) > 2:
            raise NotImplementedError('more than 2 complex arguments! (%d)'
                                      % len(cai))

    else:
        newargs['r'] = args[:]

    return newargs
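
# Illustrative example (added note): with a single complex argument,
# split_complex_args([nm.array([1.0 + 2.0j]), 3.0]) returns
# {'r': [array([1.]), 3.0], 'i': [array([2.]), 3.0]}.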
def vector_chunk_generator(total_size, chunk_size, shape_in,
                           zero=False, set_shape=True, dtype=nm.float64):
    if not chunk_size:
        chunk_size = total_size
    shape = list(shape_in)

    sizes = split_range(total_size, chunk_size)
    ii = nm.array(0, dtype=nm.int32)
    for size in sizes:
        chunk = nm.arange(size, dtype=nm.int32) + ii
        if set_shape:
            shape[0] = size
        if zero:
            out = nm.zeros(shape, dtype=dtype)
        else:
            out = nm.empty(shape, dtype=dtype)
        yield out, chunk
        ii += size
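
# For instance (added note, assuming split_range(5, 2) yields [2, 2, 1]):
# vector_chunk_generator(5, 2, [0, 3]) produces arrays of shape (2, 3),
# (2, 3) and (1, 3), paired with index chunks [0 1], [2 3] and [4].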
def create_arg_parser():
    from pyparsing import Literal, Word, delimitedList, Group, \
         StringStart, StringEnd, Optional, nums, alphas, alphanums

    inumber = Word("+-"+nums, nums)

    history = Optional(Literal('[').suppress() + inumber
                       + Literal(']').suppress(), default=0)("history")
    history.setParseAction(lambda str, loc, toks: int(toks[0]))

    variable = Group(Word(alphas, alphanums + '._') + history)

    derivative = Group(Literal('d') + variable\
                       + Literal('/').suppress() + Literal('dt'))

    trace = Group(Literal('tr') + Literal('(').suppress() + variable \
                  + Literal(')').suppress())

    generalized_var = derivative | trace | variable

    args = StringStart() + delimitedList(generalized_var) + StringEnd()

    return args

# 22.01.2006, c
class CharacteristicFunction(Struct):

    def __init__(self, region):
        self.igs = region.igs
        self.region = region
        self.local_chunk = None
        self.ig = None

    def __call__(self, chunk_size, shape_in, zero=False, set_shape=True,
                 ret_local_chunk=False, dtype=nm.float64):
        els = self.region.get_cells(self.ig)
        for out, chunk in vector_chunk_generator(els.shape[0], chunk_size,
                                                 shape_in, zero, set_shape,
                                                 dtype):
            self.local_chunk = chunk

            if ret_local_chunk:
                yield out, chunk
            else:
                yield out, els[chunk]

        self.local_chunk = None

    def set_current_group(self, ig):
        self.ig = ig

    def get_local_chunk(self):
        return self.local_chunk
class ConnInfo(Struct):

    def get_region(self, can_trace=True):
        if self.is_trace and can_trace:
            return self.region.get_mirror_region()[0]
        else:
            return self.region

    def get_region_name(self, can_trace=True):
        if self.is_trace and can_trace:
            reg = self.region.get_mirror_region()[0]
        else:
            reg = self.region

        if reg is not None:
            return reg.name
        else:
            return None

    def iter_igs(self):
        if self.region is not None:
            for ig in self.region.igs:
                if self.virtual_igs is not None:
                    ir = self.virtual_igs.tolist().index(ig)
                    rig = self.virtual_igs[ir]
                else:
                    rig = None

                if not self.is_trace:
                    ii = ig
                else:
                    ig_map_i = self.region.get_mirror_region()[2]
                    ii = ig_map_i[ig]

                if self.state_igs is not None:
                    ic = self.state_igs.tolist().index(ii)
                    cig = self.state_igs[ic]
                else:
                    cig = None

                yield rig, cig

        else:
            yield None, None
class Terms(Container):

    @staticmethod
    def from_desc(term_descs, regions, integrals=None):
        """
        Create terms, assign each term its region.
        """
        from sfepy.terms import term_table

        terms = Terms()
        for td in term_descs:
            try:
                constructor = term_table[td.name]
            except:
                msg = "term '%s' is not in %s" % (td.name,
                                                  sorted(term_table.keys()))
                raise ValueError(msg)

            try:
                region = regions[td.region]
            except IndexError:
                raise KeyError('region "%s" does not exist!' % td.region)

            term = Term.from_desc(constructor, td, region, integrals=integrals)
            terms.append(term)

        return terms

    def __init__(self, objs=None):
        Container.__init__(self, objs=objs)

        self.update_expression()

    def insert(self, ii, obj):
        Container.insert(self, ii, obj)
        self.update_expression()

    def append(self, obj):
        Container.append(self, obj)
        self.update_expression()

    def update_expression(self):
        self.expression = []
        for term in self:
            aux = [term.sign, term.name, term.arg_str,
                   term.integral_name, term.region.name]
            self.expression.append(aux)

    def __mul__(self, other):
        out = Terms()
        for name, term in self.iteritems():
            out.append(term * other)

        return out

    def __rmul__(self, other):
        return self * other

    def __add__(self, other):
        if isinstance(other, Term):
            out = self.copy()
            out.append(other)

        elif isinstance(other, Terms):
            out = Terms(self._objs + other._objs)

        else:
            raise ValueError('cannot add Terms with %s!' % other)

        return out

    def __radd__(self, other):
        return self + other

    def __sub__(self, other):
        if isinstance(other, Term):
            out = self + (-other)

        elif isinstance(other, Terms):
            out = self + (-other)

        else:
            raise ValueError('cannot subtract Terms with %s!' % other)

        return out

    def __rsub__(self, other):
        return -self + other

    def __pos__(self):
        return self

    def __neg__(self):
        return -1.0 * self

    def setup(self):
        for term in self:
            term.setup()

    def assign_args(self, variables, materials, user=None):
        """
        Assign all term arguments.
        """
        for term in self:
            term.assign_args(variables, materials, user)

    def get_variable_names(self):
        out = []
        for term in self:
            out.extend(term.get_variable_names())
        return list(set(out))

    def get_material_names(self):
        out = []
        for term in self:
            out.extend(term.get_material_names())
        return list(set(out))

    def get_user_names(self):
        out = []
        for term in self:
            out.extend(term.get_user_names())
        return list(set(out))

    def set_current_group(self, ig):
        for term in self:
            term.char_fun.set_current_group(ig)
class Term(Struct):
    name = ''
    arg_types = ()
    arg_shapes = {}
    integration = 'volume'
    geometries = ['2_3', '2_4', '3_4', '3_8']

    @staticmethod
    def new(name, integral, region, **kwargs):
        from sfepy.terms import term_table

        arg_str = _match_args(name)
        if arg_str is not None:
            name, arg_str = arg_str.groups()

        else:
            raise ValueError('bad term syntax! (%s)' % name)

        if name in term_table:
            constructor = term_table[name]

        else:
            msg = "term '%s' is not in %s" % (name, sorted(term_table.keys()))
            raise ValueError(msg)

        obj = constructor(name, arg_str, integral, region, **kwargs)
        return obj

    @staticmethod
    def from_desc(constructor, desc, region, integrals=None):
        from sfepy.fem import Integrals

        if integrals is None:
            integrals = Integrals()

        obj = constructor(desc.name, desc.args, None, region)
        obj.set_integral(integrals.get(desc.integral, obj.get_integral_info()))
        obj.sign = desc.sign

        return obj

    def __init__(self, name, arg_str, integral, region, **kwargs):
        self.name = name
        self.arg_str = arg_str
        self.region = region
        self._kwargs = kwargs
        self._integration = self.integration
        self.sign = 1.0

        self.set_integral(integral)

    def __mul__(self, other):
        try:
            mul = as_float_or_complex(other)

        except ValueError:
            raise ValueError('cannot multiply Term with %s!' % other)

        out = self.copy(name=self.name)

        out.sign = mul * self.sign

        return out

    def __rmul__(self, other):
        return self * other

    def __add__(self, other):
        if isinstance(other, Term):
            out = Terms([self, other])

        else:
            out = NotImplemented

        return out

    def __sub__(self, other):
        if isinstance(other, Term):
            out = Terms([self, -1.0 * other])

        else:
            out = NotImplemented

        return out

    def __pos__(self):
        return self

    def __neg__(self):
        out = -1.0 * self
        return out
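
    # Example of the operator algebra above (added note, not in the original
    # source): for Term instances t1 and t2, the expression 2.0 * t1 - t2
    # builds a Terms container holding t1 with sign 2.0 and t2 with sign -1.0.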
    def set_integral(self, integral):
        """
        Set the term integral.
        """
        self.integral = integral
        if self.integral is not None:
            self.integral_name = self.integral.name

            kind = self.get_integral_info()
            if kind != integral.kind:
                msg = "integral kind for term %s must be '%s'! (is '%s')" \
                      % (self.name, kind, integral.kind)
                raise ValueError(msg)

    def setup(self):
        self.char_fun = CharacteristicFunction(self.region)
        self.function = Struct.get(self, 'function', None)

        self.step = 0
        self.dt = 1.0
        self.is_quasistatic = False
        self.has_integral = True
        self.has_region = True

        self.itype = itype = None
        aux = re.compile('([a-z]+)_.*').match(self.name)
        if aux:
            itype = aux.group(1)
        self.raw_itype = itype

        self.setup_formal_args()

        if self._kwargs:
            self.setup_args(**self._kwargs)

        else:
            self.args = []

    def setup_formal_args(self):
        self.arg_names = []
        self.arg_steps = {}
        self.arg_derivatives = {}
        self.arg_traces = {}

        parser = create_arg_parser()
        self.arg_desc = parser.parseString(self.arg_str)

        for arg in self.arg_desc:
            trace = False
            derivative = None

            if isinstance(arg[1], int):
                name, step = arg

            else:
                kind = arg[0]
                name, step = arg[1]
                if kind == 'd':
                    derivative = arg[2]

                elif kind == 'tr':
                    trace = True

            match = _match_material_root(name)
            if match:
                name = (match.group(1), match.group(2))

            self.arg_names.append(name)
            self.arg_steps[name] = step
            self.arg_derivatives[name] = derivative
            self.arg_traces[name] = trace

    def setup_args(self, **kwargs):
        self._kwargs = kwargs

        self.args = []
        for arg_name in self.arg_names:
            if isinstance(arg_name, basestr):
                self.args.append(self._kwargs[arg_name])

            else:
                self.args.append((self._kwargs[arg_name[0]], arg_name[1]))

        self.classify_args()
        self.check_args()

    def __call__(self, diff_var=None, chunk_size=None, **kwargs):
        """
        Subclasses either implement __call__ or plug in a proper _call().
        """
        return self._call(diff_var, chunk_size, **kwargs)

    def _call(self, diff_var=None, chunk_size=None, **kwargs):
        msg = 'base class method "_call" called for %s' \
              % self.__class__.__name__
        raise RuntimeError(msg)
    def assign_args(self, variables, materials, user=None):
        """
        Check term argument existence in variables, materials, user data
        and assign the arguments to terms. Also check compatibility of
        field and term subdomain lists (igs).
        """
        if user is None:
            user = {}

        kwargs = {}
        for arg_name in self.arg_names:
            if isinstance(arg_name, basestr):
                if arg_name in variables.names:
                    kwargs[arg_name] = variables[arg_name]

                elif arg_name in user:
                    kwargs[arg_name] = user[arg_name]

                else:
                    raise ValueError('argument %s not found!' % arg_name)

            else:
                arg_name = arg_name[0]
                if arg_name in materials.names:
                    kwargs[arg_name] = materials[arg_name]

                else:
                    raise ValueError('material argument %s not found!'
                                     % arg_name)

        self.setup_args(**kwargs)

    def classify_args(self):
        """
        Classify types of the term arguments and find matching call
        signature.

        A state variable can be in place of a parameter variable and
        vice versa.
        """
        self.names = Struct(name='arg_names',
                            material=[], variable=[], user=[],
                            state=[], virtual=[], parameter=[])

        # Prepare for 'opt_material' - just prepend a None argument if needed.
        if isinstance(self.arg_types[0], tuple):
            arg_types = self.arg_types[0]

        else:
            arg_types = self.arg_types

        if len(arg_types) == (len(self.args) + 1):
            self.args.insert(0, (None, None))
            self.arg_names.insert(0, (None, None))

        if isinstance(self.arg_types[0], tuple):
            assert_(len(self.modes) == len(self.arg_types))

            # Find matching call signature using variable arguments - material
            # and user arguments are ignored!
            matched = []
            for it, arg_types in enumerate(self.arg_types):
                arg_kinds = get_arg_kinds(arg_types)
                if self._check_variables(arg_kinds):
                    matched.append((it, arg_kinds))

            if len(matched) == 1:
                i_match, arg_kinds = matched[0]
                arg_types = self.arg_types[i_match]
                self.mode = self.modes[i_match]

            elif len(matched) == 0:
                msg = 'cannot match arguments! (%s)' % self.arg_names
                raise ValueError(msg)

            else:
                msg = 'ambiguous arguments! (%s)' % self.arg_names
                raise ValueError(msg)

        else:
            arg_types = self.arg_types
            arg_kinds = get_arg_kinds(self.arg_types)
            self.mode = Struct.get(self, 'mode', None)

            if not self._check_variables(arg_kinds):
                raise ValueError('cannot match variables! (%s)'
                                 % self.arg_names)

        # Set actual argument types.
        self.ats = list(arg_types)

        for ii, arg_kind in enumerate(arg_kinds):
            name = self.arg_names[ii]
            if arg_kind.endswith('variable'):
                names = self.names.variable

                if arg_kind == 'virtual_variable':
                    self.names.virtual.append(name)

                elif arg_kind == 'state_variable':
                    self.names.state.append(name)

                elif arg_kind == 'parameter_variable':
                    self.names.parameter.append(name)

            elif arg_kind.endswith('material'):
                names = self.names.material

            else:
                names = self.names.user

            names.append(name)

        self.n_virtual = len(self.names.virtual)
        if self.n_virtual > 1:
            raise ValueError('at most one virtial variable is allowed! (%d)'
                             % self.n_virtual)

        self.set_arg_types()

        self.setup_integration()

        if (self.raw_itype == 'dw') and (self.mode == 'eval'):
            self.itype = 'd'
        else:
            self.itype = self.raw_itype

    def _check_variables(self, arg_kinds):
        for ii, arg_kind in enumerate(arg_kinds):
            if arg_kind.endswith('variable'):
                var = self.args[ii]
                check = {'virtual_variable' : var.is_virtual,
                         'state_variable' : var.is_state_or_parameter,
                         'parameter_variable' : var.is_state_or_parameter}
                if not check[arg_kind]():
                    return False

        else:
            return True

    def set_arg_types(self):
        pass

    def check_args(self):
        """
        Common checking to all terms.

        Check compatibility of field and term subdomain lists (igs).
        """
        vns = self.get_variable_names()
        for name in vns:
            field = self._kwargs[name].get_field()
            if field is None:
                continue

            if not nm.all(in1d(self.region.vertices,
                               field.region.vertices)):
                msg = ('%s: incompatible regions: (self, field %s)'
                       + '(%s in %s)') %\
                      (self.name, field.name,
                       self.region.vertices, field.region.vertices)
                raise ValueError(msg)

    def get_variable_names(self):
        return self.names.variable

    def get_material_names(self):
        out = []
        for aux in self.names.material:
            if aux[0] is not None:
                out.append(aux[0])
        return out

    def get_user_names(self):
        return self.names.user

    def get_virtual_name(self):
        if not self.names.virtual:
            return None

        var = self.get_virtual_variable()
        return var.name

    def get_state_names(self):
        """
        If variables are given, return only true unknowns whose data are of
        the current time step (0).
        """
        variables = self.get_state_variables()
        return [var.name for var in variables]

    def get_parameter_names(self):
        return copy(self.names.parameter)

    def get_conn_key(self):
        """The key to be used in DOF connectivity information."""
        key = (self.name,) + tuple(self.arg_names)
        key += (self.integral_name, self.region.name)

        return key

    def get_conn_info(self):
        vvar = self.get_virtual_variable()
        svars = self.get_state_variables()
        pvars = self.get_parameter_variables()

        all_vars = self.get_variables()

        dc_type = self.get_dof_conn_type()
        tgs = self.get_geometry_types()

        v_igs = v_tg = None
        if vvar is not None:
            field = vvar.get_field()
            if field is not None:
                v_igs = field.igs
                if vvar.name in tgs:
                    v_tg = tgs[vvar.name]

                else:
                    v_tg = None

        else:
            # No virtual variable -> all unknowns are in fact known parameters.
            pvars += svars
            svars = []

        region = self.get_region()
        if region is not None:
            is_any_trace = reduce(lambda x, y: x or y,
                                  self.arg_traces.values())
            if is_any_trace:
                region.setup_mirror_region()
                self.char_fun.igs = region.igs

        vals = []
        aux_pvars = []
        for svar in svars:
            # Allow only true state variables.
            if not svar.is_state():
                aux_pvars.append(svar)
                continue

            field = svar.get_field()
            if field is not None:
                s_igs = field.igs
            else:
                s_igs = None

            is_trace = self.arg_traces[svar.name]

            if svar.name in tgs:
                ps_tg = tgs[svar.name]
            else:
                ps_tg = v_tg

            val = ConnInfo(virtual=vvar, virtual_igs=v_igs,
                           state=svar, state_igs=s_igs,
                           primary=svar, primary_igs=s_igs,
                           has_virtual=True,
                           has_state=True,
                           is_trace=is_trace,
                           dc_type=dc_type,
                           v_tg=v_tg,
                           ps_tg=ps_tg,
                           region=region,
                           all_vars=all_vars)
            vals.append(val)

        pvars += aux_pvars
        for pvar in pvars:
            field = pvar.get_field()
            if field is not None:
                p_igs = field.igs
            else:
                p_igs = None

            is_trace = self.arg_traces[pvar.name]

            if pvar.name in tgs:
                ps_tg = tgs[pvar.name]
            else:
                ps_tg = v_tg

            val = ConnInfo(virtual=vvar, virtual_igs=v_igs,
                           state=None, state_igs=[],
                           primary=pvar.get_primary(), primary_igs=p_igs,
                           has_virtual=vvar is not None,
                           has_state=False,
                           is_trace=is_trace,
                           dc_type=dc_type,
                           v_tg=v_tg,
                           ps_tg=ps_tg,
                           region=region,
                           all_vars=all_vars)
            vals.append(val)

        if vvar and (len(vals) == 0):
            # No state, parameter variables, just the virtual one.
            val = ConnInfo(virtual=vvar, virtual_igs=v_igs,
                           state=vvar.get_primary(), state_igs=v_igs,
                           primary=vvar.get_primary(), primary_igs=v_igs,
                           has_virtual=True,
                           has_state=False,
                           is_trace=False,
                           dc_type=dc_type,
                           v_tg=v_tg,
                           ps_tg=v_tg,
                           region=region,
                           all_vars=all_vars)
            vals.append(val)

        return vals
    def get_args_by_name(self, arg_names):
        """
        Return arguments by name.
        """
        out = []
        for name in arg_names:
            try:
                ii = self.arg_names.index(name)

            except ValueError:
                raise ValueError('non-existing argument! (%s)' % name)

            out.append(self.args[ii])

        return out

    def get_args(self, arg_types=None, **kwargs):
        """
        Return arguments by type as specified in arg_types (or
        self.ats). Arguments in **kwargs can override the ones assigned
        at the term construction - this is useful for passing user data.
        """
        ats = self.ats
        if arg_types is None:
            arg_types = ats

        args = []
        iname, region_name, ig = self.get_current_group()
        for at in arg_types:
            ii = ats.index(at)
            arg_name = self.arg_names[ii]

            if isinstance(arg_name, basestr):
                if arg_name in kwargs:
                    args.append(kwargs[arg_name])

                else:
                    args.append(self.args[ii])

            else:
                mat, par_name = self.args[ii]

                if mat is not None:
                    mat_data = mat.get_data((region_name, self.integral_name),
                                            ig, par_name)
                else:
                    mat_data = None

                args.append(mat_data)

        return args

    def get_kwargs(self, keys, **kwargs):
        """Extract arguments from **kwargs listed in keys (default is
        None)."""
        return [kwargs.get(name) for name in keys]

    def get_arg_name(self, arg_type, full=False, join=None):
        """
        Get the name of the argument specified by `arg_type.`

        Parameters
        ----------
        arg_type : str
            The argument type string.
        full : bool
            If True, return the full name. For example, if the name of a
            variable argument is 'u' and its time derivative is
            requested, the full name is 'du/dt'.
        join : str, optional
            Optionally, the material argument name tuple can be joined
            to a single string using the `join` string.

        Returns
        -------
        name : str
            The argument name.
        """
        try:
            ii = self.ats.index(arg_type)
        except ValueError:
            return None

        name = self.arg_names[ii]
        if full:
            # Include derivatives.
            if self.arg_derivatives[name]:
                name = 'd%s/%s' % (name, self.arg_derivatives[name])

        if (join is not None) and isinstance(name, tuple):
            name = join.join(name)

        return name

    def get_integral_info(self):
        """
        Get information on the term integral.

        Returns
        -------
        kind : 'v' or 's'
            The integral kind.
        """
        if self.integration:
            if self.integration == 'volume':
                kind = 'v'

            elif 'surface' in self.integration:
                kind = 's'

            elif self.integration == 'point':
                kind = None

            else:
                raise ValueError('unsupported term integration! (%s)'
                                 % self.integration)

        else:
            kind = None

        return kind

    def setup_integration(self):
        self.has_geometry = True

        self.geometry_types = {}
        if isinstance(self.integration, basestr):
            for var in self.get_variables():
                self.geometry_types[var.name] = self.integration

        else:
            if self.mode is not None:
                self.integration = self._integration[self.mode]

            if self.integration is not None:
                for arg_type, gtype in self.integration.iteritems():
                    var = self.get_args(arg_types=[arg_type])[0]
                    self.geometry_types[var.name] = gtype

        gtypes = list(set(self.geometry_types.itervalues()))

        if 'surface_extra' in gtypes:
            self.dof_conn_type = 'volume'

        elif len(gtypes):
            self.dof_conn_type = gtypes[0]

    def get_region(self):
        return self.region

    def get_geometry_types(self):
        """
        Returns
        -------
        out : dict
            The required geometry types for each variable argument.
        """
        return self.geometry_types

    def get_current_group(self):
        return (self.integral_name, self.region.name, self.char_fun.ig)

    def get_dof_conn_type(self):
        return Struct(name='dof_conn_info', type=self.dof_conn_type,
                      region_name=self.region.name)

    def set_current_group(self, ig):
        self.char_fun.set_current_group(ig)

    def igs(self):
        return self.char_fun.igs

    def get_assembling_cells(self, shape=None):
        """
        According to the term integration type, return either the term
        region cell indices or local index sequence.
        """
        shape_kind = get_shape_kind(self.integration)
        ig = self.char_fun.ig

        cells = self.region.get_cells(ig, true_cells_only=False)

        if shape_kind == 'surface':
            cells = nm.arange(cells.shape[0], dtype=nm.int32)

        elif shape_kind == 'point':
            cells = nm.arange(shape[0], dtype=nm.int32)

        else:
            cells = cells.astype(nm.int32)

        return cells

    def iter_groups(self):
        if self.dof_conn_type == 'point':
            igs = self.igs()[0:1]

        else:
            igs = self.igs()

        for ig in igs:
            if self.integration == 'volume':
                if not len(self.region.get_cells(ig)): continue
            self.set_current_group(ig)
            yield ig

    def time_update(self, ts):
        if ts is not None:
            self.step = ts.step
            self.dt = ts.dt
            self.is_quasistatic = ts.is_quasistatic

    def advance(self, ts):
        """
        Advance to the next time step. Implemented in subclasses.
        """
    def get_vector(self, variable):
        """Get the vector stored in `variable` according to self.arg_steps
        and self.arg_derivatives. Supports only the backward difference w.r.t.
        time."""
        name = variable.name
        return variable(step=self.arg_steps[name],
                        derivative=self.arg_derivatives[name])

    def get_approximation(self, variable, get_saved=False):
        """
        Return approximation corresponding to `variable`. Also return
        the corresponding geometry (actual or saved, according to
        `get_saved`).
        """
        geo, _, key = self.get_mapping(variable, get_saved=get_saved,
                                       return_key=True)
        ig = key[2]
        ap = variable.get_approximation(ig)

        return ap, geo

    def get_variables(self, as_list=True):
        if as_list:
            variables = self.get_args_by_name(self.names.variable)

        else:
            variables = {}
            for var in self.get_args_by_name(self.names.variable):
                variables[var.name] = var

        return variables

    def get_virtual_variable(self):
        aux = self.get_args_by_name(self.names.virtual)
        if len(aux) == 1:
            var = aux[0]

        else:
            var = None

        return var

    def get_state_variables(self, unknown_only=False):
        variables = self.get_args_by_name(self.names.state)

        if unknown_only:
            variables = [var for var in variables
                         if (var.kind == 'unknown') and
                         (self.arg_steps[var.name] == 0)]

        return variables

    def get_parameter_variables(self):
        return self.get_args_by_name(self.names.parameter)

    def get_materials(self, join=False):
        materials = self.get_args_by_name(self.names.material)

        for mat in materials:
            if mat[0] is None:
                materials.remove(mat)

        if join:
            materials = list(set(mat[0] for mat in materials))

        return materials

    def get_qp_key(self):
        """
        Return a key identifying uniquely the term quadrature points.
        """
        return (self.region.name, self.integral.name)

    def get_physical_qps(self):
        """
        Get physical quadrature points corresponding to the term region
        and integral.
        """
        from sfepy.fem.mappings import get_physical_qps, PhysicalQPs

        if self.integration == 'point':
            phys_qps = PhysicalQPs(self.region.igs)

        else:
            phys_qps = get_physical_qps(self.region, self.integral)

        return phys_qps

    def get_mapping(self, variable, get_saved=False, return_key=False):
        """
        Get the reference mapping from a variable.

        Notes
        -----
        This is a convenience wrapper of Field.get_mapping() that
        initializes the arguments using the term data.
        """
        integration = self.geometry_types[variable.name]
        is_trace = self.arg_traces[variable.name]

        if is_trace:
            region, ig_map, ig_map_i = self.region.get_mirror_region()
            ig = ig_map_i[self.char_fun.ig]

        else:
            region = self.region
            ig = self.char_fun.ig

        out = variable.field.get_mapping(ig, region,
                                         self.integral, integration,
                                         get_saved=get_saved,
                                         return_key=return_key)

        return out

    def get_data_shape(self, variable):
        """
        Get data shape information from variable.

        Notes
        -----
        This is a convenience wrapper of FieldVariable.get_data_shape() that
        initializes the arguments using the term data.
        """
        integration = self.geometry_types[variable.name]
        is_trace = self.arg_traces[variable.name]

        if is_trace:
            region, ig_map, ig_map_i = self.region.get_mirror_region()
            ig = ig_map_i[self.char_fun.ig]

        else:
            region = self.region
            ig = self.char_fun.ig

        out = variable.get_data_shape(ig, self.integral,
                                      integration, region.name)
        return out
def get(self, variable, quantity_name, bf=None, integration=None,
step=None, time_derivative=None):
"""
Get the named quantity related to the variable.
Notes
-----
This is a convenience wrapper of Variable.evaluate() that
initializes the arguments using the term data.
"""
name = variable.name
step = get_default(step, self.arg_steps[name])
time_derivative = get_default(time_derivative,
self.arg_derivatives[name])
integration = get_default(integration, self.geometry_types[name])
data = variable.evaluate(self.char_fun.ig, mode=quantity_name,
region=self.region, integral=self.integral,
integration=integration,
step=step, time_derivative=time_derivative,
is_trace=self.arg_traces[name], bf=bf)
return data
def check_shapes(self, *args, **kwargs):
"""
Default implementation of function to check term argument shapes
at run-time.
"""
pass
def standalone_setup(self):
from sfepy.fem import setup_dof_conns
conn_info = {'aux' : self.get_conn_info()}
setup_dof_conns(conn_info)
materials = self.get_materials(join=True)
for mat in materials:
mat.time_update(None, [Struct(terms=[self])])
def call_get_fargs(self, args, kwargs):
try:
fargs = self.get_fargs(*args, **kwargs)
except RuntimeError:
terms.errclear()
raise ValueError
return fargs
def call_function(self, out, fargs):
try:
status = self.function(out, *fargs)
except RuntimeError:
terms.errclear()
raise ValueError
if status:
terms.errclear()
raise ValueError('term evaluation failed! (%s)' % self.name)
return status
def eval_real(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
out = nm.empty(shape, dtype=nm.float64)
if mode == 'eval':
status = self.call_function(out, fargs)
# Sum over elements but not over components.
out1 = nm.sum(out, 0).squeeze()
return out1, status
else:
status = self.call_function(out, fargs)
return out, status
def eval_complex(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
rout = nm.empty(shape, dtype=nm.float64)
fargsd = split_complex_args(fargs)
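        # split_complex_args() keys the argument tuples by the real/imaginary
        # combinations of the complex arguments: 'r' and 'i' always, plus the
        # mixed 'ir' and 'ri' combinations when two arguments are complex.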
        # Assuming linear forms, the matrix is the same for the real and
        # imaginary parts.
rstatus = self.call_function(rout, fargsd['r'])
if (diff_var is None) and len(fargsd) >= 2:
iout = nm.empty(shape, dtype=nm.float64)
istatus = self.call_function(iout, fargsd['i'])
if mode == 'eval' and len(fargsd) >= 4:
irout = nm.empty(shape, dtype=nm.float64)
irstatus = self.call_function(irout, fargsd['ir'])
riout = nm.empty(shape, dtype=nm.float64)
ristatus = self.call_function(riout, fargsd['ri'])
out = (rout - iout) + (riout + irout) * 1j
status = rstatus or istatus or ristatus or irstatus
else:
out = rout + 1j * iout
status = rstatus or istatus
else:
out, status = rout, rstatus
if mode == 'eval':
out1 = nm.sum(out, 0).squeeze()
return out1, status
else:
return out, status
def evaluate(self, mode='eval', diff_var=None,
standalone=True, ret_status=False, **kwargs):
"""
Evaluate the term.
Parameters
----------
        mode : 'eval' (default), 'el_avg', 'el', 'qp' or 'weak'
The term evaluation mode.
Returns
-------
val : float or array
In 'eval' mode, the term returns a single value (the
            integral; it does not need to be a scalar), while in 'weak'
mode it returns an array for each element.
status : int, optional
The flag indicating evaluation success (0) or failure
(nonzero). Only provided if `ret_status` is True.
iels : array of ints, optional
The local elements indices in 'weak' mode. Only provided in
non-'eval' modes.
"""
if standalone:
self.standalone_setup()
kwargs = kwargs.copy()
term_mode = kwargs.pop('term_mode', None)
if mode == 'eval':
val = 0.0
status = 0
for ig in self.iter_groups():
args = self.get_args(**kwargs)
self.check_shapes(*args)
_args = tuple(args) + (mode, term_mode, diff_var)
fargs = self.call_get_fargs(_args, kwargs)
shape, dtype = self.get_eval_shape(*_args, **kwargs)
if dtype == nm.float64:
_v, stat = self.eval_real(shape, fargs, mode, term_mode,
**kwargs)
elif dtype == nm.complex128:
_v, stat = self.eval_complex(shape, fargs, mode, term_mode,
**kwargs)
else:
raise ValueError('unsupported term dtype! (%s)' % dtype)
val += _v
status += stat
val *= self.sign
elif mode in ('el_avg', 'el', 'qp'):
vals = None
iels = nm.empty((0, 2), dtype=nm.int32)
status = 0
for ig in self.iter_groups():
args = self.get_args(**kwargs)
self.check_shapes(*args)
_args = tuple(args) + (mode, term_mode, diff_var)
fargs = self.call_get_fargs(_args, kwargs)
shape, dtype = self.get_eval_shape(*_args, **kwargs)
if dtype == nm.float64:
val, stat = self.eval_real(shape, fargs, mode, term_mode,
**kwargs)
            elif dtype == nm.complex128:
                val, stat = self.eval_complex(shape, fargs, mode, term_mode,
                                              **kwargs)
            else:
                raise ValueError('unsupported term dtype! (%s)' % dtype)
if vals is None:
vals = val
else:
vals = nm.r_[vals, val]
_iels = self.get_assembling_cells(val.shape)
aux = nm.c_[nm.repeat(ig, _iels.shape[0])[:,None],
_iels[:,None]]
iels = nm.r_[iels, aux]
status += stat
vals *= self.sign
elif mode == 'weak':
vals = []
iels = []
status = 0
varr = self.get_virtual_variable()
if diff_var is not None:
varc = self.get_variables(as_list=False)[diff_var]
for ig in self.iter_groups():
args = self.get_args(**kwargs)
self.check_shapes(*args)
_args = tuple(args) + (mode, term_mode, diff_var)
fargs = self.call_get_fargs(_args, kwargs)
n_elr, n_qpr, dim, n_enr, n_cr = self.get_data_shape(varr)
n_row = n_cr * n_enr
if diff_var is None:
shape = (n_elr, 1, n_row, 1)
else:
n_elc, n_qpc, dim, n_enc, n_cc = self.get_data_shape(varc)
n_col = n_cc * n_enc
shape = (n_elr, 1, n_row, n_col)
if varr.dtype == nm.float64:
val, stat = self.eval_real(shape, fargs, mode, term_mode,
diff_var, **kwargs)
elif varr.dtype == nm.complex128:
val, stat = self.eval_complex(shape, fargs, mode, term_mode,
diff_var, **kwargs)
else:
raise ValueError('unsupported term dtype! (%s)'
% varr.dtype)
vals.append(self.sign * val)
iels.append((ig, self.get_assembling_cells(val.shape)))
status += stat
# Setup return value.
if mode == 'eval':
out = (val,)
else:
out = (vals, iels)
if goptions['check_term_finiteness']:
assert_(nm.isfinite(out[0]).all(),
msg='%+.2e * %s.%d.%s(%s) term values not finite!'
% (self.sign, self.name, self.integral.order,
self.region.name, self.arg_str))
if ret_status:
out = out + (status,)
if len(out) == 1:
out = out[0]
return out
def assemble_to(self, asm_obj, val, iels, mode='vector', diff_var=None):
import sfepy.fem.extmods.assemble as asm
vvar = self.get_virtual_variable()
dc_type = self.get_dof_conn_type()
if mode == 'vector':
if asm_obj.dtype == nm.float64:
assemble = asm.assemble_vector
else:
assert_(asm_obj.dtype == nm.complex128)
assemble = asm.assemble_vector_complex
                for ii in range(len(val)):
                    if val[ii].dtype != nm.complex128:
                        val[ii] = val[ii].astype(nm.complex128)
for ii, (ig, _iels) in enumerate(iels):
vec_in_els = val[ii]
dc = vvar.get_dof_conn(dc_type, ig, active=True)
assert_(vec_in_els.shape[2] == dc.shape[1])
assemble(asm_obj, vec_in_els, _iels, 1.0, dc)
elif mode == 'matrix':
if asm_obj.dtype == nm.float64:
assemble = asm.assemble_matrix
else:
assert_(asm_obj.dtype == nm.complex128)
assemble = asm.assemble_matrix_complex
svar = diff_var
tmd = (asm_obj.data, asm_obj.indptr, asm_obj.indices)
for ii, (ig, _iels) in enumerate(iels):
mtx_in_els = val[ii]
if ((asm_obj.dtype == nm.complex128)
and (mtx_in_els.dtype == nm.float64)):
mtx_in_els = mtx_in_els.astype(nm.complex128)
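                # The row DOF connectivity comes from the virtual (test)
                # variable, the column connectivity from the differentiated
                # state variable (or its trace on a mirror region).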
rdc = vvar.get_dof_conn(dc_type, ig, active=True)
is_trace = self.arg_traces[svar.name]
cdc = svar.get_dof_conn(dc_type, ig, active=True,
is_trace=is_trace)
assert_(mtx_in_els.shape[2:] == (rdc.shape[1], cdc.shape[1]))
sign = 1.0
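                # A first-order time derivative of the state variable
                # contributes a 1.0/dt factor to the tangent matrix; in the
                # quasistatic case the very first step has no such block.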
if self.arg_derivatives[svar.name]:
if not self.is_quasistatic or (self.step > 0):
sign *= 1.0 / self.dt
else:
sign = 0.0
assemble(tmd[0], tmd[1], tmd[2], mtx_in_els,
_iels, sign, rdc, cdc)
else:
raise ValueError('unknown assembling mode! (%s)' % mode)
| [
"[email protected]"
]
| |
2a2d1d8830e835a1494087e94fb849e401876cc4 | bf21cd0ef7a94fa106ccd9f91a4bbfdcda7f94ed | /python-basic/chapter04/ex01_2.py | 2b0d435813f0cc5b511a07e9e93529dd676c29ef | []
| no_license | juneglee/Deep_Learning | fdf8cae1b962aaa0ce557cb53f78a22b6d5ae1e8 | 17a448cf6a7c5b61b967dd78af3d328d63378205 | refs/heads/master | 2023-07-15T03:02:55.739619 | 2021-08-19T14:04:55 | 2021-08-19T14:04:55 | 273,253,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | # List operators: concatenation (+), repetition (*), len()
# List operators
list_a = [1, 2, 3]
list_b = [4, 5, 6]
print("# 리스트")
print("list_a = ", list_a)
print("list_b = ", list_b)
print()
# Basic operators: concatenation (+), repetition (*)
print("# Basic list operators")
print("list_a + list_b =", list_a + list_b)
print("list_a * 3 =", list_a * 3)
print()
# Getting the length: len()
print("# Getting the length")
print("len(list_a) = ", len(list_a))
| [
"[email protected]"
]
| |
59f257e74467edf2e02f1c12f63bef4bc528fd7e | 085488720112922ff3aed15f99f3c93911425c4a | /vesper/signal/tests/test_s3_byte_sequence.py | c6f1f484a9b415154c8d517bf998a2ab6d8b4200 | [
"MIT"
]
| permissive | HaroldMills/Vesper | 0b61d18bc241af22bfc251088fc87d72add6367b | ec92fe5231f54336499db189a3bbc6cb08a19e61 | refs/heads/master | 2023-07-05T22:45:27.316498 | 2023-07-04T11:58:14 | 2023-07-04T11:58:14 | 19,112,486 | 49 | 6 | MIT | 2023-02-14T16:09:19 | 2014-04-24T14:55:34 | Python | UTF-8 | Python | false | false | 909 | py | import unittest
import warnings
from vesper.signal.tests.byte_sequence_tests import ByteSequenceTests
from vesper.signal.s3_byte_sequence import S3ByteSequence
from vesper.tests.test_case import TestCase
REGION_NAME = 'us-east-2'
BUCKET_NAME = 'vesper-test'
OBJECT_KEY = 'Bytes 00-FF.dat'
OBJECT_LENGTH = 256
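# The test object is expected to hold the 256 byte values 0x00 through 0xFF
# (per its key), which is why OBJECT_LENGTH is 256.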
# TODO: Look into ResourceWarning issue mentioned below. Is it safe to
# ignore the warnings?
class S3ByteSequenceTests(TestCase, ByteSequenceTests):
@property
def sequence(self):
return S3ByteSequence(REGION_NAME, BUCKET_NAME, OBJECT_KEY)
def setUp(self):
# Without the following, the `S3ByteSequence` unit tests
# output a ResourceWarning about an unclosed transport to the
# console.
warnings.filterwarnings(
action="ignore", message="unclosed", category=ResourceWarning)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
102f709bebff12b32c93c321b66bd7327cd6e92b | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /accelbyte_py_sdk/api/matchmaking/models/models_query_mock_by.py | 8e41cf6eec7f84d441d5c2d4e272a292a791f88e | [
"MIT"
]
| permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 3,879 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# AccelByte Gaming Services Matchmaking Service (2.25.7)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ModelsQueryMockBy(Model):
"""Models query mock by (models.QueryMockBy)
Properties:
timestamp_after: (timestamp_after) REQUIRED int
"""
# region fields
timestamp_after: int # REQUIRED
# endregion fields
# region with_x methods
def with_timestamp_after(self, value: int) -> ModelsQueryMockBy:
self.timestamp_after = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "timestamp_after"):
result["timestamp_after"] = int(self.timestamp_after)
elif include_empty:
result["timestamp_after"] = 0
return result
# endregion to methods
# region static methods
@classmethod
def create(cls, timestamp_after: int, **kwargs) -> ModelsQueryMockBy:
instance = cls()
instance.timestamp_after = timestamp_after
return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> ModelsQueryMockBy:
instance = cls()
if not dict_:
return instance
if "timestamp_after" in dict_ and dict_["timestamp_after"] is not None:
instance.timestamp_after = int(dict_["timestamp_after"])
elif include_empty:
instance.timestamp_after = 0
return instance
@classmethod
def create_many_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> Dict[str, ModelsQueryMockBy]:
return (
{k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_}
if dict_
else {}
)
@classmethod
def create_many_from_list(
cls, list_: list, include_empty: bool = False
) -> List[ModelsQueryMockBy]:
return (
[cls.create_from_dict(i, include_empty=include_empty) for i in list_]
if list_
else []
)
@classmethod
def create_from_any(
cls, any_: any, include_empty: bool = False, many: bool = False
) -> Union[
ModelsQueryMockBy, List[ModelsQueryMockBy], Dict[Any, ModelsQueryMockBy]
]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"timestamp_after": "timestamp_after",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"timestamp_after": True,
}
# endregion static methods
| [
"[email protected]"
]
| |
150caba5e2ce7bbe0f2fac7228a73b1c48b130e8 | 35be15b1dc120a256750cf66305f169974c2e55c | /ecommerce/jan2020/blog/admin.py | 2f847da382c95e6db6daed732a0c2fa22da850e2 | []
| no_license | tejendrapatel/Ecommerce_Logics | 2f10a5e216691add092f37a8186a9940b905f173 | 16ad13672c275e1be3ee6b3f5cd84d09f1600496 | refs/heads/main | 2023-08-27T00:53:10.438777 | 2021-10-23T11:50:36 | 2021-10-23T11:50:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.contrib import admin
from blog.models import *
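# Register the blog models so they can be managed through the Django admin.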
admin.site.register(college)
admin.site.register(contact)
| [
"[email protected]"
]
| |
d342732ac3b6fe72d50a5b8e94fc6365d7766d2f | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20200301/route_table.py | 0f7b2c604bdb6750a43a91e17b7ebf103a368267 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,631 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RouteTableInitArgs', 'RouteTable']
@pulumi.input_type
class RouteTableInitArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input['RouteArgs']]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a RouteTable resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[bool] disable_bgp_route_propagation: Whether to disable the routes learned by BGP on that route table. True means disable.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] route_table_name: The name of the route table.
:param pulumi.Input[Sequence[pulumi.Input['RouteArgs']]] routes: Collection of routes contained within a route table.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if disable_bgp_route_propagation is not None:
pulumi.set(__self__, "disable_bgp_route_propagation", disable_bgp_route_propagation)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if route_table_name is not None:
pulumi.set(__self__, "route_table_name", route_table_name)
if routes is not None:
pulumi.set(__self__, "routes", routes)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="disableBgpRoutePropagation")
def disable_bgp_route_propagation(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to disable the routes learned by BGP on that route table. True means disable.
"""
return pulumi.get(self, "disable_bgp_route_propagation")
@disable_bgp_route_propagation.setter
def disable_bgp_route_propagation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_bgp_route_propagation", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="routeTableName")
def route_table_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the route table.
"""
return pulumi.get(self, "route_table_name")
@route_table_name.setter
def route_table_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "route_table_name", value)
@property
@pulumi.getter
def routes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteArgs']]]]:
"""
Collection of routes contained within a route table.
"""
return pulumi.get(self, "routes")
@routes.setter
def routes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouteArgs']]]]):
pulumi.set(self, "routes", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class RouteTable(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Route table resource.
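        ## Example Usage
        A minimal sketch; the resource name, group and location below are
        illustrative values, not taken from a real deployment.
        ```python
        import pulumi_azure_native as azure_native
        route_table = azure_native.network.v20200301.RouteTable("routeTable",
            resource_group_name="example-rg",
            location="westus",
            disable_bgp_route_propagation=True)
        ```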
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] disable_bgp_route_propagation: Whether to disable the routes learned by BGP on that route table. True means disable.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_table_name: The name of the route table.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]] routes: Collection of routes contained within a route table.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RouteTableInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Route table resource.
:param str resource_name: The name of the resource.
:param RouteTableInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RouteTableInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_table_name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RouteTableInitArgs.__new__(RouteTableInitArgs)
__props__.__dict__["disable_bgp_route_propagation"] = disable_bgp_route_propagation
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["route_table_name"] = route_table_name
__props__.__dict__["routes"] = routes
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["subnets"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:network:RouteTable"), pulumi.Alias(type_="azure-native:network/v20150501preview:RouteTable"), pulumi.Alias(type_="azure-native:network/v20150615:RouteTable"), pulumi.Alias(type_="azure-native:network/v20160330:RouteTable"), pulumi.Alias(type_="azure-native:network/v20160601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20160901:RouteTable"), pulumi.Alias(type_="azure-native:network/v20161201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170301:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170801:RouteTable"), pulumi.Alias(type_="azure-native:network/v20170901:RouteTable"), pulumi.Alias(type_="azure-native:network/v20171001:RouteTable"), pulumi.Alias(type_="azure-native:network/v20171101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180401:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180701:RouteTable"), pulumi.Alias(type_="azure-native:network/v20180801:RouteTable"), pulumi.Alias(type_="azure-native:network/v20181001:RouteTable"), pulumi.Alias(type_="azure-native:network/v20181101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20181201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190401:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190701:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190801:RouteTable"), pulumi.Alias(type_="azure-native:network/v20190901:RouteTable"), pulumi.Alias(type_="azure-native:network/v20191101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20191201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200401:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200501:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200601:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200701:RouteTable"), pulumi.Alias(type_="azure-native:network/v20200801:RouteTable"), pulumi.Alias(type_="azure-native:network/v20201101:RouteTable"), pulumi.Alias(type_="azure-native:network/v20210201:RouteTable"), pulumi.Alias(type_="azure-native:network/v20210301:RouteTable"), pulumi.Alias(type_="azure-native:network/v20210501:RouteTable")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RouteTable, __self__).__init__(
'azure-native:network/v20200301:RouteTable',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteTable':
"""
Get an existing RouteTable resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RouteTableInitArgs.__new__(RouteTableInitArgs)
__props__.__dict__["disable_bgp_route_propagation"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["routes"] = None
__props__.__dict__["subnets"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return RouteTable(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="disableBgpRoutePropagation")
def disable_bgp_route_propagation(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to disable the routes learned by BGP on that route table. True means disable.
"""
return pulumi.get(self, "disable_bgp_route_propagation")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the route table resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def routes(self) -> pulumi.Output[Optional[Sequence['outputs.RouteResponse']]]:
"""
Collection of routes contained within a route table.
"""
return pulumi.get(self, "routes")
@property
@pulumi.getter
def subnets(self) -> pulumi.Output[Sequence['outputs.SubnetResponse']]:
"""
A collection of references to subnets.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
]
| |
a19d147dc3ac0dc1389f80f703d9bfdb9880730f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/281/66059/submittedfiles/testes.py | 4f5702282440b083bcb9303e18bb7339f798841e | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | # -*- coding: utf-8 -*-
# START HERE BELOW
print("Hello World")
print("olá Mundo") | [
"[email protected]"
]
| |
c55f365e169d1106b279c90b35267dee292c835b | acd749424ec557eb2c0aed20333131eeb738b27a | /pyart/io/nexrad_cdm.py | 2df276d06cb14dcb08a6247e4e97c50c44c7cd4b | [
"BSD-3-Clause"
]
| permissive | zxdawn/pyart | c5b8cc505e4eea0db01af40bdd3a796ff11020b2 | fc51a68dfb488392217b2093ed593f07016e793b | refs/heads/CNRAD | 2020-03-17T23:14:30.526023 | 2019-05-19T13:39:00 | 2019-05-19T13:39:00 | 134,036,631 | 9 | 0 | null | 2018-05-22T14:07:30 | 2018-05-19T06:34:09 | Python | UTF-8 | Python | false | false | 13,609 | py | """
pyart.io.nexrad_cdm
===================
Functions for accessing Common Data Model (CDM) NEXRAD Level 2 files.
.. autosummary::
:toctree: generated/
read_nexrad_cdm
_scan_info
_populate_scan_dic
_get_moment_data
"""
import os
from datetime import datetime, timedelta
import netCDF4
import numpy as np
from .nexrad_common import get_nexrad_location
from ..config import FileMetadata, get_fillvalue
from ..core.radar import Radar
from .common import make_time_unit_str, _test_arguments
def read_nexrad_cdm(filename, field_names=None, additional_metadata=None,
file_field_names=False, exclude_fields=None,
station=None, **kwargs):
"""
Read a Common Data Model (CDM) NEXRAD Level 2 file.
Parameters
----------
filename : str
File name or URL of a Common Data Model (CDM) NEXRAD Level 2 file.
        Files in this format can be created using the NetCDF Java Library
        tools [1]_. A URL of an OPeNDAP file on the UCAR THREDDS Data
        Server [2]_ is also accepted if the netCDF4 library has been
        compiled with OPeNDAP support.
field_names : dict, optional
Dictionary mapping NEXRAD moments to radar field names. If a
data type found in the file does not appear in this dictionary or has
a value of None it will not be placed in the radar.fields dictionary.
A value of None, the default, will use the mapping defined in the
metadata configuration file.
additional_metadata : dict of dicts, optional
Dictionary of dictionaries to retrieve metadata from during this read.
This metadata is not used during any successive file reads unless
explicitly included. A value of None, the default, will not
        introduce any additional metadata and the file specific or default
metadata as specified by the metadata configuration file will be used.
file_field_names : bool, optional
        True to use the NEXRAD field names for the field names. In this
case the field_names parameter is ignored. The field dictionary will
likely only have a 'data' key, unless the fields are defined in
`additional_metadata`.
exclude_fields : list or None, optional
List of fields to exclude from the radar object. This is applied
after the `file_field_names` and `field_names` parameters.
station : str
Four letter ICAO name of the NEXRAD station used to determine the
location in the returned radar object. This parameter is only
used when the location is not contained in the file, which occur
in older NEXRAD files. If the location is not provided in the file
and this parameter is set to None the station name will be determined
from the filename.
Returns
-------
radar : Radar
Radar object containing all moments and sweeps/cuts in the volume.
Gates not collected are masked in the field data.
References
----------
.. [1] http://www.unidata.ucar.edu/software/netcdf-java/documentation.htm
.. [2] http://thredds.ucar.edu/thredds/catalog.html
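    Examples
    --------
    A minimal usage sketch; the file name below is hypothetical, and
    ``read_nexrad_cdm`` is assumed to be exposed as
    ``pyart.io.read_nexrad_cdm``.
    >>> import pyart
    >>> radar = pyart.io.read_nexrad_cdm('KATX_NEXRAD2_CDM.nc')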
"""
# test for non empty kwargs
_test_arguments(kwargs)
# create metadata retrieval object
filemetadata = FileMetadata('nexrad_cdm', field_names,
additional_metadata, file_field_names,
exclude_fields)
# open the file
dataset = netCDF4.Dataset(filename)
dattrs = dataset.ncattrs()
dvars = dataset.variables
if 'cdm_data_type' not in dattrs or dataset.cdm_data_type != 'RADIAL':
raise IOError('%s is not a valid CDM NetCDF file' % (filename))
# determine the scan information
scan_info = _scan_info(dvars)
radials_per_scan = [max(s['nradials']) for s in scan_info]
ngates_per_scan = [max(s['ngates']) for s in scan_info]
ngates = max(ngates_per_scan)
nrays = sum(radials_per_scan)
nsweeps = len(scan_info)
# extract data which changes depending on scan,
# specifically time, azimuth, elevation and fixed angle data, as well as
# the moment data.
time_data = np.empty((nrays, ), dtype='float64')
azim_data = np.empty((nrays, ), dtype='float32')
elev_data = np.empty((nrays, ), dtype='float32')
fixed_agl_data = np.empty((nsweeps, ), dtype='float32')
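    # Moment arrays start out fully masked (arrays of ones masked at the
    # value 1); only gates actually written below become unmasked, so gates
    # not collected stay masked in the returned fields.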
fdata = {
'Reflectivity':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
'RadialVelocity':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
'SpectrumWidth':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
'DifferentialReflectivity':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
'CorrelationCoefficient':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
'DifferentialPhase':
np.ma.masked_equal(np.ones((nrays, ngates), dtype='float32'), 1),
}
ray_i = 0
for scan_index, scan_dic in enumerate(scan_info):
var_index = scan_dic['index'][0]
nradials = scan_dic['nradials'][0]
time_var = scan_dic['time_vars'][0]
azimuth_var = scan_dic['azimuth_vars'][0]
elevation_var = scan_dic['elevation_vars'][0]
end = ray_i + nradials
time_data[ray_i:end] = dvars[time_var][var_index][:nradials]
azim_data[ray_i:end] = dvars[azimuth_var][var_index][:nradials]
elev_data[ray_i:end] = dvars[elevation_var][var_index][:nradials]
fixed_agl_data[scan_index] = np.mean(
dvars[elevation_var][var_index][:nradials])
for i, moment in enumerate(scan_dic['moments']):
moment_index = scan_dic['index'][i]
m_ngates = scan_dic['ngates'][i]
m_nradials = scan_dic['nradials'][i]
if moment.endswith('_HI'):
fdata_name = moment[:-3]
else:
fdata_name = moment
sweep = _get_moment_data(dvars[moment], moment_index, m_ngates)
fdata[fdata_name][ray_i:ray_i + m_nradials, :m_ngates] = (
sweep[:m_nradials, :m_ngates])
ray_i += nradials
# time
time = filemetadata('time')
first_time_var = scan_info[0]['time_vars'][0]
time_start = datetime.strptime(dvars[first_time_var].units[-20:],
"%Y-%m-%dT%H:%M:%SZ")
time_start = time_start + timedelta(seconds=int(time_data[0]/1000))
time['data'] = time_data/1000. - int(time_data[0]/1000)
time['units'] = make_time_unit_str(time_start)
# range
_range = filemetadata('range')
max_ngates_scan_index = ngates_per_scan.index(ngates)
scan_dic = scan_info[max_ngates_scan_index]
max_ngates_moment_index = scan_dic['ngates'].index(ngates)
distance_var = scan_dic['distance_vars'][max_ngates_moment_index]
_range['data'] = dvars[distance_var][:]
_range['meters_to_center_of_first_gate'] = _range['data'][0]
_range['meters_between_gates'] = _range['data'][1] - _range['data'][0]
# fields
fields = {}
for moment_name, moment_data in fdata.items():
field_name = filemetadata.get_field_name(moment_name)
field_dic = filemetadata(field_name)
field_dic['_FillValue'] = get_fillvalue()
field_dic['data'] = moment_data
fields[field_name] = field_dic
# metadata
metadata = filemetadata('metadata')
metadata['original_container'] = 'NEXRAD Level II'
# scan_type
scan_type = 'ppi'
# latitude, longitude, altitude
latitude = filemetadata('latitude')
longitude = filemetadata('longitude')
altitude = filemetadata('altitude')
    # use the location in the NetCDF file if available
if ((hasattr(dataset, 'StationLatitude') and
hasattr(dataset, 'StationLongitude') and
hasattr(dataset, 'StationElevationInMeters'))):
lat = dataset.StationLatitude
lon = dataset.StationLongitude
alt = dataset.StationElevationInMeters
else:
        # if no location is in the file, look it up from the station name.
if station is None:
# determine the station name from the filename
# this will fail in some cases, in which case station
            # should be explicitly provided in the function call.
station = os.path.basename(filename)[:4].upper()
lat, lon, alt = get_nexrad_location(station)
latitude['data'] = np.array([lat], dtype='float64')
longitude['data'] = np.array([lon], dtype='float64')
altitude['data'] = np.array([alt], dtype='float64')
# sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index
# sweep_end_ray_index
sweep_number = filemetadata('sweep_number')
sweep_mode = filemetadata('sweep_mode')
sweep_start_ray_index = filemetadata('sweep_start_ray_index')
sweep_end_ray_index = filemetadata('sweep_end_ray_index')
sweep_number['data'] = np.arange(nsweeps, dtype='int32')
sweep_mode['data'] = np.array(
nsweeps * ['azimuth_surveillance'], dtype='S')
rays_per_scan = list(radials_per_scan)
sweep_end_ray_index['data'] = np.cumsum(rays_per_scan, dtype='int32') - 1
rays_per_scan.insert(0, 0)
sweep_start_ray_index['data'] = np.cumsum(rays_per_scan[:-1],
dtype='int32')
# azimuth, elevation, fixed_angle
azimuth = filemetadata('azimuth')
elevation = filemetadata('elevation')
fixed_angle = filemetadata('fixed_angle')
azimuth['data'] = azim_data
elevation['data'] = elev_data
fixed_angle['data'] = fixed_agl_data
dataset.close()
return Radar(
time, _range, fields, metadata, scan_type,
latitude, longitude, altitude,
sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,
sweep_end_ray_index,
azimuth, elevation,
instrument_parameters=None)
def _scan_info(dvars):
""" Return a list of information on the scans in the volume. """
    # determine the start time of each sweep
time_variables = [k for k in dvars.keys() if k.startswith('time')]
scan_start_times = set([])
for var in time_variables:
for time in dvars[var][:, 0]:
scan_start_times.add(time)
scan_start_times = list(scan_start_times)
scan_start_times.sort()
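    # Radial groups that share a start time belong to the same sweep, so the
    # sorted unique start times enumerate the sweeps in the volume.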
# build the scan_info list
time_var_to_moment = { # time variable to moment conversion
'timeR': 'Reflectivity',
'timeV': 'RadialVelocity',
'timeD': 'DifferentialReflectivity',
'timeC': 'CorrelationCoefficient',
'timeP': 'DifferentialPhase',
'timeR_HI': 'Reflectivity_HI',
'timeV_HI': 'RadialVelocity_HI',
'timeD_HI': 'DifferentialReflectivity_HI',
'timeC_HI': 'CorrelationCoefficient_HI',
'timeP_HI': 'DifferentialPhase_HI',
}
scan_info = [{'start_time': t, 'time_vars': [], 'moments': [],
'nradials': [], 'ngates': [], 'elevation_vars': [],
'azimuth_vars': [], 'distance_vars': [], 'index': []}
for t in scan_start_times]
for time_var in time_variables:
for time_var_i, time in enumerate(dvars[time_var][:, 0]):
scan_index = scan_start_times.index(time)
scan_dic = scan_info[scan_index]
moment = time_var_to_moment[time_var]
_populate_scan_dic(scan_dic, time_var, time_var_i, moment, dvars)
# corner cases, timeV is a dimension for RadialVelocity AND
# SpectrumWidth
if time_var == 'timeV':
_populate_scan_dic(scan_dic, time_var, time_var_i,
'SpectrumWidth', dvars)
if time_var == 'timeV_HI':
_populate_scan_dic(scan_dic, time_var, time_var_i,
'SpectrumWidth_HI', dvars)
return scan_info
def _populate_scan_dic(scan_dic, time_var, time_var_i, moment, dvars):
""" Populate a dictionary in the scan_info list. """
if time_var.endswith('HI'):
var_suffix = time_var[-4:]
else:
var_suffix = time_var[-1:]
ngates = dvars['numGates' + var_suffix][time_var_i]
nradials = dvars['numRadials' + var_suffix][time_var_i]
scan_dic['time_vars'].append(time_var)
scan_dic['index'].append(time_var_i)
scan_dic['moments'].append(moment)
scan_dic['elevation_vars'].append('elevation' + var_suffix)
scan_dic['azimuth_vars'].append('azimuth' + var_suffix)
scan_dic['distance_vars'].append('distance' + var_suffix)
scan_dic['ngates'].append(ngates)
scan_dic['nradials'].append(nradials)
return
def _get_moment_data(moment_var, index, ngates):
""" Retieve moment data for a given scan. """
# mask, scale and offset
moment_var.set_auto_maskandscale(False)
raw_moment_data = moment_var[index][:, :ngates]
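    # Classic netCDF has no unsigned integer types; the _Unsigned attribute
    # marks signed storage whose bytes must be reinterpreted (viewed) as
    # unsigned before scaling.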
if '_Unsigned' in moment_var.ncattrs():
if raw_moment_data.dtype == np.int8:
raw_moment_data = raw_moment_data.view('uint8')
if raw_moment_data.dtype == np.int16:
raw_moment_data = raw_moment_data.view('uint16')
raw_moment_data = np.ma.masked_less_equal(raw_moment_data, 1)
if 'scale_factor' in moment_var.ncattrs():
scale = moment_var.scale_factor
else:
scale = 1.0
if 'add_offset' in moment_var.ncattrs():
add_offset = moment_var.add_offset
else:
add_offset = 0.0
return raw_moment_data * scale + add_offset
| [
"[email protected]"
]
| |
96ecd307f055f3b68969e8d57c8d8d5d0247f15a | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/i5u.py | ddabd582980b6c53e4d7b8520dbd5e019a933eac | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
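# A tiny line-oriented interpreter: each input line must start with the
# token 'i5U'; the remaining tokens are printed when wrapped in quote tokens.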
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'i5U':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
685eea7db453f95d3b09c7e014f28eeee0ba4439 | a8123a86db99b9365b10ba76dd509d58caa7bc10 | /python/practice/start_again/2021/05182021/Day18.3_Darw_a_spriograph.py | 8814fcac3b147a6c0f49245cd49b4fbe21a8a16f | []
| no_license | smohapatra1/scripting | c0404081da8a10e92e7c7baa8b540acc16540e77 | 3628c9109204ad98231ae8ee92b6bfa6b27e93cd | refs/heads/master | 2023-08-22T20:49:50.156979 | 2023-08-22T20:43:03 | 2023-08-22T20:43:03 | 147,619,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #Draw a Spirograph
from turtle import Turtle, Screen
import turtle as t
import random
tim = t.Turtle()
t.colormode(255)
tim.speed("fastest")
#Random Color
def random_color():
r = random.randint(0,255)
g = random.randint(0,255)
b = random.randint(0,255)
color = (r, g, b)
return color
def draw_spirograph(size_of_gap):
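    # 360/size_of_gap circles, each rotated by size_of_gap degrees, sweep a
    # full revolution and produce the spirograph pattern.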
for _ in range(int(360/size_of_gap)):
tim.color(random_color())
tim.circle(100)
#current_heading = tim.heading()
        # Rotate the heading by the gap angle so the next circle is offset
tim.setheading(tim.heading() + size_of_gap )
draw_spirograph(10)
screen = t.Screen()
screen.exitonclick() | [
"[email protected]"
]
| |
aaf7c07df0a3a79d0aa83017aa4a3142f7911d98 | dec5c1416279178c23e81794789ed27e7e806faf | /profiles_api/models.py | 921345fafbd8fe1b8cb4afa2e7952b8838987617 | [
"MIT"
]
| permissive | amitarvindpatil/profiles-rest-api | 44c7555888e654a2a64362d21834f5a67aeab07a | c2092bdc13c77e2f1f3cd4940740f752cc2b180f | refs/heads/master | 2022-09-15T06:53:40.777169 | 2020-05-31T09:01:43 | 2020-05-31T09:01:43 | 260,257,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
# Create your models here.
class UserProfileManager(BaseUserManager):
"""Manager For UserProfile"""
def create_user(self,email,name,password=None):
"""Create New User Prfile"""
if not email:
raise ValueError('User Must have an email address')
email = self.normalize_email(email)
user = self.model(email=email,name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self,email,name,password):
"""create and save new superuser with given details"""
user = self.create_user(email,name,password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser,PermissionsMixin):
""" DataBase model for user in a system """
email = models.EmailField(max_length=255,unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
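    # Users log in with their email address instead of a username; 'name' is
    # also prompted for when creating users from the command line.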
    def get_full_name(self):
        """ Retrieve Full Name of User"""
        return self.name
    def get_short_name(self):
        """ Retrieve Short Name of user """
        return self.name
    def __str__(self):
        """ Retrieve String representation of user"""
        return self.email
class ProfileFeedItem(models.Model):
"""Profile Status Update"""
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.status_text
| [
"[email protected]"
]
| |
792b61efe2adbe81bfa8e2d488a1dbf4bd884444 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-das/huaweicloudsdkdas/v3/model/export_top_sql_templates_details_response.py | 1286a8c3db19b4ec0f54ea95567708a585fd8a62 | [
"Apache-2.0"
]
| permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,619 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ExportTopSqlTemplatesDetailsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'top_sql_templates': 'list[TopSqlTemplate]',
'total_count': 'int'
}
attribute_map = {
'top_sql_templates': 'top_sql_templates',
'total_count': 'total_count'
}
def __init__(self, top_sql_templates=None, total_count=None):
"""ExportTopSqlTemplatesDetailsResponse
The model defined in huaweicloud sdk
        :param top_sql_templates: List of SQL templates.
:type top_sql_templates: list[:class:`huaweicloudsdkdas.v3.TopSqlTemplate`]
        :param total_count: Total number of SQL templates.
:type total_count: int
"""
super(ExportTopSqlTemplatesDetailsResponse, self).__init__()
self._top_sql_templates = None
self._total_count = None
self.discriminator = None
if top_sql_templates is not None:
self.top_sql_templates = top_sql_templates
if total_count is not None:
self.total_count = total_count
@property
def top_sql_templates(self):
"""Gets the top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.
        List of SQL templates.
:return: The top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.
:rtype: list[:class:`huaweicloudsdkdas.v3.TopSqlTemplate`]
"""
return self._top_sql_templates
@top_sql_templates.setter
def top_sql_templates(self, top_sql_templates):
"""Sets the top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.
        List of SQL templates.
:param top_sql_templates: The top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.
:type top_sql_templates: list[:class:`huaweicloudsdkdas.v3.TopSqlTemplate`]
"""
self._top_sql_templates = top_sql_templates
@property
def total_count(self):
"""Gets the total_count of this ExportTopSqlTemplatesDetailsResponse.
        Total number of SQL templates.
:return: The total_count of this ExportTopSqlTemplatesDetailsResponse.
:rtype: int
"""
return self._total_count
@total_count.setter
def total_count(self, total_count):
"""Sets the total_count of this ExportTopSqlTemplatesDetailsResponse.
        Total number of SQL templates.
:param total_count: The total_count of this ExportTopSqlTemplatesDetailsResponse.
:type total_count: int
"""
self._total_count = total_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExportTopSqlTemplatesDetailsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
d5af0b0faa18fdfc639b31b41dfbdb93a890659b | 085a6c4ac532bd4f46980f340890659b0cd03824 | /two_sigma_problems/problem_9.py | f2c2e10d39d7f5185f1a978013c9b743178ba7e5 | [
"MIT"
]
| permissive | thinhnguyennt7/Daily-Coding-Problem | c66aa51422dc79ee912fbd042fefb2b2cf37a94f | 16d42e33af1de08aac1d888be518e398b4674bc8 | refs/heads/master | 2021-04-04T02:10:52.800504 | 2020-03-18T17:29:44 | 2020-03-18T17:30:01 | 248,416,248 | 1 | 1 | MIT | 2020-03-19T05:13:37 | 2020-03-19T05:13:36 | null | UTF-8 | Python | false | false | 223 | py | """This problem was asked by Two Sigma.
Using a function rand5() that returns an integer from 1 to 5 (inclusive) with
uniform probability, implement a function rand7() that returns an integer
from 1 to 7 (inclusive).
""" | [
"[email protected]"
]
| |
d994f4b20a182b9c9b4b26dea314bed2f83d5097 | da52951c32b37aa75765b718707ce08c0a6208d1 | /ReinforcementLearning/PolicyGradient/PPO/tf2/main.py | b3a0d38e4986d6a9da18c87322ee6faa32643f1d | []
| no_license | philtabor/Youtube-Code-Repository | 08c1a0210f80976df50b01a91f1936a7d5c7b302 | eb3aa9733158a4f7c4ba1fefaa812b27ffd889b6 | refs/heads/master | 2023-08-08T05:28:11.712470 | 2023-03-27T16:07:29 | 2023-03-27T16:07:29 | 144,081,173 | 811 | 568 | null | 2023-07-24T20:00:37 | 2018-08-09T00:21:29 | Python | UTF-8 | Python | false | false | 1,575 | py | import gym
import numpy as np
from agent import Agent
from utils import plot_learning_curve
if __name__ == '__main__':
env = gym.make('CartPole-v0')
N = 20
batch_size = 5
n_epochs = 4
alpha = 0.0003
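    # Hyperparameters: the agent runs a learning update every N environment
    # steps, iterating n_epochs times over minibatches of size batch_size,
    # with alpha as the learning rate passed to the Agent.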
agent = Agent(n_actions=env.action_space.n, batch_size=batch_size,
alpha=alpha, n_epochs=n_epochs,
input_dims=env.observation_space.shape)
n_games = 300
figure_file = 'plots/cartpole.png'
best_score = env.reward_range[0]
score_history = []
learn_iters = 0
avg_score = 0
n_steps = 0
for i in range(n_games):
observation = env.reset()
done = False
score = 0
while not done:
action, prob, val = agent.choose_action(observation)
observation_, reward, done, info = env.step(action)
n_steps += 1
score += reward
agent.store_transition(observation, action,
prob, val, reward, done)
if n_steps % N == 0:
agent.learn()
learn_iters += 1
observation = observation_
score_history.append(score)
avg_score = np.mean(score_history[-100:])
if avg_score > best_score:
best_score = avg_score
agent.save_models()
print('episode', i, 'score %.1f' % score, 'avg score %.1f' % avg_score,
'time_steps', n_steps, 'learning_steps', learn_iters)
x = [i+1 for i in range(len(score_history))]
plot_learning_curve(x, score_history, figure_file)
| [
"[email protected]"
]
| |
954328033e830e24754e6bdfd16070c83a6e687a | f11be78c01892f7c9dc44178ceeaacc0283f582f | /jsonschema_marshmallow/codegen/__init__.py | e031ea4e64bb66032fcc999588b2c073ec93bcfd | [
"MIT"
]
| permissive | gijzelaerr/jsonschema-marshmallow | c6c8e5097f57e609832df30f02513b5d3e4737d0 | e73e523cd32f2235525a9c61e731c741268e4164 | refs/heads/main | 2023-06-24T00:43:45.410868 | 2021-07-20T11:57:54 | 2021-07-20T11:57:54 | 387,666,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | from jsonschema_marshmallow.codegen.cli import codegen
| [
"[email protected]"
]
| |
351cca2054fb8641c34017b3bc190680a699b824 | 4b44a299bafbd4ca408ce1c89c9fe4a449632783 | /python3/10_Modules/Parallel_Processing/a_get_cpu_count.py | 0a0464db866ec3a6c8aa2be9e3d728d2be413a38 | []
| no_license | umunusb1/PythonMaterial | ecd33d32b2de664eaaae5192be7c3f6d6bef1d67 | 1e0785c55ccb8f5b9df1978e1773365a29479ce0 | refs/heads/master | 2023-01-23T23:39:35.797800 | 2020-12-02T19:29:00 | 2020-12-02T19:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | import multiprocessing as mp
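# cpu_count() reports the number of logical CPUs visible to the OS
# (hyper-threads included), which may exceed the physical core count.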
result = '''There are {} processors in this \
computer'''.format(mp.cpu_count())
print(result)
print(dir(mp))
print(mp.current_process())
| [
"[email protected]"
]
| |
08aea1c4cf86277a51c4d590dbf843a9e116acea | 3ccd609f68016aad24829b8dd3cdbb535fb0ff6d | /python/bpy/types/FILEBROWSER_UL_dir.py | d242f98ab9b1a289208ea3db9e875d5ed1fb5d58 | []
| no_license | katharostech/blender_externs | 79b2eed064fd927e3555aced3e2eb8a45840508e | fdf7f019a460de0fe7e62375c1c94f7ab0e9f68d | refs/heads/master | 2020-04-11T14:00:29.393478 | 2018-10-01T00:40:51 | 2018-10-01T00:40:51 | 161,838,212 | 1 | 1 | null | 2018-12-14T20:41:32 | 2018-12-14T20:41:32 | null | UTF-8 | Python | false | false | 140 | py | class FILEBROWSER_UL_dir:
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
pass
| [
"[email protected]"
]
| |
cb3c52836c92de725f4b0b5bc037f530ce63d13a | 656b431bf7ac23d5593ddf4fb69c29c251d744cb | /zen/layer/base/node.py | 91c5f8a19f460b42f4d3cf942d8f853c60c39140 | []
| no_license | knighton/zen-0.14 | 2c8e4f0aa2e6c862d4022eb346a619268250273e | 7936e43a115d00888bf6c523525bf9f3e7a49256 | refs/heads/master | 2021-01-21T05:33:01.494392 | 2018-05-17T15:01:30 | 2018-05-17T15:01:30 | 101,927,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,619 | py | from copy import deepcopy
from ..arch.vee import Vee
class Node(Vee):
"""
A node of a neural network.
They consist of input and non-input nodes (Inputs and LayerNodes).
"""
def __init__(self):
self._out_shape = None
self._out_dtype = None
self._out_data = None
self._out_nodes = []
def out_shape(self):
"""
-> shape (must be built)
"""
return self._out_shape
def out_dtype(self):
"""
-> dtype (must be built)
"""
return self._out_dtype
def out_data(self):
"""
-> data (must be forward()'ed)
"""
return self._out_data
def add_out_node(self, node):
"""
node ->
"""
self._out_nodes.append(node)
def out_nodes(self):
"""
-> node
"""
return self._out_nodes
def try_to_build(self):
raise NotImplementedError
def is_built(self):
raise NotImplementedError
def params(self):
raise NotImplementedError
class InteriorNode(Node):
"""
A non-input node (the normal case).
"""
def __init__(self):
super().__init__()
self._in_nodes = None
self._num_ready_in_nodes = 0
def _gather_shapes_dtypes_for_build(self):
assert self._in_nodes, 'Tried to build an internal node with no inputs.'
in_shapes = []
in_dtypes = []
for node in self._in_nodes:
shape = node.out_shape()
if shape is None:
return False, None, None
in_shapes.append(shape)
dtype = node.out_dtype()
if dtype is None:
return False, None, None
in_dtypes.append(dtype)
return True, in_shapes, in_dtypes
def in_nodes(self):
return self._in_nodes
def to_spec_or_specs(self):
raise NotImplementedError
class LayerNode(InteriorNode):
"""
Neural network node wrapping a single layer.
"""
def __init__(self, spec, in_nodes=None):
super().__init__()
if in_nodes:
for node in in_nodes:
node.add_out_node(self)
self._in_nodes = in_nodes
self._spec = spec
self._layer = None
def __call__(self, *in_nodes):
"""
Return a copy of ourself that is connected to the given feed nodes.
This is how graphs are constructed.
"""
assert not self._in_nodes
return LayerNode(deepcopy(self._spec), in_nodes)
def try_to_build(self):
"""
Try to construct the internal layer of a node given the shapes and
dtypes of its input nodes. Tries to build its output nodes.
Returns true if this node could be built (output nodes will fail if not
all inputs are built yet during graph building).
"""
can_build, in_shapes, in_dtypes = self._gather_shapes_dtypes_for_build()
if not can_build:
return False
self._layer, self._out_shape, self._out_dtype = \
self._spec.build_multi_input(in_shapes, in_dtypes)
for node in self._out_nodes:
node.try_to_build()
return True
def is_built(self):
return self._layer is not None
def params(self):
"""
        Collect the node's trainable parameters for the optimizer.  The node
        must already have been built.
"""
assert self._layer, \
'Not all input nodes have been built (the graph is missing an ' + \
'input or inputs).'
return self._layer.params()
def in_node_is_ready(self, is_training):
"""
Receive notification that one of our input nodes has data. If they all
do, perform a forward pass and notify the nodes that we feed into.
"""
assert self._in_nodes, \
'Called in_node_is_ready() on a node with no inputs.'
assert self._layer, \
'Not all input nodes have been built (the graph is missing an ' + \
'input or inputs).'
self._num_ready_in_nodes += 1
if self._num_ready_in_nodes < len(self._in_nodes):
return
xx = []
for node in self._in_nodes:
x = node.out_data()
assert x is not None
xx.append(x)
self._out_data = self._layer.forward_multi_input(xx, is_training)
for node in self._out_nodes:
node.in_node_is_ready(is_training)
self._num_ready_in_nodes = 0
def to_spec_or_specs(self):
return self._spec
| [
"[email protected]"
]
| |
b53d51e90634a68addf27b8fb44bc961f55f096a | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/web/v20190801/web_app_diagnostic_logs_configuration.py | b955573f291c7fb008c4a782dcab13d6ce640e51 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,078 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WebAppDiagnosticLogsConfigurationArgs', 'WebAppDiagnosticLogsConfiguration']
@pulumi.input_type
class WebAppDiagnosticLogsConfigurationArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
application_logs: Optional[pulumi.Input['ApplicationLogsConfigArgs']] = None,
detailed_error_messages: Optional[pulumi.Input['EnabledConfigArgs']] = None,
failed_requests_tracing: Optional[pulumi.Input['EnabledConfigArgs']] = None,
http_logs: Optional[pulumi.Input['HttpLogsConfigArgs']] = None,
kind: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a WebAppDiagnosticLogsConfiguration resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input['ApplicationLogsConfigArgs'] application_logs: Application logs configuration.
:param pulumi.Input['EnabledConfigArgs'] detailed_error_messages: Detailed error messages configuration.
:param pulumi.Input['EnabledConfigArgs'] failed_requests_tracing: Failed requests tracing configuration.
:param pulumi.Input['HttpLogsConfigArgs'] http_logs: HTTP logs configuration.
:param pulumi.Input[str] kind: Kind of resource.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if application_logs is not None:
pulumi.set(__self__, "application_logs", application_logs)
if detailed_error_messages is not None:
pulumi.set(__self__, "detailed_error_messages", detailed_error_messages)
if failed_requests_tracing is not None:
pulumi.set(__self__, "failed_requests_tracing", failed_requests_tracing)
if http_logs is not None:
pulumi.set(__self__, "http_logs", http_logs)
if kind is not None:
pulumi.set(__self__, "kind", kind)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the app.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="applicationLogs")
def application_logs(self) -> Optional[pulumi.Input['ApplicationLogsConfigArgs']]:
"""
Application logs configuration.
"""
return pulumi.get(self, "application_logs")
@application_logs.setter
def application_logs(self, value: Optional[pulumi.Input['ApplicationLogsConfigArgs']]):
pulumi.set(self, "application_logs", value)
@property
@pulumi.getter(name="detailedErrorMessages")
def detailed_error_messages(self) -> Optional[pulumi.Input['EnabledConfigArgs']]:
"""
Detailed error messages configuration.
"""
return pulumi.get(self, "detailed_error_messages")
@detailed_error_messages.setter
def detailed_error_messages(self, value: Optional[pulumi.Input['EnabledConfigArgs']]):
pulumi.set(self, "detailed_error_messages", value)
@property
@pulumi.getter(name="failedRequestsTracing")
def failed_requests_tracing(self) -> Optional[pulumi.Input['EnabledConfigArgs']]:
"""
Failed requests tracing configuration.
"""
return pulumi.get(self, "failed_requests_tracing")
@failed_requests_tracing.setter
def failed_requests_tracing(self, value: Optional[pulumi.Input['EnabledConfigArgs']]):
pulumi.set(self, "failed_requests_tracing", value)
@property
@pulumi.getter(name="httpLogs")
def http_logs(self) -> Optional[pulumi.Input['HttpLogsConfigArgs']]:
"""
HTTP logs configuration.
"""
return pulumi.get(self, "http_logs")
@http_logs.setter
def http_logs(self, value: Optional[pulumi.Input['HttpLogsConfigArgs']]):
pulumi.set(self, "http_logs", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
class WebAppDiagnosticLogsConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_logs: Optional[pulumi.Input[pulumi.InputType['ApplicationLogsConfigArgs']]] = None,
detailed_error_messages: Optional[pulumi.Input[pulumi.InputType['EnabledConfigArgs']]] = None,
failed_requests_tracing: Optional[pulumi.Input[pulumi.InputType['EnabledConfigArgs']]] = None,
http_logs: Optional[pulumi.Input[pulumi.InputType['HttpLogsConfigArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Configuration of App Service site logs.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ApplicationLogsConfigArgs']] application_logs: Application logs configuration.
:param pulumi.Input[pulumi.InputType['EnabledConfigArgs']] detailed_error_messages: Detailed error messages configuration.
:param pulumi.Input[pulumi.InputType['EnabledConfigArgs']] failed_requests_tracing: Failed requests tracing configuration.
:param pulumi.Input[pulumi.InputType['HttpLogsConfigArgs']] http_logs: HTTP logs configuration.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WebAppDiagnosticLogsConfigurationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Configuration of App Service site logs.
:param str resource_name: The name of the resource.
:param WebAppDiagnosticLogsConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WebAppDiagnosticLogsConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_logs: Optional[pulumi.Input[pulumi.InputType['ApplicationLogsConfigArgs']]] = None,
detailed_error_messages: Optional[pulumi.Input[pulumi.InputType['EnabledConfigArgs']]] = None,
failed_requests_tracing: Optional[pulumi.Input[pulumi.InputType['EnabledConfigArgs']]] = None,
http_logs: Optional[pulumi.Input[pulumi.InputType['HttpLogsConfigArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WebAppDiagnosticLogsConfigurationArgs.__new__(WebAppDiagnosticLogsConfigurationArgs)
__props__.__dict__["application_logs"] = application_logs
__props__.__dict__["detailed_error_messages"] = detailed_error_messages
__props__.__dict__["failed_requests_tracing"] = failed_requests_tracing
__props__.__dict__["http_logs"] = http_logs
__props__.__dict__["kind"] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20150801:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20201201:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20201201:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20210101:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20210101:WebAppDiagnosticLogsConfiguration")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppDiagnosticLogsConfiguration, __self__).__init__(
'azure-native:web/v20190801:WebAppDiagnosticLogsConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppDiagnosticLogsConfiguration':
"""
Get an existing WebAppDiagnosticLogsConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = WebAppDiagnosticLogsConfigurationArgs.__new__(WebAppDiagnosticLogsConfigurationArgs)
__props__.__dict__["application_logs"] = None
__props__.__dict__["detailed_error_messages"] = None
__props__.__dict__["failed_requests_tracing"] = None
__props__.__dict__["http_logs"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
return WebAppDiagnosticLogsConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="applicationLogs")
def application_logs(self) -> pulumi.Output[Optional['outputs.ApplicationLogsConfigResponse']]:
"""
Application logs configuration.
"""
return pulumi.get(self, "application_logs")
@property
@pulumi.getter(name="detailedErrorMessages")
def detailed_error_messages(self) -> pulumi.Output[Optional['outputs.EnabledConfigResponse']]:
"""
Detailed error messages configuration.
"""
return pulumi.get(self, "detailed_error_messages")
@property
@pulumi.getter(name="failedRequestsTracing")
def failed_requests_tracing(self) -> pulumi.Output[Optional['outputs.EnabledConfigResponse']]:
"""
Failed requests tracing configuration.
"""
return pulumi.get(self, "failed_requests_tracing")
@property
@pulumi.getter(name="httpLogs")
def http_logs(self) -> pulumi.Output[Optional['outputs.HttpLogsConfigResponse']]:
"""
HTTP logs configuration.
"""
return pulumi.get(self, "http_logs")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
]
| |
3e6a874a64e7d69cc870d2a47199ffe654c59f9b | 904e75e2ceff81c18a432fe1b951b683e859cbed | /views/console/voucher.py | 809b9bed03144310d4c914e793626e5bbf9acd22 | []
| no_license | PUYUP/plutoborn | a42c65fa360de41a1236af00b5718948dc1b9940 | e6b47b7f183fcff60fa803329e11c2e87de560ef | refs/heads/master | 2022-12-05T17:06:10.049472 | 2020-08-19T09:12:45 | 2020-08-19T09:12:45 | 254,116,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | from django.conf import settings
from django.views import View
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models.functions import Coalesce
from django.db.models import Q, F, Sum, Count, Case, When, Value, Subquery, OuterRef, IntegerField
from utils.pagination import Pagination
from utils.generals import get_model
Voucher = get_model('market', 'Voucher')
@method_decorator(login_required, name='dispatch')
class VoucherListView(View):
template_name = 'console/voucher/list.html'
context = dict()
def get(self, request):
vouchers = Voucher.objects \
.annotate(
total_redeem=Coalesce(Count('voucher_redeems'), 0)
).order_by('-total_redeem')
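        # Roughly the SQL this produces: SELECT voucher.*,
        # COALESCE(COUNT(redeem.id), 0) AS total_redeem FROM voucher
        # LEFT JOIN redeem ... GROUP BY voucher.id
        # ORDER BY total_redeem DESC (table names are illustrative).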
# paginator
page_num = int(self.request.GET.get('p', 0))
paginator = Paginator(vouchers, settings.PAGINATION_PER_PAGE)
try:
vouchers_pagination = paginator.page(page_num + 1)
except PageNotAnInteger:
vouchers_pagination = paginator.page(1)
except EmptyPage:
vouchers_pagination = paginator.page(paginator.num_pages)
pagination = Pagination(request, vouchers, vouchers_pagination, page_num, paginator)
self.context['vouchers'] = vouchers
self.context['vouchers_total'] = vouchers.count()
self.context['vouchers_pagination'] = vouchers_pagination
self.context['pagination'] = pagination
return render(request, self.template_name, self.context)
| [
"[email protected]"
]
| |
1d6c708e713687a606bcec30490c9970a32b2031 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/94/usersdata/203/55258/submittedfiles/mediaLista.py | 9f8eabcc98947ef4aefb6758c5a6a0a6eab90482 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | # -*- coding: utf-8 -*-
n = int(input('list size: '))
l = []
soma = 0
for i in range(1, n+1, 1):
    l.append(float(input('list element: ')))
for i in range(0, n, 1):
    soma = soma + l[i]
media = soma / n
print('%.2f' % l[0])
print('%.2f' % l[n-1])
print('%.2f' % media) | [
"[email protected]"
]
| |
f16c623f2284f4fcc342ceffbc101ff396686148 | 59b3dce3c770e70b2406cc1dd623a2b1f68b8394 | /python_1/lessons/calculations.py | 9fc441721ed85e47fac26d241c4db2cfd87301c8 | []
| no_license | patrickbeeson/python-classes | 04ed7b54fc4e1152a191eeb35d42adc214b08e39 | b5041e71badd1ca2c013828e3b2910fb02e9728f | refs/heads/master | 2020-05-20T07:17:36.693960 | 2015-01-23T14:41:46 | 2015-01-23T14:41:46 | 29,736,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | print("""---------------
Some Calculations
---------------""")
print(314159e-5)            # scientific notation: 3.14159
print(10**6, 1j**2)         # exponentiation; 1j is the imaginary unit: (-1+0j)
print(3 + 2 * 4, 1 / 3)     # * binds tighter than +; / is true division
print("-" * 20)             # a string repeated 20 times
print((3.14159 * 16) ** 2)  # parentheses force the multiplication first
print(3.14159 * 16 ** 2)    # ** binds tighter than *, so this is 3.14159 * 256
print(20 * "-")             # repetition also works with the operands swapped
print("------------------\nEnd of Calculations\n--------------") | [
"[email protected]"
]
| |
982f09e06da9b91e11bebb4ecf8d383bc704f702 | a5cf1d2fc478d490df05eb198d1a0fb77fcb0bc9 | /flask_oauthlib/contrib/client/__init__.py | 4b777b4430dcbd3daf2326ea063c4e02dca552ae | [
"BSD-3-Clause"
]
| permissive | ageis/flask-oauthlib | 516df1a661441cc46c26ab5e9b07fa328066a5f4 | 9414e002505354e8b5b3aa5f54a0889c836aa732 | refs/heads/master | 2021-01-05T05:11:59.090723 | 2020-04-19T07:20:23 | 2020-04-19T07:20:23 | 240,891,932 | 1 | 0 | BSD-3-Clause | 2020-04-19T07:20:24 | 2020-02-16T12:58:27 | null | UTF-8 | Python | false | false | 3,277 | py | import copy
from flask import current_app
from werkzeug.local import LocalProxy
from .application import OAuth1Application, OAuth2Application
__all__ = ['OAuth', 'OAuth1Application', 'OAuth2Application']
class OAuth(object):
"""The extension to integrate OAuth 1.0a/2.0 to Flask applications.
oauth = OAuth(app)
or::
oauth = OAuth()
oauth.init_app(app)
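    A remote app can then be registered on it; a sketch (the URL shown is
    illustrative, not something this module defines)::
        twitter = oauth.remote_app(
            'twitter',
            version='1',
            request_token_url='https://api.twitter.com/oauth/request_token')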
"""
state_key = 'oauthlib.contrib.client'
def __init__(self, app=None):
self.remote_apps = {}
if app is not None:
self.init_app(app)
def init_app(self, app):
app.extensions = getattr(app, 'extensions', {})
app.extensions[self.state_key] = OAuthState()
def add_remote_app(self, remote_app, name=None, **kwargs):
"""Adds remote application and applies custom attributes on it.
If the application instance's name is different from the argument
provided name, or the keyword arguments is not empty, then the
application instance will not be modified but be copied as a
prototype.
:param remote_app: the remote application instance.
:type remote_app: the subclasses of :class:`BaseApplication`
        :param kwargs: the overriding attributes for the application instance.
"""
if name is None:
name = remote_app.name
if name != remote_app.name or kwargs:
remote_app = copy.copy(remote_app)
remote_app.name = name
vars(remote_app).update(kwargs)
if not hasattr(remote_app, 'clients'):
remote_app.clients = cached_clients
self.remote_apps[name] = remote_app
return remote_app
def remote_app(self, name, version=None, **kwargs):
"""Creates and adds new remote application.
:param name: the remote application's name.
:param version: '1' or '2', the version code of OAuth protocol.
:param kwargs: the attributes of remote application.
"""
if version is None:
if 'request_token_url' in kwargs:
version = '1'
else:
version = '2'
if version == '1':
remote_app = OAuth1Application(name, clients=cached_clients)
elif version == '2':
remote_app = OAuth2Application(name, clients=cached_clients)
else:
            raise ValueError('unknown version %r' % version)
return self.add_remote_app(remote_app, **kwargs)
def __getitem__(self, name):
return self.remote_apps[name]
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
app = self.remote_apps.get(key)
if app:
return app
raise AttributeError('No such app: %s' % key)
class OAuthState(object):
def __init__(self):
self.cached_clients = {}
def get_cached_clients():
"""Gets the cached clients dictionary in current context."""
if OAuth.state_key not in current_app.extensions:
raise RuntimeError('%r is not initialized.' % current_app)
state = current_app.extensions[OAuth.state_key]
return state.cached_clients
cached_clients = LocalProxy(get_cached_clients)
| [
"[email protected]"
]
| |
8113e61753b63a1adf848618b5af0bff3890f601 | eecbf2f570b46e5a890847288144f2df8097d988 | /awlsim/core/instructions/insn_zr.py | f279e701766a8124bf9e436cb8dc38b157639018 | []
| no_license | ITI/PLCNet | 8ebb34dc57862abfc3a635fb3cee197601cade71 | 7f2c1a9d3a8a0ca8d8ab9a8027c65bc0ff0db64c | refs/heads/master | 2020-06-10T00:19:14.916423 | 2016-10-01T06:53:38 | 2016-10-01T06:53:38 | 193,533,866 | 2 | 0 | null | 2019-06-24T15:42:51 | 2019-06-24T15:42:50 | null | UTF-8 | Python | false | false | 1,490 | py | # -*- coding: utf-8 -*-
#
# AWL simulator - instructions
#
# Copyright 2012-2014 Michael Buesch <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
#from awlsim.core.instructions.main cimport * #@cy
class AwlInsn_ZR(AwlInsn): #+cdef
__slots__ = ()
def __init__(self, cpu, rawInsn):
AwlInsn.__init__(self, cpu, AwlInsn.TYPE_ZR, rawInsn)
self.assertOpCount(1)
def staticSanityChecks(self):
self.ops[0].assertType(AwlOperator.MEM_Z)
def run(self):
#@cy cdef S7StatusWord s
s = self.cpu.statusWord
self.cpu.getCounter(self.ops[0].resolve(True).value.byteOffset).run_ZR(s.VKE)
s.OR, s.NER = 0, 0
| [
"[email protected]"
]
| |
f45bd2b725edf19a4c9f528650707dc5900d8683 | 83959c80527cd727042bc3467b6e537fca8bef1a | /kbengine_stone_assets/scripts/common/tornado/platform/windows.py | b1d701de4fcc5ac181dde0a8d77764622db74e77 | []
| no_license | shanlihou/ttxiaoyouxi | 696697807cbf9d1fe41fb10fe64f8f29d5bd8864 | bca20863c4e1b5d6f3f835fee17c700292918a6c | refs/heads/master | 2020-04-26T13:12:13.153761 | 2019-03-03T12:36:04 | 2019-03-03T12:36:04 | 173,572,763 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | # NOTE: win32 support is currently experimental, and not recommended
# for production use.
#from __future__ import absolute_import, division, print_function, with_statement
#import ctypes # type: ignore
#import ctypes.wintypes # type: ignore
# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
#SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
#SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
#SetHandleInformation.restype = ctypes.wintypes.BOOL
#HANDLE_FLAG_INHERIT = 0x00000001
def set_close_exec(fd):
# success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0)
# if not success:
# raise ctypes.WinError()
pass | [
"[email protected]"
]
| |
d50db3f0895bfcfe6b6a9eb5f62c99302983871e | a29c6e83ae4f9010941d15c8fd4cfc67680bb054 | /pandas/pandas_sample.py | 11ec8f3f3066928a773948c21c7d305883c6a906 | []
| no_license | ym0179/bit_seoul | f1ff5faf4ae20fbc8c0e2ed10a005f8bd4b2c2b8 | 14d1fb2752312790c39898fc53a45c1cf427a4d1 | refs/heads/master | 2023-02-27T19:52:23.577540 | 2021-02-08T00:30:16 | 2021-02-08T00:30:16 | 311,265,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,632 | py | #Day9
#2020-11-19
import pandas as pd
import numpy as np
from numpy.random import randn
np.random.seed(100)
data = randn(5,4) #5행 4열
print(data)
df = pd.DataFrame(data, index='A B C D E'.split(),
columns='가 나 다 라'.split())
print(df)
data2 = [[1,2,3,4,], [5,6,7,8], [9,10,11,12],
[13,14,15,16], [17,18,19,20]] #list
df2 = pd.DataFrame(data2, index=['A','B','C','D','E'],
columns=['가','나','다','라'])
print(df2)
# 가 나 다 라
# A 1 2 3 4
# B 5 6 7 8
# C 9 10 11 12
# D 13 14 15 16
# E 17 18 19 20
df3 = pd.DataFrame(np.array([[1,2,3],[4,5,6]]))
print(df3)
print("df2['나'] :\n", df2['나']) #2,6,10,14,18
print("df2[['나','라']] :\n", df2[['나','라']]) #2,6,10,14,18
                                        #4,8,12,16,20
# print("df2[0] : ", df2[0]) # error: columns must be selected by name, not position
# print("df2.loc['나'] : \n", df2.loc['나']) # error: loc indexes by row label (use it with a row)
print("df2.iloc[:,2] : \n", df2.iloc[:, 2]) #3,7,11,15,19
# print("df2[:,2] : \n", df2[:, 2]) # error
# rows
print("df2.loc['A'] : \n", df2.loc['A']) # prints row A
print("df2.loc[['A','C']] : \n", df2.loc[['A','C']]) # prints rows A and C
print("df2.iloc[0] : \n", df2.iloc[0]) # prints row A
print("df2.iloc[[0,2]] : \n", df2.iloc[[0,2]]) # prints rows A and C
# rows and columns
print("df2.loc[['A','B'], ['나','다']] : \n", df2.loc[['A','B'], ['나','다']])
# check a single value
print("df2.loc['E','다'] : \n",df2.loc['E','다']) #19
print("df2.iloc[4,2] : \n",df2.iloc[4,2]) #19
print("df2.iloc[4][2] : \n",df2.iloc[4][2]) #19
| [
"[email protected]"
]
| |
ddc3a481f63796292ef24894d2303cc7aa6bb7c0 | de996400d54cc2073671e2bab976534e8263bacb | /molecule-design/moldesign/sample/__init__.py | 6a4baed4f892c46a1a9b987c82b38d3e5e6e5344 | []
| no_license | tskluzac/colmena | c865e233d0f4cea20f2d3e14ef73425aee5bf78f | 042ce37e5acc8a240845b8cce11effe832c1c913 | refs/heads/master | 2022-11-28T17:52:19.819967 | 2020-08-06T19:41:49 | 2020-08-06T19:41:49 | 285,658,744 | 0 | 0 | null | 2020-08-06T19:52:04 | 2020-08-06T19:52:03 | null | UTF-8 | Python | false | false | 42 | py | """Functions for sampling new molecules""" | [
"[email protected]"
]
| |
bf56ed2037a8d92ae1cd83b1ca14a15536c85df2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2385/60677/251112.py | a199b651fd4db5634ac382da13966aee61e6f9bc | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | times=int(input())
for i in range(times):
n=int(input())
k=1
answer=1
while n-2*k+2>0:
answer+=n-2*k+2
k+=1
if n==4:
answer=8
print((answer)%(10**9+7)) | [
"[email protected]"
]
| |
a4c56e977fcf8aa0aa8b1d5700eac711f0e99616 | e1ffebca6a0f185663c779462e3ca27866f557b8 | /GROUP_project/project/api/migrations/0002_auto_20191204_0429.py | 9dcf91bc2afb5abac10e0bf7a31e18ff8156c88e | []
| no_license | asselyer/Backend2019 | d8d85d7850261880fe4aeef9092b0a8c7b1b6767 | ec5931e2bd22ec62e68592a4199c00184f4dacc3 | refs/heads/master | 2020-07-24T13:38:21.246351 | 2019-12-04T03:16:27 | 2019-12-04T03:16:27 | 207,944,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,680 | py | # Generated by Django 2.2.3 on 2019-12-03 22:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='postfile',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='postcomment',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='favoritepost',
name='users',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='blog',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='blogs', to='api.BlogCategory'),
),
migrations.AddField(
model_name='blog',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_blogs', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='postfile',
name='posts',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='post_documents', to='api.Post'),
),
migrations.AddField(
model_name='postcomment',
name='posts',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='post_comments', to='api.Post'),
),
migrations.AddField(
model_name='post',
name='blog',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='posts', to='api.Blog'),
),
migrations.AddField(
model_name='post',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_posts', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='favoritepost',
name='posts',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='api.Post'),
),
]
| [
"[email protected]"
]
| |
af3d099f71d75651e8da95d4362fc5e824ea06bf | 1886065d10342822b10063cd908a690fccf03d8b | /appengine/findit/waterfall/analyze_build_failure_pipeline.py | df1014f2a3c07efd6b1e1306d39bff06edb9fa1f | [
"BSD-3-Clause"
]
| permissive | TrellixVulnTeam/chromium-infra_A6Y5 | 26af0dee12f89595ebc6a040210c9f62d8ded763 | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | refs/heads/master | 2023-03-16T15:33:31.015840 | 2017-01-31T19:55:59 | 2017-01-31T20:06:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,205 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common import appengine_util
from common.pipeline_wrapper import BasePipeline
from common.pipeline_wrapper import pipeline
from libs import time_util
from model import analysis_status
from model.wf_analysis import WfAnalysis
from waterfall.detect_first_failure_pipeline import DetectFirstFailurePipeline
from waterfall.extract_deps_info_pipeline import ExtractDEPSInfoPipeline
from waterfall.extract_signal_pipeline import ExtractSignalPipeline
from waterfall.flake.trigger_flake_analyses_pipeline import (
TriggerFlakeAnalysesPipeline)
from waterfall.identify_culprit_pipeline import IdentifyCulpritPipeline
from waterfall.pull_changelog_pipeline import PullChangelogPipeline
from waterfall.start_try_job_on_demand_pipeline import (
StartTryJobOnDemandPipeline)
from waterfall.trigger_swarming_tasks_pipeline import (
TriggerSwarmingTasksPipeline)
class AnalyzeBuildFailurePipeline(BasePipeline):
def __init__(self, master_name, builder_name, build_number, build_completed,
force_rerun_try_job):
super(AnalyzeBuildFailurePipeline, self).__init__(
master_name, builder_name, build_number, build_completed,
force_rerun_try_job)
self.master_name = master_name
self.builder_name = builder_name
self.build_number = build_number
def _LogUnexpectedAborting(self, was_aborted):
"""Marks the WfAnalysis status as error, indicating that it was aborted.
Args:
was_aborted (bool): True if the pipeline was aborted, otherwise False.
"""
if not was_aborted:
return
analysis = WfAnalysis.Get(
self.master_name, self.builder_name, self.build_number)
# Heuristic analysis could have already completed, while triggering the
# try job kept failing and lead to the abortion.
if not analysis.completed:
analysis.status = analysis_status.ERROR
analysis.result_status = None
analysis.put()
def finalized(self):
self._LogUnexpectedAborting(self.was_aborted)
def _ResetAnalysis(self, master_name, builder_name, build_number):
analysis = WfAnalysis.Get(master_name, builder_name, build_number)
analysis.pipeline_status_path = self.pipeline_status_path()
analysis.status = analysis_status.RUNNING
analysis.result_status = None
analysis.start_time = time_util.GetUTCNow()
analysis.version = appengine_util.GetCurrentVersion()
analysis.end_time = None
analysis.put()
# Arguments number differs from overridden method - pylint: disable=W0221
def run(self, master_name, builder_name, build_number, build_completed,
force_rerun_try_job):
self._ResetAnalysis(master_name, builder_name, build_number)
# The yield statements below return PipelineFutures, which allow subsequent
# pipelines to refer to previous output values.
# https://github.com/GoogleCloudPlatform/appengine-pipelines/wiki/Python
# Heuristic Approach.
failure_info = yield DetectFirstFailurePipeline(
master_name, builder_name, build_number)
change_logs = yield PullChangelogPipeline(failure_info)
deps_info = yield ExtractDEPSInfoPipeline(failure_info, change_logs)
signals = yield ExtractSignalPipeline(failure_info)
heuristic_result = yield IdentifyCulpritPipeline(
failure_info, change_logs, deps_info, signals, build_completed)
# Try job approach.
with pipeline.InOrder():
# Swarming rerun.
# Triggers swarming tasks when first time test failure happens.
# This pipeline will run before build completes.
yield TriggerSwarmingTasksPipeline(
master_name, builder_name, build_number, failure_info)
# Checks if first time failures happen and starts a try job if yes.
yield StartTryJobOnDemandPipeline(
master_name, builder_name, build_number, failure_info,
signals, heuristic_result, build_completed, force_rerun_try_job)
# Trigger flake analysis on flaky tests, if any.
yield TriggerFlakeAnalysesPipeline(
master_name, builder_name, build_number)
| [
"[email protected]"
]
| |
350dcd30a907105662e6bda717ac24f31ad8370f | 2136701f48ad131084b331039d864f85988cf451 | /spider/.svn/pristine/35/350dcd30a907105662e6bda717ac24f31ad8370f.svn-base | c6842a58e1a64674b74afbe2cc40404b328236bd | []
| no_license | cuiyulin77/other | 9d374a47d482f1c3f9ef0f3ac4429487643b04b9 | c00cafaf7607452966fa523c4d0b04edb7f153e6 | refs/heads/master | 2020-05-18T04:24:26.095929 | 2019-04-30T06:37:53 | 2019-04-30T06:37:53 | 184,169,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymysql import *
import re
class WbUserPipeline(object):
def process_item(self, item, spider):
return item
class DBPipeline(object):
def __init__(self):
        # connect to the database
self.connect = connect(
# host='47.92.77.18',
host='192.168.3.15',
# host='127.0.0.1',
db='spider',
user='root',
            # password='admin8152',  # production server
password='root',
port=3306,
charset='utf8'
)
self.cursor = self.connect.cursor()
def process_item(self, item, spider):
        # get the sentiment category
try:
            # insert one row
print("*"*100)
user_id = re.match(r'https\:\/\/m\.weibo\.cn\/u\/(\d+)\?uid.*', item['user_url']).group(1)
print('user_id',user_id)
self.cursor.execute(
"INSERT INTO weibo_user(id,summary,user_name,user_id,user_url,fans,followers,get_time) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
('0', str(item['summary']),str(item['user_name']),
str(user_id),
str(item['user_url']),
str(item['fans']),
str(item['followers']),
str(item['get_time'])),
)
self.connect.commit()
            print('one row inserted into mysql successfully')
except Exception as e:
            # print the error log when an error occurs
print(e)
return item
| [
"[email protected]"
]
| ||
2e67dafe7fac1cbbc5927705e346ad37a6ed6c89 | fcde32709c62b8ee86da459bb7c8eee52c848118 | /爬虫1903/day09/Baidu/Baidu/settings.py | 6b94193878c3f25ccff9e68ecba1f7857d9f4e73 | []
| no_license | klaus2015/py_base | 6b92d362c3d7dc0e09205a037f4d580381dac94d | ec32c731c1c2f6a0dab87f1d167397e4fa86b8de | refs/heads/master | 2022-07-28T15:49:30.383648 | 2020-05-11T15:31:43 | 2020-05-11T15:31:43 | 261,777,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,181 | py | # -*- coding: utf-8 -*-
# Scrapy settings for Baidu project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Baidu'
SPIDER_MODULES = ['Baidu.spiders']
NEWSPIDER_MODULE = 'Baidu.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Baidu (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 20
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'Baidu.middlewares.BaiduSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'Baidu.middlewares.BaiduDownloaderMiddleware': 543,
'Baidu.middlewares.RandomUserAgentDownloaderMiddleware':200,
'Baidu.middlewares.RandomProxyDownloaderMiddleware':250,
}
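# Note on the numbers above: for each request, process_request() runs from
# the lowest value to the highest (the random User-Agent at 200 fires before
# the proxy at 250), and process_response() runs back through them in reverse.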
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'Baidu.pipelines.BaiduPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
]
| |
727bfe5706d30425d6dc1953d30b21f36aeb2901 | 74eafe55252eff97fd9a2e1e6564ecf243f7c058 | /oop/squares_gen.py | ab286f9884b1857157fe12a048b69b5aa72e1f91 | []
| no_license | srikanthpragada/demo_24_june_2019 | c5ddef71eb721367d656924d312e9ca7ac80c34a | fa7aca273d1ffe6ded34795a639910ab91ce66a0 | refs/heads/master | 2020-06-11T10:19:22.384096 | 2019-08-01T15:28:26 | 2019-08-01T15:28:26 | 193,929,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | def squares(num):
for i in range(1, num + 1):
yield i * i
for n in squares(5):
print(n)
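# prints 1, 4, 9, 16, 25 (one value per line)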
| [
"[email protected]"
]
| |
f7d45287bca0ec55ea32d9ed5da25480ec9a3285 | e608c9525e88ba3589cb4a2fd47f6a2e0442bfb2 | /pycorrector/seq2seq/corrector_model.py | 02bd512d259867d51f01a8b89dbf7780a9222e09 | [
"Apache-2.0"
]
| permissive | YC-wind/pycorrector | 9f5c14d2cc0cf6f53ff253c6035cf816e1f334d8 | c68ea194a95949f6d6ffb7cb33dfc6679e1bbc9e | refs/heads/master | 2020-03-16T21:56:53.394037 | 2018-05-02T04:05:17 | 2018-05-02T04:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,328 | py | # -*- coding: utf-8 -*-
# Author: XuMing <[email protected]>
# Brief:
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import seq2seq
from reader import PAD_ID, GO_ID
class CorrectorModel(object):
"""Sequence-to-sequence model used to correct grammatical errors in text.
NOTE: mostly copied from TensorFlow's seq2seq_model.py; only modifications
are:
- the introduction of RMSProp as an optional optimization algorithm
- the introduction of a "projection bias" that biases decoding towards
selecting tokens that appeared in the input
"""
def __init__(self, source_vocab_size, target_vocab_size, buckets, size,
num_layers, max_gradient_norm, batch_size, learning_rate,
learning_rate_decay_factor, use_lstm=False,
num_samples=512, forward_only=False, config=None,
corrective_tokens_mask=None):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input
length that will be processed in that bucket, and O specifies
maximum output length. Training instances that have longer than I
or outputs longer than O will be pushed to the next bucket and
padded accordingly. We assume that the list is sorted, e.g., [(2,
4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g.,
for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when
needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the
model.
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
self.config = config
# Feeds for inputs.
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in range(buckets[-1][0]): # Last bucket is the biggest one.
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="encoder{0}".format(
i)))
for i in range(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="decoder{0}".format(
i)))
self.target_weights.append(tf.placeholder(tf.float32, shape=[None],
name="weight{0}".format(
i)))
# One hot encoding of corrective tokens.
corrective_tokens_tensor = tf.constant(corrective_tokens_mask if
corrective_tokens_mask else
np.zeros(self.target_vocab_size),
shape=[self.target_vocab_size],
dtype=tf.float32)
batched_corrective_tokens = tf.stack(
[corrective_tokens_tensor] * self.batch_size)
self.batch_corrective_tokens_mask = batch_corrective_tokens_mask = \
tf.placeholder(
tf.float32,
shape=[None, None],
name="corrective_tokens")
# Our targets are decoder inputs shifted by one.
targets = [self.decoder_inputs[i + 1]
for i in range(len(self.decoder_inputs) - 1)]
# If we use sampled softmax, we need an output projection.
output_projection = None
softmax_loss_function = None
# Sampled softmax only makes sense if we sample less than vocabulary
# size.
        if 0 < num_samples < self.target_vocab_size:
w = tf.get_variable("proj_w", [size, self.target_vocab_size])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [self.target_vocab_size])
output_projection = (w, b)
def sampled_loss(labels, logits):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(w_t, b, labels, logits,
num_samples,
self.target_vocab_size)
softmax_loss_function = sampled_loss
# Create the internal multi-layer cell for our RNN.
single_cell = tf.nn.rnn_cell.GRUCell(size)
if use_lstm:
single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
cell = single_cell
if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)
# The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
"""
:param encoder_inputs: list of length equal to the input bucket
length of 1-D tensors (of length equal to the batch size) whose
elements consist of the token index of each sample in the batch
at a given index in the input.
:param decoder_inputs:
:param do_decode:
:return:
"""
if do_decode:
# Modify bias here to bias the model towards selecting words
# present in the input sentence.
input_bias = self.build_input_bias(encoder_inputs,
batch_corrective_tokens_mask)
# Redefined seq2seq to allow for the injection of a special
                # decoding function that biases predictions toward tokens
                # seen in the input.
return seq2seq.embedding_attention_seq2seq(
encoder_inputs, decoder_inputs, cell,
num_encoder_symbols=source_vocab_size,
num_decoder_symbols=target_vocab_size,
embedding_size=size,
output_projection=output_projection,
feed_previous=do_decode,
loop_fn_factory=
apply_input_bias_and_extract_argmax_fn_factory(input_bias))
else:
return seq2seq.embedding_attention_seq2seq(
encoder_inputs, decoder_inputs, cell,
num_encoder_symbols=source_vocab_size,
num_decoder_symbols=target_vocab_size,
embedding_size=size,
output_projection=output_projection,
feed_previous=do_decode)
# Training outputs and losses.
if forward_only:
self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, True),
softmax_loss_function=softmax_loss_function)
if output_projection is not None:
for b in range(len(buckets)):
# We need to apply the same input bias used during model
# evaluation when decoding.
input_bias = self.build_input_bias(
self.encoder_inputs[:buckets[b][0]],
batch_corrective_tokens_mask)
self.outputs[b] = [
project_and_apply_input_bias(output, output_projection,
input_bias)
for output in self.outputs[b]]
else:
self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, False),
softmax_loss_function=softmax_loss_function)
# Gradients and SGD update operation for training the model.
params = tf.trainable_variables()
if not forward_only:
self.gradient_norms = []
self.updates = []
opt = tf.train.RMSPropOptimizer(0.001) if self.config.use_rms_prop \
else tf.train.GradientDescentOptimizer(self.learning_rate)
# opt = tf.train.AdamOptimizer()
for b in range(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
clipped_gradients, norm = tf.clip_by_global_norm(
gradients, max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(
zip(clipped_gradients, params),
global_step=self.global_step))
self.saver = tf.train.Saver(tf.global_variables())
def build_input_bias(self, encoder_inputs, batch_corrective_tokens_mask):
packed_one_hot_inputs = tf.one_hot(indices=tf.stack(
encoder_inputs, axis=1), depth=self.target_vocab_size)
return tf.maximum(batch_corrective_tokens_mask,
tf.reduce_max(packed_one_hot_inputs,
reduction_indices=1))
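    # Bias sketch (a reading aid, not part of the original code): one-hot
    # encoding the stacked encoder inputs gives [batch, time, vocab];
    # max-reducing over time yields a {0,1} mask of the tokens seen in each
    # input, and the elementwise max with the corrective-tokens mask also
    # whitelists those tokens, so decoding can only emit either kind.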
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
bucket_id, forward_only, corrective_tokens=None):
"""Run a step of the model feeding the given inputs.
Args:
session: tensorflow session to use.
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
forward_only: whether to do the backward step or only forward.
Returns:
A triple consisting of gradient norm (or None if we did not do
backward), average perplexity, and the outputs.
Raises:
ValueError: if length of encoder_inputs, decoder_inputs, or
target_weights disagrees with bucket size for the specified
bucket_id.
"""
# Check if the sizes match.
encoder_size, decoder_size = self.buckets[bucket_id]
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(target_weights) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(target_weights), decoder_size))
# Input feed: encoder inputs, decoder inputs, target_weights,
# as provided.
input_feed = {}
for l in range(encoder_size):
input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
for l in range(decoder_size):
input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
input_feed[self.target_weights[l].name] = target_weights[l]
corrective_tokens_vector = (corrective_tokens if
corrective_tokens is not None else
np.zeros(self.target_vocab_size))
batch_corrective_tokens = np.repeat([corrective_tokens_vector],
self.batch_size, axis=0)
input_feed[self.batch_corrective_tokens_mask.name] = (
batch_corrective_tokens)
# Since our targets are decoder inputs shifted by one, we need one more.
last_target = self.decoder_inputs[decoder_size].name
input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
# Output feed: depends on whether we do a backward step or not.
if not forward_only:
output_feed = [self.updates[bucket_id], # Update Op that does SGD.
self.gradient_norms[bucket_id], # Gradient norm.
self.losses[bucket_id]] # Loss for this batch.
else:
output_feed = [self.losses[bucket_id]] # Loss for this batch.
for l in range(decoder_size): # Output logits.
output_feed.append(self.outputs[bucket_id][l])
outputs = session.run(output_feed, input_feed)
if not forward_only:
# Gradient norm, loss, no outputs.
return outputs[1], outputs[2], None
else:
# No gradient norm, loss, outputs.
return None, outputs[0], outputs[1:]
def get_batch(self, data, bucket_id):
"""Get a random batch of data from the specified bucket, prepare for
step.
To feed data in step(..) it must be a list of batch-major vectors, while
data here contains single length-major cases. So the main logic of this
function is to re-index data cases to be in the proper format for
feeding.
Args:
data: a tuple of size len(self.buckets) in which each element contains
lists of pairs of input and output data that we use to create a
batch.
bucket_id: integer, which bucket to get the batch for.
Returns:
The triple (encoder_inputs, decoder_inputs, target_weights) for
the constructed batch that has the proper format to call step(...)
later.
"""
encoder_size, decoder_size = self.buckets[bucket_id]
encoder_inputs, decoder_inputs = [], []
# Get a random batch of encoder and decoder inputs from data,
# pad them if needed, reverse encoder inputs and add GO to decoder.
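        # Worked example (illustrative): with bucket (3, 4), an encoder
        # input [5, 8] is padded and reversed into [PAD, 8, 5], while a
        # decoder input [d0, d1] becomes [GO, d0, d1, PAD]; each weight is
        # 1.0 except where the shifted target is PAD or past the end.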
for _ in range(self.batch_size):
encoder_input, decoder_input = random.choice(data[bucket_id])
# Encoder inputs are padded and then reversed.
encoder_pad = [PAD_ID] * (
encoder_size - len(encoder_input))
encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
# Decoder inputs get an extra "GO" symbol, and are padded then.
decoder_pad_size = decoder_size - len(decoder_input) - 1
decoder_inputs.append([GO_ID] + decoder_input +
[PAD_ID] * decoder_pad_size)
# Now we create batch-major vectors from the data selected above.
batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
# Batch encoder inputs are just re-indexed encoder_inputs.
for length_idx in range(encoder_size):
batch_encoder_inputs.append(
np.array([encoder_inputs[batch_idx][length_idx]
for batch_idx in range(self.batch_size)],
dtype=np.int32))
# Batch decoder inputs are re-indexed decoder_inputs, we create weights.
for length_idx in range(decoder_size):
batch_decoder_inputs.append(
np.array([decoder_inputs[batch_idx][length_idx]
for batch_idx in range(self.batch_size)],
dtype=np.int32))
# Create target_weights to be 0 for targets that are padding.
batch_weight = np.ones(self.batch_size, dtype=np.float32)
for batch_idx in range(self.batch_size):
# We set weight to 0 if the corresponding target is a PAD
# symbol. The corresponding target is decoder_input shifted by 1
# forward.
if length_idx < decoder_size - 1:
target = decoder_inputs[batch_idx][length_idx + 1]
if length_idx == decoder_size - 1 or target == PAD_ID:
batch_weight[batch_idx] = 0.0
batch_weights.append(batch_weight)
return batch_encoder_inputs, batch_decoder_inputs, batch_weights
def project_and_apply_input_bias(logits, output_projection, input_bias):
if output_projection is not None:
logits = nn_ops.xw_plus_b(
logits, output_projection[0], output_projection[1])
# Apply softmax to ensure all tokens have a positive value.
probs = tf.nn.softmax(logits)
# Apply input bias, which is a mask of shape [batch, vocab len]
# where each token from the input in addition to all "corrective"
# tokens are set to 1.0.
return tf.multiply(probs, input_bias)
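# Shape sketch (a reading aid, not part of the original code): with batch
# size B and vocab size V, `logits` is projected to [B, V] when an output
# projection is given, `probs` is [B, V] after the softmax, and multiplying
# by the [B, V] {0,1} `input_bias` zeroes out every disallowed token.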
def apply_input_bias_and_extract_argmax_fn_factory(input_bias):
"""
    :param input_bias: a [batch, vocab] mask holding 1.0 for tokens that may
        be emitted while decoding (tokens present in the input plus the
        corrective tokens) and 0.0 elsewhere.
    :return: a factory producing loop functions that apply the bias to each
        decoder step's output before taking its argmax.
"""
def fn_factory(embedding, output_projection=None, update_embedding=True):
"""Get a loop_function that extracts the previous symbol and embeds it.
Args:
embedding: embedding tensor for symbols.
output_projection: None or a pair (W, B). If provided, each fed previous
output will first be multiplied by W and added B.
update_embedding: Boolean; if False, the gradients will not propagate
through the embeddings.
Returns:
A loop function.
"""
def loop_function(prev, _):
prev = project_and_apply_input_bias(prev, output_projection,
input_bias)
prev_symbol = math_ops.argmax(prev, 1)
# Note that gradients will not propagate through the second
# parameter of embedding_lookup.
emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
if not update_embedding:
emb_prev = array_ops.stop_gradient(emb_prev)
return emb_prev, prev_symbol
return loop_function
return fn_factory
| [
"[email protected]"
]
| |
93320c27e48d82cb9a176e9aed8825a5e95f31a2 | cee3e57aaae9eaeb16f696e3cdad5f32c3af6861 | /evennia/server/portal/mssp.py | 5ff0a7b319a0e8139ee16b4a057e3b81c4e8bf0c | [
"BSD-3-Clause"
]
| permissive | Sa1tC/evennia | 8ef7fae9cbeb2d46bd6cdf5c5482331f9e0846ff | 1248428d132fde1b975678b53e22c1ca68a73a43 | refs/heads/master | 2021-01-23T12:32:03.594263 | 2017-05-22T06:21:25 | 2017-05-22T06:21:25 | 93,164,000 | 0 | 1 | null | 2017-06-02T12:36:55 | 2017-06-02T12:36:55 | null | UTF-8 | Python | false | false | 6,861 | py | """
MSSP - Mud Server Status Protocol
This implements the MSSP telnet protocol as per
http://tintin.sourceforge.net/mssp/. MSSP allows web portals and
listings to have their crawlers find the mud and automatically
extract relevant information about it, such as genre, how many
active players and so on.
"""
from builtins import object
from django.conf import settings
from evennia.utils import utils
MSSP = chr(70)
MSSP_VAR = chr(1)
MSSP_VAL = chr(2)
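# Wire format used below: the MSSP subnegotiation payload is a flat run of
# MSSP_VAR <name> MSSP_VAL <value> [MSSP_VAL <value> ...] pairs, e.g.
# "\x01NAME\x02Evennia\x01PORT\x024000" for the NAME and PORT variables.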
# try to get the customized mssp info, if it exists.
MSSPTable_CUSTOM = utils.variable_from_module(settings.MSSP_META_MODULE, "MSSPTable", default={})
class Mssp(object):
"""
Implements the MSSP protocol. Add this to a variable on the telnet
protocol to set it up.
"""
def __init__(self, protocol):
"""
initialize MSSP by storing protocol on ourselves and calling
the client to see if it supports MSSP.
Args:
protocol (Protocol): The active protocol instance.
"""
self.protocol = protocol
self.protocol.will(MSSP).addCallbacks(self.do_mssp, self.no_mssp)
def get_player_count(self):
"""
Get number of logged-in players.
Returns:
count (int): The number of players in the MUD.
"""
return str(self.protocol.sessionhandler.count_loggedin())
def get_uptime(self):
"""
Get how long the portal has been online (reloads are not counted).
Returns:
uptime (int): Number of seconds of uptime.
"""
return str(self.protocol.sessionhandler.uptime)
def no_mssp(self, option):
"""
Called when mssp is not requested. This is the normal
operation.
Args:
option (Option): Not used.
"""
self.protocol.handshake_done()
def do_mssp(self, option):
"""
Negotiate all the information.
Args:
option (Option): Not used.
"""
self.mssp_table = {
# Required fields
"NAME": "Evennia",
"PLAYERS": self.get_player_count,
"UPTIME" : self.get_uptime,
# Generic
"CRAWL DELAY": "-1",
"HOSTNAME": "", # current or new hostname
"PORT": ["4000"], # most important port should be last in list
"CODEBASE": "Evennia",
"CONTACT": "", # email for contacting the mud
"CREATED": "", # year MUD was created
"ICON": "", # url to icon 32x32 or larger; <32kb.
"IP": "", # current or new IP address
"LANGUAGE": "", # name of language used, e.g. English
"LOCATION": "", # full English name of server country
"MINIMUM AGE": "0", # set to 0 if not applicable
"WEBSITE": "www.evennia.com",
# Categorisation
"FAMILY": "Custom", # evennia goes under 'Custom'
"GENRE": "None", # Adult, Fantasy, Historical, Horror, Modern, None, or Science Fiction
"GAMEPLAY": "None", # Adventure, Educational, Hack and Slash, None,
# Player versus Player, Player versus Environment,
# Roleplaying, Simulation, Social or Strategy
"STATUS": "Open Beta", # Alpha, Closed Beta, Open Beta, Live
"GAMESYSTEM": "Custom", # D&D, d20 System, World of Darkness, etc. Use Custom if homebrew
"SUBGENRE": "None", # LASG, Medieval Fantasy, World War II, Frankenstein,
# Cyberpunk, Dragonlance, etc. Or None if not available.
# World
"AREAS": "0",
"HELPFILES": "0",
"MOBILES": "0",
"OBJECTS": "0",
"ROOMS": "0", # use 0 if room-less
"CLASSES": "0", # use 0 if class-less
"LEVELS": "0", # use 0 if level-less
"RACES": "0", # use 0 if race-less
"SKILLS": "0", # use 0 if skill-less
            # Protocols (set to 1 or 0)
"ANSI": "1",
"GMCP": "0",
"ATCP": "0",
"MCCP": "0",
"MCP": "0",
"MSDP": "0",
"MSP": "0",
"MXP": "0",
"PUEBLO": "0",
"SSL": "1",
"UTF-8": "1",
"ZMP": "0",
"VT100": "0",
"XTERM 256 COLORS": "0",
            # Commercial (set to 1 or 0)
"PAY TO PLAY": "0",
"PAY FOR PERKS": "0",
            # Hiring (set to 1 or 0)
"HIRING BUILDERS": "0",
"HIRING CODERS": "0",
# Extended variables
# World
"DBSIZE": "0",
"EXITS": "0",
"EXTRA DESCRIPTIONS": "0",
"MUDPROGS": "0",
"MUDTRIGS": "0",
"RESETS": "0",
# Game (set to 1, 0 or one of the given alternatives)
"ADULT MATERIAL": "0",
"MULTICLASSING": "0",
"NEWBIE FRIENDLY": "0",
"PLAYER CITIES": "0",
"PLAYER CLANS": "0",
"PLAYER CRAFTING": "0",
"PLAYER GUILDS": "0",
"EQUIPMENT SYSTEM": "None", # "None", "Level", "Skill", "Both"
"MULTIPLAYING": "None", # "None", "Restricted", "Full"
"PLAYERKILLING": "None", # "None", "Restricted", "Full"
"QUEST SYSTEM": "None", # "None", "Immortal Run", "Automated", "Integrated"
"ROLEPLAYING": "None", # "None", "Accepted", "Encouraged", "Enforced"
"TRAINING SYSTEM": "None", # "None", "Level", "Skill", "Both"
"WORLD ORIGINALITY": "None", # "All Stock", "Mostly Stock", "Mostly Original", "All Original"
}
# update the static table with the custom one
if MSSPTable_CUSTOM:
self.mssp_table.update(MSSPTable_CUSTOM)
varlist = ''
for variable, value in self.mssp_table.items():
if callable(value):
value = value()
if utils.is_iter(value):
for partval in value:
varlist += MSSP_VAR + str(variable) + MSSP_VAL + str(partval)
else:
varlist += MSSP_VAR + str(variable) + MSSP_VAL + str(value)
# send to crawler by subnegotiation
self.protocol.requestNegotiation(MSSP, varlist)
self.protocol.handshake_done()
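# Illustrative counterpart (not part of Evennia): a crawler-side parser that
# splits a received MSSP subnegotiation body back into a {variable: [values]}
# table using the MSSP_VAR/MSSP_VAL separators defined above.
def parse_mssp_payload(payload):
    """Parse an MSSP payload string into a dict mapping names to value lists."""
    table = {}
    for chunk in payload.split(MSSP_VAR):
        if not chunk:
            continue
        parts = chunk.split(MSSP_VAL)
        table.setdefault(parts[0], []).extend(parts[1:])
    return table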
| [
"[email protected]"
]
| |
aef3c3624058a9104e4a84e3fdb7e33668a84b8c | 90d4b790f9a7198760fdbcfad6abd2da851f2f4e | /0x0F-python-object_relational_mapping/3-my_safe_filter_states.py | b2b27fdbdcc3fbb2c02bd4aa205bc8225158b438 | []
| no_license | Murega14/holbertonschool-higher_level_programming | 2817a532d7d6739ed046e350903e394ed1bae0a3 | f29a4c4e74c01798cb51bfe5160432569a1ca833 | refs/heads/master | 2023-03-15T08:22:06.926537 | 2018-09-09T20:46:33 | 2018-09-09T20:46:33 | 572,548,803 | 1 | 0 | null | 2022-11-30T14:11:06 | 2022-11-30T14:10:32 | null | UTF-8 | Python | false | false | 643 | py | #!/usr/bin/python3
# Lists all states whose name matches the user-supplied argument (SQL-injection safe)
def main(args):
    # Connect and run the filtered query with a bound parameter
if len(args) != 5:
raise Exception("need 4 arguments!")
db = MySQLdb.connect(host='localhost',
user=args[1],
passwd=args[2],
db=args[3])
cur = db.cursor()
cur.execute(
"SELECT * FROM states WHERE name LIKE %s ORDER BY id ASC",
(args[4],))
states = cur.fetchall()
for state in states:
print(state)
if __name__ == "__main__":
import sys
import MySQLdb
main(sys.argv)
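# Example invocation (hypothetical credentials and database name):
#   ./3-my_safe_filter_states.py root root hbtn_0e_0_usa Texas
# Because the state name is bound via %s instead of string formatting,
# input like "Texas' OR '1'='1" is escaped by MySQLdb and cannot alter
# the query.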
| [
"[email protected]"
]
| |
c1ee39b1b2a7ca3e916a559da292bc53bfdc5b74 | 017f62ebc7357dc665723a5b4fa75294f31fda8f | /lib/jnpr/eznc/resrc/srx/nat/nat_proxy_arp.py | 10fe27280370f10dad027ec9771f769faed67709 | [
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | cro/py-junos-eznc | c2588d9fde7b65ec523c558d741716f3a19621c7 | 4c111476cc8eb7599462379ddf55743ae30bbf5c | refs/heads/master | 2021-01-20T16:36:20.034788 | 2013-11-19T19:17:32 | 2013-11-19T19:17:32 | 14,535,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,235 | py | # 3rd-party modules
from lxml.builder import E
# module packages
from ...resource import Resource
from .... import jxml as JXML
class NatProxyArp( Resource ):
"""
[edit security nat proxy-arp interface <if_name> address <ip_prefix>]
Resource namevar:
tuple(if_name, ip_prefix)
Description:
This resource allows you to add/remove proxy-arp entries for NAT. At
this time, there are no managed properties, so you can simply add or
remove entries by the name tuple(if_name, ip_prefix)
For example, to select an entry directly:
entry = NatProxyArp(jdev, ('reth0.213','198.18.11.5'))
Or using the bind mechanism:
jdev.bind(parp=NatProxyArp)
entry = jdev.parp[('reth0.213', '198.18.11.5')]
To create it, you need to use the 'touch' option when invoking
        write() since there are no properties for proxy-arp entries
if not entry.exists:
entry.write(touch=True)
And to remove the same entry:
entry.delete()
"""
def _xml_at_top(self):
return E.security(E.nat(
E('proxy-arp',
E.interface(E.name( self._name[0] ),
E.address(E.name( self._name[1]))
)
)
))
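    # For name ('reth0.213', '198.18.11.5') the builder above yields:
    #   <security><nat><proxy-arp><interface>
    #     <name>reth0.213</name>
    #     <address><name>198.18.11.5</name></address>
    #   </interface></proxy-arp></nat></security>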
##### -----------------------------------------------------------------------
##### OVERLOADS
##### -----------------------------------------------------------------------
def rename(self, name):
""" UNSUPPORTED """
raise RuntimeError("Unsupported for Resource: %s" % self.__class__.__name__)
##### -----------------------------------------------------------------------
##### XML read
##### -----------------------------------------------------------------------
def _xml_at_res(self, xml):
return xml.find('.//proxy-arp/interface')
def _xml_to_py(self, as_xml, to_py ):
Resource._r_has_xml_status( as_xml, to_py )
##### -----------------------------------------------------------------------
##### Resource List, Catalog
##### -- only executed by 'manager' resources
##### -----------------------------------------------------------------------
def _r_list(self):
raise RuntimeError("@@@ NEED TO IMPLEMENT!")
def _r_catalog(self):
raise RuntimeError("@@@ NEED TO IMPLEMENT!")
| [
"[email protected]"
]
| |
8f26c0707c6e96062c78e160ebb53b168b45685b | b18ff1d2a88cdad6d8ca73a8e6c34943f7bee055 | /toolcall/models/__init__.py | b223e50a69b3e81be3d40cd155596ad4cbf3849e | [
"MIT"
]
| permissive | thebjorn/toolcall | 9c812d608a67990dfb04b4e8bc1ebfcd4e7440c3 | 2c1597c8224958b4751cfb09f7a1b4439ca6df09 | refs/heads/master | 2021-06-13T21:33:12.495795 | 2019-08-31T10:50:55 | 2019-08-31T10:50:55 | 147,824,176 | 0 | 0 | MIT | 2021-06-10T20:46:47 | 2018-09-07T13:02:31 | HTML | UTF-8 | Python | false | false | 53 | py | # -*- coding: utf-8 -*-
from .tool_models import *
| [
"[email protected]"
]
| |
9e9bd761750fdacff2550f9144c914ddc1e8529c | 7bead245354e233f76fff4608938bf956abb84cf | /test/test_docx_table_cell.py | f1aec4840f9d6c266499020f55fa9f2df8b0c8a9 | [
"Apache-2.0"
]
| permissive | Cloudmersive/Cloudmersive.APIClient.Python.Convert | 5ba499937b9664f37cb2700509a4ba93952e9d6c | dba2fe7257229ebdacd266531b3724552c651009 | refs/heads/master | 2021-10-28T23:12:42.698951 | 2021-10-18T03:44:49 | 2021-10-18T03:44:49 | 138,449,321 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | # coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.docx_table_cell import DocxTableCell # noqa: E501
from cloudmersive_convert_api_client.rest import ApiException
class TestDocxTableCell(unittest.TestCase):
"""DocxTableCell unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDocxTableCell(self):
"""Test DocxTableCell"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudmersive_convert_api_client.models.docx_table_cell.DocxTableCell() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
689f7241d4dc56a641bc73e4a10d491e1b16ae55 | a86864b0ca6bc1d4dbdd22c26257340b8131e859 | /forms/contract_award.py | 24bb6668c834f1225f8d923038e604378fd92b82 | [
"MIT"
]
| permissive | pudo-attic/ted-xml | 95d00f4f02ce16677da7672d4f40478ef13fac11 | 627c100ba464f574c2c71f7584f05f3aabf480e8 | refs/heads/master | 2021-01-01T05:31:32.156917 | 2013-09-13T13:13:29 | 2013-09-13T13:13:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,149 | py | from pprint import pprint
from parseutil import Extractor
LOOKUP = {
'appeal_body': {
'std': './/PROCEDURES_FOR_APPEAL/APPEAL_PROCEDURE_BODY_RESPONSIBLE//',
'util': './/APPEAL_PROCEDURES/RESPONSIBLE_FOR_APPEAL_PROCEDURES//',
'mil': './/PROCEDURES_FOR_APPEAL/APPEAL_PROCEDURE_BODY_RESPONSIBLE//'
},
'authority': {
'std': './/CONTRACTING_AUTHORITY_INFORMATION_CONTRACT_AWARD/NAME_ADDRESSES_CONTACT_CONTRACT_AWARD//',
'util': './/NAME_ADDRESSES_CONTACT_CONTRACT_AWARD_UTILITIES/CA_CE_CONCESSIONAIRE_PROFILE//',
'mil': './/CONTRACTING_AUTHORITY_INFORMATION_CONTRACT_AWARD_DEFENCE//CA_CE_CONCESSIONAIRE_PROFILE//',
},
'award_dest': {
'std': './/AWARD_OF_CONTRACT',
'util': './FD_CONTRACT_AWARD_UTILITIES/AWARD_CONTRACT_CONTRACT_AWARD_UTILITIES',
'mil': './/AWARD_OF_CONTRACT_DEFENCE'
},
'total_value': {
'std': './/TOTAL_FINAL_VALUE/COSTS_RANGE_AND_CURRENCY_WITH_VAT_RATE',
'util': './/OBJECT_CONTRACT_AWARD_UTILITIES/COSTS_RANGE_AND_CURRENCY_WITH_VAT_RATE',
'mil': './/TOTAL_FINAL_VALUE/COSTS_RANGE_AND_CURRENCY_WITH_VAT_RATE'
},
'award_description': {
'std': './/DESCRIPTION_AWARD_NOTICE_INFORMATION',
'util': './/OBJECT_CONTRACT_AWARD_UTILITIES/DESCRIPTION_CONTRACT_AWARD_UTILITIES',
'mil': './/DESCRIPTION_AWARD_NOTICE_INFORMATION_DEFENCE'
},
'short_desc': {
'std': './/DESCRIPTION_AWARD_NOTICE_INFORMATION/SHORT_CONTRACT_DESCRIPTION/P',
'util': './/DESCRIPTION_CONTRACT_AWARD_UTILITIES/SHORT_DESCRIPTION/P',
'mil': './/DESCRIPTION_AWARD_NOTICE_INFORMATION_DEFENCE/SHORT_CONTRACT_DESCRIPTION/P'
},
'reference': {
'std': './/ADMINISTRATIVE_INFORMATION_CONTRACT_AWARD/FILE_REFERENCE_NUMBER/P',
'util': './/ADMINISTRATIVE_INFO_CONTRACT_AWARD_UTILITIES/REFERENCE_NUMBER_ATTRIBUTED/P',
'mil': './/ADMINISTRATIVE_INFORMATION_CONTRACT_AWARD_DEFENCE/FILE_REFERENCE_NUMBER/P'
},
'additional_info': {
'std': './/COMPLEMENTARY_INFORMATION_CONTRACT_AWARD/ADDITIONAL_INFORMATION/P',
'util': './/COMPLEMENTARY_INFORMATION_CONTRACT_AWARD_UTILITIES/ADDITIONAL_INFORMATION/P',
'mil': './/COMPLEMENTARY_INFORMATION_CONTRACT_AWARD/ADDITIONAL_INFORMATION/P'
},
'electronic_auction': {
'std': './/F03_IS_ELECTRONIC_AUCTION_USABLE',
'util': './/F06_IS_ELECTRONIC_AUCTION_USABLE',
'mil': './/F18_IS_ELECTRONIC_AUCTION_USABLE'
},
'activity_type': {
'std': './/TYPE_AND_ACTIVITIES_AND_PURCHASING_ON_BEHALF//TYPE_OF_ACTIVITY',
'util': './/NOPATH',
'mil': './/TYPE_AND_ACTIVITIES_OR_CONTRACTING_ENTITY_AND_PURCHASING_ON_BEHALF//TYPE_OF_ACTIVITY'
},
'activity_type_other': {
'std': './/TYPE_AND_ACTIVITIES_AND_PURCHASING_ON_BEHALF//TYPE_OF_ACTIVITY_OTHER',
'util': './/NOPATH',
'mil': './/TYPE_AND_ACTIVITIES_OR_CONTRACTING_ENTITY_AND_PURCHASING_ON_BEHALF//TYPE_OF_ACTIVITY_OTHER'
},
'authority_type': {
'std': './/TYPE_AND_ACTIVITIES_AND_PURCHASING_ON_BEHALF//TYPE_OF_CONTRACTING_AUTHORITY',
'util': './/NOPATH',
'mil': './/TYPE_AND_ACTIVITIES_OR_CONTRACTING_ENTITY_AND_PURCHASING_ON_BEHALF//TYPE_OF_CONTRACTING_AUTHORITY'
},
'authority_type_other': {
'std': './/TYPE_AND_ACTIVITIES_AND_PURCHASING_ON_BEHALF//TYPE_AND_ACTIVITIES/TYPE_OF_CONTRACTING_AUTHORITY_OTHER',
'util': './/NOPATH',
'mil': './/TYPE_AND_ACTIVITIES_OR_CONTRACTING_ENTITY_AND_PURCHASING_ON_BEHALF/TYPE_AND_ACTIVITIES/TYPE_OF_CONTRACTING_AUTHORITY_OTHER'
},
'operator': {
'std': './ECONOMIC_OPERATOR_NAME_ADDRESS//',
'util': './/',
'mil': './ECONOMIC_OPERATOR_NAME_ADDRESS//'
},
}
def _lookup(s, key):
return LOOKUP[key][s]
def extract_address(ext, prefix, query):
if query is None:
return {}
data = {
prefix + '_official_name': ext.text(query+'OFFICIALNAME'),
prefix + '_address': ext.text(query+'ADDRESS'),
prefix + '_town': ext.text(query+'TOWN'),
prefix + '_postal_code': ext.text(query+'POSTAL_CODE'),
prefix + '_country': ext.attr(query+'COUNTRY', 'VALUE'),
prefix + '_attention': ext.text(query+'ATTENTION'),
prefix + '_phone': ext.text(query+'PHONE'),
prefix + '_email': ext.text(query+'EMAIL') or ext.text(query+'E_MAIL'),
prefix + '_fax': ext.text(query+'FAX'),
prefix + '_url': ext.text(query+'URL_GENERAL') or ext.text(query+'URL'),
prefix + '_url_buyer': ext.text(query+'URL_BUYER'),
prefix + '_url_info': ext.text(query+'URL_INFORMATION'),
prefix + '_url_participate': ext.text(query+'URL_PARTICIPATE')
}
for k, v in data.items():
if v is None:
del data[k]
return data
def extract_values(ext, prefix, query):
if query is None:
return {}
data = {
prefix + '_currency': ext.attr(query, 'CURRENCY'),
prefix + '_cost': ext.attr(query + '/VALUE_COST', 'FMTVAL'),
prefix + '_low': ext.attr(query + '//LOW_VALUE', 'FMTVAL'),
prefix + '_high': ext.attr(query + '//HIGH_VALUE', 'FMTVAL'),
prefix + '_months': ext.attr(query + '//NUMBER_MONTHS', 'FMTVAL'),
prefix + '_years': ext.attr(query + '//NUMBER_YEARS', 'FMTVAL'),
prefix + '_vat_rate': ext.attr(query + '//VAT_PRCT', 'FMTVAL')
}
if ext.el.find(query + '/INCLUDING_VAT') is not None:
data[prefix + '_vat_included'] = True
if ext.el.find(query + '/EXCLUDING_VAT') is not None:
data[prefix + '_vat_included'] = False
for k, v in data.items():
if v is None:
del data[k]
return data
def parse_award(root, lookup):
ext = Extractor(root)
contract = {
'contract_number': ext.text('./CONTRACT_NUMBER') or ext.text('.//CONTRACT_NO'),
'lot_number': ext.text('./LOT_NUMBER') or ext.text('.//LOT_NUMBER'),
'contract_title': ext.text('./CONTRACT_TITLE/P') or ext.text('./CONTRACT_TITLE') or ext.text('.//TITLE_CONTRACT') or ext.text('.//TITLE_CONTRACT/P'),
'contract_award_day': ext.text('.//CONTRACT_AWARD_DATE/DAY') or ext.text('.//DATE_OF_CONTRACT_AWARD/DAY'),
'contract_award_month': ext.text('.//CONTRACT_AWARD_DATE/MONTH') or ext.text('.//DATE_OF_CONTRACT_AWARD/MONTH'),
'contract_award_year': ext.text('.//CONTRACT_AWARD_DATE/YEAR') or ext.text('.//DATE_OF_CONTRACT_AWARD/YEAR'),
'offers_received_num': ext.text('.//OFFERS_RECEIVED_NUMBER'),
'offers_received_meaning': ext.text('.//OFFERS_RECEIVED_NUMBER_MEANING')
}
contract.update(extract_values(ext, 'contract_value', './/COSTS_RANGE_AND_CURRENCY_WITH_VAT_RATE'))
contract.update(extract_values(ext, 'initial_value', './/INITIAL_ESTIMATED_TOTAL_VALUE_CONTRACT'))
contract.update(extract_address(ext, 'operator', lookup('operator')))
#from lxml import etree
#print etree.tostring(root, pretty_print=True)
#pprint(contract)
#ext.audit()
return contract
def parse_form(root):
form_type = 'std'
if 'DEFENCE' in root.tag:
form_type = 'mil'
elif 'UTILITIES' in root.tag:
form_type = 'util'
lookup = lambda k: _lookup(form_type, k)
ext = Extractor(root)
form = {
'file_reference': ext.text(lookup('reference')),
'relates_to_eu_project': ext.text('.//RELATES_TO_EU_PROJECT_YES/P'),
'notice_dispatch_day': ext.text('.//NOTICE_DISPATCH_DATE/DAY'),
'notice_dispatch_month': ext.text('.//NOTICE_DISPATCH_DATE/MONTH'),
'notice_dispatch_year': ext.text('.//NOTICE_DISPATCH_DATE/YEAR'),
'appeal_procedure': ext.text('.//PROCEDURES_FOR_APPEAL//LODGING_OF_APPEALS_PRECISION/P'),
'location': ext.text(lookup('award_description')+'/LOCATION_NUTS/LOCATION/P') or ext.text(lookup('award_description')+'/LOCATION_NUTS/LOCATION'),
'location_nuts': ext.attr(lookup('award_description')+'/LOCATION_NUTS/NUTS', 'CODE'),
'type_contract': ext.attr(lookup('award_description')+'//TYPE_CONTRACT', 'VALUE'),
'gpa_covered': ext.attr(lookup('award_description')+'/CONTRACT_COVERED_GPA', 'VALUE'),
'electronic_auction': ext.attr(lookup('electronic_auction'), 'VALUE'),
'cpv_code': ext.attr(lookup('award_description')+'/CPV/CPV_MAIN/CPV_CODE', 'CODE'),
'reason_lawful': ext.html('.//REASON_CONTRACT_LAWFUL'),
#'cpv_additional_code': ext.attr('.//DESCRIPTION_AWARD_NOTICE_INFORMATION/CPV/CPV_ADDITIONAL/CPV_CODE', 'CODE'),
        'authority_type': ext.attr(lookup('authority_type'), 'VALUE'),
        'authority_type_other': ext.text(lookup('authority_type_other')),
'activity_type': ext.text(lookup('activity_type')),
'activity_type_other': ext.text(lookup('activity_type_other')),
'activity_contractor': ext.attr('.//ACTIVITIES_OF_CONTRACTING_ENTITY/ACTIVITY_OF_CONTRACTING_ENTITY', 'VALUE'),
'concessionaire_email': ext.text('.//CA_CE_CONCESSIONAIRE_PROFILE/E_MAILS/E_MAIL'),
'concessionaire_nationalid': ext.text('.//CA_CE_CONCESSIONAIRE_PROFILE/ORGANISATION/NATIONALID'),
'concessionaire_contact': ext.text('.//CA_CE_CONCESSIONAIRE_PROFILE/CONTACT_POINT'),
'contract_award_title': ext.text(lookup('award_description')+'/TITLE_CONTRACT/P'),
'contract_description': ext.html(lookup('short_desc')),
'additional_information': ext.html(lookup('additional_info')),
'contract_type_supply': ext.attr('.//TYPE_CONTRACT_LOCATION_W_PUB/TYPE_SUPPLIES_CONTRACT', 'VALUE')
}
form.update(extract_address(ext, 'authority', lookup('authority')))
form.update(extract_address(ext, 'appeal_body', lookup('appeal_body')))
form.update(extract_address(ext, 'on_behalf', './/TYPE_AND_ACTIVITIES_AND_PURCHASING_ON_BEHALF/PURCHASING_ON_BEHALF//'))
#form.update(extract_address(ext, 'lodging_info', './/PROCEDURES_FOR_APPEAL/LODGING_INFORMATION_FOR_SERVICE//'))
ext.ignore('.//PROCEDURES_FOR_APPEAL/MEDIATION_PROCEDURE_BODY_RESPONSIBLE/*')
ext.ignore('.//PROCEDURES_FOR_APPEAL/LODGING_INFORMATION_FOR_SERVICE/*')
ext.ignore('./FD_CONTRACT_AWARD_DEFENCE/COMPLEMENTARY_INFORMATION_CONTRACT_AWARD/PROCEDURES_FOR_APPEAL/LODGING_INFORMATION_FOR_SERVICE/*')
ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/CONTRACTING_ENTITY_CONTRACT_AWARD_UTILITIES/NAME_ADDRESSES_CONTACT_CONTRACT_AWARD_UTILITIES/INTERNET_ADDRESSES_CONTRACT_AWARD_UTILITIES/URL_GENERAL')
ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/COMPLEMENTARY_INFORMATION_CONTRACT_AWARD_UTILITIES/APPEAL_PROCEDURES/SERVICE_FROM_INFORMATION/*')
ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/PROCEDURES_CONTRACT_AWARD_UTILITIES/ADMINISTRATIVE_INFO_CONTRACT_AWARD_UTILITIES/PREVIOUS_PUBLICATION_INFORMATION_NOTICE_F6/*')
# Make awards criteria their own table.
ext.ignore('./FD_CONTRACT_AWARD/PROCEDURE_DEFINITION_CONTRACT_AWARD_NOTICE/AWARD_CRITERIA_CONTRACT_AWARD_NOTICE_INFORMATION/AWARD_CRITERIA_DETAIL_F03/*')
ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/PROCEDURES_CONTRACT_AWARD_UTILITIES/F06_AWARD_CRITERIA_CONTRACT_UTILITIES_INFORMATION/*')
ext.ignore('./FD_CONTRACT_AWARD_DEFENCE/PROCEDURE_DEFINITION_CONTRACT_AWARD_NOTICE_DEFENCE/AWARD_CRITERIA_CONTRACT_AWARD_NOTICE_INFORMATION_DEFENCE/AWARD_CRITERIA_DETAIL_F18/*')
ext.ignore('.FD_CONTRACT_AWARD_UTILITIES/PROCEDURES_CONTRACT_AWARD_UTILITIES/F06_AWARD_CRITERIA_CONTRACT_UTILITIES_INFORMATION/PRICE_AWARD_CRITERIA/*')
ext.ignore('./FD_CONTRACT_AWARD_DEFENCE/PROCEDURE_DEFINITION_CONTRACT_AWARD_NOTICE_DEFENCE/ADMINISTRATIVE_INFORMATION_CONTRACT_AWARD_DEFENCE/PREVIOUS_PUBLICATION_INFORMATION_NOTICE_F18/*')
ext.ignore('./FD_CONTRACT_AWARD/AWARD_OF_CONTRACT/*')
ext.ignore('./FD_CONTRACT_AWARD_DEFENCE/AWARD_OF_CONTRACT_DEFENCE/*')
ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/AWARD_CONTRACT_CONTRACT_AWARD_UTILITIES/*')
ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/OBJECT_CONTRACT_AWARD_UTILITIES/DESCRIPTION_CONTRACT_AWARD_UTILITIES/SHORT_DESCRIPTION/*')
ext.ignore('./FD_CONTRACT_AWARD/PROCEDURE_DEFINITION_CONTRACT_AWARD_NOTICE/ADMINISTRATIVE_INFORMATION_CONTRACT_AWARD/PREVIOUS_PUBLICATION_INFORMATION_NOTICE_F3/*')
ext.text('.//TYPE_CONTRACT_LOCATION_W_PUB/SERVICE_CATEGORY_PUB')
ext.text('.//CPV/CPV_ADDITIONAL/CPV_CODE')
form.update(extract_values(ext, 'total_value', lookup('total_value')))
#from lxml import etree
#el = root.find('./FD_CONTRACT_AWARD/OBJECT_CONTRACT_INFORMATION_CONTRACT_AWARD_NOTICE/TOTAL_FINAL_VALUE')
#if el:
# print etree.tostring(el, pretty_print=True)
# #pprint(form)
#ext.audit()
contracts = []
for award in root.findall(lookup('award_dest')):
contract = parse_award(award, lookup)
contract.update(form)
contracts.append(contract)
#pprint(contract)
return contracts
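# Minimal driver sketch (assumed entry point; the real pipeline feeding TED
# exports into parse_form lives elsewhere in this repo):
#   from lxml import etree
#   root = etree.parse('ted_notice.xml').getroot()
#   for row in parse_form(root):
#       print row.get('contract_title'), row.get('contract_value_cost')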
| [
"[email protected]"
]
| |
aa5650cfa845d0f8a1a8b2048a907d06c2b3d36d | 1061216c2c33c1ed4ffb33e6211565575957e48f | /python-legacy/test/test_custom_profile_field.py | 9c780d683beda23dc85ae0a5a0c376b149184f96 | []
| no_license | MSurfer20/test2 | be9532f54839e8f58b60a8e4587348c2810ecdb9 | 13b35d72f33302fa532aea189e8f532272f1f799 | refs/heads/main | 2023-07-03T04:19:57.548080 | 2021-08-11T19:16:42 | 2021-08-11T19:16:42 | 393,920,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,526 | py | # coding: utf-8
"""
Zulip REST API
Powerful open source group chat # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.custom_profile_field import CustomProfileField # noqa: E501
from openapi_client.rest import ApiException
class TestCustomProfileField(unittest.TestCase):
"""CustomProfileField unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test CustomProfileField
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.custom_profile_field.CustomProfileField() # noqa: E501
if include_optional :
return CustomProfileField(
id = 56,
type = 56,
order = 56,
name = '',
hint = '',
field_data = ''
)
else :
return CustomProfileField(
)
def testCustomProfileField(self):
"""Test CustomProfileField"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
b7c41240fa74e52ba4534e26961d3cbf7322a0d6 | 43ed422113d58b27d5012f5ccf405700a46fc0f2 | /MaskRCNN/model/loss.py | eb4cb8a2d3d03b016b3857b3071a40cc1977da99 | []
| no_license | wprazuch/DeepLearningPlayground | 99a86945818e8a42e77408369e566b793ac612b9 | 53859fb4fd7bfc314121c85870afabd47627ce73 | refs/heads/master | 2022-12-16T05:50:22.757434 | 2020-09-03T09:23:26 | 2020-09-03T09:23:26 | 187,896,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,227 | py | import tensorflow as tf
import tensorflow.keras.backend as K
from utils import batch_pack_graph
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), 'float32')
loss = (less_than_one * 0.5 * diff ** 2) + (1 - less_than_one) * (diff - 0.5)
return loss
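# Worked example: for absolute diffs [0.5, 2.0, 3.0, 0.1] the loss is
#   |d| < 1  -> 0.5 * d**2   giving 0.125 and 0.005
#   |d| >= 1 -> |d| - 0.5    giving 1.5 and 2.5
# so smooth_l1_loss(tf.zeros((1, 4)), tf.constant([[0.5, 2., -3., 0.1]]))
# evaluates elementwise to [[0.125, 1.5, 2.5, 0.005]].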
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
"""
rpn_match = tf.squeeze(rpn_match, -1)
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
indices = tf.where(K.not_equal(rpn_match, 0))
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
loss = K.sparse_categorical_crossentropy(target=anchor_class, output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
configs: the model configs object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
Uses 0 padding to fill in unsed bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
loss = smooth_l1_loss(target_bbox, rpn_bbox)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits, active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
# During model building, Keras calls this function with
# target_class_ids of type float32. Unclear why. Cast it
# to int to get around it.
target_class_ids = tf.cast(target_class_ids, 'int64')
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits
)
loss = loss * pred_active
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
# the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64
)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64
)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
loss = K.switch(tf.size(y_true) > 0, K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
return loss
| [
"[email protected]"
]
| |
00f4f432b42195fe0d5718333d4fea31f17c3546 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /layout-blt/configs/bert_layout_publaynet_config.py | c468d18d59fde1f6a87c790cc4dbb6815ec3e80b | [
"CC-BY-4.0",
"Apache-2.0"
]
| permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,219 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default Hyperparameter configuration."""
import ml_collections
def get_config():
"""Gets the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
# Exp info
config.dataset_path = "/path/to/publaynet/"
config.dataset = "PubLayNet"
config.vocab_size = 137
config.experiment = "bert_layout"
config.model_class = "bert_layout"
config.image_size = 256
# Training info
config.seed = 0
config.log_every_steps = 100
config.eval_num_steps = 1000
config.max_length = 130
config.batch_size = 64
config.train_shuffle = True
config.eval_pad_last_batch = False
config.eval_batch_size = 64
config.num_train_steps = 100_000
config.checkpoint_every_steps = 5000
config.eval_every_steps = 5000
config.num_eval_steps = 100
# Model info
config.layout_dim = 2
config.dtype = "float32"
config.autoregressive = False
config.shuffle_buffer_size = 10
config.use_vae = True
config.share_embeddings = True
config.num_layers = 4
config.qkv_dim = 512
config.emb_dim = 512
config.mlp_dim = 2048
config.num_heads = 8
config.dropout_rate = 0.1
config.attention_dropout_rate = 0.3
config.restore_checkpoints = True
config.label_smoothing = 0.
config.sampling_method = "top-p"
config.use_vertical_info = False
# Optimizer info
config.optimizer = ml_collections.ConfigDict()
config.optimizer.type = "adam"
config.optimizer.warmup_steps = 4000
config.optimizer.lr = 5e-3
config.optimizer.beta1 = 0.9
config.optimizer.beta2 = 0.98
config.optimizer.weight_decay = 0.01
config.beta_rate = 1 / 20_000
return config
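# Hypothetical usage sketch (the actual trainer entry point lives elsewhere
# in this repo; `train_and_evaluate` is an assumed launcher name):
#   config = get_config()
#   config.optimizer.lr = 1e-3   # ConfigDict fields stay mutable
#   train_and_evaluate(config)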
| [
"[email protected]"
]
| |
b0852cf85d9083b3a78990c4c4ecb96b24190dc2 | 191d18fae52df2b10fc3c78676612ce0828c1ad8 | /essentials/multi_server.py | b345cfc321697ef93d206779c4ae5ae4b88e165c | [
"MIT"
]
| permissive | yada-yoda/pollmaster | 1e44ef42f68bf971e67b75c84842556d2ef2d687 | c7431d6b952599671c6408209528dceaad19116e | refs/heads/master | 2021-10-19T14:32:24.222515 | 2019-02-21T18:57:07 | 2019-02-21T18:57:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,061 | py | import time
import discord
from essentials.settings import SETTINGS
from utils.paginator import embed_list_paginated
async def get_pre(bot, message):
'''Gets the prefix for a message.'''
if str(message.channel.type) == 'private':
shared_server_list = await get_servers(bot, message)
if shared_server_list.__len__() == 0:
return 'pm!'
elif shared_server_list.__len__() == 1:
return await get_server_pre(bot, shared_server_list[0])
else:
# return a tuple of all prefixes.. this will check them all!
return tuple([await get_server_pre(bot, s) for s in shared_server_list])
else:
return await get_server_pre(bot, message.server)
async def get_server_pre(bot, server):
'''Gets the prefix for a server.'''
try:
#result = await bot.db.config.find_one({'_id': str(server.id)})
result = bot.pre[str(server.id)]
except AttributeError:
return 'pm!'
if not result: #or not result.get('prefix'):
return 'pm!'
return result #result.get('prefix')
async def get_servers(bot, message, short=None):
'''Get best guess of relevant shared servers'''
if message.server is None:
list_of_shared_servers = []
for s in bot.servers:
if message.author.id in [m.id for m in s.members]:
list_of_shared_servers.append(s)
if short is not None:
query = bot.db.polls.find({'short': short})
if query is not None:
server_ids_with_short = [poll['server_id'] async for poll in query]
servers_with_short = [bot.get_server(x) for x in server_ids_with_short]
shared_servers_with_short = list(set(servers_with_short).intersection(set(list_of_shared_servers)))
if shared_servers_with_short.__len__() >= 1:
return shared_servers_with_short
# do this if no shared server with short is found
if list_of_shared_servers.__len__() == 0:
return []
else:
return list_of_shared_servers
else:
return [message.server]
async def ask_for_server(bot, message, short=None):
server_list = await get_servers(bot, message, short)
if server_list.__len__() == 0:
        if short is None:
await bot.say(
'I could not find a common server where we can see eachother. If you think this is an error, please contact the developer.')
else:
await bot.say(f'I could not find a server where the poll {short} exists that we both can see.')
return None
elif server_list.__len__() == 1:
return server_list[0]
else:
text = 'I\'m not sure which server you are referring to. Please tell me by typing the corresponding number.\n'
i = 1
for name in [s.name for s in server_list]:
text += f'\n**{i}** - {name}'
i += 1
embed = discord.Embed(title="Select your server", description=text, color=SETTINGS.color)
server_msg = await bot.send_message(message.channel, embed=embed)
        valid_reply = False
        nr = 1
        while not valid_reply:
            reply = await bot.wait_for_message(timeout=60, author=message.author)
            if reply is None:
                # give up once the 60 second timeout elapses
                return None
            if reply.content:
if reply.content.startswith(await get_pre(bot, message)):
# await bot.say('You can\'t use bot commands while I am waiting for an answer.'
# '\n I\'ll stop waiting and execute your command.')
return False
if str(reply.content).isdigit():
nr = int(reply.content)
if 0 < nr <= server_list.__len__():
valid_reply = True
return server_list[nr - 1]
async def ask_for_channel(bot, server, message):
# if performed from a channel, return that channel
if str(message.channel.type) == 'text':
return message.channel
# if exactly 1 channel, return it
channel_list = [c for c in server.channels if str(c.type) == 'text']
if channel_list.__len__() == 1:
return channel_list[0]
# if no channels, display error
if channel_list.__len__() == 0:
embed = discord.Embed(title="Select a channel", description='No text channels found on this server. Make sure I can see them.', color=SETTINGS.color)
await bot.say(embed=embed)
return False
# otherwise ask for a channel
i = 1
text = 'Polls are bound to a specific channel on a server. Please select the channel for this poll by typing the corresponding number.\n'
for name in [c.name for c in channel_list]:
to_add = f'\n**{i}** - {name}'
# check if length doesn't exceed allowed maximum or split it into multiple messages
if text.__len__() + to_add.__len__() > 2048:
embed = discord.Embed(title="Select a channel", description=text, color=SETTINGS.color)
await bot.say(embed=embed)
text = 'Polls are bound to a specific channel on a server. Please select the channel for this poll by typing the corresponding number.\n'
else:
text += to_add
i += 1
embed = discord.Embed(title="Select a channel", description=text, color=SETTINGS.color)
await bot.say(embed=embed)
    valid_reply = False
    nr = 1
    while not valid_reply:
        reply = await bot.wait_for_message(timeout=60, author=message.author)
        if reply is None:
            # give up once the 60 second timeout elapses
            return False
        if reply.content:
if reply.content.startswith(await get_pre(bot, message)):
# await bot.say('You can\'t use bot commands while I am waiting for an answer.'
# '\n I\'ll stop waiting and execute your command.')
return False
if str(reply.content).isdigit():
nr = int(reply.content)
if 0 < nr <= channel_list.__len__():
valid_reply = True
return channel_list[nr - 1] | [
"[email protected]"
]
| |
921548cdfb11ada7eb5d4be07398294bf09ce197 | b9963ffb80aad7e057bc375edb85ac7ed5a837d0 | /adventofcode2017/03b.py | 44f43305774184f644e62bce54dfc526c453e223 | [
"MIT"
]
| permissive | matslindh/codingchallenges | a2db9f4579e9f35189f5cdf74590863cf84bdf95 | a846e522f7a31e988c470cda87955ee3ef20a274 | refs/heads/main | 2022-12-23T15:56:19.776354 | 2022-12-15T21:03:37 | 2022-12-15T21:03:37 | 76,491,177 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | from itertools import repeat
from math import floor
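# AoC 2017 day 3, part 2: walk an Ulam spiral outward from the centre,
# writing into each cell the sum of its already-filled neighbours, and stop
# once a written value exceeds the puzzle input (289326).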
map = []
s_y = s_x = 1001
for y in range(0, s_y):
map.append(list(repeat(0, s_x)))
x = y = floor(s_x/2)
map[y][x] = 1
x += 1
dir = 'R'
written = 0
while written <= 289326:
if dir == 'R':
if not map[y-1][x]:
dir = 'U'
else:
x += 1
elif dir == 'U':
if not map[y][x-1]:
dir = 'L'
else:
y -= 1
elif dir == 'L':
if not map[y+1][x]:
dir = 'D'
else:
x -= 1
elif dir == 'D':
if not map[y][x+1]:
dir = 'R'
else:
y += 1
written = map[y-1][x-1] + map[y-1][x] + map[y-1][x+1] + \
map[y][x-1] + map[y][x+1] + \
map[y+1][x-1] + map[y+1][x] + map[y+1][x+1]
print(dir, x, y, written)
map[y][x] = written
| [
"[email protected]"
]
| |
f829374ecf93d80a724d38e00dff9ecc2cb9c16b | f68065baf489013c926dcfea9994878716d19586 | /accounts/views.py | 323deb2d9a062d75f066d39db1854285279ddd21 | []
| no_license | groyce/pots | 06667fdc686b74a897c42879cbed5803e9efb154 | ac839943c84c3135cb4596a8f734e4a061086e10 | refs/heads/master | 2020-04-10T01:42:55.863071 | 2018-12-06T19:47:18 | 2018-12-06T19:47:18 | 160,723,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | py | from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm
from django.contrib.auth.decorators import login_required
from .models import Profile
def user_login(request):
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
user = authenticate(request,
username=cd['username'],
password=cd['password'])
if user is not None:
if user.is_active:
login(request, user)
return HttpResponse('Authenticated '\
'successfully')
else:
return HttpResponse('Disabled account')
else:
return HttpResponse('Invalid login')
else:
form = LoginForm()
return render(request, 'accounts/login.html', {'form': form})
@login_required
def edit(request):
if request.method == 'POST':
user_form = UserEditForm(instance=request.user,
data=request.POST)
profile_form = ProfileEditForm(
instance=request.user.profile,
data=request.POST,
files=request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
else:
user_form = UserEditForm(instance=request.user)
profile_form = ProfileEditForm(instance=request.user.profile)
return render(request,
'accounts/edit.html',
{'user_form': user_form,
'profile_form': profile_form})
@login_required
def dashboard(request):
return render(request,
'accounts/dashboard.html',
{'section': 'dashboard'})
def register(request):
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new user object but avoid saving it yet
new_user = user_form.save(commit=False)
# Set the chosen password
new_user.set_password(
user_form.cleaned_data['password'])
# Save the User object
new_user.save()
# Create the user profile
Profile.objects.create(user=new_user)
return render(request,
'accounts/register_done.html',
{'new_user': new_user})
else:
user_form = UserRegistrationForm()
return render(request,
'accounts/register.html',
{'user_form': user_form})
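# Hypothetical URLconf wiring for the views above (module path assumed):
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('login/', views.user_login, name='login'),
#       path('', views.dashboard, name='dashboard'),
#       path('edit/', views.edit, name='edit'),
#       path('register/', views.register, name='register'),
#   ]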
| [
"[email protected]"
]
| |
305a52c242ec94adeaaf52fb037f2f4072fe2272 | 8961efe29765a8093bcd669adb3fa6ca186eadfd | /toolbox/attacks/FGSM-Attack/run_all.py | de63948e966ad6a4eb5ec4441dfcdcc3b516de03 | []
| no_license | JayceeLee/adversarial-toolbox | 12bfe720fd0984b6dc1c10d61486b2e36f22fde9 | 01d624d995d3c55d220cdf570ca00510f32cc43a | refs/heads/master | 2020-03-27T04:22:55.631986 | 2018-02-06T01:25:27 | 2018-02-06T01:25:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | # Bare shell commands are a SyntaxError in a .py module; run each FGSM attack script via subprocess instead.
import subprocess
for script in ("fgsm_inception_v3.py",
               "fgsm_inception_resnet_v2.py",
               "fgsm_resnet_v2_101.py"):
    subprocess.run(["python", script], check=True)
| [
"[email protected]"
]
| |
8cd85855d175d322e73f636de7aed0b6850bdf52 | 2f233b31ea7ffefad4b901b561f341fabe3bbb1f | /2017/02a.py | 77f9ee8c4d1e176ea1331fdbdd314eff205802e3 | [
"MIT"
]
| permissive | cz-fish/advent-of-code | 066b63c3ac2e3b13bf88ae86843a7a9a7b687e96 | ecbcef544e8d89ec019464811760ce86f84dbc6e | refs/heads/master | 2023-08-03T19:41:23.186666 | 2023-03-14T08:59:04 | 2023-03-14T08:59:04 | 226,355,674 | 0 | 0 | MIT | 2023-07-20T02:51:13 | 2019-12-06T15:17:10 | Python | UTF-8 | Python | false | false | 593 | py | #!/usr/bin/env python3
grid = []
with open('input02.txt', 'rt') as f:
for ln in f.readlines():
grid.append([int(x) for x in ln.strip().split('\t')])
print(sum([max(l) - min(l) for l in grid]))
print('-----')
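# Part 2: each row contains exactly one pair where one value evenly divides
# the other; sum those quotients.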
s = 0
for ln in grid:
srt = sorted(ln)
stop = False
for i in range(len(srt) - 1):
x = srt[i]
if x == 0:
continue
for j in range(i+1, len(srt)):
y = srt[j]
if y // x * x == y:
s += y // x
stop = True
break
if stop:
break
print(s) | [
"[email protected]"
]
| |
2909402b00fb3a6e6b883535089989ab85eb7e84 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/graph_objs/mesh3d/legendgrouptitle/_font.py | 766e46a49d072ae53fadbf9814e540e8ef7dfdbb | [
"MIT"
]
| permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 8,471 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "mesh3d.legendgrouptitle"
_path_str = "mesh3d.legendgrouptitle.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.mesh3d.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.mesh3d.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.mesh3d.legendgrouptitle.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
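# Illustrative usage (values arbitrary; assumes the public plotly API):
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Mesh3d(
#       x=[0, 1, 2], y=[0, 1, 0], z=[0, 0, 1],
#       legendgroup="meshes",
#       legendgrouptitle=dict(text="Meshes",
#                             font=dict(family="Courier New", size=14,
#                                       color="crimson"))))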
| [
"[email protected]"
]
| |
f6066d060c195e6f9ef837b859b666ab4f30bdb8 | 096167807fa625681beae7e25919357c90b89e75 | /emails/models.py | 1fb86f349ab69c1489f2ef26d7c95be401ff5b2d | []
| no_license | bussiere/Sumomo | c849484fbae37490998bcc44e232bf6a252fe9d7 | ac3efc46014e66e193c5f852d121a25dd0a9ec5e | refs/heads/master | 2021-01-19T11:34:42.645970 | 2012-08-31T04:15:32 | 2012-08-31T04:15:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | from django.db import models
# Create your models here.
class Contact(models.Model):
Emails = models.TextField(null=True, blank=True)
class Email(models.Model):
Sender = models.ForeignKey("Contact",related_name="Sender", null=True, blank=True)
Recepter = models.ManyToManyField("Contact", related_name="Recepter",null=True, blank=True)
Title = models.TextField(null=True, blank=True)
Date = models.DateField(null=True, blank=True)
Content = models.TextField(null=True, blank=True)
File = models.ManyToManyField("attachments.File", null=True, blank=True)
Tag = models.ManyToManyField("tags.Tag", null=True, blank=True) | [
"[email protected]"
]
| |
c9272ab9ce25ad997cef0881159ae4bc9c13e0ef | e0d9844e123fa0706388814b9f29758258589487 | /torch/distributions/distribution.py | 33c48de39d91713f0e5c6f65b31cd98687ac56d8 | []
| no_license | pigpigman8686/seg | b5cf5261a5744e89ed5e5b145f60b0ccc3ba2c0c | 61c3816f7ba76243a872fe5c5fc0dede17026987 | refs/heads/master | 2023-04-10T22:22:35.035542 | 2021-04-22T06:24:36 | 2021-04-22T06:24:36 | 360,398,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,229 | py | import torch
import warnings
from torch.distributions import constraints
from torch.distributions.utils import lazy_property
from typing import Dict, Optional, Any
class Distribution(object):
r"""
Distribution is the abstract base class for probability distributions.
"""
has_rsample = False
has_enumerate_support = False
_validate_args = __debug__
@staticmethod
def set_default_validate_args(value):
"""
Sets whether validation is enabled or disabled.
The default behavior mimics Python's ``assert`` statement: validation
is on by default, but is disabled if Python is run in optimized mode
(via ``python -O``). Validation may be expensive, so you may want to
disable it once a model is working.
Args:
value (bool): Whether to enable validation.
"""
if value not in [True, False]:
raise ValueError
Distribution._validate_args = value
def __init__(self, batch_shape=torch.Size(), event_shape=torch.Size(), validate_args=None):
self._batch_shape = batch_shape
self._event_shape = event_shape
if validate_args is not None:
self._validate_args = validate_args
if self._validate_args:
try:
arg_constraints = self.arg_constraints
except NotImplementedError:
arg_constraints = {}
warnings.warn(f'{self.__class__} does not define `arg_constraints`. ' +
'Please set `arg_constraints = {}` or initialize the distribution ' +
'with `validate_args=False` to turn off validation.')
for param, constraint in arg_constraints.items():
if constraints.is_dependent(constraint):
continue # skip constraints that cannot be checked
if param not in self.__dict__ and isinstance(getattr(type(self), param), lazy_property):
continue # skip checking lazily-constructed args
if not constraint.check(getattr(self, param)).all():
raise ValueError("The parameter {} has invalid values".format(param))
super(Distribution, self).__init__()
def expand(self, batch_shape, _instance=None):
"""
Returns a new distribution instance (or populates an existing instance
provided by a derived class) with batch dimensions expanded to
`batch_shape`. This method calls :class:`~torch.Tensor.expand` on
the distribution's parameters. As such, this does not allocate new
memory for the expanded distribution instance. Additionally,
this does not repeat any args checking or parameter broadcasting in
`__init__.py`, when an instance is first created.
Args:
batch_shape (torch.Size): the desired expanded size.
_instance: new instance provided by subclasses that
need to override `.expand`.
Returns:
New distribution instance with batch dimensions expanded to
`batch_size`.
"""
raise NotImplementedError
@property
def batch_shape(self):
"""
Returns the shape over which parameters are batched.
"""
return self._batch_shape
@property
def event_shape(self):
"""
Returns the shape of a single sample (without batching).
"""
return self._event_shape
@property
def arg_constraints(self) -> Dict[str, constraints.Constraint]:
"""
Returns a dictionary from argument names to
:class:`~torch.distributions.constraints.Constraint` objects that
should be satisfied by each argument of this distribution. Args that
are not tensors need not appear in this dict.
"""
raise NotImplementedError
@property
def support(self) -> Optional[Any]:
"""
Returns a :class:`~torch.distributions.constraints.Constraint` object
representing this distribution's support.
"""
raise NotImplementedError
@property
def mean(self):
"""
Returns the mean of the distribution.
"""
raise NotImplementedError
@property
def variance(self):
"""
Returns the variance of the distribution.
"""
raise NotImplementedError
@property
def stddev(self):
"""
Returns the standard deviation of the distribution.
"""
return self.variance.sqrt()
def sample(self, sample_shape=torch.Size()):
"""
Generates a sample_shape shaped sample or sample_shape shaped batch of
samples if the distribution parameters are batched.
"""
with torch.no_grad():
return self.rsample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
"""
Generates a sample_shape shaped reparameterized sample or sample_shape
shaped batch of reparameterized samples if the distribution parameters
are batched.
"""
raise NotImplementedError
def sample_n(self, n):
"""
Generates n samples or n batches of samples if the distribution
parameters are batched.
"""
warnings.warn('sample_n will be deprecated. Use .sample((n,)) instead', UserWarning)
return self.sample(torch.Size((n,)))
def log_prob(self, value):
"""
Returns the log of the probability density/mass function evaluated at
`value`.
Args:
value (Tensor):
"""
raise NotImplementedError
def cdf(self, value):
"""
Returns the cumulative density/mass function evaluated at
`value`.
Args:
value (Tensor):
"""
raise NotImplementedError
def icdf(self, value):
"""
Returns the inverse cumulative density/mass function evaluated at
`value`.
Args:
value (Tensor):
"""
raise NotImplementedError
def enumerate_support(self, expand=True):
"""
Returns tensor containing all values supported by a discrete
distribution. The result will enumerate over dimension 0, so the shape
of the result will be `(cardinality,) + batch_shape + event_shape`
(where `event_shape = ()` for univariate distributions).
Note that this enumerates over all batched tensors in lock-step
`[[0, 0], [1, 1], ...]`. With `expand=False`, enumeration happens
along dim 0, but with the remaining batch dimensions being
singleton dimensions, `[[0], [1], ..`.
To iterate over the full Cartesian product use
`itertools.product(m.enumerate_support())`.
Args:
expand (bool): whether to expand the support over the
batch dims to match the distribution's `batch_shape`.
Returns:
Tensor iterating over dimension 0.
"""
raise NotImplementedError
def entropy(self):
"""
Returns entropy of distribution, batched over batch_shape.
Returns:
Tensor of shape batch_shape.
"""
raise NotImplementedError
def perplexity(self):
"""
Returns perplexity of distribution, batched over batch_shape.
Returns:
Tensor of shape batch_shape.
"""
return torch.exp(self.entropy())
def _extended_shape(self, sample_shape=torch.Size()):
"""
Returns the size of the sample returned by the distribution, given
a `sample_shape`. Note, that the batch and event shapes of a distribution
instance are fixed at the time of construction. If this is empty, the
returned shape is upcast to (1,).
Args:
sample_shape (torch.Size): the size of the sample to be drawn.
"""
if not isinstance(sample_shape, torch.Size):
sample_shape = torch.Size(sample_shape)
return sample_shape + self._batch_shape + self._event_shape
def _validate_sample(self, value):
"""
Argument validation for distribution methods such as `log_prob`,
`cdf` and `icdf`. The rightmost dimensions of a value to be
scored via these methods must agree with the distribution's batch
and event shapes.
Args:
value (Tensor): the tensor whose log probability is to be
computed by the `log_prob` method.
        Raises:
ValueError: when the rightmost dimensions of `value` do not match the
distribution's batch and event shapes.
"""
if not isinstance(value, torch.Tensor):
raise ValueError('The value argument to log_prob must be a Tensor')
event_dim_start = len(value.size()) - len(self._event_shape)
if value.size()[event_dim_start:] != self._event_shape:
raise ValueError('The right-most size of value must match event_shape: {} vs {}.'.
format(value.size(), self._event_shape))
actual_shape = value.size()
expected_shape = self._batch_shape + self._event_shape
for i, j in zip(reversed(actual_shape), reversed(expected_shape)):
if i != 1 and j != 1 and i != j:
raise ValueError('Value is not broadcastable with batch_shape+event_shape: {} vs {}.'.
format(actual_shape, expected_shape))
try:
support = self.support
except NotImplementedError:
warnings.warn(f'{self.__class__} does not define `support` to enable ' +
'sample validation. Please initialize the distribution with ' +
'`validate_args=False` to turn off validation.')
return
assert support is not None
if not support.check(value).all():
raise ValueError('The value argument must be within the support')
def _get_checked_instance(self, cls, _instance=None):
if _instance is None and type(self).__init__ != cls.__init__:
raise NotImplementedError("Subclass {} of {} that defines a custom __init__ method "
"must also define a custom .expand() method.".
format(self.__class__.__name__, cls.__name__))
return self.__new__(type(self)) if _instance is None else _instance
def __repr__(self):
param_names = [k for k, _ in self.arg_constraints.items() if k in self.__dict__]
args_string = ', '.join(['{}: {}'.format(p, self.__dict__[p]
if self.__dict__[p].numel() == 1
else self.__dict__[p].size()) for p in param_names])
return self.__class__.__name__ + '(' + args_string + ')'
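# Illustration (not part of the original file): a minimal sketch of the shape
# semantics documented above. ``Normal`` is just one concrete Distribution
# subclass; a sample's shape composes as sample_shape + batch_shape +
# event_shape, exactly as ``_extended_shape`` computes it.
if __name__ == "__main__":
    import torch
    from torch.distributions import Normal
    d = Normal(torch.zeros(3), torch.ones(3))  # batch_shape == (3,), event_shape == ()
    s = d.sample((2,))                         # sample_shape == (2,)
    assert s.shape == torch.Size([2, 3])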
| [
"[email protected]"
]
| |
1c9832b0b85c1b52d6843f79ec2dcb1fa84e81b1 | 68ab00c77312827e522151e6e9f2fff166e85b9c | /mypy_boto3_builder/structures/collection.py | 41b5f2550bb9a33bdb6cd53825ecc814e7734f48 | [
"MIT"
]
| permissive | pyto86pri/mypy_boto3_builder | 2cdfb3ed55ea1ff23cdffd5a9ee5400e71562450 | e8132dc4632430e0abd4cd330af51a8b1c82028f | refs/heads/master | 2023-01-25T04:06:11.174287 | 2020-12-03T23:39:06 | 2020-12-03T23:39:06 | 319,283,736 | 0 | 0 | MIT | 2020-12-07T10:29:52 | 2020-12-07T10:29:51 | null | UTF-8 | Python | false | false | 1,280 | py | """
Boto3 ServiceResource or Resource collection.
"""
from typing import Set
from mypy_boto3_builder.import_helpers.import_string import ImportString
from mypy_boto3_builder.structures.class_record import ClassRecord
from mypy_boto3_builder.type_annotations.external_import import ExternalImport
from mypy_boto3_builder.type_annotations.fake_annotation import FakeAnnotation
class Collection(ClassRecord):
"""
Boto3 ServiceResource or Resource collection.
"""
def __init__(
self,
name: str,
attribute_name: str,
parent_name: str,
type_annotation: FakeAnnotation,
docstring: str = "",
):
super().__init__(
name=name,
use_alias=True,
docstring=docstring,
bases=[
ExternalImport(
source=ImportString("boto3", "resources", "collection"),
name="ResourceCollection",
)
],
)
self.attribute_name = attribute_name
self.parent_name = parent_name
self.type_annotation = type_annotation
def get_types(self) -> Set[FakeAnnotation]:
types = super().get_types()
types.update(self.type_annotation.get_types())
return types
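# Illustration (not part of the generated module): a minimal sketch of
# constructing a Collection record. ``ExternalImport``/``ImportString`` are the
# same helpers imported above; the S3 names below are hypothetical examples.
def _example_collection() -> Collection:
    return Collection(
        name="BucketObjectsCollection",
        attribute_name="objects",
        parent_name="Bucket",
        type_annotation=ExternalImport(
            source=ImportString("mypy_boto3_s3", "service_resource"),
            name="ObjectSummary",
        ),
    )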
| [
"[email protected]"
]
| |
97876c1143af3c1bbcf63ea5db171555c18fc239 | 242086b8c6a39cbc7af3bd7f2fd9b78a66567024 | /python/PP4E-Examples-1.4/Examples/PP4E/Gui/Intro/gui3.py | 6617d3e8edd2b088131c50e73653265dc000e795 | []
| no_license | chuzui/algorithm | 7537d0aa051ac4cbe9f6a7ca9a3037204803a650 | c3006b24c4896c1242d3ceab43ace995c94f10c8 | refs/heads/master | 2021-01-10T13:05:30.902020 | 2015-09-27T14:39:02 | 2015-09-27T14:39:02 | 8,404,397 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | import sys
from tkinter import *
def quit(): # a custom callback handler
print('Hello, I must be going...') # kill windows and process
sys.exit()
widget = Button(None, text='Hello event world', command=quit)
widget.pack()
widget.mainloop()
| [
"zui"
]
| zui |
38249fa8185cebfdb4c30d7dddd3e605bad8b96b | 5bf245e55b756ca3e664d857f36db092855c7a98 | /externals/mne/fixes.py | 399715ee615775de35fa7a26ddf1c8dde4d48d47 | [
"BSD-3-Clause"
]
| permissive | kingjr/decoding_challenge_cortana_2016_3rd | b264fabbe8fb2f3788d11dc2c4deebcf217a64a5 | 26c2ebf5200b5a5cd268fa73ac3928d7257d08d3 | refs/heads/master | 2021-01-20T17:54:12.617430 | 2016-07-13T22:31:58 | 2016-07-13T22:31:58 | 63,120,115 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 33,605 | py | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# XXX : originally copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD
from __future__ import division
import collections
from distutils.version import LooseVersion
from functools import partial
from gzip import GzipFile
import inspect
from math import ceil, log
from operator import itemgetter
import re
import warnings
import numpy as np
from numpy.fft import irfft
import scipy
from scipy import linalg, sparse
from .externals import six
from .externals.six.moves import copyreg, xrange
###############################################################################
# Misc
# helpers to get function arguments
if hasattr(inspect, 'signature'): # py35
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
else:
def _get_args(function, varargs=False):
out = inspect.getargspec(function) # args, varargs, keywords, defaults
if varargs:
return out[:2]
else:
return out[0]
class gzip_open(GzipFile): # python2.6 doesn't have context managing
def __enter__(self):
if hasattr(GzipFile, '__enter__'):
return GzipFile.__enter__(self)
else:
return self
def __exit__(self, exc_type, exc_value, traceback):
if hasattr(GzipFile, '__exit__'):
return GzipFile.__exit__(self, exc_type, exc_value, traceback)
else:
return self.close()
class _Counter(collections.defaultdict):
"""Partial replacement for Python 2.7 collections.Counter."""
def __init__(self, iterable=(), **kwargs):
super(_Counter, self).__init__(int, **kwargs)
self.update(iterable)
def most_common(self):
return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)
def update(self, other):
"""Adds counts for elements in other"""
if isinstance(other, self.__class__):
for x, n in six.iteritems(other):
self[x] += n
else:
for x in other:
self[x] += 1
try:
Counter = collections.Counter
except AttributeError:
Counter = _Counter
def _unique(ar, return_index=False, return_inverse=False):
"""A replacement for the np.unique that appeared in numpy 1.4.
While np.unique existed long before, keyword return_inverse was
only added in 1.4.
"""
try:
ar = ar.flatten()
except AttributeError:
if not return_inverse and not return_index:
items = sorted(set(ar))
return np.asarray(items)
else:
ar = np.asarray(ar).flatten()
if ar.size == 0:
if return_inverse and return_index:
return ar, np.empty(0, np.bool), np.empty(0, np.bool)
elif return_inverse or return_index:
return ar, np.empty(0, np.bool)
else:
return ar
if return_inverse or return_index:
perm = ar.argsort()
aux = ar[perm]
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if return_inverse:
iflag = np.cumsum(flag) - 1
iperm = perm.argsort()
if return_index:
return aux[flag], perm[flag], iflag[iperm]
else:
return aux[flag], iflag[iperm]
else:
return aux[flag], perm[flag]
else:
ar.sort()
flag = np.concatenate(([True], ar[1:] != ar[:-1]))
return ar[flag]
if LooseVersion(np.__version__) < LooseVersion('1.5'):
unique = _unique
else:
unique = np.unique
def _bincount(X, weights=None, minlength=None):
"""Replacing np.bincount in numpy < 1.6 to provide minlength."""
result = np.bincount(X, weights)
if minlength is None or len(result) >= minlength:
return result
out = np.zeros(minlength, np.int)
out[:len(result)] = result
return out
if LooseVersion(np.__version__) < LooseVersion('1.6'):
bincount = _bincount
else:
bincount = np.bincount
def _copysign(x1, x2):
"""Slow replacement for np.copysign, which was introduced in numpy 1.4"""
return np.abs(x1) * np.sign(x2)
if not hasattr(np, 'copysign'):
copysign = _copysign
else:
copysign = np.copysign
def _in1d(ar1, ar2, assume_unique=False, invert=False):
"""Replacement for in1d that is provided for numpy >= 1.4"""
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
if not hasattr(np, 'in1d') or LooseVersion(np.__version__) < '1.8':
in1d = _in1d
else:
in1d = np.in1d
def _digitize(x, bins, right=False):
"""Replacement for digitize with right kwarg (numpy < 1.7).
Notes
-----
This fix is only meant for integer arrays. If ``right==True`` but either
``x`` or ``bins`` are of a different type, a NotImplementedError will be
raised.
"""
if right:
x = np.asarray(x)
bins = np.asarray(bins)
if (x.dtype.kind not in 'ui') or (bins.dtype.kind not in 'ui'):
raise NotImplementedError("Only implemented for integer input")
return np.digitize(x - 1e-5, bins)
else:
return np.digitize(x, bins)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
digitize = _digitize
else:
digitize = np.digitize
def _tril_indices(n, k=0):
"""Replacement for tril_indices that is provided for numpy >= 1.4"""
mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
indices = np.where(mask)
return indices
if not hasattr(np, 'tril_indices'):
tril_indices = _tril_indices
else:
tril_indices = np.tril_indices
def _unravel_index(indices, dims):
"""Add support for multiple indices in unravel_index that is provided
for numpy >= 1.4"""
indices_arr = np.asarray(indices)
if indices_arr.size == 1:
return np.unravel_index(indices, dims)
else:
if indices_arr.ndim != 1:
raise ValueError('indices should be one dimensional')
ndims = len(dims)
unraveled_coords = np.empty((indices_arr.size, ndims), dtype=np.int)
for coord, idx in zip(unraveled_coords, indices_arr):
coord[:] = np.unravel_index(idx, dims)
return tuple(unraveled_coords.T)
if LooseVersion(np.__version__) < LooseVersion('1.4'):
unravel_index = _unravel_index
else:
unravel_index = np.unravel_index
def _qr_economic_old(A, **kwargs):
"""
Compat function for the QR-decomposition in economic mode
Scipy 0.9 changed the keyword econ=True to mode='economic'
"""
with warnings.catch_warnings(record=True):
return linalg.qr(A, econ=True, **kwargs)
def _qr_economic_new(A, **kwargs):
return linalg.qr(A, mode='economic', **kwargs)
if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
qr_economic = _qr_economic_old
else:
qr_economic = _qr_economic_new
def savemat(file_name, mdict, oned_as="column", **kwargs):
"""MATLAB-format output routine that is compatible with SciPy 0.7's.
0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default
value. It issues a warning if this is not provided, stating that "This will
change to 'row' in future versions."
"""
import scipy.io
try:
return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)
except TypeError:
return scipy.io.savemat(file_name, mdict, **kwargs)
if hasattr(np, 'count_nonzero'):
from numpy import count_nonzero
else:
def count_nonzero(X):
return len(np.flatnonzero(X))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in _get_args(np.copy):
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
def _meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,) * (ndim - 2)
output[1].shape = (-1, 1) + (1,) * (ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
meshgrid = _meshgrid
else:
meshgrid = np.meshgrid
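# Illustration (not in the original module): whichever branch was taken above,
# ``meshgrid`` follows the modern API, e.g.
#   X, Y = meshgrid(np.arange(2), np.arange(3), indexing='ij')
# gives X.shape == Y.shape == (2, 3), while indexing='xy' would give (3, 2).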
###############################################################################
# Back porting firwin2 for older scipy
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) # doctest: +SKIP
>>> print(taps[72:78]) # doctest: +SKIP
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s'
% (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from scipy.signal.signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def get_firwin2():
"""Helper to get firwin2"""
try:
from scipy.signal import firwin2
except ImportError:
firwin2 = _firwin2
return firwin2
def _filtfilt(*args, **kwargs):
"""wrap filtfilt, excluding padding arguments"""
from scipy.signal import filtfilt
# cut out filter args
if len(args) > 4:
args = args[:4]
if 'padlen' in kwargs:
del kwargs['padlen']
return filtfilt(*args, **kwargs)
def get_filtfilt():
"""Helper to get filtfilt from scipy"""
from scipy.signal import filtfilt
if 'padlen' in _get_args(filtfilt):
return filtfilt
return _filtfilt
def _get_argrelmax():
try:
from scipy.signal import argrelmax
except ImportError:
argrelmax = _argrelmax
return argrelmax
def _argrelmax(data, axis=0, order=1, mode='clip'):
"""Calculate the relative maxima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative maxima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
"""
comparator = np.greater
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return np.where(results)
###############################################################################
# Back porting matrix_rank for numpy < 1.7
def _matrix_rank(M, tol=None):
""" Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that
are greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for
linear least squares [2].
This default threshold is designed to detect rank deficiency accounting
for the numerical errors of the SVD computation. Imagine that there is a
column in `M` that is an exact (in floating point) linear combination of
other columns in `M`. Computing the SVD on `M` will not produce a
singular value exactly equal to 0 in general: any difference of the
smallest SVD value from 0 will be caused by numerical imprecision in the
calculation of the SVD. Our threshold for small SVD values takes this
numerical imprecision into account, and the default threshold will detect
such numerical rank deficiency. The threshold may declare a matrix `M`
rank deficient even if the linear combination of some columns of `M` is
not exactly equal to another column of `M` but only numerically very
close to another column of `M`.
We chose our default threshold because it is in wide use. Other
thresholds are possible. For example, elsewhere in the 2007 edition of
*Numerical recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance
values to detect *effective* rank deficiency. The most useful measure of
the tolerance depends on the operations you intend to use on your matrix.
For example, if your data come from uncertain measurements with
uncertainties greater than floating point epsilon, choosing a tolerance
near that uncertainty may be preferable. The tolerance may be absolute if
the uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = np.asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return np.int(not all(M == 0))
S = np.linalg.svd(M, compute_uv=False)
if tol is None:
tol = S.max() * np.max(M.shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
if LooseVersion(np.__version__) > '1.7.1':
from numpy.linalg import matrix_rank
else:
matrix_rank = _matrix_rank
def _reconstruct_partial(func, args, kwargs):
"""Helper to pickle partial functions"""
return partial(func, *args, **(kwargs or {}))
def _reduce_partial(p):
"""Helper to pickle partial functions"""
return _reconstruct_partial, (p.func, p.args, p.keywords)
# This adds pickling functionality to older Python 2.6
# Please always import partial from here.
copyreg.pickle(partial, _reduce_partial)
def normalize_colors(vmin, vmax, clip=False):
"""Helper to handle matplotlib API"""
import matplotlib.pyplot as plt
try:
return plt.Normalize(vmin, vmax, clip=clip)
except AttributeError:
return plt.normalize(vmin, vmax, clip=clip)
def assert_true(expr, msg='False is not True'):
"""Fake assert_true without message"""
if not expr:
raise AssertionError(msg)
def assert_is(expr1, expr2, msg=None):
"""Fake assert_is without message"""
    assert_true(expr1 is expr2, msg)
def assert_is_not(expr1, expr2, msg=None):
"""Fake assert_is_not without message"""
assert_true(expr1 is not expr2, msg)
assert_raises_regex_impl = None
# from numpy 1.9.1
def assert_raises_regex(exception_class, expected_regexp,
callable_obj=None, *args, **kwargs):
"""
Fail unless an exception of class exception_class and with message that
matches expected_regexp is thrown by callable when invoked with arguments
args and keyword arguments kwargs.
Name of this function adheres to Python 3.2+ reference, but should work in
all versions down to 2.6.
"""
__tracebackhide__ = True # Hide traceback for py.test
import nose
global assert_raises_regex_impl
if assert_raises_regex_impl is None:
try:
# Python 3.2+
assert_raises_regex_impl = nose.tools.assert_raises_regex
except AttributeError:
try:
# 2.7+
assert_raises_regex_impl = nose.tools.assert_raises_regexp
except AttributeError:
# 2.6
# This class is copied from Python2.7 stdlib almost verbatim
class _AssertRaisesContext(object):
def __init__(self, expected, expected_regexp=None):
self.expected = expected
self.expected_regexp = expected_regexp
def failureException(self, msg):
return AssertionError(msg)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
def impl(cls, regex, callable_obj, *a, **kw):
mgr = _AssertRaisesContext(cls, regex)
if callable_obj is None:
return mgr
with mgr:
callable_obj(*a, **kw)
assert_raises_regex_impl = impl
return assert_raises_regex_impl(exception_class, expected_regexp,
callable_obj, *args, **kwargs)
def _sparse_block_diag(mats, format=None, dtype=None):
"""An implementation of scipy.sparse.block_diag since old versions of
scipy don't have it. Forms a sparse matrix by stacking matrices in block
diagonal form.
Parameters
----------
mats : list of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the
matrix is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of blocks.
Returns
-------
res : sparse matrix
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None] * nmat
row[ia] = a
rows.append(row)
return sparse.bmat(rows, format=format, dtype=dtype)
try:
from scipy.sparse import block_diag as sparse_block_diag
except Exception:
sparse_block_diag = _sparse_block_diag
def _isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`isclose(a, b)` might be different from `isclose(b, a)` in
some rare cases.
Examples
--------
>>> isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False], dtype=bool)
>>> isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True], dtype=bool)
>>> isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True], dtype=bool)
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
dt = np.core.multiarray.result_type(y, 1.)
y = np.array(y, dtype=dt, copy=False, subok=True)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if np.all(xfin) and np.all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = np.isnan(x) & np.isnan(y)
cond[both_nan] = both_nan[both_nan]
return cond
if LooseVersion(np.__version__) < LooseVersion('1.7'):
isclose = _isclose
else:
isclose = np.isclose
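# Illustration (not in the original module): the compat names defined above are
# drop-in aliases for the newest APIs, independent of the installed versions.
if __name__ == "__main__":
    assert bool(isclose(1e10, 1.00001e10))  # within the default rtol of 1e-5
    assert matrix_rank(np.eye(4)) == 4      # full-rank identity matrix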
| [
"[email protected]"
]
| |
4fd2db085bebdf0fb2594d570603ecce95d71f50 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03227/s459999028.py | 03236a4d5fabd077163769a4c50f0ed805cccd94 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | def main():
s = input().rstrip()
if len(s) == 2:
print(s)
else:
print(s[::-1])
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
d807abdc220d8649a4f546bf8715b4be597aec77 | 5a71ca1f5c964f803350e3c1238cb48986db565c | /coinlibbitfinex/tests/test_bitfinex_streamapi.py | 25331ab7bbc5c1144c8083305e500db7203b9b85 | []
| no_license | tetocode/coinliball | fd644cbc16039ecad7e43228ea4e287ead5c8e5f | 41ebbac13c1fbba98aedaa766b9a505cb157f374 | refs/heads/master | 2022-09-28T21:58:08.130006 | 2020-06-04T03:00:56 | 2020-06-04T03:00:56 | 269,247,318 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | import time
from queue import Queue, Empty
import pytest
from coinlib.datatypes.streamdata import StreamData
from coinlibbitbankcc.streamapi import StreamApi
WAIT = 3
N = 10
def test_subscribe(stream_api: StreamApi):
xrp_usd_params = {
'event': 'subscribe',
'channel': 'book',
'pair': 'XRPUSD',
'prec': 'P0',
'freq': 'F0',
'len': '25',
}
xrp_btc_params = xrp_usd_params.copy()
xrp_btc_params['pair'] = 'XRPBTC'
q = Queue()
stream_api.on_raw_data = q.put
stream_api.subscribe(('xrp_usd', xrp_usd_params))
stream_api.subscribe(('xrp_btc', xrp_btc_params))
keys = set()
time.sleep(1)
for _ in range(N):
d: StreamData = q.get(timeout=WAIT)
k = d.key
keys.add(k)
assert keys == {'xrp_usd', 'xrp_btc'}
stream_api.unsubscribe('xrp_usd')
time.sleep(1)
for _ in range(q.qsize() + N):
q.get(timeout=WAIT)
keys = set()
for _ in range(q.qsize() + N):
d = q.get(timeout=WAIT)
k = d.key
keys.add(k)
assert keys == {'xrp_btc'}
stream_api.unsubscribe('xrp_btc')
with pytest.raises(Empty):
for _ in range(q.qsize() + N):
q.get(timeout=WAIT)
# re-subscribe
stream_api.subscribe(('xrp_usd', xrp_usd_params), ('xrp_btc', xrp_btc_params))
keys = set()
for _ in range(N):
d = q.get(timeout=WAIT)
k = d.key
keys.add(k)
assert keys == {'xrp_usd', 'xrp_btc'}
| [
"_"
]
| _ |
d8bef9d2257e646945921eef2184ee0089672dc5 | dabc9c7ec7cce125a12c6243ff67fd91e620d636 | /tap/line.py | 2784be13d982350342f9cef81eb316a081153234 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | Mark-E-Hamilton/tappy | 7634209c2862c9e837b58602d4b59636fd9a8e89 | 62c1a4ef1d9e724d3c7bbb31361c17c3bf071d04 | refs/heads/master | 2021-01-15T09:04:09.813683 | 2016-03-21T04:51:45 | 2016-03-21T04:51:45 | 53,630,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,921 | py | # Copyright (c) 2016, Matt Layman
class Line(object):
"""Base type for TAP data.
TAP is a line based protocol. Thus, the most primitive type is a line.
"""
@property
def category(self):
raise NotImplementedError
class Result(Line):
"""Information about an individual test line."""
def __init__(
self, ok, number=None, description='', directive=None,
diagnostics=None):
self._ok = ok
if number:
self._number = int(number)
else:
# The number may be an empty string so explicitly set to None.
self._number = None
self._description = description
self.directive = directive
self.diagnostics = diagnostics
@property
def category(self):
""":returns: ``test``"""
return 'test'
@property
def ok(self):
"""Get the ok status.
:rtype: bool
"""
return self._ok
@property
def number(self):
"""Get the test number.
:rtype: int
"""
return self._number
@property
def description(self):
"""Get the description."""
return self._description
@property
def skip(self):
"""Check if this test was skipped.
:rtype: bool
"""
return self.directive.skip
@property
def todo(self):
"""Check if this test was a TODO.
:rtype: bool
"""
return self.directive.todo
def __str__(self):
is_not = ''
if not self.ok:
is_not = 'not '
directive = ''
if self.directive is not None:
directive = ' # {0}'.format(self.directive.text)
diagnostics = ''
if self.diagnostics is not None:
diagnostics = '\n' + self.diagnostics.rstrip()
return "{0}ok {1} - {2}{3}{4}".format(
is_not, self.number, self.description, directive, diagnostics)
class Plan(Line):
"""A plan line to indicate how many tests to expect."""
def __init__(self, expected_tests, directive=None):
self._expected_tests = expected_tests
self.directive = directive
@property
def category(self):
""":returns: ``plan``"""
return 'plan'
@property
def expected_tests(self):
"""Get the number of expected tests.
:rtype: int
"""
return self._expected_tests
@property
def skip(self):
"""Check if this plan should skip the file.
:rtype: bool
"""
return self.directive.skip
class Diagnostic(Line):
"""A diagnostic line (i.e. anything starting with a hash)."""
def __init__(self, text):
self._text = text
@property
def category(self):
""":returns: ``diagnostic``"""
return 'diagnostic'
@property
def text(self):
"""Get the text."""
return self._text
class Bail(Line):
"""A bail out line (i.e. anything starting with 'Bail out!')."""
def __init__(self, reason):
self._reason = reason
@property
def category(self):
""":returns: ``bail``"""
return 'bail'
@property
def reason(self):
"""Get the reason."""
return self._reason
class Version(Line):
"""A version line (i.e. of the form 'TAP version 13')."""
def __init__(self, version):
self._version = version
@property
def category(self):
""":returns: ``version``"""
return 'version'
@property
def version(self):
"""Get the version number.
:rtype: int
"""
return self._version
class Unknown(Line):
"""A line that represents something that is not a known TAP line.
This exists for the purpose of a Null Object pattern.
"""
@property
def category(self):
""":returns: ``unknown``"""
return 'unknown'
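# Illustration (not part of the original module): rendering a parsed test line
# back to TAP text.
if __name__ == "__main__":
    result = Result(ok=True, number=1, description="it works")
    assert result.category == "test"
    assert str(result) == "ok 1 - it works"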
| [
"[email protected]"
]
| |
1412f35638ca0ea7b9a84f157d78d221431a2524 | 810ce1c1ac47743e253171ec7541c0e431d952c2 | /small_programme/crawler/crawling.py | e445437136947a14712e6ade780429dd6b18b819 | []
| no_license | hjlarry/practise-py | 91052c25dc7ab706c6234f6d657db76667a27124 | 871e06b9652d356f55e3888f1f7ea180ac2b1954 | refs/heads/master | 2022-09-11T17:47:48.557194 | 2022-08-10T02:07:24 | 2022-08-10T02:07:24 | 136,263,989 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,379 | py | import asyncio
import collections
import logging
import re
import time
import urllib
import cgi
import sys
import aiohttp
from reporting import report
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
FetchStatistic = collections.namedtuple(
"FetchStatistic",
[
"url",
"next_url",
"status",
"exception",
"size",
"content_type",
"encoding",
"num_urls",
"num_new_urls",
],
)
def lenient_host(host):
parts = host.split(".")[-2:]
return "".join(parts)
def is_redirect(response):
return response.status in (300, 301, 302, 303, 307)
class Crawler:
def __init__(
self,
roots,
exclude=None,
strict=True,
max_redirect=10,
max_tries=4,
max_tasks=10,
*,
loop=None,
):
self.roots = roots
self.loop = loop or asyncio.get_event_loop()
self.exclude = exclude
self.strict = strict
self.max_redirect = max_redirect
self.max_tries = max_tries
self.max_tasks = max_tasks
self.q = asyncio.Queue(loop=self.loop)
self.session = aiohttp.ClientSession(loop=self.loop)
self.seen_urls = set()
self.done = []
self.root_domains = set()
for root in self.roots:
parts = urllib.parse.urlparse(root)
host, port = urllib.parse.splitport(parts.netloc)
if not host:
continue
if re.match(r"\A[\d\.]*\Z", host):
                self.root_domains.add(host)
else:
host = host.lower()
if self.strict:
self.root_domains.add(host)
else:
                    self.root_domains.add(lenient_host(host))  # non-strict mode: a.bc.com and bc.com collapse to the same key
for root in self.roots:
self.add_url(root)
self.t0 = time.time()
self.t1 = None
def close(self):
self.session.close()
def add_url(self, url, max_redirect=None):
if max_redirect is None:
max_redirect = self.max_redirect
LOGGER.info(f"adding url: {url}, {max_redirect}")
self.seen_urls.add(url)
self.q.put_nowait((url, max_redirect))
def record_statistic(self, fetch_statistic):
self.done.append(fetch_statistic)
def _host_okay_strictish(self, host):
host = host[4:] if host.startswith("www.") else "www." + host
return host in self.root_domains
    def _host_okay_lenient(self, host):
        return lenient_host(host) in self.root_domains
def host_okay(self, host):
host = host.lower()
if host in self.root_domains:
return True
if re.match(r"\A[\d\.]*\Z", host):
return False
if self.strict:
return self._host_okay_strictish(host)
else:
            return self._host_okay_lenient(host)
def url_allowed(self, url):
if self.exclude and re.search(self.exclude, url):
return False
parts = urllib.parse.urlparse(url)
if parts.scheme not in ("http", "https"):
LOGGER.debug(f"skip non http url: {url}")
return False
        host, port = urllib.parse.splitport(parts.netloc)
if not self.host_okay(host):
LOGGER.debug(f"skip non-root host in {url}")
return False
return True
async def parse_links(self, response):
links = set()
content_type = None
encoding = None
body = await response.read()
if response.status == 200:
content_type = response.headers.get("content-type")
pdict = {}
if content_type:
content_type, pdict = cgi.parse_header(content_type)
encoding = pdict.get("charset", "utf-8")
if content_type in ("text/html", "application/xml"):
text = await response.text()
# href 替换为 (?:href|src) 可以拿到图片的链接
urls = set(re.findall(r"""(?i)href=["']([^\s"'<>]+)""", text))
if urls:
LOGGER.info(f"got {len(urls)} distinct urls from {response.url}")
for url in urls:
normalized = urllib.parse.urljoin(str(response.url), url)
defragmented, frag = urllib.parse.urldefrag(normalized)
if self.url_allowed(defragmented):
links.add(defragmented)
stat = FetchStatistic(
url=response.url,
next_url=None,
status=response.status,
exception=None,
size=len(body),
content_type=content_type,
encoding=encoding,
num_urls=len(links),
num_new_urls=len(links - self.seen_urls),
)
return stat, links
async def fetch(self, url, max_redirect):
tries = 0
exception = None
while tries < self.max_tries:
try:
response = await self.session.get(url, allow_redirects=False)
if tries > 1:
LOGGER.info(f"try {tries} for {url} success")
break
except aiohttp.ClientError as client_err:
LOGGER.info(f"try {tries} for {url} raise {client_err}")
exception = client_err
tries += 1
else:
LOGGER.error(f"{url} failed after {self.max_tries} tries")
self.record_statistic(
FetchStatistic(
url=url,
next_url=None,
status=None,
exception=exception,
size=0,
content_type=None,
encoding=None,
num_urls=0,
num_new_urls=0,
)
)
return
try:
if is_redirect(response):
location = response.headers["location"]
next_url = urllib.parse.urljoin(url, location)
self.record_statistic(
FetchStatistic(
url=url,
next_url=next_url,
status=response.status,
exception=None,
size=0,
content_type=None,
encoding=None,
num_urls=0,
num_new_urls=0,
)
)
if next_url in self.seen_urls:
return
if max_redirect > 0:
LOGGER.info(f"redirect to {next_url} from {url}")
self.add_url(next_url, max_redirect - 1)
else:
LOGGER.error(f"redirect num limit for {next_url} from {url}")
else:
stat, links = await self.parse_links(response)
self.record_statistic(stat)
for link in links.difference(self.seen_urls):
self.q.put_nowait((link, self.max_redirect))
self.seen_urls.update(links)
finally:
await response.release()
async def worker(self):
try:
while True:
url, max_redirect = await self.q.get()
assert url in self.seen_urls
await self.fetch(url, max_redirect)
self.q.task_done()
except asyncio.CancelledError:
pass
async def crawl(self):
workers = [
asyncio.Task(self.worker(), loop=self.loop) for _ in range(self.max_tasks)
]
self.t0 = time.time()
await self.q.join()
self.t1 = time.time()
for w in workers:
w.cancel()
def main():
loop = asyncio.get_event_loop()
roots = ("http://doc.1.com/platform/realname/",)
crawler = Crawler(roots)
try:
loop.run_until_complete(crawler.crawl())
except KeyboardInterrupt:
sys.stderr.flush()
print("\nInterrupted\n")
finally:
        with open("report.txt", "w+") as f:  # ensure the report file is closed
            report(crawler, file=f)
crawler.close()
loop.stop()
loop.run_forever()
loop.close()
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
b11986b3974295a315c63bf1ec08b07e1e0e3087 | dde9442399c78414c05f7f36803c861638065ca3 | /Multidimensional-Lists-Exercise/Radioactive-Mutant-Vampire-Bunnies.py | a22c9f63fe0ef1c68063385ce0f936bf2bfc334d | []
| no_license | Vigyrious/python_advanced | 6778eed9e951b5a11b22f6c6d8ea5b160c3aa00d | 67db470e78b194aea1f9a35283d5a88b0f6ab94c | refs/heads/main | 2023-03-23T12:24:59.688699 | 2021-03-12T20:53:04 | 2021-03-12T20:53:04 | 347,192,305 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,396 | py | row, col = map(int, input().split(" "))
matrix = []
[matrix.append(list(input())) for _ in range(row)]
movements = list(input())
player_row, player_col = [[row_index,col_index] for row_index in range(row) for col_index in range(col) if matrix[row_index][col_index] == "P"][0]
is_dead = False
has_won = False
while not is_dead and not has_won:
bunnies = [[bunny_row, bunny_col] for bunny_row in range(row) for bunny_col in range(col) if matrix[bunny_row][bunny_col] == "B"]
current_movement = movements.pop(0)
if current_movement == "U":
if player_row-1 in range(row):
if matrix[player_row-1][player_col] == "B":
player_row -= 1
matrix[player_row][player_col] = "B"
is_dead = True
else:
matrix[player_row][player_col] = "."
matrix[player_row - 1][player_col] = "P"
player_row -= 1
else:
matrix[player_row][player_col] = "."
has_won = True
elif current_movement == "D":
if player_row+1 in range(row):
if matrix[player_row+1][player_col] == "B":
player_row += 1
matrix[player_row][player_col] = "B"
is_dead = True
else:
matrix[player_row][player_col] = "."
matrix[player_row + 1][player_col] = "P"
player_row += 1
else:
matrix[player_row][player_col] = "."
has_won = True
elif current_movement == "L":
if player_col-1 in range(col):
if matrix[player_row][player_col - 1] == "B":
player_col -= 1
matrix[player_row][player_col] = "B"
is_dead = True
else:
matrix[player_row][player_col] = "."
matrix[player_row][player_col - 1] = "P"
player_col -= 1
else:
matrix[player_row][player_col] = "."
has_won = True
elif current_movement == "R":
if player_col+1 in range(col):
if matrix[player_row][player_col + 1] == "B":
player_col += 1
matrix[player_row][player_col] = "B"
is_dead = True
else:
matrix[player_row][player_col] = "."
matrix[player_row][player_col + 1] = "P"
player_col += 1
else:
matrix[player_row][player_col] = "."
has_won = True
for bunny in bunnies:
bunny_row, bunny_col = bunny
if bunny_row+1 in range(row):
if matrix[bunny_row+1][bunny_col] == "P":
is_dead = True
matrix[bunny_row + 1][bunny_col] = "B"
if bunny_row-1 in range(row):
if matrix[bunny_row-1][bunny_col] == "P":
is_dead = True
matrix[bunny_row - 1][bunny_col] = "B"
if bunny_col + 1 in range(col):
if matrix[bunny_row][bunny_col+1] == "P":
is_dead = True
matrix[bunny_row][bunny_col+1] = "B"
if bunny_col - 1 in range(col):
if matrix[bunny_row][bunny_col-1] == "P":
is_dead = True
matrix[bunny_row][bunny_col-1] = "B"
[print(''.join(sub)) for sub in matrix]
print(f"won: {player_row} {player_col}") if has_won else print(f"dead: {player_row} {player_col}") | [
"[email protected]"
]
| |
369add1f2e8ed2f7a86b91b166f88feef21733e3 | 63b79eb44cf682ece74be1fc866f7651837db448 | /powerplay/models/game_content_media.py | cc2654fda4508741a4901f39caab2b020b8b674c | []
| no_license | bclark86/powerplay-py | c8cc4df8acd9ada91299706b7a7113ab9c963645 | 584d754629936a93d95157356ff806a5c68438dc | refs/heads/main | 2023-07-19T04:23:16.510338 | 2021-09-02T13:17:12 | 2021-09-02T13:17:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,861 | py | # coding: utf-8
"""
NHL API
Documenting the publicly accessible portions of the NHL API. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GameContentMedia(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'epg': 'list[AnyOfGameContentMediaEpgItems]',
'milestones': 'GameContentMediaMilestones'
}
attribute_map = {
'epg': 'epg',
'milestones': 'milestones'
}
def __init__(self, epg=None, milestones=None): # noqa: E501
"""GameContentMedia - a model defined in Swagger""" # noqa: E501
self._epg = None
self._milestones = None
self.discriminator = None
if epg is not None:
self.epg = epg
if milestones is not None:
self.milestones = milestones
@property
def epg(self):
"""Gets the epg of this GameContentMedia. # noqa: E501
:return: The epg of this GameContentMedia. # noqa: E501
:rtype: list[AnyOfGameContentMediaEpgItems]
"""
return self._epg
@epg.setter
def epg(self, epg):
"""Sets the epg of this GameContentMedia.
:param epg: The epg of this GameContentMedia. # noqa: E501
:type: list[AnyOfGameContentMediaEpgItems]
"""
self._epg = epg
@property
def milestones(self):
"""Gets the milestones of this GameContentMedia. # noqa: E501
:return: The milestones of this GameContentMedia. # noqa: E501
:rtype: GameContentMediaMilestones
"""
return self._milestones
@milestones.setter
def milestones(self, milestones):
"""Sets the milestones of this GameContentMedia.
:param milestones: The milestones of this GameContentMedia. # noqa: E501
:type: GameContentMediaMilestones
"""
self._milestones = milestones
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GameContentMedia, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GameContentMedia):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
d7b4e049f95736c4a3a270a0a6e326a8bc7e03d5 | 887b9fd5f4fd4b9448f32750788b138b2e94be3e | /stock/futu/import_requests _income.py | 6411f1232d8b5870521859d6a0da9b07f5f729fa | []
| no_license | hong0396/hotwind_git | 8fa11b3bc46aadd0b83b297cb6c6919102b7b920 | 544d984d8a8cdc42b422792a5064d19d24e0c831 | refs/heads/master | 2020-04-04T01:11:50.010424 | 2018-11-03T07:24:59 | 2018-11-03T07:24:59 | 136,184,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | import requests
headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Cookie': 'UM_distinctid=165fa9285fb762-07c06f613d5cac-8383268-e1000-165fa9285fc20a; cipher_device_id=1537507232150902; tgw_l7_route=8d34ab350eb9a9772a5a0c377f34d47d',
'Host': 'finance.futunn.com',
'Origin': 'https://www.futunn.com',
'Referer': 'https://www.futunn.com/quote/stock-info?m=us&code=CYTXW&type=finance_analyse',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
}
url='https://finance.futunn.com/api/finance/balance-sheet?code=CYTXW&label=us&quarter=0&page=0'
r = requests.get(url, headers=headers).json()
print(r.get("data").get("list"))
print(r.get("data").get("pages"))
| [
"[email protected]"
]
| |
7db05f705d72bdf87180f6a7bff371d915d8b61e | 299e5934971f9de638692e2667d6e270bcab5cbd | /214.最短回文串.py | fd576408b90eb365d8d4759abcade422cdf7f582 | []
| no_license | ycj123/Leetcode-Python3 | 14bcd6c9f4d26191d5d40c77e923df4d0be4c0e5 | 1593960cdf2655ef1dcf68e3517e7121670c6ac3 | refs/heads/master | 2022-12-16T23:12:19.326702 | 2020-09-18T00:17:45 | 2020-09-18T00:17:45 | 295,302,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #
# @lc app=leetcode.cn id=214 lang=python3
#
# [214] Shortest Palindrome
#
# https://leetcode-cn.com/problems/shortest-palindrome/description/
#
# algorithms
# Hard (36.30%)
# Likes: 262
# Dislikes: 0
# Total Accepted: 23.3K
# Total Submissions: 64.2K
# Testcase Example: '"aacecaaa"'
#
# Given a string s, you may convert it to a palindrome by adding characters
# in front of it. Find and return the shortest palindrome you can create by
# performing this transformation.
#
# Example 1:
#
# Input: "aacecaaa"
# Output: "aaacecaaa"
#
#
# Example 2:
#
# Input: "abcd"
# Output: "dcbabcd"
#
#
# @lc code=start
class Solution:
def shortestPalindrome(self, s: str) -> str:
        r = s[::-1]
        # find the smallest i for which r[i:] (a suffix of the reverse) is a
        # prefix of s; then r[:i] + s is the shortest palindrome
for i in range(len(s) + 1):
if s.startswith(r[i:]):
return r[:i] + s
# @lc code=end
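# Worked example (not part of the original solution): for s = "abcd" the
# reverse is r = "dcba"; the first suffix of r that is a prefix of s is
# r[3:] == "a", so the answer is r[:3] + s == "dcb" + "abcd" == "dcbabcd".
# The scan is O(n^2) in the worst case; a KMP failure-function variant on
# s + "#" + r computes the same longest palindromic prefix in O(n).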
| [
"[email protected]"
]
| |
ec7acf98f9484508ac4aef0ff75457eae8bd99f0 | f05acf7451fe58b64ec11744e8afddf142014efa | /crowdsorter/views/_session.py | dd75bd5c8d2a06dddfad4e0d7a5a23f7570d19a0 | [
"MIT"
]
| permissive | iCodeIN/crowdsorter | 899ac58b1df43ca134d3f966dcf2ec1c4a49e0df | 1c847f1f0284fc810ec1f2dd501acb4dbfa16bbb | refs/heads/master | 2023-03-26T16:09:04.914897 | 2020-11-11T18:25:55 | 2020-11-11T18:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | from flask import session
VERSION = 2 # increment when session logic changes to clear sessions
VOTED_NAMES = f"voted-names:{VERSION}:"
SKIPPED_NAMES = f"skipped-names:{VERSION}:"
VIEWED_PAIRS = f"viewed-pairs:{VERSION}:"
def get_voted_names(code):
return _get(VOTED_NAMES, code)
def set_voted_names(code, names):
_set(VOTED_NAMES, code, names)
def add_voted_name(code, name):
names = get_voted_names(code)
if name not in names:
names.append(name)
set_voted_names(code, names)
def get_skipped_names(code):
return _get(SKIPPED_NAMES, code)
def set_skipped_names(code, names):
_set(SKIPPED_NAMES, code, names)
def add_skipped_name(code, name):
names = get_skipped_names(code)
if name not in names:
names.append(name)
set_skipped_names(code, names)
def get_viewed_pairs(code):
return _get(VIEWED_PAIRS, code)
def set_viewed_pairs(code, pairs):
_set(VIEWED_PAIRS, code, pairs)
def add_viewed_pair(code, pair):
pairs = get_viewed_pairs(code)
if pair not in pairs:
pairs.append(pair)
set_viewed_pairs(code, pairs)
def _get(prefix, code):
key = prefix + code
value = session.get(key) or []
return value
def _set(prefix, code, value):
key = prefix + code
session[key] = value
session.permanent = True
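# Illustration (not part of the original module): keys are namespaced by kind,
# schema VERSION and collection code, so add_voted_name("abc", "Alice")
# appends "Alice" to session["voted-names:2:abc"].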
| [
"[email protected]"
]
| |
69aa022e185b5ec3bb7d2f6da610e01aedc92957 | fba1ae1672a770846eb219e4e092ba4c162aec40 | /2.py | 01fbf8c10fb3569d0961749560f345f35e124f7d | []
| no_license | peterbe/optimize-titles.json | 9272ad39d7565c448dce2b22a3d844ef0e7524d6 | ff7f8a01a5a742906ebb350c55cc963ca3b85e73 | refs/heads/master | 2022-11-23T05:33:05.004619 | 2020-07-15T18:18:48 | 2020-07-15T18:18:48 | 279,936,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | import json
import csv
with open('0.json') as f:
data = json.load(f)
with open('2.csv', 'w') as f:
writer = csv.writer(f, delimiter="|")
for each, value in data['titles'].items():
writer.writerow([each, value['title'], value['popularity']])
| [
"[email protected]"
]
| |
986e4045b106ad579041853e9891735e06800efd | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/rottenOranges_20200810191228.py | 2e8869821ab7c96fc3b8d53eff3ef2e939c3ffb4 | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | def markrotten(i,j,row,column)
def oranges(grid):
# loop through the grid
# if there is no fresh orange just return 0
# if there is a two check all its four neighbours
# recursive call
# count when a one becomes a two
row = len(grid)
column = len(grid[0])
for i in range(len(grid)):
for j in range(len(i)):
if grid[i][j] == 2:
markrotten(i,j,row,column,grid)
oranges( [[2,1,1],[0,1,1],[1,0,1]])
| [
"[email protected]"
]
| |
d20e606c613d78c5971e9e9c8e93448c465bcbe1 | 68aa9bf99d62a5b991dc5aaa3d794f4bcd6e355a | /Programiranje/gui/Capital_Cities.py | 24cfdaf80e27e2162949498aef012db6a6261742 | []
| no_license | dujodujo/lemur | 82c9e695459597ab1b3430e566bc375af84d563c | 1e6350b33f86f89f89c5bddbd3924364f027160e | refs/heads/master | 2021-01-01T16:49:35.386172 | 2013-11-06T09:59:12 | 2013-11-06T09:59:12 | 14,150,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class Form(QDialog):
def __init__(self,parent = None):
super(Form, self).__init__(parent)
self.get_data()
self.answers = 0
self.count = 0
self.countryLabel = QLabel("Country:")
self.fromCountryLabel = QLabel()
self.fromCountryLabel.setText("Slovenija")
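        # the first country is hardcoded; update_ui() calls select() to pick random ones afterwards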
self.capitalLabel = QLabel("Capital:")
self.fromLineEdit = QLineEdit()
self.countLabel = QLabel()
self.resultLabel = QLabel()
grid = QGridLayout()
grid.addWidget(self.countryLabel,0,0)
grid.addWidget(self.fromCountryLabel,0,1)
grid.addWidget(self.capitalLabel,1,0)
grid.addWidget(self.fromLineEdit,1,1)
grid.addWidget(self.countLabel,2,0)
grid.addWidget(self.resultLabel,2,1)
self.setLayout(grid)
self.connect(self.fromLineEdit, SIGNAL("returnPressed()"), self.update_ui)
def select(self):
self.fromCountryLabel.setText(random.choice([x for x in self.capitals.keys()]))
def update_ui(self):
capitals = self.capitals
country = self.fromCountryLabel.text()
name = self.fromLineEdit.text()
if name == capitals[country]:
self.resultLabel.setText("Pravilno")
            self.count += 1
else:
            self.resultLabel.setText("Nepravilno, pravilni odgovor je " + capitals[country])
        self.answers += 1
self.countLabel.setText("{}/{}".format(self.count,self.answers))
self.fromLineEdit.clear()
self.select()
def get_data(self):
self.capitals = {}
if os.path.exists(os.getcwd() + "\\imena.txt"):
for line in open("imena.txt", "rt"):
line = line.strip()
data = line.split(", ")
country = data[0]
capital = data[1]
self.capitals[country] = capital
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| [
"[email protected]"
]
| |
3e7c227a882f2cd39cdaf02c0f17a021690effc5 | 40a04920dea94179878e25a0804ce4a6b459aca9 | /Python/Django/Portfolio/apps/first_app/urls.py | 8e117530ce911208aad1a83f1f376ca1c35e005b | []
| no_license | Kryptonian92/pythonAssignments | 5c7dd9140d07c94b19816ebbcaba579338479136 | 06355e0481307a77e5acd53b86b1fc144e98302a | refs/heads/master | 2021-01-20T15:50:46.355224 | 2017-10-28T19:02:52 | 2017-10-28T19:02:52 | 90,771,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from django.conf.urls import url
from . import views # This line is new!
urlpatterns = [
url(r'^$', views.index), # This line has changed!
url(r'^testimonials$', views.show)
]
| [
"[email protected]"
]
| |
6c7376c8231168cb83ab28cd66f7376c7363aa20 | 0b842bcb3bf20e1ce628d39bf7e11abd7699baf9 | /oscar/a/sys/platform/manager/blinky/__init__.py | 856452a17cd28db74867fb490c14dee2212dcaf8 | []
| no_license | afeset/miner2-tools | 75cc8cdee06222e0d81e39a34f621399e1ceadee | 81bcc74fe7c0ca036ec483f634d7be0bab19a6d0 | refs/heads/master | 2016-09-05T12:50:58.228698 | 2013-08-27T21:09:56 | 2013-08-27T21:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | #
# Copyright Qwilt, 2012
#
# The code contained in this file may not be used by any other entities without explicit written permission from Qwilt.
#
# Author: shmulika
#
G_GROUP_NAME_PLATFORM_POWER_BLINKY_ADAPTOR = "power-blinky-adaptor"
G_GROUP_NAME_PLATFORM_FANS_BLINKY_ADAPTOR = "fans-blinky-adaptor"
G_GROUP_NAME_PLATFORM_TEMPERATURE_BLINKY_ADAPTOR = "temperature-blinky-adaptor"
G_GROUP_NAME_PLATFORM_MANAGER_BLINKY_ADAPTOR = "platform-manager-blinky-adaptor"
G_GROUP_NAME_PLATFORM_SOURCE_BLINKY_ADAPTOR = "source-blinky-adaptor"
| [
"[email protected]"
]
| |
b2c1be6d03658e2b794333b2d98e59fda98d2e05 | 7d97daf9b9f46d68bbe29441d8db554918dfcdc4 | /leetcode/StringtoInteger8.py | 5a117868d64f0d0ad26bb4ae61baff99e7332feb | []
| no_license | hundyoung/LeetCode | 9a56c4f078dcb4e875a6178c14665b7784c1a0a2 | 803e164d3a21b593cb89206b3a362c1ab1eb9abf | refs/heads/master | 2020-09-23T02:51:13.657444 | 2020-05-06T12:53:07 | 2020-05-06T12:53:07 | 225,383,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | class Solution:
def myAtoi(self, str: str) -> int:
str1 = str.strip()
result = ""
for i in range(len(str1)):
char = str1[i]
            if i == 0 and (char == "+" or char == "-"):
result= result+char
elif char.isdigit():
result = result + char
else:
break
# print(str1)
try:
result = int(result)
result = min(2**31-1,result)
result = max((-2)**31,result)
return result
except:
return 0
if __name__ == '__main__':
solution = Solution()
print(solution.myAtoi("-5-")) | [
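    # expected output: -5 (parsing stops at the second '-')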
"[email protected]"
]
| |
6b1515908b2fe16543fdcf82ee9325387b7d572b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_plagued.py | 33674872c908833ea1ef79864be519cf6ce0d184 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.verbs._plague import _PLAGUE
#calss header
class _PLAGUED(_PLAGUE, ):
def __init__(self,):
_PLAGUE.__init__(self)
self.name = "PLAGUED"
self.specie = 'verbs'
self.basic = "plague"
self.jsondata = {}
| [
"[email protected]"
]
| |
2bd765f9129f0648c344eac691a54cae5729812b | cc0d06e2aad3d30152c4a3f3356befdc58748313 | /13nov_til_19nov/17_novin1900.py | 0e049e544ca995c89370175743c5e1de70beedec | []
| no_license | lasse-steinnes/IN1900 | db0bb4da33fa024d4fe9207337c0f1d956197c50 | c8d97c2903078471f8e419f88cc8488d9b8fc7da | refs/heads/master | 2020-12-14T15:34:36.429764 | 2020-01-18T19:59:46 | 2020-01-18T19:59:46 | 234,789,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #### Forelesning in1900 ####
### Siste forelesning. Foilbasert.
## siste del av ODE-løsere og modellering av smittsomme sykdommer
## System av ODE´s
### Skal lage klassehierarki for ODE løsere.
###
| [
"[email protected]"
]
| |
9a68a892ee7454b8952addae4614751aba7824f7 | 0789e92ff05448f511352982dbc9fcc8b481e806 | /kikar_hamedina/reporting/management/commands/export_commentator_data_to_csv.py | 728384914535440e35a1486adb779002cbeca29e | []
| no_license | danielhers/kikar-hamedina | 9645dfc554c004092cb44bb5189b63e9940b3801 | a838a2fc675ea7100c620477bae438f215c741f7 | refs/heads/dev | 2020-06-14T14:08:05.069290 | 2017-05-04T17:22:03 | 2017-05-04T17:22:03 | 75,173,287 | 0 | 0 | null | 2016-11-30T09:47:01 | 2016-11-30T09:47:01 | null | UTF-8 | Python | false | false | 6,338 | py | #!encoding utf-8
from csv import DictWriter
from django.utils import timezone
from facebook_feeds.management.commands.kikar_base_commands import KikarBaseCommand
from facebook_feeds.models import Facebook_Feed, Facebook_Status
DELIMITER = '~'
class Command(KikarBaseCommand):
def add_arguments(self, parser):
parser.add_argument('--year',
action='store',
dest='year',
default=None,
help="choose year to filter on"
)
parser.add_argument('--feed',
action='store',
dest='feed',
default=None,
help="choose year to filter on"
)
parser.add_argument('--total',
action='store_true',
dest='total',
default=False,
help="Get statistics for total of activity, not separated by feed"
)
def build_commentator_data(self, statuses, year=None):
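        # tallies both unique user ids (sets) and raw totals of likes/comments per year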
years = ['2014', '2015'] if not year else [year]
counter = dict()
counter['unique'] = {'likes_2014': set(), 'likes_2015': set(), 'comments_2014': set(),
'comments_2015': set()}
counter['full'] = {'likes_2014': long(), 'likes_2015': long(), 'comments_2014': long(),
'comments_2015': long()}
for year in years:
for status in statuses.filter(published__year=year).order_by('published'):
if not status.is_comment:
counter['unique']['likes_%s' % year] = counter['unique'][
'likes_%s' % year].union(
set(status.likes.values_list('user', flat=True)))
counter['unique']['comments_%s' % year] = counter['unique'][
'comments_%s' % year].union(
set(status.comments.values_list('comment_from_id', flat=True)))
counter['full']['likes_%s' % year] += status.likes.count()
counter['full']['comments_%s' % year] += status.comments.count()
print('\t%s' % status.published)
return counter
def handle(self, *args, **options):
print('Start.')
feed = options['feed']
feeds = Facebook_Feed.objects.filter(id=feed) if feed else Facebook_Feed.objects.all()
counter = dict()
if options['total']:
statuses = Facebook_Status.objects.all()
counter['total'] = self.build_commentator_data(statuses, year=options['year'])
else:
for feed in feeds.order_by('id'):
print(feed.id)
statuses = feed.facebook_status_set.filter(is_comment=False)
counter[feed.id] = self.build_commentator_data(statuses, year=options['year'])
file_name = 'commentator_data_{}.csv'.format(timezone.now().strftime('%Y_%m_%d_%H_%M_%S'))
with open(file_name, 'wb') as f:
field_names = [
'feed_id',
'link',
'mk_id',
'mk_name',
'mk_party',
'likes_2014_unique',
'likes_2015_unique',
'likes_2014_full',
'likes_2015_full',
'comments_2014_unique',
'comments_2015_unique',
'comments_2014_full',
'comments_2015_full'
]
csv_data = DictWriter(f, fieldnames=field_names, delimiter=DELIMITER)
headers = {field_name: field_name for field_name in field_names}
csv_data.writerow(headers)
if options['total']:
row = {'mk_id': 'total',
'mk_name': 'total',
'mk_party': None,
'feed_id': 'total',
'link': None,
'likes_2014_unique': len(counter['total']['unique']['likes_2014']),
'likes_2015_unique': len(counter['total']['unique']['likes_2015']),
'likes_2014_full': counter['total']['full']['likes_2014'],
'likes_2015_full': counter['total']['full']['likes_2015'],
'comments_2014_unique': len(counter['total']['unique']['comments_2014']),
'comments_2015_unique': len(counter['total']['unique']['comments_2015']),
'comments_2014_full': counter['total']['full']['comments_2014'],
'comments_2015_full': counter['total']['full']['comments_2015']
}
csv_data.writerow(row)
else:
for feed in feeds:
row = {'mk_id': feed.persona.object_id,
'mk_name': unicode(feed.persona.content_object.name).encode(
'utf-8') if feed.persona.content_object else feed.username,
'mk_party': unicode(feed.persona.content_object.current_party.name).encode(
'utf-8') if feed.persona.content_object else None,
'feed_id': feed.id,
'link': 'http://www.facebook.com/{}'.format(feed.vendor_id),
'likes_2014_unique': len(counter[feed.id]['unique']['likes_2014']),
'likes_2015_unique': len(counter[feed.id]['unique']['likes_2015']),
'likes_2014_full': counter[feed.id]['full']['likes_2014'],
'likes_2015_full': counter[feed.id]['full']['likes_2015'],
'comments_2014_unique': len(counter[feed.id]['unique']['comments_2014']),
'comments_2015_unique': len(counter[feed.id]['unique']['comments_2015']),
'comments_2014_full': counter[feed.id]['full']['comments_2014'],
'comments_2015_full': counter[feed.id]['full']['comments_2015']
}
csv_data.writerow(row)
print('Done.')
| [
"[email protected]"
]
| |
98f50633d7a2f376fa62cba3433f8d1dd51588f3 | ebe5167148cfff43d24b6c66e44634bb55513b72 | /solutions/linkedlist/160.Intersection.of.Two.Linked.Lists.py | 2cf42f3f9cf91b328616216b08a513d48d5ff246 | []
| no_license | ljia2/leetcode.py | c90ac38a25331d61d3ff77fd135b82372da3a09f | 08c6d27498e35f636045fed05a6f94b760ab69ca | refs/heads/master | 2020-03-25T03:37:13.318582 | 2019-07-18T23:14:41 | 2019-07-18T23:14:41 | 143,351,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,122 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
Write a program to find the node at which the intersection of two singly linked lists begins.
For example, the following two linked lists:
begin to intersect at node c1.
Example 1:
Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,0,1,8,4,5], skipA = 2, skipB = 3
Output: Reference of the node with value = 8
Input Explanation: The intersected node's value is 8 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [4,1,8,4,5]. From the head of B, it reads as [5,0,1,8,4,5]. There are 2 nodes before the intersected node in A; There are 3 nodes before the intersected node in B.
Example 2:
Input: intersectVal = 2, listA = [0,9,1,2,4], listB = [3,2,4], skipA = 3, skipB = 1
Output: Reference of the node with value = 2
Input Explanation: The intersected node's value is 2 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [0,9,1,2,4]. From the head of B, it reads as [3,2,4]. There are 3 nodes before the intersected node in A; There are 1 node before the intersected node in B.
Example 3:
Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2
Output: null
Input Explanation: From the head of A, it reads as [2,6,4]. From the head of B, it reads as [1,5]. Since the two lists do not intersect, intersectVal must be 0, while skipA and skipB can be arbitrary values.
Explanation: The two lists do not intersect, so return null.
Notes:
If the two linked lists have no intersection at all, return null.
The linked lists must retain their original structure after the function returns.
You may assume there are no cycles anywhere in the entire linked structure.
Your code should preferably run in O(n) time and use only O(1) memory.
:type head1, head1: ListNode
:rtype: ListNode
"""
if not headA or not headB:
return None
lenA = self.get_length(headA)
lenB = self.get_length(headB)
# adjust to ensure A is longer than B;
# swap both headA/B and lenA/B
if lenA < lenB:
headA, headB = headB, headA
lenA, lenB = lenB, lenA
stepA = 0
runnerA = headA
while stepA + lenB < lenA and runnerA:
stepA += 1
runnerA = runnerA.next
runnerB = headB
while runnerA and runnerB and runnerA != runnerB:
runnerA = runnerA.next
runnerB = runnerB.next
if runnerA != runnerB:
return None
else:
return runnerA
def get_length(self, head):
length = 0
runner = head
while runner:
length += 1
runner = runner.next
return length | [
"[email protected]"
]
| |
c797fec39e87cec2724d05c13ea1be0f98111384 | 7f66c66eb82b480e8a23ecbfb8613aae02cb50f7 | /tests/integration/parity/utils.py | 572d4c4af3500566de67c6e37afa8c80f6465074 | [
"MIT"
]
| permissive | y19818/web3.py | 03ddedcfdbd4dde2c1a458b31f5e796509b3c7c6 | 32a85a287ab63220d1e0c06d77be74de595ff02f | refs/heads/master | 2021-06-25T00:30:50.312173 | 2019-12-02T08:21:40 | 2019-12-02T08:21:40 | 225,276,093 | 0 | 0 | MIT | 2019-12-02T03:20:47 | 2019-12-02T03:20:47 | null | UTF-8 | Python | false | false | 1,939 | py | import signal
import socket
import subprocess
import time
from vns_utils import (
to_text,
)
import requests
def wait_for_socket(ipc_path, timeout=60):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
def wait_for_http(endpoint_uri, timeout=60):
start = time.time()
while time.time() < start + timeout:
try:
requests.get(endpoint_uri)
except requests.ConnectionError:
time.sleep(0.01)
else:
break
def get_process(command_list, terminates=False):
proc = subprocess.Popen(
command_list,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
)
if terminates:
wait_for_popen(proc, 30)
try:
yield proc
finally:
kill_proc_gracefully(proc)
output, errors = proc.communicate()
print(
"Parity Process Exited:\n"
"stdout:{0}\n\n"
"stderr:{1}\n\n".format(
to_text(output),
to_text(errors),
)
)
def wait_for_popen(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
wait_for_popen(proc, 13)
if proc.poll() is None:
proc.terminate()
wait_for_popen(proc, 5)
if proc.poll() is None:
proc.kill()
wait_for_popen(proc, 2)
| [
"[email protected]"
]
| |
8c4e25032a017464274c3783f28d6988a1017590 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/3673.py | 56a2a64feb4db88351e2187df1ddbb45f569ef30 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py |
import sys
def t_process():
n1 = int(sys.stdin.readline())
n1 -= 1
n1_matrix = [set(map(int, sys.stdin.readline().split())) for _ in range(4)]
n2 = int(sys.stdin.readline())
n2 -= 1
n2_matrix = [set(map(int, sys.stdin.readline().split())) for _ in range(4)]
sol = list(n1_matrix[n1].intersection(n2_matrix[n2]))
if len(sol) > 1:
return "Bad magician!"
if len(sol) == 0:
return "Volunteer cheated!"
if len(sol) == 1:
return int(sol[0])
def main():
t = int(sys.stdin.readline())
for k in range(1, t + 1):
print("Case #{0}: {1}".format(k, t_process()))
main()
| [
"[email protected]"
]
| |
1152f9facac5c0cb34d89abe0989f056a54199fe | 0ab3ab2cda94a700f015ff172ef37abc3402ed75 | /drawfromfile.py | 3150dd3f92c114e2f97a979d71243be2403f76c8 | []
| no_license | mikerr/laserPOV | 719c85493f8a4dc05e92267695e9e0804aac0b64 | 215ee38db2c3a2ff6e92e1c4f5aa18615ec76839 | refs/heads/master | 2016-09-06T07:49:40.767385 | 2015-04-11T20:49:39 | 2015-04-11T20:49:39 | 33,660,512 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | #!/usr/bin/python
import subprocess,time
file = open('drawing','r')
x,y = [], []
for line in file:
row = line.split()
x.append(row[0])
y.append(row[1])
SPEED = 0.09
REPS = 10
XOFFSET = 160
YOFFSET = 110
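# sweep forward and then back through the point list so the laser retraces
# the outline on every repetition (ServoBlaster channels 7 and 0 drive the servos)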
for loop in range (REPS):
for i in range (len(x)):
xpos = int(x[i]) + XOFFSET
ypos = int(y[i]) + YOFFSET
command = "echo 7=" + str(xpos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
command = "echo 0=" + str(ypos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
time.sleep(SPEED)
for i in reversed (range (len(x))):
xpos = int(x[i]) + XOFFSET
ypos = int(y[i]) + YOFFSET
command = "echo 7=" + str(xpos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
command = "echo 0=" + str(ypos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
time.sleep(SPEED)
| [
"pi@raspberrypi.(none)"
]
| pi@raspberrypi.(none) |
23afbdc21f3c52e6711d6a97008f609df14f55bf | a2ad46d4995b2dbe182e645a15b7d5a7047d3b56 | /2018.12.05.provetta/all-CMS-submissions-2018-12-05/2018-12-05.12:18:30.099314.VR437605.conta_multipli.py | e4d78f797d2d4c80a6e99aaa5ded613795628fb5 | []
| no_license | romeorizzi/temi_prog_public | ccf634a0291dd943b503f8dc57ed03de4d9b1a68 | e9e3e98d4a9a3cddec45d514180b83fd5004fe7b | refs/heads/master | 2020-04-09T04:27:35.687265 | 2019-03-25T14:04:04 | 2019-03-25T14:04:04 | 160,024,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | """
* user: VR437605
* fname: ANNALISA
* lname: DETTORI
* task: conta_multipli
* score: 100.0
* date: 2018-12-05 12:18:30.099314
"""
#!/usr/bin/env python3
# Template for the conta_multipli solution
from __future__ import print_function
import sys
if sys.version_info < (3, 0):
    input = raw_input  # in python2, the equivalent of input is raw_input
# You must modify the implementation of this function to do
# what the exercise text asks for
def conta_multipli(a, b, c):
p=0
for n in range (1,c+1):
if n%a==0 and n%b!=0 :
p+=1
return p
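# e.g. a=2, b=3, c=10 counts 2, 4, 8, 10 (6 is skipped as a multiple of b) -> 4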
# Lettura input: non devi modificare il codice sotto questa riga
a, b, c = map(int, input().split())
print(conta_multipli(a, b, c))
| [
"[email protected]"
]
| |
497b09aec342a86f55cb820435ec603f2aab872a | 07fbdae51275b4bab2074524fc4c1ae58ac53d08 | /List's/Lists Basics/Exercise/Solutions/10. Bread Factory.py | ef0ba277e0e12230313d1b24fb7eeaa1489595d6 | []
| no_license | rimisarK-blue/Python-Fundamental-knowledge | 85c2afa4401f848c9919f672c7fa3d54a43e761f | a182fb1c7c3ce11f9e26ce0afefe5c2069d70e8d | refs/heads/main | 2023-03-09T02:08:34.411768 | 2021-02-15T20:19:52 | 2021-02-15T20:19:52 | 326,009,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py |
events = input().split('|')
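# each token is "command-value" (e.g. the hypothetical "rest-2|order-10|coffee-5");
# any command other than rest/order is treated as a purchase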
energy = 100
coins = 100
good_day = True
for com in events:
command, value = com.split('-')
value = int(value)
if command == 'rest':
if energy == 100:
print("You gained 0 energy.")
print(f"Current energy: 100.")
elif energy + value > 100:
print(f"You gained {value} energy.")
print(f"Current energy: 100.")
else:
energy += value
print(f"You gained {value} energy.")
print(f"Current energy: {energy}.")
elif command == 'order':
if energy >= 30:
energy -= 30
coins += value
print(f"You earned {value} coins.")
else:
energy += 50
print("You had to rest!")
else:
if coins - value > 0:
coins -= value
print(f"You bought {command}.")
else:
good_day = False
print(f"Closed! Cannot afford {command}.")
break
if good_day and coins > 0 and energy > 0:
print("Day completed!")
print(f"Coins: {coins}")
print(f"Energy: {energy}")
| [
"[email protected]"
]
| |
0e7737cccb51a80b11f78c5e07adef62d4d64487 | e61fa7205d3d01787ca0b3d512efa67a94c1105b | /Raspberry/config.py | ea6164f0af539e884b8d16c78501a11231c89ffb | []
| no_license | cyrilvincent/IoT | 4d95c6def9d4c822edee71d547e8b151846f26ee | d73a4d78501975beeeddc9fd90704f5982f66a66 | refs/heads/master | 2021-12-28T08:42:50.088460 | 2021-09-09T10:12:33 | 2021-09-09T10:12:33 | 215,299,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | mac="00:0E:EA:CF:47:5A"
usb="COM4"
usb2="COM5" | [
"[email protected]"
]
| |
68cf3e5e2413d7edeffddb03c066dfb7a3e78310 | 4e187a73d451f8c500955098e8f7d466b90d05de | /Flasky0.1.py | 299910de2cdaa6ebb388c7732ee6b2261932d8dc | []
| no_license | LinZiYU1996/Flask_Login | 21c3592b6116ca49a17bab98eb4171ea4721b551 | 420d540cf18f4627054ecf589872611e6e6ff8b6 | refs/heads/master | 2021-01-02T08:48:38.669567 | 2017-08-02T03:14:37 | 2017-08-02T03:14:37 | 99,066,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | from flask import Flask,render_template,flash,url_for,redirect
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import FlaskForm
from flask_login import LoginManager,login_user,UserMixin,logout_user,login_required
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SECRET_KEY']='kkk'
bootstrap = Bootstrap(app)
moment=Moment(app)
login_manger=LoginManager()
login_manger.session_protection='strong'
login_manger.login_view='login'
login_manger.init_app(app)
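# note: the 'login' view named in login_view is not defined in this snippet;
# Flask-Login redirects unauthenticated users to it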
if __name__ == '__main__':
app.run()
| [
"[email protected]"
]
| |
757bb5db334a4b6518bf2b293c9f9cc451d67ebf | 5891051796778cfb44a255248ce38789bfef9e70 | /P_base/python_pdf/kp.py | 1849b1e321f912c79b6c02533938157eb9a214ea | []
| no_license | Faithlmy/Python_base | cc546a5d86b123e102a69df1227cde9b6e567493 | 5a43557e6375dc9dbe5f6701d7c10e549873a5ab | refs/heads/master | 2021-01-01T17:07:04.097978 | 2018-03-31T16:44:01 | 2018-03-31T16:44:01 | 98,000,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,258 | py | #!/usr/bin/env python3
# encoding: utf-8
import sys
import importlib
importlib.reload(sys)
from pdfminer.pdfparser import PDFParser,PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal,LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
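# note: this uses the legacy pdfminer API (PDFDocument paired with set_parser/initialize);
# pdfminer.six exposes a different interface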
'''
Parse the text of a PDF and save it to a txt file
'''
path = r'/home/faith/Desktop/phtoword.pdf'
def parse():
    fp = open(path, 'rb')  # open in binary read mode
    # create a PDF document parser from the file object
    parser = PDFParser(fp)
    # create a PDF document
    doc = PDFDocument()
    # connect the parser and the document object
    parser.set_document(doc)
    doc.set_parser(parser)
    # supply the initialization password
    # if there is no password, pass an empty string
    doc.initialize()
    # check whether the document allows txt extraction; bail out if not
    if not doc.is_extractable:
        raise PDFTextExtractionNotAllowed
    else:
        # create a PDF resource manager to manage shared resources
        rsrcmgr = PDFResourceManager()
        # create a PDF device object
        laparams = LAParams()
        device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        # create a PDF interpreter object
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        # loop over the page list, handling one page's content at a time
        for page in doc.get_pages():  # doc.get_pages() returns the page list
            interpreter.process_page(page)
            # receive the LTPage object for this page
            layout = device.get_result()
            # print(layout)
            # layout is an LTPage object holding the objects parsed from this page: typically LTTextBox, LTFigure, LTImage, LTTextBoxHorizontal, etc.; to extract text, read the object's text attribute
for x in layout:
print(x.get_text())
# if (isinstance(x, LTTextBoxHorizontal)):
# with open(r'/root/pdf/t_pdf/turn_pdf2.txt', 'a') as f:
# results = x.get_text().encode('utf-8')
# print(results)
# f.write(results + '\n')
if __name__ == '__main__':
parse() | [
"[email protected]"
]
| |
1bab715b0c564a7a2941200a68f23a04ab4bfd58 | be2c022b270522fe24475b794d53a3fd973a5de1 | /영동/05_11049_행렬 곱셈 순서.py | 9a26a4594789aceefcc502611d23e25d9aedf66e | []
| no_license | zeroistfilm/week04 | ea4a358be0931fe28202b7ce543ed246536a1c50 | fdb5985e2d899c8b1a60cb81d660937304fa5bcb | refs/heads/main | 2023-02-09T09:35:27.795180 | 2021-01-07T02:29:28 | 2021-01-07T02:29:28 | 325,717,500 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # https://www.acmicpc.net/problem/11049
import sys
#sys.stdin = open("input.txt", "r")
N = int(sys.stdin.readline())
M = [0 for i in range(N+1)]
for i in range(N):
a,b = map(int, sys.stdin.readline().split())
M[i]=a
M[i+1] = b
Matrix = [[0 for i in range(N)] for i in range(N)]
for i in range(1,N):
r = 0
c = i
for _ in range(N,i,-1):
tmp=[]
for k in range(r,c):
tmp.append(Matrix[r][k]+Matrix[k+1][c]+(M[r]*M[k+1]*M[c+1]))
Matrix[r][c]=min(tmp)
r += 1
c += 1
print(Matrix[0][-1])
| [
"[email protected]"
]
| |
f96729b38a64cf05d84ab0e508ac4cb889ce989b | ce348e6f43e0eeb83a171f73dc924b95c121fe7f | /backend/sharedstory_24977/wsgi.py | 58a5c6babe8473b4c0580a501312c526f0f7ed5c | []
| no_license | crowdbotics-apps/sharedstory-24977 | ec19e35d67d0119dac3d30521e2aef050f60fa8c | 2dbd37503ab449981acbfb86f98d54580b4d6a92 | refs/heads/master | 2023-03-23T15:31:00.793794 | 2021-03-11T16:01:22 | 2021-03-11T16:01:22 | 346,757,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
WSGI config for sharedstory_24977 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sharedstory_24977.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
1f4ddfa1c8bc8ae0575ee67ac34d8226efa92e7e | e1efc8e0b0e4629dea61504fbc816c0527691bd9 | /3.jvm/24-静态分派.py | 4057e69948dec7c7341531bc1d10fa9e78285067 | []
| no_license | xiongmengmeng/xmind-technology | 2bb67a0bf92cfd660cac01f8ab3a2454423ccba5 | e2fdb6987ef805a65f0a4feb52d84383853f4b77 | refs/heads/main | 2023-07-31T07:10:29.868120 | 2021-09-11T08:18:17 | 2021-09-11T08:18:17 | 307,636,242 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,314 | py | import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
xmind_name="jvm"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("静态分派")
r2=s2.getRootTopic()
r2.setTitle("静态分派")
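# the content dict below holds the (Chinese-language) mind-map nodes on JVM static
# dispatch: polymorphism basics, the five invoke* bytecode instructions, resolution,
# and overload matching order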
content={
'Java具备面向对象的3个基本特征':[
'继承',
'封装(get/set)',
{'多态':[
'继承,重写(Override),向上转型(Human h=new Man())三大必要条件',
'方法重载:同一个方法名,参数或者类型不同。(Overload)',
'方法重写:父类与子类有同样的方法名和参数,这叫方法覆盖。(Override)'
]}
],
'任务':[
'不等同于【方法执行】,该阶段唯一任务是确定【被调用方法版本】,不涉及方法内部具体运行过程'
],
'五条字节码指令':[
{'invokestatic':[
'调用静态方法'
]},
{'invokespecial':[
'调用实例构造器<init>()方法、私有方法和父类中的方法'
]},
{'invokevirtual':[
'调用所有的虚方法'
]},
{'invokeinterface':[
'调用接口方法,在运行时确定一个实现该接口的对象'
]},
{'invokedynamic':[
'运行时动态解析出调用点限定符所引用的方法,然后再执行该方法'
]}
],
'解析':[
{'定义':[
'静态过程',
'编译期间确定',
'把【符号引用】转变为【直接引用】,确定唯一的【方法调用版本】',
'如能被invokestatic和invokespecial指令调用的方法'
]},
{'分类':[
{'静态方法':[
'与类型直接关联,不能通过【重写】出现别的版本,适合类加载阶段进行解析'
]},
{'私有方法':[
'外部不可被访问,不能通过【继承】出现别的版本,适合类加载阶段进行解析'
]},
'实例构造器',
'父类方法',
{'被final修饰的方法(invokevirtual指令调用)':[
'【无法被覆盖】,没有其他版本的可能'
]}
]},
],
'静态分派':[
{'定义':[
'依赖【静态类型】决定【方法执行版本】',
'发生在【编译阶段】,不由虚拟机来执行的',
{'典型表现':[
'方法重载'
]}
]},
{'重载':[
'通过【参数的静态类型】而不是实际类型作为判定依据的',
'静态类型是在【编译期可知】',
'实际类型在运行期才可确认'
]},
{'重载时目标方法选择(字面量没有显示的静态类型时)':[
'1.char>int>long>float>double的顺序转型进行匹配',
'2.一次自动装箱,封装类型java.lang.Character',
'3.java.lang.Serializable,是java.lang.Character类实现的一个接口,自动装箱之后还是找不到装箱类,会找装箱类所实现的接口类型',
'4.Object,如果有多个父类,那将在继承关系中从下往上开始搜索',
'5.变长参数的重载优先级是最低的'
]}
],
}
# build the xmind tree
xmind.build(content,r2)
# save the xmind file
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind") | [
"[email protected]"
]
|