blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c6634f95a8581d8241a4d53c143fe96dbad59ea9 | 8516f0f456b91c0da6c016b64d68ff2c2cdaf68d | /src/array/intersect.py | 1dff47db905e488c248ad540666fbb1ba5276a6f | [] | no_license | huowolf/leetcode | b5bb67206fab6417055b0534c7627bc281a29eef | f34909f09c22d2164bbe21fc7da0361fcbd63fd6 | refs/heads/master | 2020-03-26T20:20:13.386017 | 2018-11-20T13:04:03 | 2018-11-20T13:04:03 | 145,317,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | #https://leetcode-cn.com/explore/interview/card/top-interview-questions-easy/1/array/26/
#两个数组的交集 II
#给定两个数组,编写一个函数来计算它们的交集
#===============================================================================
# 输入: nums1 = [1,2,2,1], nums2 = [2,2]
# 输出: [2,2]
#===============================================================================
class Solution:
    def intersect(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]

        Returns the multiset intersection: each value appears as many
        times as it occurs in both inputs, in nums1 order.

        Improvement over the original nested scan: counts nums2 once and
        decrements while walking nums1 -- O(m + n) instead of O(m * n) --
        and nums2 is no longer mutated as a side effect.
        """
        from collections import Counter
        remaining = Counter(nums2)
        result = []
        for value in nums1:
            if remaining[value] > 0:
                result.append(value)
                remaining[value] -= 1
        return result
# Ad-hoc smoke test: LeetCode sample case 1 -- expected output [2, 2].
nums1 = [1,2,2,1]
nums2 = [2,2]
l=Solution().intersect(nums1, nums2)
print(l)
# Sample case 2 -- expected output [4, 9] (result follows nums1 order).
nums1 = [4,9,5]
nums2 = [9,4,9,8,4]
l=Solution().intersect(nums1, nums2)
print(l)
| [
"[email protected]"
] | |
0968ca412075e81decf4567daec9d3887be7f97a | 0a2356bde96ebc9b6a82bd91a833bbe04ffb3b82 | /myInstagram/migrations/0006_auto_20201019_1230.py | 304e84e3452ec7887377156161bd1620a31e667a | [
"MIT"
] | permissive | dancan-sandys/Instagram-clone | 3c1ec335f0806ab714e8946fba94e87b17329c78 | 08cfe2a40f6e701beb6b5fc97a090b61d5e242f3 | refs/heads/master | 2023-01-01T09:20:52.186988 | 2020-10-20T11:19:39 | 2020-10-20T11:19:39 | 304,602,156 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-10-19 09:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: re-points the upload
    # directories of two ImageFields (Photo.photo_url -> 'photos/',
    # Profile.profile_photo -> 'profile/').
    dependencies = [
        ('myInstagram', '0005_remove_photo_profile'),
    ]
    operations = [
        migrations.AlterField(
            model_name='photo',
            name='photo_url',
            field=models.ImageField(upload_to='photos/'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='profile_photo',
            field=models.ImageField(upload_to='profile/'),
        ),
    ]
| [
"[email protected]"
] | |
de6ce81cbeb176a956e3e8f4d930d20d3ee38341 | 1b9bd441c500e79042c48570035071dc20bfaf44 | /sources/Yalkut Shimoni/set_alt_structs_nach.py | d8f9d28ee8faf0f6b3ed0e326dc0f06ecfc49d49 | [] | no_license | Sefaria/Sefaria-Data | ad2d1d38442fd68943535ebf79e2603be1d15b2b | 25bf5a05bf52a344aae18075fba7d1d50eb0713a | refs/heads/master | 2023-09-05T00:08:17.502329 | 2023-08-29T08:53:40 | 2023-08-29T08:53:40 | 5,502,765 | 51 | 52 | null | 2023-08-29T11:42:31 | 2012-08-22T00:18:38 | null | UTF-8 | Python | false | false | 4,419 | py | # -*- coding: utf-8 -*-
import urllib
import urllib2
from urllib2 import URLError, HTTPError
import json
import pdb
import os
import sys
from bs4 import BeautifulSoup
import re
p = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, p)
os.environ['DJANGO_SETTINGS_MODULE'] = "sefaria.settings"
from local_settings import *
sys.path.insert(0, SEFARIA_PROJECT_PATH)
from sefaria.model import *
def post_index(index):
    # POST the serialized index (as form-encoded JSON + API key) to the
    # Sefaria server and echo the response.  Python 2 only: relies on
    # urllib/urllib2 and print statements.  SEFARIA_SERVER and API_KEY
    # come from the star-import of local_settings above.
    url = SEFARIA_SERVER + '/api/v2/raw/index/Yalkut_Shimoni_on_Nach'
    indexJSON = json.dumps(index)
    values = {
        'json': indexJSON,
        'apikey': API_KEY
    }
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    try:
        response = urllib2.urlopen(req)
        print response.read()
    except HTTPError, e:
        # Best-effort reporting: only the HTTP status code is printed.
        print 'Error code: ', e.code
def convertIntoRef(line):
    """Parse one 'perek,remez,paragraph' CSV line into a (perek, Ref) pair.

    Lines arrive straight from file.readline(), so surrounding whitespace
    is stripped first -- the original embedded the trailing newline into
    the Ref string (e.g. 'Yalkut Shimoni on Torah.12.3\\n').
    """
    arr = line.strip().split(",")
    perek = arr[0]
    remez = arr[1]
    para = arr[2]
    return (perek, Ref("Yalkut Shimoni on Torah." + remez + "." + para))
# Accumulators for the two alternate structures posted with the index:
# "Chapters" (perakim) and "Parsha" (parshiot).  Each holds a list of
# serialized ArrayMapNode dicts.  (The original had a dead `perakim = {}`
# that was immediately overwritten; it has been removed.)
perakim = { "nodes" : [] }
parshiot = { "nodes": [] }
# English/Hebrew book titles of Nach, kept index-aligned by position --
# getHebrewParsha and the perek loop rely on matching indices.
# NOTE(review): "Eccelesiastes" is a misspelling of "Ecclesiastes", but it
# is also used to build input file names ("parsha_<title>.txt"), so fixing
# it requires renaming the data files -- confirm before changing.
title_eng = ["Joshua", "Judges", "I Samuel", "II Samuel", "I Kings", "II Kings", "Isaiah", "Jeremiah", "Ezekiel", "Hosea",
             "Joel", "Amos", "Obadiah", "Jonah", "Micah", "Nahum", "Habakkuk", "Zephaniah", "Haggai", "Zechariah", "Malachi",
             "Psalms", "Proverbs", "Job", "Song of Songs", "Ruth", "Lamentations", "Eccelesiastes", "Esther", "Daniel", "Ezra",
             "Nehemiah", "I Chronicles", "II Chronicles"]
# NOTE(review): u"שופתים" looks like a typo for u"שופטים" (Judges) --
# confirm against the Sefaria index before changing the stored title.
title_heb = [u"יהושע", u"שופתים", u"שמואל א", u"שמואל ב", u"מלכים א",
             u"מלכים ב", u"ישעיהו", u"ירמיהו", u"יחזקאל", u"הושע", u"יואל", u"עמוס",
             u"עובדיה", u"יונה", u"מיכה", u"נחום", u"חבקוק", u"צפניה", u"חגי",
             u"זכריה", u"מלאכי", u"תהילים", u"משלי", u"איוב", u"שיר השירים",
             u"רות", u"איכה", u"קהלת", u"אסתר", u"דניאל", u"עזרא", u"נחמיה",
             u"דברי הימים א", u"דברי הימים ב"]
def getHebrewParsha(parsha):
    # Look up the Hebrew book title paired (by position) with the English
    # title `parsha`.  Falls through -- returning None -- when nothing
    # matches, exactly like the original index-based scan.
    for eng, heb in zip(title_eng, title_heb):
        if eng == parsha:
            return heb
# Build the "Parsha" alternate structure: each parsha_<title>.txt file
# holds pairs of CSV lines (start ref, end ref); every pair becomes one
# depth-0 ArrayMapNode appended to parshiot["nodes"].
for count, title in enumerate(title_eng):
    # `with` guarantees the handle is closed -- the original opened all
    # 34 files and never closed any of them.
    with open("parsha_"+title+".txt", 'r') as f:
        while True:
            line = f.readline()
            if line == '':
                break
            parsha_name, start_ref = convertIntoRef(line)
            line = f.readline()
            parsha_name, end_ref = convertIntoRef(line)
            wholeRef = start_ref.to(end_ref).normal()
            parsha = ArrayMapNode()
            parsha.add_title(parsha_name, "en", primary=True)
            parsha.add_title(getHebrewParsha(parsha_name), "he", primary=True)
            parsha.key = parsha_name
            parsha.depth = 0
            parsha.addressTypes = []
            parsha.sectionNames = []
            parsha.wholeRef = wholeRef
            parsha.refs = []
            parshiot["nodes"].append(parsha.serialize())
# Build the "Chapters" (perakim) alternate structure: each
# perek_<title>.txt holds pairs of lines (start ref, end ref) per chapter.
for count, title in enumerate(title_eng):
    # NOTE(review): 'Devarim' is a Torah book and never occurs in
    # title_eng (Nach), so this guard looks like dead code copied from a
    # Torah version of the script -- confirm before removing.
    if title=='Devarim':
        continue
    f=open("perek_"+title+".txt", 'r')
    # Sentinel so the first iteration can be detected below.
    line = "nothing"
    first_one = ""
    last_one = ""
    refs_dict = {}
    current = 0
    while line != '':
        prev_line = line
        line = f.readline()
        if line == '':
            break
        start_perek, start_ref = convertIntoRef(line)
        # First pair of the file: remember the very first ref.
        if prev_line == "nothing":
            first_one = (start_perek, start_ref)
        line = f.readline()
        end_perek, end_ref = convertIntoRef(line)
        last_one = (end_perek, end_ref)
        # Only chapters whose start and end fall in the same perek get a
        # concrete ref; others are left out and become "" below.
        if start_perek == end_perek:
            refs_dict[start_perek] = start_ref.to(end_ref).normal()
    # Dense per-chapter list 1..last perek, "" where no ref was recorded.
    refs = []
    for i in range(int(last_one[0])):
        if str(i+1) in refs_dict:
            refs.append(refs_dict[str(i+1)])
        else:
            refs.append("")
    whole_ref = first_one[1].to(last_one[1]).normal()
    chumash = ArrayMapNode()
    chumash.add_title(title_heb[count], "he", primary=True)
    chumash.add_title(title, "en", primary=True)
    chumash.key = title
    chumash.addressTypes = ["Integer"]
    chumash.sectionNames = ["Chapter"]
    chumash.depth = 1
    chumash.wholeRef = whole_ref
    chumash.refs = refs
    chumash.validate()
    perakim["nodes"].append(chumash.serialize())
    f.close()
# Default (Remez/Paragraph) structure of the text, then the index payload
# combining it with the two alternate structures built above.
root = JaggedArrayNode()
root.key = "yalkut_on_nach"
root.add_title("Yalkut Shimoni on Nach", "en", primary=True)
# NOTE(review): u"ילקות" looks like a typo for u"ילקוט" -- confirm against
# the canonical Sefaria title before changing.
root.add_title(u"""ילקות שמעוני על נ״ח""", "he", primary=True)
root.depth = 2
root.sectionNames = ["Remez", "Paragraph"]
root.heSectionNames = [u"רמז", u"פסקה"]
root.addressTypes = ["Integer", "Integer"]
index = {
    "title": "Yalkut Shimoni on Nach",
    "categories": ["Midrash"],
    "alt_structs": {"Parsha": parshiot, "Chapters": perakim},
    "default_struct": "Remez",
    "schema": root.serialize()
}
# Upload everything to the Sefaria server.
post_index(index)
| [
"[email protected]"
] | |
f584d11e5c58a9b060c411497ab86658c716e806 | 8bcf973008b1d7549f59501a1667909848ea87dd | /Day0716/day32/2.查看GIL切换的指令数.py | 3e6d82deb3f562a4c9f1c61daba30f427b1cd984 | [] | no_license | simplesmall/Python-FullStack | 74ffeb2119eecb7fcb21a136d01aaaf2bcc2c24c | 210844ef6443a5543d49a20dbec2db9a9b960230 | refs/heads/master | 2022-12-17T00:56:40.515335 | 2019-11-15T02:07:57 | 2019-11-15T02:07:57 | 221,816,447 | 0 | 1 | null | 2022-12-13T19:22:26 | 2019-11-15T01:10:55 | Python | UTF-8 | Python | false | false | 52 | py | import sys
# sys.getcheckinterval() (bytecode instructions between GIL checks) was
# deprecated in Python 3.2 and removed in 3.9; fall back to
# sys.getswitchinterval() (seconds between thread switches) so this
# snippet keeps working on modern interpreters instead of raising
# AttributeError.
v1 = sys.getcheckinterval() if hasattr(sys, "getcheckinterval") else sys.getswitchinterval()
print(v1) | [
"[email protected]"
] | |
5167d45c33d996d136141fa7758a62e1f72334c7 | 8a2f5982c90c205cfee8f8fdce264a930c1b3749 | /acoustics/standards/iso_1996_1_2003.py | 266bc5cfdd490432fa7c696a757a75053959407c | [
"BSD-3-Clause",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | AlanLeJie/python-acoustics | 05dde3ba6b7cf07265f5186f3742bb22f01fa1bb | af72e7f88003f0bba06934ea38c98e8993c4a6c6 | refs/heads/master | 2023-08-28T18:41:08.924307 | 2020-08-18T21:37:14 | 2021-10-26T12:24:40 | 444,331,803 | 1 | 0 | BSD-3-Clause | 2022-01-04T07:52:28 | 2022-01-04T07:52:28 | null | UTF-8 | Python | false | false | 1,313 | py | """
ISO 1996-1:2003
===============
ISO 1996-1:2003 defines the basic quantities to be used for the description of
noise in community environments and describes basic assessment procedures. It
also specifies methods to assess environmental noise and gives guidance on
predicting the potential annoyance response of a community to long-term exposure
from various types of environmental noises. The sound sources can be separate or
in various combinations. Application of the method to predict annoyance response
is limited to areas where people reside and to related long-term land uses.
"""
import numpy as np
def composite_rating_level(levels, hours, adjustment):
    """Composite whole-day rating level.

    :params levels: Level per period.
    :params hours: Amount of hours per period.
    :params adjustment: Adjustment per period.

    Implements equations 6 and 7 of ISO 1996-1:2003:

    .. math:: L_R = 10 \\log{\\left[ \\sum_i \\frac{d_i}{24} 10^{(L_i+K_i)/10} \\right]}

    where :math:`i` is a period.

    .. note:: Summation is done over the last axis.
    """
    levels, hours, adjustment = (np.asarray(arg) for arg in (levels, hours, adjustment))
    # Each period contributes its adjusted level weighted by the fraction
    # of the day it covers.
    weighted = hours / 24.0 * 10.0 ** ((levels + adjustment) / 10.0)
    return 10.0 * np.log10(weighted.sum(axis=-1))
| [
"[email protected]"
] | |
ae7880c58342e9df5ce5e8cfb7839e96478b4471 | d27af9d58b91b8cd998ac0eb87d980d304ff0670 | /Regular-Contest/ARC042/ARC042_A.py | 9d22a7d98f1c573116fecd236ebbad8243a509c8 | [] | no_license | mongesan/Atcoder-m0_ngesan-py | 29dd79daab149003ffc8b6b6bad5fa2e7daa9646 | 6654af034d4ff4cece1be04c2c8b756976d99a4b | refs/heads/master | 2023-08-20T19:50:04.547025 | 2021-10-27T12:24:51 | 2021-10-27T12:24:51 | 258,486,105 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9 | py | #ARC042_A | [
"[email protected]"
] | |
6e8ba53c338c54d629c04f510c415fc9c29d34ad | d2e80a7f2d93e9a38f37e70e12ff564986e76ede | /Python-cookbook-2nd/cb2_01/cb2_1_8_exm_1.py | dd82698e08bf87714cf746fb4c3b765abbef23fa | [] | no_license | mahavivo/Python | ceff3d173948df241b4a1de5249fd1c82637a765 | 42d2ade2d47917ece0759ad83153baba1119cfa1 | refs/heads/master | 2020-05-21T10:01:31.076383 | 2018-02-04T13:35:07 | 2018-02-04T13:35:07 | 54,322,949 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | def containsAny(seq, aset):
return bool(set(aset).intersection(seq))
| [
"[email protected]"
] | |
e4ce03bdc845db19985f057f29474d509faf9e51 | 807972e7cbbd9a4ac3e33cbe7f6336140b69db99 | /__init__.py | 3914dcd126ec41dd5af0204c5c57e0df94084099 | [] | no_license | imhuay/bert_by_keras | efa6c65fdb3b7cf337218b9644c40ce26875e7ea | bb847a6b45f45fce349aa7c30897dd34f3453648 | refs/heads/master | 2023-04-21T19:55:09.367856 | 2021-04-27T13:46:43 | 2021-04-27T13:46:43 | 333,485,996 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Time:
2021-03-10 11:03 AM
Author:
huayang
Subject:
"""
| [
"[email protected]"
] | |
c2506960988c3521a7ee66ffa6b3e0b8285d7800 | 741ee09b8b73187fab06ecc1f07f46a6ba77e85c | /AutonomousSourceCode/data/raw/sort/02e9654d-b2b6-4400-b461-b0237b1385b4__selection_sort.py | 32b8579c493ef2d06cb99bcee9dee400f3b1157d | [] | no_license | erickmiller/AutomatousSourceCode | fbe8c8fbf215430a87a8e80d0479eb9c8807accb | 44ee2fb9ac970acf7389e5da35b930d076f2c530 | refs/heads/master | 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | from ds import arrays
import sys
from profile import profile
@profile
def sort(a):
    # Entry point: sort the whole list in place (wrapped by the local
    # `profile` decorator from this project's profile module).
    selection_sort(a,0,len(a))
def selection_sort(a, start, length):
    """Sort ``a[start:start+length]`` in place, ascending.

    Despite the module name this is insertion sort: each element is
    shifted left into position within the already-sorted prefix.  The
    public name is kept so callers (sort, assert_sorted) keep working.
    Uses range() instead of the Python-2-only xrange() so the function
    runs on both Python 2 and 3 with identical behavior.
    """
    for i in range(start + 1, start + length):
        key = a[i]
        j = i
        # Shift larger elements one slot right until key's spot is found.
        while j > start and a[j - 1] > key:
            a[j] = a[j - 1]
            j -= 1
        a[j] = key
def main():
    # Build the input list from command-line args via the project's
    # ds.arrays helper, sort it in place, and return it.
    a = arrays.make(sys.argv)
    sort(a)
    return a
# Script entry point (no-op when imported as a module).
if __name__=="__main__":
    main()
########################################tests########################################
def assert_sorted(a, from_index, length):
    """Sort the given slice in place, then assert it is non-decreasing.

    range() replaces the Python-2-only xrange() so the test helper also
    runs under Python 3; iteration order and bounds are unchanged.
    """
    selection_sort(a, from_index, length)
    for i in range(from_index, from_index + length - 1):
        assert a[i] <= a[i + 1]
def should_partially_sort():
    # Sorts an inner window of a literal list, then of an array produced
    # by the project's ds.arrays.array helper.
    assert_sorted([30,20,10,5,3,2,4,1,-4,-5],3,5)
    assert_sorted(arrays.array(50,False),10,20)
| [
"[email protected]"
] | |
2e39c69f636818b969cb435a652bbead60b25fa1 | eadc22ca135f5fa4c095e3292f2a55d642b58b5a | /polymath/codegen/dnnweavergen/dnnweaver2/tensorOps/cnn.py | 02034190513bd0f9d03d17392db6b196b7c6a8f2 | [
"Apache-2.0"
] | permissive | ZhuangzhuangWu/polymath | eb4b71f2e310a0c99b985935984aa396643bed46 | 4857df309a8aecf392fc8a1c7ec1fdfad3706426 | refs/heads/master | 2023-06-24T02:44:36.095451 | 2021-07-28T18:04:45 | 2021-07-28T18:04:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,043 | py | from polymath.codegen.dnnweavergen.dnnweaver2.tensorOps.NodeOp import NodeOp, GradOp
from polymath.codegen.dnnweavergen.dnnweaver2.graph import get_default_graph
from polymath.codegen.dnnweavergen.dnnweaver2.scalar.ops import Ops
from polymath.codegen.dnnweavergen.dnnweaver2.scalar.dtypes import FQDtype, FixedPoint
from polymath.codegen.dnnweavergen.dnnweaver2 import get_tensor
from polymath.codegen.dnnweavergen.dnnweaver2.tensor import Tensor
class TypeCastOp(NodeOp):
    """Casts a tensor to ``output_dtype`` without changing its shape."""
    def __init__(self, data, output_dtype, node_name=None):
        self.data = data
        input_tensors = data
        self.output_dtype = output_dtype
        super(TypeCastOp, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        # Pure type cast: the shape passes through unchanged.
        return self.data.shape
    def _get_output_dtype(self):
        return self.output_dtype
    def get_ops(self):
        # Casting is not counted towards the op statistics.
        return {}
class Convolution(NodeOp):
    """Grouped 2-D convolution: data (..., H, W, Cin) with weights
    (Cout, Kh, Kw, Cin) plus a 1-D bias of length Cout.

    Attribute assignments must stay before super().__init__(), which
    drives the _get_output_* hooks on the base NodeOp.
    """
    def __init__(self, data, weights, bias, node_name, pad='SAME', stride=None, group=1, dtype=FQDtype.FP32):
        # Input data >3D
        self.data = data
        # Weights data 4D
        self.weights = weights
        assert len(self.weights.shape) == 4
        if len(self.data.shape) < 3:
            input_channels = 1
        else:
            input_channels = self.data.shape[-1]
        assert self.weights.shape[-1] == input_channels, 'Expected {} input channels in weights, got {}'.format(input_channels, self.weights.shape[-1])
        # Bias data 1D
        # if bias.dtype != self._get_output_dtype():
        #     # bias = TypeCastOp(bias, self._get_output_dtype(), node_name='bias-typecast').output_tensors
        #     assert bias.dtype == self._get_output_dtype()
        self.bias = bias
        assert len(bias.shape) == 1
        assert bias.shape[0] == weights.shape[-4], 'Bias shape {} does not match weights shape {}'.format(bias.shape, weights.shape)
        # Stride: one entry per data dimension; defaults to 1 everywhere.
        if stride is None:
            stride = (1,1,1,1)
        assert len(stride) == len(self.data.shape)
        self.stride = stride
        # Padding: stored as (before, after) pairs per dimension.
        if pad == 'SAME':
            self.pad = ((0,0),
                    (self.weights.shape[-3]//2,self.weights.shape[-3]//2),
                    (self.weights.shape[-2]//2,self.weights.shape[-2]//2),
                    (0,0)
                    )
        elif pad == 'VALID':
            self.pad = ((0,0),(0,0),(0,0),(0,0))
        else:
            # Explicit (pad_h, pad_w), applied symmetrically.
            assert len(pad) == 2
            self.pad = ((0,0), (pad[0], pad[0]), (pad[1], pad[1]), (0,0))
        # Group
        self.group = group
        input_tensors = (data, weights, bias)
        self.dtype=dtype
        super(Convolution, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        # Standard conv sizing: (in - kernel + pad) // stride + 1 per
        # spatial dim; any leading (batch) dims pass through.
        cout = self.weights.shape[-4]
        hout = (self.data.shape[-3] - self.weights.shape[-3] + self.pad[-3][0] + self.pad[-3][1]) // self.stride[-3] + 1
        wout = (self.data.shape[-2] - self.weights.shape[-2] + self.pad[-2][0] + self.pad[-2][1]) // self.stride[-2] + 1
        out_shape = []
        for i in range(len(self.data.shape)-3):
            out_shape.append(self.data.shape[i])
        out_shape.append(hout)
        out_shape.append(wout)
        out_shape.append(cout)
        return tuple(out_shape)
    def _get_output_dtype(self):
        # Accumulate in 64-bit fixed point; fractional bits add under
        # multiplication.  NOTE(review): assumes both input dtypes expose
        # frac_bits (fixed-point) -- confirm behavior for float inputs.
        total_bits = 64
        total_frac_bits = self.data.dtype.frac_bits + self.weights.dtype.frac_bits
        return FixedPoint(total_bits, total_frac_bits)
    def _autograd(self, x, y, grad_dtype=FQDtype.FP32):
        # Lazily build (and cache in input_loss) the gradient op for
        # whichever input tensor `x` is differentiated with respect to.
        self.output_loss = self._get_incoming_gradients(y, grad_dtype)
        assert x in self.input_tensors, 'Op: {}, x: {}'.format(self.name, x.name)
        if x == self.data:
            if self.input_loss[0] is None:
                op = ConvolutionBackprop(data=self.data, weights=self.weights, output_loss=self.output_loss, pad=self.pad, stride=self.stride, group=self.group, node_name=self.name, dtype=grad_dtype)
                self.input_loss[0] = op.output_tensors
            return self.input_loss[0]
        else:
            if self.input_loss[1] is None:
                op = ConvolutionGradient(data=self.data, weights=self.weights, output_loss=self.output_loss, pad=self.pad, stride=self.stride, group=self.group, node_name=self.name, dtype=grad_dtype)
                self.input_loss[1] = op.output_tensors
            return self.input_loss[1]
    def get_ops(self):
        # MAC count: Kw*Kh*Cin*Cout*Hout*Wout*batch, divided by the group
        # count (grouped convolutions touch only Cin/group channels).
        num = 1
        for i in range(len(self.data.shape)-3):
            num *= self.data.shape[i]
        cout = self.output_tensors.shape[-1]
        cin = self.data.shape[-1]
        hout = self.output_tensors.shape[-3]
        wout = self.output_tensors.shape[-2]
        hfil = self.weights.shape[-3]
        wfil = self.weights.shape[-2]
        mac = (wfil * hfil * cin * \
               cout * hout * wout * \
               num) // self.group
        dtypes = (self.data.dtype, self.weights.dtype, self.output_tensors.dtype)
        return {Ops.MAC(dtypes): mac}
    def load_params(self, params):
        # Copy pretrained arrays into the graph's weight/bias tensors.
        self.weights.data = params["weights"]
        self.bias.data = params["bias"]
class ConvolutionBackprop(GradOp):
    """Gradient of Convolution with respect to its input data (dL/dX)."""
    def __init__(self, data, weights, output_loss, node_name, pad='SAME', stride=None, group=1, dtype=None):
        self.data = data
        self.weights = weights
        self.output_loss = output_loss
        input_tensors = (self.output_loss, self.weights)
        node_name = node_name + '-input-backprop'
        self.dtype=dtype
        # Stride
        # NOTE(review): this asserts a 2-element stride, but
        # Convolution._autograd forwards its own 4-element stride (and a
        # 4-pair pad) -- confirm which convention callers actually use.
        if stride is None:
            stride = (1,1)
        assert len(stride) == 2
        self.stride = stride
        # Padding -- indexed [-2]/[-1], unlike the forward op's [-3]/[-2].
        if pad == 'SAME':
            self.pad = (self.weights.shape[-2]//2,self.weights.shape[-1]//2)
        elif pad == 'VALID':
            self.pad = (0,0)
        else:
            assert len(pad) == 2
            self.pad = pad
        # Group
        self.group = group
        super(ConvolutionBackprop, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        # The input gradient has the same shape as the input itself.
        return self.data.shape
    def get_ops(self):
        # Same MAC volume as the forward pass, expressed per input pixel.
        # NOTE(review): uses true division by group here ("/") vs floor
        # division in the forward op -- confirm intent.
        num = 1
        for i in range(len(self.data.shape)-3):
            num *= self.data.shape[i]
        cout = self.output_loss[0].shape[-3]
        cin = self.data.shape[-3]
        hin = self.data.shape[-2]
        win = self.data.shape[-1]
        hfil = self.weights.shape[-2]
        wfil = self.weights.shape[-1]
        mac = (wfil * hfil * cout * \
               cin * hin * win * \
               num)/self.group
        dtypes = (self.output_loss[0].dtype, self.weights.dtype, self.output_tensors.dtype)
        return {Ops.MAC(dtypes): mac}
class ConvolutionGradient(GradOp):
    """Gradient of Convolution with respect to its weights (dL/dW)."""
    def __init__(self, data, weights, output_loss, node_name, pad='SAME', stride=None, group=1, dtype=None):
        self.data = data
        self.weights = weights
        self.output_loss = output_loss
        input_tensors = (self.output_loss, self.data)
        node_name = self.weights.name + '-grad'
        self.dtype=dtype
        # Stride (2-element convention, as in ConvolutionBackprop).
        if stride is None:
            stride = (1,1)
        assert len(stride) == 2
        self.stride = stride
        # Padding
        if pad == 'SAME':
            self.pad = (self.weights.shape[-2]//2,self.weights.shape[-1]//2)
        elif pad == 'VALID':
            self.pad = (0,0)
        else:
            assert len(pad) == 2
            self.pad = pad
        # Group
        self.group = group
        # NOTE(review): self.graph is read before super().__init__() has
        # run, and the recomputed local `dtype` is never written back to
        # self.dtype (set above) -- both look suspicious; confirm against
        # GradOp's constructor before changing.
        if dtype is None:
            dtype = self.graph.grad_dtype
        super(ConvolutionGradient, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        # The weight gradient matches the weight tensor's shape.
        return self.weights.shape
    def get_ops(self):
        # Multiply count per weight element per output pixel; the old
        # separate MUL/ADD accounting is preserved below as a comment.
        num = 1
        for i in range(len(self.data.shape)-3):
            num *= self.data.shape[i]
        cout = self.output_loss[0].shape[-3]
        cin = self.data.shape[-3]
        hout = self.output_loss[0].shape[-2]
        wout = self.output_loss[0].shape[-1]
        hfil = self.weights.shape[-2]
        wfil = self.weights.shape[-1]
        mul = (hout * wout * \
               cout * cin * hfil * wfil * \
               num) / self.group
        add = (hout * wout * \
               num) / self.group
        # return {Ops.MUL: mul, Ops.ADD: add}
        dtypes = (self.output_loss[0].dtype, self.data.dtype, self.output_tensors.dtype)
        return {Ops.MAC(dtypes): mul}
class MaxPooling(NodeOp):
    """Max pooling over the spatial dimensions of a (..., H, W, C) tensor.

    ``pooling_kernel`` must have one entry per input dimension.
    ``stride`` may be None (1 everywhere), a scalar (applied to H and W),
    a 1-element sequence, or a full per-dimension tuple.

    Fixes over the original: the default ``stride=None`` used to crash on
    ``len(None)``; a 1-element stride produced a nested tuple; and a
    2-entry explicit ``pad`` left ``self.pad`` too short for the
    ``pad[-3]`` indexing in ``_get_output_shape``.
    """
    def __init__(self, data, pooling_kernel, node_name, pad='VALID', stride=None, dtype=None):
        # Input data >3D, layout (..., H, W, C)
        self.data = data
        # Pooling kernel: one entry per input dimension.
        assert len(pooling_kernel) == len(data.shape)
        self.pooling_kernel = pooling_kernel
        # Stride normalization (see class docstring).
        if stride is None:
            stride = (1, 1, 1, 1)
        elif isinstance(stride, int):
            stride = (1, stride, stride, 1)
        elif len(stride) == 1:
            stride = (1, stride[0], stride[0], 1)
        self.stride = stride
        if pad == 'VALID':
            # No padding.
            self.pad = (
                    (0,0),
                    (0,0),
                    (0,0),
                    (0,0))
        elif pad == 'SAME':
            # Pad so the output covers every input pixel for this stride,
            # giving any odd pixel to the bottom/right side.
            w = self.data.shape[-2]
            h = self.data.shape[-3]
            pad_w = (w - 1) * self.stride[-2] - w + self.pooling_kernel[-2]
            pad_h = (h - 1) * self.stride[-3] - h + self.pooling_kernel[-3]
            pad_w_l = pad_w // 2
            pad_w_r = pad_w - pad_w_l
            pad_h_t = pad_h // 2
            pad_h_b = pad_h - pad_h_t
            self.pad = (
                    (0,0),
                    (pad_h_t,pad_h_b),
                    (pad_w_l,pad_w_r),
                    (0,0))
        else:
            # Explicit padding: ints mean symmetric (before == after),
            # pairs are (before, after).  A 2-entry pad is taken as the
            # (H, W) padding and expanded with zero batch/channel entries
            # so the 4-entry indexing in _get_output_shape stays valid.
            assert len(pad) == 4 or len(pad) == 2
            _pad = []
            for p in pad:
                if isinstance(p, int):
                    _pad.append((p, p))
                else:
                    assert len(p) == 2
                    _pad.append(tuple(p))
            if len(_pad) == 2:
                _pad = [(0, 0), _pad[0], _pad[1], (0, 0)]
            self.pad = _pad
        input_tensors = (data)
        if dtype is None:
            dtype = self.data.dtype
        self.dtype=dtype
        super(MaxPooling, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        # Pooling keeps the channel count; spatial dims shrink by the
        # usual (in - kernel + pad) // stride + 1 rule.
        cout = self.data.shape[-1]
        hout = (self.data.shape[-3] - self.pooling_kernel[-3] + self.pad[-3][0] + self.pad[-3][1]) // self.stride[-3] + 1
        wout = (self.data.shape[-2] - self.pooling_kernel[-2] + self.pad[-2][0] + self.pad[-2][1]) // self.stride[-2] + 1
        out_shape = []
        for i in range(len(self.data.shape)-3):
            out_shape.append(self.data.shape[i])
        out_shape.append(hout)
        out_shape.append(wout)
        out_shape.append(cout)
        return tuple(out_shape)
    def _get_output_dtype(self):
        # Max never creates new values, so the dtype passes through.
        return self.data.dtype
    def _autograd(self, x, y, grad_dtype=FQDtype.FP32):
        # Single-input op: the cached gradient always lives at slot 0.
        self.output_loss = self._get_incoming_gradients(y, grad_dtype=grad_dtype)
        if self.input_loss[0] is None:
            op = MaxPoolBackprop(data=self.data, pooling_kernel=self.pooling_kernel, output_loss=self.output_loss, node_name=self.name)
            self.input_loss[0] = op.output_tensors
        assert x in self.input_tensors, 'Op: {}, x: {}'.format(self.name, x.name)
        return self.input_loss[0]
    def get_ops(self):
        # One comparison per kernel element per output pixel.
        num = 1
        for i in range(len(self.output_tensors.shape)-3):
            num *= self.data.shape[i]
        cout = self.output_tensors.shape[-3]
        hout = self.output_tensors.shape[-2]
        wout = self.output_tensors.shape[-1]
        hfil = self.pooling_kernel[-2]
        wfil = self.pooling_kernel[-1]
        CMP = hfil * wfil *\
              hout * wout * cout *\
              num
        dtypes = (self.data.dtype)
        return {Ops.CMP(dtypes): CMP}
class MaxPoolBackprop(GradOp):
    """Gradient of MaxPooling: routes the incoming loss back to the
    input's shape."""
    def __init__(self, data, output_loss, pooling_kernel, node_name, dtype=None):
        self.data = data
        self.output_loss = output_loss
        self.pooling_kernel = pooling_kernel
        input_tensors = (self.output_loss)
        node_name = self.data.name + '-backprop'
        # Gradients inherit the incoming loss dtype unless overridden.
        if dtype is None:
            dtype = self.output_loss.dtype
        self.dtype=dtype
        super(MaxPoolBackprop, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        # The input gradient has the same shape as the pooled input.
        return self.data.shape
    def get_ops(self):
        # One comparison per kernel element per input pixel.
        num = 1
        for i in range(len(self.output_tensors.shape)-3):
            num *= self.data.shape[i]
        cin = self.data.shape[-3]
        hin = self.data.shape[-2]
        win = self.data.shape[-1]
        hfil = self.pooling_kernel[-2]
        wfil = self.pooling_kernel[-1]
        CMP = hfil * wfil * \
              hin * win * cin * \
              num
        dtypes = (self.data.dtype)
        return {Ops.CMP(dtypes): CMP}
class Flatten(NodeOp):
    """Collapses the last three dimensions of the input into one."""
    def __init__(self, data, node_name):
        # Input data >3D
        self.data = data
        input_tensors = data
        super(Flatten, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        # Product of the last three dims replaces them; leading (batch)
        # dims pass through.  (The local names suggest C/H/W but only the
        # product matters.)
        cout = self.data.shape[-3]
        hout = self.data.shape[-2]
        wout = self.data.shape[-1]
        out_shape = []
        for i in range(len(self.data.shape)-3):
            out_shape.append(self.data.shape[i])
        out_shape.append(cout*hout*wout)
        return tuple(out_shape)
    def _autograd(self, x, y, grad_dtype=FQDtype.FP32):
        # Single-input op: the cached gradient always lives at slot 0.
        self.output_loss = self._get_incoming_gradients(y, grad_dtype=grad_dtype)
        if self.input_loss[0] is None:
            op = FlattenBackprop(data=self.data, output_loss=self.output_loss, node_name=self.name, dtype=grad_dtype)
            self.input_loss[0] = op.output_tensors
        assert x in self.input_tensors, 'Op: {}, x: {}'.format(self.name, x.name)
        return self.input_loss[0]
    def _get_output_dtype(self):
        # Pure reshape: dtype passes through.
        return self.data.dtype
    def get_ops(self):
        # Reshapes cost no arithmetic ops.
        return {}
class FlattenBackprop(GradOp):
    """Gradient of Flatten: reshapes the loss back to the input shape."""
    def __init__(self, data, output_loss, node_name, dtype=None):
        self.data = data
        self.output_loss = output_loss
        input_tensors = (self.output_loss)
        node_name = self.data.name + '-backprop'
        self.dtype=dtype
        super(FlattenBackprop, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        return self.data.shape
    def get_ops(self):
        # Reshapes cost no arithmetic ops.
        return {}
class Concat(NodeOp):
    """Concatenates several tensors along ``concat_dim``; all other
    dimensions must match across inputs."""
    def __init__(self, data, concat_dim, node_name, dtype=None):
        self.data = tuple(data)
        input_tensors = data
        # Normalize a negative axis to its positive equivalent.
        if concat_dim < 0:
            concat_dim += len(input_tensors[0].shape)
        for _data in data:
            assert len(_data.shape) == len(data[0].shape)
            for dim in range(len(_data.shape)):
                if dim != concat_dim:
                    assert _data.shape[dim] == data[0].shape[dim], '{} does not match {} for dimension {}'.format(data[0].__str__(), _data.__str__(), dim)
        self.concat_dim = concat_dim
        # Output dtype follows the first input unless overridden.
        if dtype is None:
            dtype = data[0].dtype
        self.dtype=dtype
        super(Concat, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        # Sum the inputs' sizes along the concat axis; copy the rest from
        # the first input.
        concat_dim = 0
        for _data in self.data:
            concat_dim += _data.shape[self.concat_dim]
        out_shape = []
        for i in range(len(self.data[0].shape)):
            if i == self.concat_dim:
                out_shape.append(concat_dim)
            else:
                out_shape.append(self.data[0].shape[i])
        return tuple(out_shape)
    def _get_output_dtype(self):
        return self.data[0].dtype
    def _autograd(self, x, y, grad_dtype=FQDtype.FP32):
        # One cached gradient slot per input tensor.
        self.output_loss = self._get_incoming_gradients(y, grad_dtype=grad_dtype)
        assert x in self.data, 'Op: {}, x: {}'.format(self.name, x.name)
        for i in range(len(self.data)):
            if x == self.data[i]:
                if self.input_loss[i] is None:
                    op = ConcatBackprop(data=self.data[i], output_loss=self.output_loss, node_name=self.name, dtype=grad_dtype)
                    self.input_loss[i] = op.output_tensors
                return self.input_loss[i]
    def get_ops(self):
        # Concatenation costs no arithmetic ops.
        return {}
class ConcatBackprop(GradOp):
    """Gradient of Concat for one input: slices the loss back to that
    input's shape."""
    def __init__(self, data, output_loss, node_name, dtype=None):
        self.data = data
        self.output_loss = output_loss
        input_tensors = (self.output_loss)
        node_name = self.data.name + '-backprop'
        self.dtype=dtype
        super(ConcatBackprop, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        return self.data.shape
    def get_ops(self):
        # Slicing costs no arithmetic ops.
        return {}
class Add(NodeOp):
    """Elementwise sum of several same-shaped tensors."""
    def __init__(self, data, node_name, dtype=None):
        self.data = tuple(data)
        input_tensors = data
        for _data in data:
            assert len(_data.shape) == len(data[0].shape)
            for dim in range(len(_data.shape)):
                assert _data.shape[dim] == data[0].shape[dim], '{} does not match {} for dimension {}'.format(data[0].__str__(), _data.__str__(), dim)
        # Output dtype follows the first input unless overridden.
        if dtype is None:
            dtype = data[0].dtype
        self.dtype=dtype
        super(Add, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        return self.data[0].shape
    def _get_output_dtype(self):
        return self.data[0].dtype
    def _autograd(self, x, y, grad_dtype=FQDtype.FP32):
        # One cached gradient slot per input; addition passes the loss
        # through unchanged to every input.
        self.output_loss = self._get_incoming_gradients(y, grad_dtype=grad_dtype)
        assert x in self.data, 'Op: {}, x: {}'.format(self.name, x.name)
        for i in range(len(self.data)):
            if x == self.data[i]:
                if self.input_loss[i] is None:
                    op = AddBackprop(data=self.data[i], output_loss=self.output_loss, node_name=self.name, dtype=grad_dtype)
                    self.input_loss[i] = op.output_tensors
                return self.input_loss[i]
    def get_ops(self):
        # NOTE(review): returns no op counts even though this performs
        # elementwise additions (AddBias counts them) -- confirm whether
        # that is intentional.
        return {}
class AddBackprop(GradOp):
    """Gradient of Add for one input: the loss passes through with the
    input's shape."""
    def __init__(self, data, output_loss, node_name, dtype=None):
        self.data = data
        self.output_loss = output_loss
        input_tensors = (self.output_loss)
        node_name = self.data.name + '-backprop'
        self.dtype=dtype
        super(AddBackprop, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        return self.data.shape
    def get_ops(self):
        # Pass-through: no arithmetic ops counted.
        return {}
class MatMul(NodeOp):
    """Fully-connected layer: data (..., Cin) times weights (Cout, Cin).

    NOTE(review): `biases` is validated and stored but never used in
    get_ops, and the `dtype` parameter is unused -- confirm whether the
    bias add is accounted for elsewhere (e.g. a separate AddBias node).
    """
    def __init__(self, data, weights, biases, name, dtype=None):
        # Input data >3D
        self.data = data
        # Weights data 2D
        self.weights = weights
        assert len(self.weights.shape) == 2
        assert self.weights.shape[-1] == self.data.shape[-1], 'Dimension mismatch between data ({}) and weights ({})'.format(self.data, self.weights)
        # Biases data 2D
        self.biases = biases
        assert len(self.biases.shape) == 1
        assert self.biases.shape[0] == self.weights.shape[-2]
        input_tensors = (data, weights, biases)
        super(MatMul, self).__init__(node_name=name, input_tensors=input_tensors)
    def _get_output_shape(self):
        # The last dim becomes Cout; leading dims pass through.
        cout = self.weights.shape[-2]
        out_shape = []
        for i in range(len(self.data.shape)-1):
            out_shape.append(self.data.shape[i])
        out_shape.append(cout)
        return tuple(out_shape)
    def _get_output_dtype(self):
        # Accumulate in 64-bit fixed point; fractional bits add under
        # multiplication (same convention as Convolution).
        total_bits = 64
        total_frac_bits = self.data.dtype.frac_bits + self.weights.dtype.frac_bits
        return FixedPoint(total_bits, total_frac_bits)
    def _autograd(self, x, y, grad_dtype=FQDtype.FP32):
        # Lazily build (and cache) the gradient op for the requested input.
        self.output_loss = self._get_incoming_gradients(y, grad_dtype=grad_dtype)
        assert x in self.input_tensors, 'Op: {}, x: {}'.format(self.name, x.name)
        if x == self.data:
            if self.input_loss[0] is None:
                op = MatMulBackprop(data=self.data, weights=self.weights, output_loss=self.output_loss, node_name=self.name, dtype=grad_dtype)
                self.input_loss[0] = op.output_tensors
            return self.input_loss[0]
        else:
            if self.input_loss[1] is None:
                op = MatMulGradient(data=self.data, weights=self.weights, output_loss=self.output_loss, node_name=self.name, dtype=grad_dtype)
                self.input_loss[1] = op.output_tensors
            return self.input_loss[1]
    def get_ops(self):
        # MAC count: Cin * Cout per row, times the number of rows.
        num = 1
        for i in range(len(self.data.shape)-1):
            num *= self.data.shape[i]
        cout = self.output_tensors.shape[-1]
        cin = self.data.shape[-1]
        mac = cin * \
              cout * \
              num
        dtypes = (self.data.dtype, self.weights.dtype, self.output_tensors.dtype)
        return {Ops.MAC(dtypes): mac}
class MatMulBackprop(GradOp):
    """Gradient of MatMul with respect to its input data (dL/dX).

    NOTE(review): unlike the sibling grad ops, the `dtype` parameter is
    accepted but never stored -- confirm whether GradOp needs it.
    """
    def __init__(self, data, weights, output_loss, node_name, dtype=None):
        self.data = data
        self.weights = weights
        self.output_loss = output_loss
        input_tensors = (self.output_loss, self.weights)
        node_name = node_name + '-backprop'
        super(MatMulBackprop, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        # The input gradient has the same shape as the input itself.
        return self.data.shape
    def get_ops(self):
        # Same MAC volume as the forward pass.
        num = 1
        for i in range(len(self.data.shape)-1):
            num *= self.data.shape[i]
        cout = self.output_loss[0].shape[-1]
        cin = self.data.shape[-1]
        mac = cin * \
              cout * \
              num
        dtypes = (self.output_loss[0].dtype, self.data.dtype, self.output_tensors.dtype)
        return {Ops.MAC(dtypes): mac}
class MatMulGradient(GradOp):
    """Gradient of MatMul with respect to its weights (dL/dW)."""
    def __init__(self, data, weights, output_loss, node_name, dtype=None):
        self.data = data
        self.weights = weights
        self.output_loss = output_loss
        input_tensors = (self.output_loss, self.data)
        node_name = self.weights.name + '-grad'
        self.dtype=dtype
        super(MatMulGradient, self).__init__(node_name=node_name, input_tensors=input_tensors)
    def _get_output_shape(self):
        # The weight gradient matches the weight tensor's shape.
        return self.weights.shape
    def get_ops(self):
        # Multiply/add accounting folded into a single MAC count; the old
        # split accounting is preserved below as a comment.
        num = 1
        for i in range(len(self.data.shape)-1):
            num *= self.data.shape[i]
        cout = self.output_loss[0].shape[-1]
        cin = self.data.shape[-1]
        mul = cin * \
              cout * \
              num
        add = num
        # return {Ops.MUL: mul, Ops.ADD: add}
        dtypes = (self.output_loss[0].dtype, self.data.dtype, self.output_tensors.dtype)
        return {Ops.MAC(dtypes): mul}
class AddBias(NodeOp):
    """Broadcast-add a 1-D bias tensor to `data` along dimension `dim`."""

    def __init__(self, data, weights, dim, node_name, dtype=FQDtype.FP32):
        self.data = data
        self.weights = weights
        # The bias is either reported as a bare int length or as a 1-D tensor;
        # either way it must match the chosen data dimension.
        if isinstance(weights.shape, int):
            assert weights.shape == data.shape[dim]
        else:
            assert len(self.weights.shape) == 1
            assert self.data.shape[dim] == weights.shape[0]
        self.dim = dim
        self.dtype = dtype
        super(AddBias, self).__init__(node_name=node_name, input_tensors=(data, weights))

    def _get_output_shape(self):
        # Element-wise add: the output mirrors the input shape.
        return self.data.shape

    def _autograd(self, x, y, grad_dtype=FQDtype.FP32):
        """Return (and lazily build) the gradient tensor for input `x`."""
        self.output_loss = self._get_incoming_gradients(y, grad_dtype=grad_dtype)
        assert x in self.input_tensors, 'Op: {}, x: {}'.format(self.name, x.name)
        if x == self.data:
            # Gradient w.r.t. the data input.
            if self.input_loss[0] is None:
                backprop = AddBiasBackprop(data=self.data, weights=self.weights,
                                           output_loss=self.output_loss,
                                           dim=self.dim, node_name=self.name,
                                           dtype=grad_dtype)
                self.input_loss[0] = backprop.output_tensors
            return self.input_loss[0]
        # Gradient w.r.t. the bias weights.
        if self.input_loss[1] is None:
            grad = AddBiasGradient(data=self.data, weights=self.weights,
                                   output_loss=self.output_loss,
                                   dim=self.dim, node_name=self.name,
                                   dtype=grad_dtype)
            self.input_loss[1] = grad.output_tensors
        return self.input_loss[1]

    def get_ops(self):
        """One ADD per element of `data`."""
        count = 1
        for extent in self.data.shape:
            count *= extent
        dtypes = (self.data.dtype, self.weights.dtype)
        return {Ops.ADD(dtypes): count}
class AddBiasBackprop(GradOp):
    """Gradient of AddBias w.r.t. its data input (a pass-through of the loss)."""

    def __init__(self, data, weights, output_loss, dim, node_name, dtype=None):
        self.data = data
        self.weights = weights
        self.output_loss = output_loss
        # Same bias/dimension consistency check as the forward op.
        if isinstance(weights.shape, int):
            assert weights.shape == data.shape[dim]
        else:
            assert len(self.weights.shape) == 1
            assert self.data.shape[dim] == weights.shape[0]
        self.dtype = dtype
        # NOTE(review): named after the *weights* tensor even though this op
        # yields the data gradient (MatMulBackprop names itself after the op
        # instead) — looks inconsistent, but graph code may rely on the name;
        # confirm before changing.
        super(AddBiasBackprop, self).__init__(
            node_name=self.weights.name + '-backprop',
            input_tensors=(output_loss, weights))

    def _get_output_shape(self):
        return self.data.shape

    def get_ops(self):
        # Addition back-propagates the loss unchanged: no arithmetic to count.
        return {}
class AddBiasGradient(GradOp):
    """Gradient of AddBias w.r.t. the bias: reduce the loss over every
    dimension except `dim`."""

    def __init__(self, data, weights, output_loss, dim, node_name, dtype=None):
        self.data = data
        self.weights = weights
        self.output_loss = output_loss
        self.dim = dim
        # Same bias/dimension consistency check as the forward op.
        if isinstance(weights.shape, int):
            assert weights.shape == data.shape[dim]
        else:
            assert len(self.weights.shape) == 1
            assert self.data.shape[dim] == weights.shape[0]
        self.dtype = dtype
        super(AddBiasGradient, self).__init__(
            node_name=self.weights.name + '-grad',
            input_tensors=(output_loss, data))

    def _get_output_shape(self):
        # The bias gradient has the bias's own (1-D) shape.
        return self.weights.shape

    def get_ops(self):
        """One ADD per element reduced into the bias gradient."""
        count = 1
        for axis, extent in enumerate(self.data.shape):
            if axis != self.dim:
                count *= extent
        # NOTE(review): the MatMul grad ops index output_loss[0]; here the
        # tensor is used directly — confirm output_loss is not a tuple here.
        dtypes = (self.output_loss.dtype, self.data.dtype)
        return {Ops.ADD(dtypes): count}
class GlobalAvgPooling(NodeOp):
    """Average over the spatial dimensions of a (..., C, H, W) input,
    keeping the channel dimension."""

    def __init__(self, data, node_name, dtype=None):
        # Input must be at least 4-D: (..., C, H, W).
        assert len(data.shape) > 3, data
        self.data = data
        input_tensors = data
        if dtype is None:
            dtype = data.dtype
        self.dtype = dtype
        super(GlobalAvgPooling, self).__init__(node_name=node_name, input_tensors=input_tensors)

    def _get_output_shape(self):
        # Keep all leading (batch) dims plus the channel dim; drop H and W.
        cout = self.data.shape[-3]
        out_shape = []
        for i in range(len(self.data.shape) - 3):
            out_shape.append(self.data.shape[i])
        out_shape.append(cout)
        return tuple(out_shape)

    def _autograd(self, x, y, grad_dtype=FQDtype.FP32):
        """Return (and lazily build) the gradient tensor for the input."""
        self.output_loss = self._get_incoming_gradients(y, grad_dtype=grad_dtype)
        if self.input_loss[0] is None:
            # Bug fix: this previously constructed FlattenBackprop, while the
            # dedicated GlobalAvgPoolingBackprop defined below went unused —
            # an apparent copy-paste from Flatten._autograd.
            op = GlobalAvgPoolingBackprop(data=self.data,
                                          output_loss=self.output_loss,
                                          node_name=self.name,
                                          dtype=grad_dtype)
            self.input_loss[0] = op.output_tensors
        assert x in self.input_tensors, 'Op: {}, x: {}'.format(self.name, x.name)
        return self.input_loss[0]

    def get_ops(self):
        # NOTE(review): averaging does perform adds and a divide per channel;
        # presumably treated as negligible here — confirm before counting.
        return {}
class GlobalAvgPoolingBackprop(GradOp):
    """Gradient of GlobalAvgPooling w.r.t. its input."""

    def __init__(self, data, output_loss, node_name, dtype=None):
        self.data = data
        self.output_loss = output_loss
        self.dtype = dtype
        # NOTE(review): the node_name argument is ignored; the op names itself
        # after the data tensor.
        super(GlobalAvgPoolingBackprop, self).__init__(
            node_name=self.data.name + '-backprop',
            input_tensors=(self.output_loss))

    def _get_output_shape(self):
        # The gradient matches the forward input's shape.
        return self.data.shape

    def get_ops(self):
        return {}
class AddScalar(NodeOp):
    """Element-wise addition of a scalar tensor to `data`.

    Op counting is not implemented for this node.
    """

    def __init__(self, data, scalar, node_name, dtype=None):
        self.data = data
        self.scalar = scalar
        # The scalar must be a 1-element, 1-D tensor.
        assert len(scalar.shape) == 1
        assert scalar.shape[0] == 1
        input_tensors = (data, scalar)
        self.dtype = dtype
        super(AddScalar, self).__init__(node_name=node_name, input_tensors=input_tensors)

    def _get_output_shape(self):
        return self.data.shape

    def get_ops(self):
        # Fix: was a bare `raise ValueError` followed by an unreachable
        # `return {}`; keep the exception type (callers may catch ValueError)
        # but add a message and drop the dead code.
        raise ValueError('get_ops is not implemented for AddScalar')
class MulScalar(NodeOp):
    """Element-wise multiplication of `data` by a scalar tensor.

    Op counting is not implemented for this node.
    """

    def __init__(self, data, scalar, node_name, dtype=None):
        self.data = data
        self.scalar = scalar
        # The scalar must be a 1-element, 1-D tensor.
        assert len(scalar.shape) == 1
        assert scalar.shape[0] == 1
        input_tensors = (data, scalar)
        self.dtype = dtype
        super(MulScalar, self).__init__(node_name=node_name, input_tensors=input_tensors)

    def _get_output_shape(self):
        return self.data.shape

    def get_ops(self):
        # Fix: was a bare `raise ValueError` followed by an unreachable
        # `return {}`.
        raise ValueError('get_ops is not implemented for MulScalar')
class InverseTensor(NodeOp):
    """Element-wise inverse of `data`. Op counting is not implemented."""

    def __init__(self, data, node_name, dtype=None):
        self.data = data
        # NOTE(review): `(data)` is just `data`, not a 1-tuple — presumably
        # the NodeOp base normalizes single tensors; confirm before changing.
        input_tensors = (data)
        self.dtype = dtype
        super(InverseTensor, self).__init__(node_name=node_name, input_tensors=input_tensors)

    def _get_output_shape(self):
        return self.data.shape

    def get_ops(self):
        # Fix: was a bare `raise ValueError` followed by an unreachable
        # `return {}`.
        raise ValueError('get_ops is not implemented for InverseTensor')
class SubVector(NodeOp):
    """Broadcast-subtract a 1-D vector from `data` along dimension `dim`.

    Op counting is not implemented for this node.
    """

    def __init__(self, data, vector, dim, node_name, dtype=FQDtype.FP32):
        self.data = data
        self.vector = vector
        # The vector length must match the chosen data dimension.
        if isinstance(vector.shape, int):
            assert vector.shape == data.shape[dim]
        else:
            assert len(self.vector.shape) == 1
            assert self.data.shape[dim] == vector.shape[0]
        self.dim = dim
        input_tensors = (data, vector)
        self.dtype = dtype
        super(SubVector, self).__init__(node_name=node_name, input_tensors=input_tensors)

    def _get_output_shape(self):
        return self.data.shape

    def get_ops(self):
        # Fix: was a bare `raise ValueError` followed by an unreachable
        # `return {}`.
        raise ValueError('get_ops is not implemented for SubVector')
class MulVector(NodeOp):
    """Broadcast-multiply `data` by a 1-D vector along dimension `dim`.

    Op counting is not implemented for this node.
    """

    def __init__(self, data, vector, dim, node_name, dtype=FQDtype.FP32):
        self.data = data
        self.vector = vector
        # The vector length must match the chosen data dimension.
        if isinstance(vector.shape, int):
            assert vector.shape == data.shape[dim]
        else:
            assert len(self.vector.shape) == 1
            assert self.data.shape[dim] == vector.shape[0]
        self.dim = dim
        input_tensors = (data, vector)
        self.dtype = dtype
        super(MulVector, self).__init__(node_name=node_name, input_tensors=input_tensors)

    def _get_output_shape(self):
        return self.data.shape

    def get_ops(self):
        # Fix: was a bare `raise ValueError` followed by an unreachable
        # `return {}`.
        raise ValueError('get_ops is not implemented for MulVector')
class LeakyReLU(NodeOp):
    """Leaky ReLU activation; the negative-slope alpha is held in `scalar`."""

    def __init__(self, data, scalar, node_name, dtype=None):
        self.data = data
        self.scalar = scalar
        # The slope must be a 1-element, 1-D tensor.
        assert len(scalar.shape) == 1
        assert scalar.shape[0] == 1
        self.dtype = dtype
        super(LeakyReLU, self).__init__(node_name=node_name, input_tensors=(data, scalar))

    def _get_output_shape(self):
        return self.data.shape

    def _get_output_dtype(self):
        # The activation keeps the input dtype.
        return self.data.dtype

    def get_ops(self):
        """Per element: multiply by a 16-bit fixed-point alpha, right-shift to
        renormalize, and compare against zero."""
        mul_dtypes = (self.data.dtype, FixedPoint(16, 15))
        rshift_dtype = FixedPoint(self.data.dtype.bits + 16,
                                  self.data.dtype.frac_bits + 15)
        cmp_dtypes = (self.data.dtype)
        return {Ops.MUL(mul_dtypes): self.data.size,
                Ops.RSHIFT(rshift_dtype): self.data.size,
                Ops.CMP(cmp_dtypes): self.data.size}
class Maximum(NodeOp):
    """Element-wise maximum over two or more same-shaped input tensors.

    Op counting is not implemented for this node.
    """

    def __init__(self, data, node_name, dtype=FQDtype.FP32):
        # Requires at least two inputs, all with identical shapes.
        assert len(data) > 1
        s0 = data[0].shape
        for t in data:
            s = t.shape
            assert len(s0) == len(s)
            for d in range(len(s)):
                assert s[d] == s0[d]
        input_tensors = data
        self.dtype = dtype
        super(Maximum, self).__init__(node_name=node_name, input_tensors=input_tensors)

    def _get_output_shape(self):
        # All inputs share one shape, so the first defines the output shape.
        return self.input_tensors[0].shape

    def get_ops(self):
        # Fix: was a bare `raise ValueError` followed by an unreachable
        # `return {}`.
        raise ValueError('get_ops is not implemented for Maximum')
class Reorg(NodeOp):
    """Space-to-depth reorg: fold each (kh, kw) spatial block into the
    channel dimension (YOLO-style)."""

    def __init__(self, data, reorg_kernel, node_name, dtype=None):
        self.data = data
        # A bare int is treated as a square kernel.
        if isinstance(reorg_kernel, int):
            reorg_kernel = (reorg_kernel, reorg_kernel)
        self.reorg_kernel = reorg_kernel
        input_tensors = (data)
        self.dtype = data.dtype if dtype is None else dtype
        super(Reorg, self).__init__(node_name=node_name, input_tensors=input_tensors)

    def _get_output_shape(self):
        # Channels grow by kh*kw while height/width shrink by those factors.
        kh = self.reorg_kernel[-2]
        kw = self.reorg_kernel[-1]
        cout = self.data.shape[-3] * kw * kh
        hout = (self.data.shape[-2]) // kh
        wout = (self.data.shape[-1]) // kw
        leading = [self.data.shape[i] for i in range(len(self.data.shape) - 3)]
        return tuple(leading + [cout, hout, wout])

    def get_ops(self):
        # Pure data movement: no arithmetic ops to report.
        return {}
class BatchNorm(NodeOp):
    """Inference-style batch normalization over the last dimension:
    y = (x - mean) * scale."""

    def __init__(self, data, mean, scale, eps, node_name, dtype=FQDtype.FP32):
        self.data = data
        # Normalization runs along the last (channel) dimension.
        dim = -1
        # Mean is a 1-D tensor matching the channel dimension.
        self.mean = mean
        if isinstance(mean.shape, int):
            assert mean.shape == data.shape[dim]
        else:
            assert len(self.mean.shape) == 1
            assert self.data.shape[dim] == mean.shape[0]
        # Scale is a 1-D tensor matching the channel dimension.
        self.scale = scale
        if isinstance(scale.shape, int):
            assert scale.shape == data.shape[dim]
        else:
            assert len(self.scale.shape) == 1
            assert self.data.shape[dim] == scale.shape[0]
        self.dim = dim
        self.eps = eps
        input_tensors = (data, mean, scale)
        self.dtype = dtype
        super(BatchNorm, self).__init__(node_name=node_name, input_tensors=input_tensors)

    def _get_output_shape(self):
        return self.data.shape

    def _get_output_dtype(self):
        # Widened fixed-point: fractional bits accumulate through the multiply.
        return FixedPoint(32, self.data.dtype.frac_bits + self.scale.dtype.frac_bits)

    def get_ops(self):
        """One subtract (x - mean) and one multiply (* scale) per element."""
        ops = self.data.size
        sub_dtypes = (self.data.dtype, self.mean.dtype)
        mul_dtypes = (self.data.dtype, self.scale.dtype)
        # Bug fix: the multiply entry was keyed with sub_dtypes, leaving
        # mul_dtypes computed but unused.
        return {Ops.SUB(sub_dtypes): ops, Ops.MUL(mul_dtypes): ops}

    def load_params(self, params):
        """Load pretrained mean/scale arrays into the parameter tensors."""
        self.mean.data = params["mean"]
        self.scale.data = params["scale"]
def typecast(i, dtype, name=None):
    """Return `i` cast to `dtype` via a TypeCastOp; no-op when `dtype` is
    None or already matches."""
    if dtype is None or i.dtype == dtype:
        return i
    return TypeCastOp(i, dtype).output_tensors
def addBias(i, b, dim, name=None, dtype=None):
    """Add bias `b` to `i` along `dim`, casting the result to `dtype`."""
    g = get_default_graph()
    return typecast(AddBias(i, b, dim, name, dtype=dtype).output_tensors, dtype)
def conv2D(i, w, b, name=None, stride=None, pad='SAME', group=1, dtype=None):
    """2-D (optionally grouped) convolution of `i` with weights `w`, bias `b`."""
    g = get_default_graph()
    out = Convolution(i, w, b, name, stride=stride, pad=pad, group=group, dtype=dtype).output_tensors
    return typecast(out, dtype)
def maxPool(i, pooling_kernel, stride=(1,2,2,1), pad='VALID', name=None, dtype=None):
    """Max pooling over `i` with the given kernel, stride and padding."""
    g = get_default_graph()
    out = MaxPooling(i, pooling_kernel, name, stride=stride, pad=pad, dtype=dtype).output_tensors
    return typecast(out, dtype)
def flatten(i, name=None, dtype=None):
    """Flatten `i` via a Flatten op, casting the result to `dtype`."""
    g = get_default_graph()
    return typecast(Flatten(i, name).output_tensors, dtype)
def matmul(i, w, b, name=None, dtype=None):
    """Matrix product of `i` and `w` with bias `b`, cast to `dtype`."""
    g = get_default_graph()
    return typecast(MatMul(i, w, b, name=name, dtype=dtype).output_tensors, dtype)
def concat(data, concat_dim, name=None, dtype=None):
    """Concatenate the tensors in `data` along `concat_dim`."""
    return typecast(Concat(data, concat_dim, name, dtype=dtype).output_tensors, dtype)
def add(data, name=None, dtype=None):
    """Element-wise sum of the tensors in `data`."""
    return typecast(Add(data, name, dtype=dtype).output_tensors, dtype)
def globalAvgPool(data, name=None, dtype=None):
    """Global average pooling over the spatial dimensions of `data`."""
    return typecast(GlobalAvgPooling(data, name, dtype=dtype).output_tensors, dtype)
def batch_norm(data, mean, scale, eps=0.000001, name=None, dtype=None):
    """Batch normalization of `data` with fixed `mean` and `scale` tensors."""
    bn = BatchNorm(data, mean, scale, eps=eps, node_name=name, dtype=dtype)
    return typecast(bn.output_tensors, dtype)
def leakyReLU(data, name=None, alpha=0.1, dtype=None):
    """Leaky ReLU with slope `alpha` (wrapped in a tensor when given as a float)."""
    if not isinstance(alpha, Tensor):
        alpha = get_tensor(shape=(1), name='alpha', data=alpha)
    # Bug fix: the caller-supplied `name` was previously dropped
    # (node_name=None), making the `name` parameter dead.
    op = LeakyReLU(data, alpha, node_name=name)
    return typecast(op.output_tensors, dtype)
def reorg(data, reorg_kernel, name=None, dtype=None):
    """Space-to-depth reorganization of `data` with the given kernel."""
    return typecast(Reorg(data, reorg_kernel, name, dtype=dtype).output_tensors, dtype)
| [
"[email protected]"
] | |
74e26e09eea6b1c02032bef6283506cea981f66f | 5b11be48f06b6779fe073deb90dde14659543367 | /coloredlogs/tests.py | f160d525e80fffe8de65f5b542bdc121101ea1e4 | [
"MIT"
] | permissive | davliu/python-coloredlogs | 03a612a6d6dd72255d7cee98386bf74f7ed5a204 | 549decbfde404b475cb772e86bdb5091f9f1baff | refs/heads/master | 2020-05-20T18:31:58.680478 | 2015-06-02T19:03:23 | 2015-06-02T19:03:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,045 | py | # Automated tests for the `coloredlogs' package.
#
# Author: Peter Odding <[email protected]>
# Last Change: May 27, 2015
# URL: http://coloredlogs.readthedocs.org
# Standard library modules.
import logging
import random
import re
import string
import unittest
# External dependencies.
from humanfriendly.terminal import ansi_wrap
# The module we're testing.
import coloredlogs
import coloredlogs.converter
# External test dependency required to test support for custom log levels.
import verboselogs
# Compatibility with Python 2 and 3.
try:
# Python 2.
from StringIO import StringIO
except ImportError:
# Python 3.
from io import StringIO
# Compiled regular expression that matches a single line of output produced by
# ColoredStreamHandler (does not include matching of ANSI escape sequences).
# Named groups: date, time, hostname, logger_name, process_id, severity, message.
PLAIN_TEXT_PATTERN = re.compile(r'''
(?P<date> \d{4}-\d{2}-\d{2} )
\s (?P<time> \d{2}:\d{2}:\d{2} )
\s (?P<hostname> \S+ )
\s (?P<logger_name> \w+ )
\[ (?P<process_id> \d+ ) \]
\s (?P<severity> [A-Z]+ )
\s (?P<message> .* )
''', re.VERBOSE)
class ColoredLogsTestCase(unittest.TestCase):

    """Unit tests for the `coloredlogs` package."""

    def setUp(self):
        """Start each test from a known state."""
        # Reset global state.
        coloredlogs.install()
        coloredlogs.set_level(logging.INFO)
        # Reset local state.
        self.stream = StringIO()
        self.handler = coloredlogs.ColoredStreamHandler(stream=self.stream, isatty=False)
        # A random logger name keeps handlers from accumulating across tests.
        self.logger_name = ''.join(random.choice(string.ascii_letters) for i in range(25))
        self.logger = verboselogs.VerboseLogger(self.logger_name)
        self.logger.addHandler(self.handler)

    def test_is_verbose(self):
        """Make sure is_verbose() does what it should :-)."""
        assert coloredlogs.root_handler.level == logging.INFO
        assert not coloredlogs.is_verbose()
        coloredlogs.set_level(logging.VERBOSE)
        assert coloredlogs.is_verbose()

    def test_increase_verbosity(self):
        """Make sure increase_verbosity() respects default and custom levels."""
        assert coloredlogs.root_handler.level == logging.INFO
        coloredlogs.increase_verbosity()
        assert coloredlogs.root_handler.level == logging.VERBOSE
        coloredlogs.increase_verbosity()
        assert coloredlogs.root_handler.level == logging.DEBUG
        coloredlogs.increase_verbosity()
        assert coloredlogs.root_handler.level == logging.NOTSET
        coloredlogs.increase_verbosity()
        # Verbosity saturates at NOTSET.
        assert coloredlogs.root_handler.level == logging.NOTSET

    def test_decrease_verbosity(self):
        """Make sure decrease_verbosity() respects default and custom levels."""
        assert coloredlogs.root_handler.level == logging.INFO
        coloredlogs.decrease_verbosity()
        assert coloredlogs.root_handler.level == logging.WARNING
        coloredlogs.decrease_verbosity()
        assert coloredlogs.root_handler.level == logging.ERROR
        coloredlogs.decrease_verbosity()
        assert coloredlogs.root_handler.level == logging.CRITICAL
        coloredlogs.decrease_verbosity()
        # Verbosity saturates at CRITICAL.
        assert coloredlogs.root_handler.level == logging.CRITICAL

    def test_level_discovery(self):
        """Make sure find_defined_levels() always reports the levels defined in Python's standard library."""
        for number in (0, 10, 20, 30, 40, 50):
            assert number in coloredlogs.find_defined_levels()

    def test_missing_isatty_method(self):
        """Make sure ColoredStreamHandler() doesn't break because of a missing isatty() method."""
        # This should not raise any exceptions in the constructor.
        coloredlogs.ColoredStreamHandler(stream=object())

    def test_non_string_messages(self):
        """Make sure ColoredStreamHandler() doesn't break because of non-string messages."""
        # This should not raise any exceptions; all of these values can be cast to strings.
        for value in (True, False, 0, 42, (), []):
            self.logger.info(value)

    def test_plain_text_output_format(self):
        """Inspect the plain text output of coloredlogs."""
        # Test that filtering on severity works.
        self.handler.level = logging.INFO
        self.logger.debug("No one should see this message.")
        assert len(self.stream.getvalue().strip()) == 0
        # Test that the default output format looks okay in plain text.
        self.handler.level = logging.DEBUG
        for method, severity in ((self.logger.debug, 'DEBUG'),
                                 (self.logger.info, 'INFO'),
                                 (self.logger.verbose, 'VERBOSE'),
                                 (self.logger.warning, 'WARN'),
                                 (self.logger.error, 'ERROR'),
                                 (self.logger.critical, 'CRITICAL')):
            # Prepare the text.
            text = "This is a message with severity %r." % severity.lower()
            # Log the message with the given severity.
            method(text)
            # Get the line of output generated by the handler.
            output = self.stream.getvalue()
            lines = output.splitlines()
            last_line = lines[-1]
            assert text in last_line
            assert severity in last_line
            assert PLAIN_TEXT_PATTERN.match(last_line)

    def test_html_conversion(self):
        """Check conversion of ANSI escape sequences (and URLs) to HTML markup."""
        ansi_encoded_text = 'I like %s - www.eelstheband.com' % ansi_wrap('birds', bold=True, color='blue')
        assert ansi_encoded_text == 'I like \x1b[1;34mbirds\x1b[0m - www.eelstheband.com'
        html_encoded_text = coloredlogs.converter.convert(ansi_encoded_text)
        assert html_encoded_text == 'I like <span style="font-weight: bold; color: blue;">birds</span> - <a href="http://www.eelstheband.com" style="color: inherit;">www.eelstheband.com</a>'

    def test_output_interception(self):
        """Check that the output of an external command can be captured."""
        expected_output = 'testing, 1, 2, 3 ..'
        assert coloredlogs.converter.capture(['sh', '-c', 'echo -n %s' % expected_output]) == expected_output
| [
"[email protected]"
] | |
49a15a9cea2349452b2a8d4b6d3320a51ee31a06 | cd4f28ff0efa74889c4db8e91fb2caaebe37d592 | /data.py | 4fe388fae73f36ff04095a522803be9981dd224a | [] | no_license | quirell/CSOBO | fd59f4fac0f9f38c32afb7277d6a4c1ecc59a8ea | 73a98374cbf21920cecc9b19f77aee1624339769 | refs/heads/master | 2021-01-13T02:06:14.489103 | 2015-05-24T00:39:16 | 2015-05-24T00:39:16 | 33,379,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,948 | py | __author__ = 'quirell'
import os
import re
class TestCase:
    """A single QAP (quadratic assignment problem) test instance.

    Attributes (docstring translated from Polish):
        fullname: name of the test case (file name without extension).
        testname: name of the group the case belongs to,
            see http://anjos.mgi.polymtl.ca/qaplib/inst.html
        value: best (minimal) known solution value, or None until loaded.
        solution: 0-based permutation achieving that value, or None.
        distance, flow: the QAP distance and flow matrices.
    """

    # Directories holding .dat instances and .sln best-known solutions;
    # configured by Data below.
    datapath = ""
    solutionspath = ""

    def __init__(self, name):
        self.fullname = name
        # Group name = leading alphabetic prefix of the case name.
        self.testname = re.match(r"([a-zA-Z]+).*", name).group(1)
        self.value = self.flow = self.distance = self.solution = None
        self.size = 0

    def load(self):
        """Read the .dat matrices and, when present, the .sln best solution."""
        with open(TestCase.datapath + "/" + self.fullname + ".dat") as f:
            self.size = int(f.readline())
            # Skip blank lines before the flow matrix.
            line = "\n"
            while line == "\n":
                line = f.readline()
            flow = []
            for _ in xrange(self.size):
                flow.append([int(i) for i in line.split()])
                # A matrix row may span several physical lines.
                while len(flow[-1]) != self.size:
                    line = f.readline()
                    flow[-1].extend([int(i) for i in line.split()])
                line = f.readline()
            # Skip blank lines before the distance matrix.
            while line == "\n":
                line = f.readline()
            distance = []
            for _ in xrange(self.size):
                distance.append([int(i) for i in line.split()])
                while len(distance[-1]) != self.size:
                    line = f.readline()
                    distance[-1].extend([int(i) for i in line.split()])
                line = f.readline()
        solution = None
        if os.path.isfile(TestCase.solutionspath + "/" + self.fullname + ".sln"):
            with open(TestCase.solutionspath + "/" + self.fullname + ".sln") as f:
                line = f.readline()
                _, self.value = line.split()
                self.value = int(self.value)
                solution = []
                for line in f:
                    if "," in line:
                        solution.extend([int(i.strip()) for i in line.split(",") if i.strip().isdigit()])
                    else:
                        solution.extend([int(i.strip()) for i in line.split()])
        self.flow = flow
        self.distance = distance
        if solution:
            # Convert from the file's 1-based indices to 0-based.
            self.solution = [i - 1 for i in solution]

    def solutionavailable(self):
        """Return True once a best-known solution has been loaded."""
        return self.solution is not None

    def __str__(self):
        # Bug fix: concatenating str with int (size/value) raised TypeError.
        return self.fullname + " size: " + str(self.size) + " value: " + str(self.value)
class Data:
    """Locates QAP instance files and builds TestCase objects for them."""

    def __init__(self):
        self.datapath = "data"
        self.solutionspath = "solutions"
        # TestCase instances resolve their files through these class attributes.
        TestCase.datapath = self.datapath
        TestCase.solutionspath = self.solutionspath

    def gettestcases(self):
        """Return an (unloaded) TestCase for every file in the data directory."""
        return [TestCase(filename[:-4]) for filename in os.listdir(self.datapath)]
| [
"="
] | = |
746cf426cb0daad0ecbfe251adf903b1597644cb | 01b8229a1adbc8149e4226c81e31e56e0598b87b | /python/emp.py | 8904efc26c20d0c856d6e55dda26f62c425d2f7a | [] | no_license | Amel294/amel | 660f4cbd16f58d89a624bc90beeb791d239c6959 | e54fd091a4bff436fe94a9901ca9b7c189b6824d | refs/heads/master | 2023-05-10T16:56:54.039667 | 2023-05-08T16:57:18 | 2023-05-08T16:57:18 | 160,795,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | class employee:
def add(self):
self.name=raw_input('\nenter employee name:\t')
self.nuber=input('\nemployee number:\t')
self.salary=input('\nenter salary:\t')
def show(self):
print('\nname=',self.name,'\n')
print('e number=',self.nuber,'\n')
print('salary=',self.salary,'\n')
# Demo: create one employee record, read its details, then display them.
x=employee()
x.add()
x.show()
| [
"[email protected]"
] | |
bebb96c64a6feaa14c97cbda198522f712b111c1 | d525935af3c80584fb2175623591a1fc86349db5 | /Problems/Process integer input/task.py | 929699ff539661399dfca3f35156baba73cba1c6 | [] | no_license | TonyNewbie/CoffeeMachine | 63822ffdec8570166ebf44c0ffe51bfa14d33810 | 319c41189ede6a2e6e33bd15ae675101c3377b62 | refs/heads/master | 2022-04-22T13:23:47.904126 | 2020-04-26T07:25:44 | 2020-04-26T07:25:44 | 258,960,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | number = int(input())
while number < 101:
if number < 10:
number = int(input())
continue
print(number)
number = int(input())
| [
"[email protected]"
] | |
52962265003f52013b4546cc3dad4e69fe53f7fb | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/apimanagement/latest/get_group.py | 06ab9558e9ca04569a59178cba640eb3db562465 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 4,720 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
# Public API of this generated module.
__all__ = [
    'GetGroupResult',
    'AwaitableGetGroupResult',
    'get_group',
]
@pulumi.output_type
class GetGroupResult:
    """
    Contract details.
    """
    def __init__(__self__, built_in=None, description=None, display_name=None, external_id=None, name=None, type=None):
        # Generated code: each argument is type-checked (when truthy) and
        # stored via pulumi.set; the @property getters below read the values
        # back with pulumi.get.
        if built_in and not isinstance(built_in, bool):
            raise TypeError("Expected argument 'built_in' to be a bool")
        pulumi.set(__self__, "built_in", built_in)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if external_id and not isinstance(external_id, str):
            raise TypeError("Expected argument 'external_id' to be a str")
        pulumi.set(__self__, "external_id", external_id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="builtIn")
    def built_in(self) -> bool:
        """
        true if the group is one of the three system groups (Administrators, Developers, or Guests); otherwise false.
        """
        return pulumi.get(self, "built_in")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        Group description. Can contain HTML formatting tags.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        Group name.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter(name="externalId")
    def external_id(self) -> Optional[str]:
        """
        For external groups, this property contains the id of the group from the external identity provider, e.g. for Azure Active Directory `aad://<tenant>.onmicrosoft.com/groups/<group object id>`; otherwise the value is null.
        """
        return pulumi.get(self, "external_id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type for API Management resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetGroupResult(GetGroupResult):
    # Awaitable wrapper so the already-resolved result can also be used with
    # `await`; the generator body never actually yields.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetGroupResult(
            built_in=self.built_in,
            description=self.description,
            display_name=self.display_name,
            external_id=self.external_id,
            name=self.name,
            type=self.type)
def get_group(group_id: Optional[str] = None,
              resource_group_name: Optional[str] = None,
              service_name: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupResult:
    """
    Use this data source to access information about an existing resource.

    :param str group_id: Group identifier. Must be unique in the current API Management service instance.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    """
    __args__ = dict()
    __args__['groupId'] = group_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Invoke the provider function and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/latest:getGroup', __args__, opts=opts, typ=GetGroupResult).value

    return AwaitableGetGroupResult(
        built_in=__ret__.built_in,
        description=__ret__.description,
        display_name=__ret__.display_name,
        external_id=__ret__.external_id,
        name=__ret__.name,
        type=__ret__.type)
| [
"[email protected]"
] | |
8bd8d6553e25d404d1919bb14a246ace10a097c8 | a1a57977131ea917a3f3094dae4a3d18846103c0 | /tests_auto/2d/quad4/axialdisp_gendb.py | d81640af303b4b9322f9a47bbcf8322f0d52db21 | [
"MIT"
] | permissive | rwalkerlewis/pylith | cef02d5543e99a3e778a1c530967e6b5f1d5dcba | 8d0170324d3fcdc5e6c4281759c680faa5dd8d38 | refs/heads/master | 2023-08-24T18:27:30.877550 | 2020-08-05T16:37:28 | 2020-08-05T16:37:28 | 154,047,591 | 0 | 0 | MIT | 2018-10-21T20:05:59 | 2018-10-21T20:05:59 | null | UTF-8 | Python | false | false | 2,319 | py | #!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file tests/2d/quad4/axialdisp_gendb.py
##
## @brief Python script to generate spatial database with displacement
## boundary conditions for the axial displacement test.
import numpy
class GenerateDB(object):
    """
    Python object to generate spatial database with displacement
    boundary conditions for the axial displacement test.
    """

    def __init__(self):
        """
        Constructor.
        """
        return

    def run(self):
        """
        Generate the database.
        """
        # Domain: regular grid spanning [-4000, 4000] m with 1 km spacing.
        x = numpy.arange(-4000.0, 4000.1, 1000.0)
        y = numpy.arange(-4000.0, 4000.1, 1000.0)
        npts = x.shape[0]

        # Build the full (npts**2, 2) array of grid-point coordinates.
        xx = x * numpy.ones( (npts, 1), dtype=numpy.float64)
        yy = y * numpy.ones( (npts, 1), dtype=numpy.float64)
        xy = numpy.zeros( (npts**2, 2), dtype=numpy.float64)
        xy[:,0] = numpy.ravel(xx)
        xy[:,1] = numpy.ravel(numpy.transpose(yy))

        # Evaluate the analytical displacement solution at the grid points.
        from axialdisp_soln import AnalyticalSoln
        soln = AnalyticalSoln()
        disp = soln.displacement(xy)

        # 2-D Cartesian coordinate system for the spatial database.
        from spatialdata.geocoords.CSCart import CSCart
        cs = CSCart()
        cs.inventory.spaceDim = 2
        cs._configure()
        data = {'points': xy,
                'coordsys': cs,
                'data_dim': 2,
                'values': [{'name': "displacement-x",
                            'units': "m",
                            'data': numpy.ravel(disp[0,:,0])},
                           {'name': "displacement-y",
                            'units': "m",
                            'data': numpy.ravel(disp[0,:,1])}]}

        # Write the database in SimpleDB ASCII format.
        from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii
        io = SimpleIOAscii()
        io.inventory.filename = "axial_disp.spatialdb"
        io._configure()
        io.write(data)
        return
# ======================================================================
# Run the generator when invoked as a script.
if __name__ == "__main__":
    app = GenerateDB()
    app.run()
# End of file
| [
"[email protected]"
] | |
1e175f5fbd461cacef895e6d82085207e7b62938 | 02ea99ea65d4768781b59ac97082fa7e1763711c | /neural_structured_learning/estimator/adversarial_regularization.py | a63a7ad858f2c30b8260beef2ba03ee89b88acf5 | [
"Apache-2.0"
] | permissive | Nzteb/neural-structured-learning | cad8a94b7b7d0d77eb0d0dca584398d749ff4ca6 | 0d50227f01556b1cebbd841496f5d1c9c4ef4a61 | refs/heads/master | 2022-11-15T22:10:32.112476 | 2020-07-09T00:13:09 | 2020-07-09T00:13:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,326 | py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper function to enable adversarial regularization to an Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import neural_structured_learning.configs as nsl_configs
import neural_structured_learning.lib as nsl_lib
import tensorflow as tf
def add_adversarial_regularization(estimator,
                                   optimizer_fn=None,
                                   adv_config=None):
  """Adds adversarial regularization to a `tf.estimator.Estimator`.

  The returned estimator will include the adversarial loss as a regularization
  term in its training objective, and will be trained using the optimizer
  provided by `optimizer_fn`. `optimizer_fn` (along with the hyperparameters)
  should be set to the same one used in the base `estimator`.

  If `optimizer_fn` is not set, a default optimizer `tf.train.AdagradOptimizer`
  with `learning_rate=0.05` will be used.

  Args:
    estimator: A `tf.estimator.Estimator` object, the base model.
    optimizer_fn: A function that accepts no arguments and returns an instance
      of `tf.train.Optimizer`. This optimizer (instead of the one used in
      `estimator`) will be used to train the model. If not specified, default to
      `tf.train.AdagradOptimizer` with `learning_rate=0.05`.
    adv_config: An instance of `nsl.configs.AdvRegConfig` that specifies various
      hyperparameters for adversarial regularization.

  Returns:
    A modified `tf.estimator.Estimator` object with adversarial regularization
    incorporated into its loss.
  """
  if not adv_config:
    adv_config = nsl_configs.AdvRegConfig()
  # Keep a reference to the base estimator's graph-building function; the
  # wrapper below delegates to it.  `_model_fn` is private Estimator API,
  # hence the lint suppression.
  base_model_fn = estimator._model_fn  # pylint: disable=protected-access
  # Determine which optional arguments ('params', 'config') the base model_fn
  # accepts, so only those are forwarded when calling it.
  try:
    base_model_fn_args = inspect.signature(base_model_fn).parameters.keys()
  except AttributeError:  # For Python 2 compatibility
    base_model_fn_args = inspect.getargspec(base_model_fn).args  # pylint: disable=deprecated-method

  def adv_model_fn(features, labels, mode, params=None, config=None):
    """The adversarial-regularized model_fn.

    Args:
      features: This is the first item returned from the `input_fn` passed to
        `train`, `evaluate`, and `predict`. This should be a single `tf.Tensor`
        or `dict` of same.
      labels: This is the second item returned from the `input_fn` passed to
        `train`, `evaluate`, and `predict`. This should be a single `tf.Tensor`
        or dict of same (for multi-head models). If mode is
        `tf.estimator.ModeKeys.PREDICT`, `labels=None` will be passed. If the
        `model_fn`'s signature does not accept `mode`, the `model_fn` must still
        be able to handle `labels=None`.
      mode: Optional. Specifies if this is training, evaluation, or prediction.
        See `tf.estimator.ModeKeys`.
      params: Optional `dict` of hyperparameters. Will receive what is passed to
        Estimator in the `params` parameter. This allows users to configure
        Estimators from hyper parameter tuning.
      config: Optional `estimator.RunConfig` object. Will receive what is passed
        to Estimator as its `config` parameter, or a default value. Allows
        setting up things in the model_fn based on configuration such as
        `num_ps_replicas`, or `model_dir`. Unused currently.

    Returns:
      A `tf.estimator.EstimatorSpec` with adversarial regularization.
    """
    # Parameters 'params' and 'config' are optional. If they are not passed,
    # then it is possible for base_model_fn not to accept these arguments.
    # See documentation for tf.estimator.Estimator for additional context.
    kwargs = {'mode': mode}
    if 'params' in base_model_fn_args:
      kwargs['params'] = params
    if 'config' in base_model_fn_args:
      kwargs['config'] = config
    base_fn = functools.partial(base_model_fn, **kwargs)
    # Uses the same variable scope for calculating the original objective and
    # adversarial regularization.
    with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope(),
                                     reuse=tf.compat.v1.AUTO_REUSE,
                                     auxiliary_name_scope=False):
      original_spec = base_fn(features, labels)
      # Adversarial regularization only happens in training.
      if mode != tf.estimator.ModeKeys.TRAIN:
        return original_spec
      adv_neighbor, _ = nsl_lib.gen_adv_neighbor(
          features,
          original_spec.loss,
          adv_config.adv_neighbor_config,
          # The pgd_model_fn is a dummy identity function since loss is
          # directly available from spec_fn.
          pgd_model_fn=lambda features: features,
          pgd_loss_fn=lambda labels, features: base_fn(features, labels).loss,
          pgd_labels=labels)
      # Runs the base model again to compute loss on adv_neighbor.
      adv_spec = base_fn(adv_neighbor, labels)
      final_loss = original_spec.loss + adv_config.multiplier * adv_spec.loss
      if not optimizer_fn:
        # Default to the Adagrad optimizer, the same as canned DNNEstimator.
        optimizer = tf.train.AdagradOptimizer(learning_rate=0.05)
      else:
        optimizer = optimizer_fn()
      train_op = optimizer.minimize(
          loss=final_loss, global_step=tf.compat.v1.train.get_global_step())
      # Ops registered in the UPDATE_OPS collection (e.g. batch-norm moving
      # average updates) must run together with the train op.
      update_ops = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.UPDATE_OPS)
      if update_ops:
        train_op = tf.group(train_op, *update_ops)
      return original_spec._replace(loss=final_loss, train_op=train_op)

  # Replaces the model_fn while keeps other fields/methods in the estimator.
  estimator._model_fn = adv_model_fn  # pylint: disable=protected-access
  return estimator
| [
"[email protected]"
] | |
6f0e144c9f2150d6ed2b247269a15962705f55d8 | f98c9dea0e212be5c7bc3161499e5633383bd4d7 | /zmq/zmq_client.py | a27214a3bfe54b37c5d0b7c49f3ab8e11e63d9e0 | [
"MIT"
] | permissive | ysoftman/test_code | dddb5bee3420977bfa335320a09d66e5984403f5 | 0bf6307073081eeb1d654a1eb5efde44a0bdfe1e | refs/heads/master | 2023-08-17T05:45:49.716829 | 2023-08-16T05:00:09 | 2023-08-16T05:00:09 | 108,200,568 | 4 | 0 | MIT | 2023-03-15T04:23:10 | 2017-10-25T00:49:26 | C++ | UTF-8 | Python | false | false | 713 | py | #-*- coding: utf-8 -*-
# 20160422 ysoftman
# pyzmq (python3.x) client
import zmq
import sys
def send_req(ip, port):
    """Send ten request messages to a ZeroMQ REP server and print replies.

    Python 3 source strings are unicode, but zmq's send() requires raw
    bytes, so the payload is built as a bytes literal.
    """
    ctx = zmq.Context()
    sock = ctx.socket(zmq.REQ)
    sock.connect("tcp://%s:%s" % (ip, port))
    payload = b'hello'
    for seq in range(10):
        sock.send(payload)
        print("send %s to server. [%d]" % (payload, seq))
        answer = sock.recv()
        print("reply %s from server.[%d]" % (answer, seq))
print("reply %s from server.[%d]" % (reply, i))
# Manual test entry point: expects a REP server listening on localhost:55555.
if __name__ == "__main__":
    print("start testing...")
    send_req("127.0.0.1", "55555")
| [
"[email protected]"
] | |
7f0537ded18f3ab59f3f0455f9f3074597b23440 | 377e3a552fb807febc18ce036af77edbce93ca19 | /binary trees/inorder_traversal_DFS.py | 47831f96111d76a9cebf1a030c565db26ac4a083 | [] | no_license | souravs17031999/100dayscodingchallenge | 940eb9b6d6037be4fc0dd5605f9f808614085bd9 | d05966f3e6875a5ec5a8870b9d2627be570d18d9 | refs/heads/master | 2022-10-29T11:05:46.762554 | 2022-09-28T13:04:32 | 2022-09-28T13:04:32 | 215,993,823 | 44 | 12 | null | 2022-08-18T14:58:50 | 2019-10-18T09:55:03 | Python | UTF-8 | Python | false | false | 2,714 | py | # Program for inorder traversal for a binary tree
# --------------------------------
# As we know inorder traversal means, Left-Node-Right
# We can take example for follwoing tree and visualize stack call :
# 1
# / \
# 2 3
# / \
# 4 5
#
# RECURSIVE APPROACH
# --------------------------------
# TIME : 0(N), SPACE : NOT CONSTANT, DUE TO RECURSIVE CALLS.
# WE should also try to write iterative solution, because there might
# be some case where stack recursion depth limit is exceeded due to not
# enough memory available or due to system limit on recursion calls.
# ---------------------------------------
# ITERATIVE SOLTUION :
#Push the current node to S and set current = current->left until current is NULL
# If current is NULL and stack is not empty then
# * Pop the top item from stack.
# * Print the popped item, set current = popped_item->right
# * Go to step 3.
# If current is NULL and stack is empty then we are done.
# ---------------------------------------------
# TIME : 0(N), SPACE : 0(N) WHERE N IS THE NUMBER OF NODES IN THE TREE.
# ----------------------------------------------
# we can also optimized more on space complexity part by not using any
# stack or recursion, named as "MORRIS TRAVERSAL" which is described in
# MORRIS_traversal.py in a separate program.
# ----------------------------------------------
class Node:
    """A single binary-tree node: one payload value plus two child links."""

    def __init__(self, val):
        # Children start empty; callers wire up the tree explicitly.
        self.data = val
        self.left = self.right = None
# inorder recursive
def inorder_rec(root):
    """Recursive DFS inorder traversal (left, node, right).

    Prints each node's value followed by a single space; a None root
    prints nothing.
    """
    if root is not None:
        inorder_rec(root.left)
        print(root.data, end=" ")
        inorder_rec(root.right)
# ITERATIVE SOLUTION :
from collections import deque
def inorder_itr(root):
    """Iterative inorder traversal using an explicit stack.

    Descends left pushing nodes onto the stack; on each pop the node's
    value is printed (space separated) and traversal continues into the
    right subtree.  Equivalent output to `inorder_rec`, but with O(h)
    explicit stack space instead of recursion.
    """
    if root is None:
        return
    pending = deque()
    node = root
    # Loop while there is either a node to descend into or one to unwind.
    while pending or node:
        if node:
            pending.append(node)
            node = node.left
        else:
            node = pending.pop()
            print(node.data, end=" ")
            node = node.right
# driver test function
if __name__ == '__main__':
    # Builds the sample tree from the header diagram:
    #        1
    #       / \
    #      2   3
    #     / \
    #    4   5
    # Expected inorder output: 4 2 5 1 3
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.left.right = Node(5)
    #inorder_rec(root)
    inorder_itr(root)
| [
"[email protected]"
] | |
5491ad295a53569c3f8bf53561b41e6351e48c37 | a56252fda5c9e42eff04792c6e16e413ad51ba1a | /resources/usr/lib/python2.7/dist-packages/numpy/distutils/tests/f2py_ext/tests/test_fib2.py | fdab4abd60cb7434c08186156c7da74a5cebbda2 | [
"Apache-2.0"
] | permissive | edawson/parliament2 | 4231e692565dbecf99d09148e75c00750e6797c4 | 2632aa3484ef64c9539c4885026b705b737f6d1e | refs/heads/master | 2021-06-21T23:13:29.482239 | 2020-12-07T21:10:08 | 2020-12-07T21:10:08 | 150,246,745 | 0 | 0 | Apache-2.0 | 2019-09-11T03:22:55 | 2018-09-25T10:21:03 | Python | UTF-8 | Python | false | false | 88 | py | ../../../../../../../../share/pyshared/numpy/distutils/tests/f2py_ext/tests/test_fib2.py | [
"[email protected]"
] | |
da5ef56bfccc88c74e51c8cf0376e38e3a3ca319 | 7626a8371c7a847f93bdae5e1d6e03ee9667c3ba | /print/users/migrations/0001_initial.py | 08083d447c75319b2a232ce671351aac48b5e516 | [] | no_license | zzyzx4/sp | 52c815fd115b4605942baa73687838f64cd41864 | 90c7a90b3de27af674422e2c8892bad5ba7891e8 | refs/heads/master | 2020-05-23T21:20:28.166932 | 2019-07-19T11:56:49 | 2019-07-19T11:56:49 | 186,950,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # Generated by Django 2.2.2 on 2019-06-24 15:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by `manage.py makemigrations` (Django 2.2.2).  Creates
    # the Profile model that attaches an avatar image one-to-one to each
    # auth user.  Avoid hand-editing generated operations.

    initial = True

    dependencies = [
        # Profile references the (possibly swapped-out) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
946f6acb38c734fdff7ed61463b00a60f7de3399 | f0316e656767cf505b32c83eef4df13bb9f6b60c | /Kattis/qaly.py | 86b1b75314997d2f525dc10e31ebf5f2bd66f855 | [] | no_license | AkshdeepSharma/Classroom | 70ec46b35fab5fc4a9d2eac430659d7dafba93da | 4e55799466c101c736de6c7e07d716ff147deb83 | refs/heads/master | 2022-06-13T18:14:03.236503 | 2022-05-17T20:16:28 | 2022-05-17T20:16:28 | 94,828,359 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | N = int(input())
qaly = 0
for i in range(N):
nums = input().split(" ")
qaly += float(nums[0]) * float(nums[1])
print(round(qaly, 3))
| [
"[email protected]"
] | |
a332e92186cd5002d1095263b0a5abaae4af5d37 | 3c27b86f0165ab24e6b04d505e8471e032594f0b | /pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GLES1/OES/texture_cube_map.py | 1f616ca628a9790612c0acccc4ae8f17355fac0e | [
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LGPL-2.1-or-later",
"GPL-3.0-only",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"MIT"
] | permissive | alexus37/AugmentedRealityChess | 8b9ccdfffc8aee93a86a44b8ef53c034ec6a10d1 | 7f600ad153270feff12aa7aa86d7ed0a49ebc71c | refs/heads/master | 2020-12-24T13:29:21.967833 | 2020-02-27T09:38:50 | 2020-02-27T09:38:50 | 31,264,034 | 1 | 1 | MIT | 2020-02-27T09:38:52 | 2015-02-24T14:36:34 | Python | UTF-8 | Python | false | false | 2,394 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES1 import _types as _cs
# End users want this...
from OpenGL.raw.GLES1._types import *
from OpenGL.raw.GLES1 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES1_OES_texture_cube_map'
# Wrap a raw extension entry point as a GLES1 platform function with the
# standard OpenGL error checker attached.  (This module is autogenerated --
# see the header -- so fixes belong in the generating script.)
def _f( function ):
    return _p.createFunction( function,_p.PLATFORM.GLES1,'GLES1_OES_texture_cube_map',error_checker=_errors._error_checker)
# Enumerant constants for the OES_texture_cube_map extension; names and hex
# values mirror the Khronos XML registry.
GL_MAX_CUBE_MAP_TEXTURE_SIZE_OES=_C('GL_MAX_CUBE_MAP_TEXTURE_SIZE_OES',0x851C)
GL_NORMAL_MAP_OES=_C('GL_NORMAL_MAP_OES',0x8511)
GL_REFLECTION_MAP_OES=_C('GL_REFLECTION_MAP_OES',0x8512)
GL_TEXTURE_BINDING_CUBE_MAP_OES=_C('GL_TEXTURE_BINDING_CUBE_MAP_OES',0x8514)
GL_TEXTURE_CUBE_MAP_NEGATIVE_X_OES=_C('GL_TEXTURE_CUBE_MAP_NEGATIVE_X_OES',0x8516)
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_OES=_C('GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_OES',0x8518)
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_OES=_C('GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_OES',0x851A)
GL_TEXTURE_CUBE_MAP_OES=_C('GL_TEXTURE_CUBE_MAP_OES',0x8513)
GL_TEXTURE_CUBE_MAP_POSITIVE_X_OES=_C('GL_TEXTURE_CUBE_MAP_POSITIVE_X_OES',0x8515)
GL_TEXTURE_CUBE_MAP_POSITIVE_Y_OES=_C('GL_TEXTURE_CUBE_MAP_POSITIVE_Y_OES',0x8517)
GL_TEXTURE_CUBE_MAP_POSITIVE_Z_OES=_C('GL_TEXTURE_CUBE_MAP_POSITIVE_Z_OES',0x8519)
GL_TEXTURE_GEN_MODE_OES=_C('GL_TEXTURE_GEN_MODE_OES',0x2500)
GL_TEXTURE_GEN_STR_OES=_C('GL_TEXTURE_GEN_STR_OES',0x8D60)
# ctypes-typed stubs for the extension's texture-coordinate-generation entry
# points.  The @_p.types decorator records (return type, argument types);
# the @_f wrapper supplies the actual native implementation at load time, so
# the Python bodies are intentionally `pass`.
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glGetTexGenfvOES(coord,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetTexGenivOES(coord,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,ctypes.POINTER(_cs.GLfixed))
def glGetTexGenxvOES(coord,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLfloat)
def glTexGenfOES(coord,pname,param):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLfloatArray)
def glTexGenfvOES(coord,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLint)
def glTexGeniOES(coord,pname,param):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glTexGenivOES(coord,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLfixed)
def glTexGenxOES(coord,pname,param):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,ctypes.POINTER(_cs.GLfixed))
def glTexGenxvOES(coord,pname,params):pass
"[email protected]"
] | |
fc32d33d92acf211a927ee9591a9c2e2c794716f | faa83048d2bb62c27f030942f3f038f87637c293 | /indico/core/oauth/protector.py | 29693821d8a79e3bc2dcaeb25d629331dcedddbf | [
"MIT"
] | permissive | janschill/indico | f79536db43afaf631449fef5119069af2938e76d | 068a947446eb624308d6264e34a4061807e6ff12 | refs/heads/master | 2023-06-08T07:32:33.007683 | 2021-06-18T12:42:03 | 2021-06-18T12:42:03 | 339,700,154 | 0 | 0 | MIT | 2021-06-18T12:42:04 | 2021-02-17T11:22:48 | Python | UTF-8 | Python | false | false | 3,105 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import flask
from authlib.integrations.flask_oauth2 import ResourceProtector
from authlib.oauth2.rfc6750.validator import BearerTokenValidator
from flask import after_this_request, jsonify
from werkzeug.exceptions import HTTPException
from indico.core.db import db
from indico.core.oauth.models.applications import SystemAppType
from indico.core.oauth.models.tokens import OAuthToken
from indico.core.oauth.util import query_token
from indico.util.date_time import now_utc
class IndicoAuthlibHTTPError(HTTPException):
    """Adapts an authlib OAuth error into a werkzeug/Flask HTTP error.

    Authlib reports errors as (status, payload, headers) triples; wrapping
    them in an HTTPException with a pre-built JSON response lets Flask's
    normal error handling emit them unchanged.
    """

    def __init__(self, status_code, payload, headers):
        # 'error_description' is optional in OAuth error payloads; fall back
        # to the mandatory 'error' code for the exception message.
        super().__init__(payload.get('error_description') or payload['error'])
        resp = jsonify(payload)
        resp.headers.update(headers)
        resp.status_code = status_code
        # werkzeug serves `self.response` verbatim instead of rendering its
        # default HTML error page.
        self.response = resp
class IndicoResourceProtector(ResourceProtector):
    """Resource protector that raises JSON HTTP errors and supports a
    legacy query-string token for specific clients."""

    def raise_error_response(self, error):
        # Convert authlib's error object into an exception carrying a
        # ready-made JSON response (see IndicoAuthlibHTTPError).
        payload = dict(error.get_body())
        headers = error.get_headers()
        raise IndicoAuthlibHTTPError(error.status_code, payload, headers)

    def parse_request_authorization(self, request):
        # Legacy clients may pass the token as `?access_token=...`; honor it
        # only when no bearer Authorization header is present, so the
        # standard header-based flow always takes precedence.  The token is
        # then checked by the 'legacy_qs' validator, which restricts this
        # insecure transport to the checkin system app.
        access_token_querystring = flask.request.args.get('access_token')
        if access_token_querystring and not request.headers.get('Authorization', '').lower().startswith('bearer '):
            validator = self.get_token_validator('legacy_qs')
            return validator, access_token_querystring
        return super().parse_request_authorization(request)
class IndicoBearerTokenValidator(BearerTokenValidator):
    """Validator for standard `Authorization: Bearer` tokens.

    In addition to the stock authlib checks, it stamps the token's
    ``last_used_dt`` once the current request finishes.
    """

    def authenticate_token(self, token_string):
        # Delegates the lookup to the project's `query_token` helper.
        return query_token(token_string)

    def validate_token(self, token, scopes):
        super().validate_token(token, scopes)
        # if we get here, the token is valid so we can mark it as used at the end of the request
        # XXX: should we wait or do it just now? even if the request failed for some reason, the
        # token could be considered used, since it was valid and most likely used by a client who
        # expected to do something with it...
        token_id = token.id  # avoid DetachedInstanceError in the callback
        @after_this_request
        def _update_last_use(response):
            # A throwaway session is used because the request may run after
            # the main session for this request has been torn down.
            with db.tmp_session() as sess:
                # do not modify `token` directly, it's attached to a different session!
                sess.query(OAuthToken).filter_by(id=token_id).update({OAuthToken.last_used_dt: now_utc()})
                sess.commit()
            return response
class IndicoLegacyQueryStringBearerTokenValidator(IndicoBearerTokenValidator):
    """Validator for tokens passed (insecurely) via the query string."""

    TOKEN_TYPE = 'legacy_qs'

    def authenticate_token(self, token_string):
        token = super().authenticate_token(token_string)
        if token and token.application.system_app_type == SystemAppType.checkin:
            # Only the checkin app is allowed to pass tokens insecurely via query string
            return token
        # Implicitly returns None (authentication failure) for any other app.
| [
"[email protected]"
] | |
78b44ed568709f7982622043bd51601faeed8ab8 | 4791bde7bb7275fc25480fdf0cd81d1a9450a50c | /accounts/views.py | 2cd377f5f3cbd97f9432c0c428c386f474093f90 | [] | no_license | VinneyJ/RU-I-tech-app | 9692f0681dd704ce52c621b3d080d1a90fbe501b | dd15335f26a35d8e32477e7dd384f3a80351d25d | refs/heads/master | 2020-05-02T21:04:17.773867 | 2019-03-28T13:25:18 | 2019-03-28T13:25:18 | 178,209,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,070 | py | from django.shortcuts import render, redirect
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, logout
from accounts.form import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
from .models import Profile
# Create your views here.
def signup_view(request):
    """Register a new user and immediately log them in.

    GET renders an empty registration form; a valid POST creates the
    user, starts a session for them, and redirects to the article list.
    An invalid POST re-renders the bound form with its errors.
    """
    if request.method != 'POST':
        return render(request, 'accounts/signup.html',
                      {'form': UserRegisterForm()})
    form = UserRegisterForm(request.POST)
    if form.is_valid():
        user = form.save()
        #log user in
        login(request, user)
        return redirect('articles:list')
    return render(request, 'accounts/signup.html', {'form': form})
def login_view(request):
    """Authenticate a user via Django's stock AuthenticationForm.

    On success, honours an optional ``next`` field posted by the login
    form (so the user returns to the page that sent them here); otherwise
    redirects to the article list.  GET and invalid POST both render the
    login template.
    """
    if request.method != 'POST':
        return render(request, 'accounts/login.html',
                      {'form': AuthenticationForm()})
    form = AuthenticationForm(data=request.POST)
    if form.is_valid():
        login(request, form.get_user())
        if 'next' in request.POST:
            return redirect(request.POST.get('next'))
        return redirect('articles:list')
    return render(request, 'accounts/login.html', {'form': form})
@login_required
def profile_view(request):
    """Show and update the logged-in user's account and profile data.

    Renders two forms side by side: account fields (username/email) and
    profile fields (including the uploaded avatar image, hence
    ``request.FILES``).  On a valid POST both are saved and the user is
    redirected back to the profile page.
    """
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST,
                                   request.FILES,
                                   instance=request.user.profile)
        # Bug fix: `u_form.is_valid` (without parentheses) is a bound method
        # and therefore always truthy, so invalid user data could be saved.
        # Both validators must actually be called.
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            #ENTER MESSAGES HERE
            return redirect('accounts:profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    return render(request, 'accounts/profile.html', {'u_form': u_form, 'p_form': p_form})
def logout_view(request):
    """Log the user out on POST and return to the article list.

    Bug fix: non-POST requests previously fell through and returned None,
    which makes Django raise "The view didn't return an HttpResponse".
    They now skip the logout but still redirect.
    """
    if request.method == 'POST':
        logout(request)
    return redirect('articles:list')
| [
"[email protected]"
] | |
86c0cb10d29d06dcf7aa7a311986ac1f5d219e7e | 2420eab92b5d1ec2225d2eeb128a41e3c7b1ce38 | /11/JackTonenizer.py | b7a45cc64a958be663d46d4e26cc25dc8127e138 | [] | no_license | hokiepete/building-a-modern-computer-from-first-principles | f98549a5d32ff23d4eab3b089324e61dac22841a | bba0d437b10ba5b5f6861067b3a0ba4ac14ef447 | refs/heads/master | 2023-08-17T21:38:44.589836 | 2021-10-24T01:41:56 | 2021-10-24T01:41:56 | 385,073,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,155 | py | # Main program that sets up and invokes the other modules
import os
# Token classification tables for the Jack language (nand2tetris, ch. 10).
KEYWORDS = {
    'class', 'constructor', 'function', 'method', 'field', 'static', 'var',
    'int', 'char', 'boolean', 'void', 'true', 'false', 'null', 'this',
    'let', 'do', 'if', 'else', 'while', 'return',
}
SYMBOL = {
    '{', '}', '(', ')', '[', ']', '.', ',', ';', '+', '-', '*', '/',
    '&', '|', '<', '>', '=', '~',
}
# XML escapes for symbols that are not valid as-is in the output, plus quote
# normalization.  Bug fix: restored proper XML entities -- the previous
# literal values (e.g. '<' mapped to '<') produced invalid tokenizer XML.
SUBS = {'<': '&lt;', '>': '&gt;', '\'': '&quot;', '\"': '&quot;', '&': '&amp;'}


class JackTokenizer:
    """Tokenizer for the Jack language.

    Given raw Jack source text, strips comments (`clean_lines`), splits the
    remaining code into tokens (`tokenize`) and produces the
    ``<tokens>...</tokens>`` XML listing (`tag_tokens`) used by the course's
    comparison files.
    """

    def __init__(self, input_string):
        self.raw_string = input_string
        self.tokens = []
        self.tagged_tokens = []
        self.clean_lines()
        self.tokenize()
        self.tag_tokens()

    def clean_lines(self):
        """Strip //-comments and /* ... */ blocks; join code into one line.

        Limitation (unchanged from the original): the `//` check runs
        first, so a `/*` opening that follows `//` on the same line is
        discarded with the line comment, and `//` inside a string constant
        is treated as a comment.  (The old unreachable `//*` branches were
        removed: any line containing "//*" also matched the `//` branch.)
        """
        cleaned = []
        in_block_comment = False
        for line in self.raw_string.split('\n'):
            if in_block_comment:
                if '*/' not in line:
                    continue  # whole line is inside the block comment
                in_block_comment = False
                code = line.split('*/', 1)[1].strip()
            elif '//' in line:
                # Line comment: keep only what precedes it.
                code = line.split('//')[0].strip()
            elif '/*' in line:
                before, after = line.split('/*', 1)
                if '*/' in after:
                    # Block (or /** doc) comment opens and closes here.
                    code = before.strip() + ' ' + after.split('*/', 1)[1].strip()
                else:
                    in_block_comment = True
                    code = before.strip()
            else:
                code = line.strip()
            if code and not code.isspace():
                cleaned.append(code)
        self.cleaned_string = ' '.join(cleaned)

    def tokenize(self):
        """Consume `self.cleaned_string`, appending tokens until empty."""
        while self.cleaned_string:
            token = self.get_next_token()
            if token:
                self.tokens.append(token)

    def get_next_token(self):
        """Remove and return the next token from the cleaned source.

        Tokens end at an (unquoted) space or symbol character.  A string
        constant is returned as a single token *including* its quotes so
        `tag_tokens` can recognize it.

        Raises:
            ValueError: if an identifier starts with a digit.
        """
        token = ''
        in_string = False
        for i, char in enumerate(self.cleaned_string):
            if char in ('\'', '\"'):
                in_string = not in_string
            if not in_string:
                if char == ' ':
                    self.cleaned_string = self.cleaned_string[i + 1:]
                    return token
                if char in SYMBOL:
                    if token:
                        # Leave the symbol in place for the next call.
                        self.cleaned_string = self.cleaned_string[i:]
                        return token
                    self.cleaned_string = self.cleaned_string[i + 1:]
                    return char
                if token.isnumeric() and not char.isnumeric():
                    raise ValueError(
                        f"Variable names cannot start with a numeric character. Please fix token beginning with {token + char}"
                    )
            token += char
        # Bug fix: the remainder of the source was one final token.  Clear
        # the buffer so `tokenize` terminates; previously `cleaned_string`
        # was left untouched, looping forever whenever the source did not
        # end with a space or symbol.
        self.cleaned_string = ''
        return token

    def tag_tokens(self):
        """Classify every token and wrap it in its XML element."""
        self.tagged_tokens.append('<tokens>')
        for token in self.tokens:
            if token in KEYWORDS:
                self.tagged_tokens.append(f"<keyword> {token} </keyword>")
            elif token in SUBS:
                # XML-escape <, >, & and normalize quotes.
                self.tagged_tokens.append(f"<symbol> {SUBS[token]} </symbol>")
            elif token in SYMBOL:
                self.tagged_tokens.append(f"<symbol> {token} </symbol>")
            elif token[0] in ('\'', '\"'):
                self.tagged_tokens.append(f"<stringConstant> {token[1:-1]} </stringConstant>")
            elif token.isnumeric():
                self.tagged_tokens.append(f"<integerConstant> {token} </integerConstant>")
            else:
                self.tagged_tokens.append(f"<identifier> {token} </identifier>")
        self.tagged_tokens.append('</tokens>')
if __name__ == '__main__':
    # NOTE(review): path and '\\' separators are Windows-specific, and the
    # input file/directory is hard-coded; presumably a quick manual test
    # harness rather than a real CLI -- confirm before reuse elsewhere.
    srcpath = 'ArrayTest\Main.jack'
    if os.path.isdir(srcpath):
        # read and parse the system file
        # with open(srcpath + '\\Sys.vm', 'r') as file:
        # text = file.read()
        # get all the files in the directory minus the system file
        # and parse the files
        files = os.listdir(srcpath)
        for file in files:
            if file.endswith('.jack'):
                with open(srcpath + f'\\{file}', 'r') as f:
                    text = f.read()
                analyzer = JackTokenizer(text)
                # Main.jack -> MainT.xml ("T" marks tokenizer-stage output).
                destfile = f'{srcpath}\\{file.replace(".jack", "T.xml")}'
                with open(destfile, 'w') as f:
                    f.write('\n'.join(analyzer.tagged_tokens)+'\n')
    else:
        # Single-file mode: tokenize just srcpath.
        with open(srcpath, 'r') as file:
            text = file.read()
        analyzer = JackTokenizer(text)
        destfile = f'{srcpath.replace(".jack", "T.xml")}'
        with open(destfile, 'w') as f:
            f.write('\n'.join(analyzer.tagged_tokens)+'\n')
| [
"[email protected]"
] | |
6a8158e71b678a31bafc2805c7a170059a1636e0 | 8528604d3231d86feada09465170220b892c1c35 | /landlab/grid/warnings.py | f1b23dd50399188b1024ed1c35288a9fb622b4ba | [
"MIT"
] | permissive | ChristinaB/landlab | 9780acbd5753741cd91c40dbc5d683b66f2481a6 | 82fd45d059dbe58728b074b024e46a1a10ce1e5c | refs/heads/master | 2021-04-26T22:45:44.007416 | 2020-12-02T22:57:27 | 2020-12-02T22:57:27 | 124,138,852 | 0 | 1 | MIT | 2020-12-02T22:57:28 | 2018-03-06T21:08:56 | Python | UTF-8 | Python | false | false | 988 | py | import os
from ..core.messages import deprecation_message
class DeprecatedSignature(DeprecationWarning):
    """Warning raised when a grid is constructed with a deprecated call
    signature.

    `old` and `new` are optional ``(args, kwds)`` tuples describing the
    deprecated and the replacement call; they are rendered into
    human-readable call strings for the warning message.
    """

    msg = "You are using a deprecated calling signature."

    def __init__(self, name, old=None, new=None):
        self._name = name
        self._old = old
        self._new = new
        # Render each provided (args, kwds) pair into a call string.
        if old:
            self._old = self._construct_call(name, self._old[0], self._old[1])
        if new:
            self._new = self._construct_call(name, self._new[0], self._new[1])

    @staticmethod
    def _construct_call(name, args, kwds):
        # Positional arguments first (via repr), then keyword arguments.
        pieces = [repr(arg) for arg in args]
        pieces.extend(
            "{k}={v}".format(k=k, v=repr(v)) for k, v in kwds.items()
        )
        return "{name}({signature})".format(
            name=name, signature=", ".join(pieces)
        )

    def __str__(self):
        use = None
        if self._new:
            # Show the recommended replacement as a copy-pasteable snippet.
            use = ">>> grid = {call}".format(call=self._new)
        return os.linesep + deprecation_message(self.msg, use=use)
| [
"[email protected]"
] | |
99a5ca707f3d8c2a1b8bca69fffae0e09c4debbb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03007/s124226483.py | 6a9358f63822c404d9005c20865e9e13e95ae0d6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | def c_successive_subtraction(N, A):
A.sort()
maximum = A.pop()
minimum = A.pop(0)
# Aの要素のうち、最大のものと最小のものを分けて置いておく。
# それら以外の要素 a_k について、
# a_k が非負なら、最小のものから a_k を引くことで負の方向に大きくできる。
# a_k が負なら、最大のものから a_k を引くことで正の方向に大きくできる。
# 最後に 最大のもの - 最小のもの とすると、最後に残る整数を最大にできる。
operation = []
for a in A:
if a >= 0:
operation.append('{} {}'.format(minimum, a))
minimum -= a
else:
operation.append('{} {}'.format(maximum, a))
maximum -= a
operation.append('{} {}'.format(maximum, minimum))
return str(maximum - minimum) + '\n' + '\n'.join(operation)
# Competitive-programming I/O: read N and then the N integers from stdin,
# print the maximum final value followed by the operation log.
N = int(input())
A = [int(i) for i in input().split()]
print(c_successive_subtraction(N, A))
"[email protected]"
] | |
cd8a93ba1a1eecf47d977440117edacdc4fa67db | 47e964290f45472898859fc997a9d6806518f507 | /catkin_ws/devel/lib/python2.7/dist-packages/action_demo/msg/_DoDishesAction.py | 4320719ba3e6a6335d8510bdaee0df0ea9505816 | [] | no_license | jianhengLiu/ROS | 3623fefc80ac29d7037f40886861b6f2eb30ea40 | 53961dc736bcbf39553deef97da030deb40538a0 | refs/heads/master | 2020-06-11T10:11:31.540705 | 2020-01-12T11:05:39 | 2020-01-12T11:05:39 | 193,918,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | /home/chris/catkin_ws/devel/.private/action_demo/lib/python2.7/dist-packages/action_demo/msg/_DoDishesAction.py | [
"[email protected]"
] | |
2c9a70b0cecef465d4b201e73b956ba6ebe213bf | d1969a22afb7adda791caa4edf464dad02b684c0 | /apps/edu/urls.py | b7d0eac3b362ac6a5aee738851c571d6a27a6507 | [
"MIT"
] | permissive | local-host-club/cms | 11d4ea1105dabc6d0a60b935484b5f9eb2ec1da9 | 136fb075f11011ea77672b3468f69262a43eb500 | refs/heads/master | 2020-07-02T17:09:36.135855 | 2016-11-28T13:31:11 | 2016-11-28T13:31:11 | 74,293,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | """cms URL Configuration
"""
from django.conf.urls import url
from apps.edu import views
urlpatterns = [
    # Curriculum overview (list of competencia areas).
    url(r'^curriculo/$', views.CompetenciaAreaList.as_view(), name='curriculo'),
    # Competencia-area create/detail.
    url(r'^competencia_area/add$', views.CompetenciaAreaCreateView.as_view(), name='competencia_area_add'),
    url(r'^competencia_area/(?P<pk>\d+)/$', views.CompetenciaAreaDetail.as_view(), name='competencia_area_detail'),
    # Single-object create views for the remaining curriculum entities.
    url(r'^competencia/add$', views.CompetenciaCreateView.as_view(), name='competencia_add'),
    url(r'^indicador/add$', views.IndicadorCreateView.as_view(), name='indicador_add'),
    url(r'^nivel/add$', views.NivelCreateView.as_view(), name='nivel_add'),
    # Grades are attached to an existing object identified by pk.
    url(r'^nota/(?P<pk>\d+)/add$', views.NotaCreateView.as_view(), name='nota_add'),
    # Evaluations: create, list and detail.
    url(r'^evaluacion/add$', views.EvaluacionCreateView.as_view(), name='evaluacion_add'),
    url(r'^evaluacion/list$', views.EvaluacionListView.as_view(), name='evaluacion_list'),
    url(r'^evaluacion/(?P<pk>\d+)$', views.EvaluacionDetail.as_view(), name='evaluacion_detail'), ]
| [
"[email protected]"
] | |
84d21ec07c949680015fbdeecdffdc7bfb2421f8 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/Autodesk/Revit/DB/__init___parts/DividedPath.py | c4b8a23dba9c44a17db440c15326440085b239fb | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 10,464 | py | class DividedPath(Element,IDisposable):
""" An element that consists of a set of points distributed along a path which consists of a connected set of curves and edges. """
@staticmethod
def AreCurveReferencesConnected(document,curveReferences):
""" AreCurveReferencesConnected(document: Document,curveReferences: IList[Reference]) -> bool """
pass
@staticmethod
def Create(document,curveReferences,intersectors=None):
"""
Create(document: Document,curveReferences: IList[Reference]) -> DividedPath
Create(document: Document,curveReferences: IList[Reference],intersectors: ICollection[ElementId]) -> DividedPath
"""
pass
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def Flip(self):
"""
Flip(self: DividedPath)
Toggle the flipped value
"""
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetIntersectingElements(self):
"""
GetIntersectingElements(self: DividedPath) -> ICollection[ElementId]
Get the elements whose intersection with path produces points.
"""
pass
@staticmethod
def IsCurveReferenceValid(document,curveReference):
"""
IsCurveReferenceValid(document: Document,curveReference: Reference) -> bool
This returns true if the reference represents a curve or edge that can be used
to create a divided path.
document: The document.
curveReference: The reference.
Returns: True if the reference can be used to create a divided path,false otherwise.
"""
pass
@staticmethod
def IsIntersectorValidForCreation(document,intersector):
"""
IsIntersectorValidForCreation(document: Document,intersector: ElementId) -> bool
This returns true if the intersector is an element that can be used to
intersect with a newly created divided path.
document: The document.
intersector: The intersector.
Returns: True if the reference can be used to create a divided path,false otherwise.
"""
pass
def IsIntersectorValidForDividedPath(self,intersector):
"""
IsIntersectorValidForDividedPath(self: DividedPath,intersector: ElementId) -> bool
This returns true if the intersector is an element that can be used to
intersect with the divided path.
intersector: The intersector.
Returns: True if the reference can be used to create a divided path,false otherwise.
"""
pass
def IsValidBeginningIndent(self,beginningIndent):
"""
IsValidBeginningIndent(self: DividedPath,beginningIndent: float) -> bool
Checks that the indent value does not cause the beginningIndent and endIndent
to overlop
"""
pass
def IsValidEndIndent(self,endIndent):
"""
IsValidEndIndent(self: DividedPath,endIndent: float) -> bool
Checks that the indent value does not cause the beginningIndent and endIndent
to overlop
"""
pass
@staticmethod
def IsValidFixedNumberOfPoints(fixedNumberOfPoints):
"""
IsValidFixedNumberOfPoints(fixedNumberOfPoints: int) -> bool
Identifies if the indicated number of points is valid for assignment
to a
DividedPath with a layout type 'FixedNumber'.
"""
pass
def IsValidMeasurementType(self,measurementType):
"""
IsValidMeasurementType(self: DividedPath,measurementType: DividedPathMeasurementType) -> bool
Checks that the measurement type enumeration value is valid
"""
pass
def IsValidSpacingRuleJustification(self,justification):
"""
IsValidSpacingRuleJustification(self: DividedPath,justification: SpacingRuleJustification) -> bool
Checks that the justification enumeration value is valid
"""
pass
def IsValidSpacingRuleLayout(self,layout):
"""
IsValidSpacingRuleLayout(self: DividedPath,layout: SpacingRuleLayout) -> bool
Checks that the spacing rule layout enumeration value is valid
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
@staticmethod
def SeparateReferencesIntoConnectedReferences(document,curveReferences):
""" SeparateReferencesIntoConnectedReferences(document: Document,curveReferences: IList[Reference]) -> IList[IList[Reference]] """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def SetIntersectingElements(self,intersectors):
""" SetIntersectingElements(self: DividedPath,intersectors: ICollection[ElementId]) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
BeginningIndent=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The beginningIndent is an offset distance from the beginning of the
first curve that determines the beginning of the range over which
the layout is applied.
The measurement type determines how the distance is measured.
Get: BeginningIndent(self: DividedPath) -> float
Set: BeginningIndent(self: DividedPath)=value
"""
DisplayNodeNumbers=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Controls whether the node numbers are shown when the divided path is selected
Get: DisplayNodeNumbers(self: DividedPath) -> bool
Set: DisplayNodeNumbers(self: DividedPath)=value
"""
DisplayNodes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Controls whether the points of the divided path are visible
Get: DisplayNodes(self: DividedPath) -> bool
Set: DisplayNodes(self: DividedPath)=value
"""
DisplayReferenceCurves=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Controls whether the curves in the path are visible
Get: DisplayReferenceCurves(self: DividedPath) -> bool
Set: DisplayReferenceCurves(self: DividedPath)=value
"""
Distance=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The distance between points that are distributed along the path according to the selected layout.
When the layout is set to 'FixedDistance' this value can be set to desired distance.
The measurement type determines how the distance is measured.
Get: Distance(self: DividedPath) -> float
Set: Distance(self: DividedPath)=value
"""
EndIndent=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The endIndent is an offset distance from the end of the
last curve that determines the end of the range over which
the layout is applied.
The measurement type determines how the distance is measured.
Get: EndIndent(self: DividedPath) -> float
Set: EndIndent(self: DividedPath)=value
"""
FixedNumberOfPoints=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The number of points used when the layout is set to 'FixedNumber'.
Get: FixedNumberOfPoints(self: DividedPath) -> int
Set: FixedNumberOfPoints(self: DividedPath)=value
"""
Flipped=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""If the divided path is flipped the nodes are numbered in the reverse order.
It also switches the ends from which beginningIndent and endIndent are measured from.
Get: Flipped(self: DividedPath) -> bool
"""
IsClosedLoop=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether or not the path forms a closed loop.
Get: IsClosedLoop(self: DividedPath) -> bool
"""
IsCyclical=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""True if the first and last point coincide
False otherwise.
Get: IsCyclical(self: DividedPath) -> bool
"""
MaximumDistance=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The maximum distance is used when the layout is set to 'MaximumSpacing'.
When that layout rule is used the distance between points will not exceed this value.
The measurement type determines how the distance is measured.
Get: MaximumDistance(self: DividedPath) -> float
Set: MaximumDistance(self: DividedPath)=value
"""
MeasurementType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The measurement type determines how distances are calculated.
Either along a straight line between two points ('ChordLength')
or along the segment of the path that connects them. ('SegmentLength').
Get: MeasurementType(self: DividedPath) -> DividedPathMeasurementType
Set: MeasurementType(self: DividedPath)=value
"""
MinimumDistance=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The minimum distance is used when the layout is set to 'MinimumSpacing'.
When that layout rule is used the distance between points will not fall below this value.
The measurement type determines how the distance is measured.
Get: MinimumDistance(self: DividedPath) -> float
Set: MinimumDistance(self: DividedPath)=value
"""
NumberOfPoints=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The total number of points of the divided surface.
This combines the layout points and the intersection points.
Get: NumberOfPoints(self: DividedPath) -> int
"""
SpacingRuleJustification=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When the layout is set to 'FixedDistance' the points may not cover the
entire range of the path. The justification determines whether
the points are centered on the range,or shifted towards the start or end of the range.
Get: SpacingRuleJustification(self: DividedPath) -> SpacingRuleJustification
Set: SpacingRuleJustification(self: DividedPath)=value
"""
SpacingRuleLayout=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The layout determines how points are distributed along the path.
Get: SpacingRuleLayout(self: DividedPath) -> SpacingRuleLayout
Set: SpacingRuleLayout(self: DividedPath)=value
"""
TotalPathLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The sum of the curve lengths.
Get: TotalPathLength(self: DividedPath) -> float
"""
| [
"[email protected]"
] | |
6c2852e49e135f9302519afe6ba267c820f0331f | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.0_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=53/sched.py | 4782c407067e60157e4904c483d2dc99938f2852 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | -X FMLP -Q 0 -L 2 70 300
-X FMLP -Q 0 -L 2 60 200
-X FMLP -Q 1 -L 2 58 175
-X FMLP -Q 2 -L 2 55 175
-X FMLP -Q 3 -L 1 33 200
30 400
30 400
26 100
20 125
15 175
| [
"[email protected]"
] | |
21d55738f288dbd03aa9bf55cc94a249e00cb2e8 | fd67592b2338105e0cd0b3503552d188b814ad95 | /test/test_models/test_contact_inside_base_with_id.py | 964e5f31739d9ed26dde6e699ebac9fc39ca81a9 | [] | no_license | E-goi/sdk-python | 175575fcd50bd5ad426b33c78bdeb08d979485b7 | 5cba50a46e1d288b5038d18be12af119211e5b9f | refs/heads/master | 2023-04-29T20:36:02.314712 | 2023-04-18T07:42:46 | 2023-04-18T07:42:46 | 232,095,340 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,082 | py | # coding: utf-8
"""
APIv3 (New)
# Introduction This is our new version of API. We invite you to start using it and give us your feedback # Getting Started E-goi can be integrated with many environments and programming languages via our REST API. We've created a developer focused portal to give your organization a clear and quick overview of how to integrate with E-goi. The developer portal focuses on scenarios for integration and flow of events. We recommend familiarizing yourself with all of the content in the developer portal, before start using our rest API. The E-goi APIv3 is served over HTTPS. To ensure data privacy, unencrypted HTTP is not supported. Request data is passed to the API by POSTing JSON objects to the API endpoints with the appropriate parameters. BaseURL = api.egoiapp.com # RESTful Services This API supports 5 HTTP methods: * <b>GET</b>: The HTTP GET method is used to **read** (or retrieve) a representation of a resource. * <b>POST</b>: The POST verb is most-often utilized to **create** new resources. * <b>PATCH</b>: PATCH is used for **modify** capabilities. The PATCH request only needs to contain the changes to the resource, not the complete resource * <b>PUT</b>: PUT is most-often utilized for **update** capabilities, PUT-ing to a known resource URI with the request body containing the newly-updated representation of the original resource. * <b>DELETE</b>: DELETE is pretty easy to understand. It is used to **delete** a resource identified by a URI. # Authentication We use a custom authentication method, you will need a apikey that you can find in your account settings. 
Below you will see a curl example to get your account information: #!/bin/bash curl -X GET 'https://api.egoiapp.com/my-account' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' Here you can see a curl Post example with authentication: #!/bin/bash curl -X POST 'http://api.egoiapp.com/tags' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' \\ -H 'Content-Type: application/json' \\ -d '{`name`:`Your custom tag`,`color`:`#FFFFFF`}' # SDK Get started quickly with E-goi with our integration tools. Our SDK is a modern open source library that makes it easy to integrate your application with E-goi services. * <a href='https://github.com/E-goi/sdk-java'>Java</a> * <a href='https://github.com/E-goi/sdk-php'>PHP</a> * <a href='https://github.com/E-goi/sdk-python'>Python</a> * <a href='https://github.com/E-goi/sdk-ruby'>Ruby</a> * <a href='https://github.com/E-goi/sdk-javascript'>Javascript</a> * <a href='https://github.com/E-goi/sdk-csharp'>C#</a> # Stream Limits Stream limits are security mesures we have to make sure our API have a fair use policy, for this reason, any request that creates or modifies data (**POST**, **PATCH** and **PUT**) is limited to a maximum of **20MB** of content length. If you arrive to this limit in one of your request, you'll receive a HTTP code **413 (Request Entity Too Large)** and the request will be ignored. To avoid this error in importation's requests, it's advised the request's division in batches that have each one less than 20MB. # Timeouts Timeouts set a maximum waiting time on a request's response. Our API, sets a default timeout for each request and when breached, you'll receive an HTTP **408 (Request Timeout)** error code. You should take into consideration that response times can vary widely based on the complexity of the request, amount of data being analyzed, and the load on the system and workspace at the time of the query. 
When dealing with such errors, you should first attempt to reduce the complexity and amount of data under analysis, and only then, if problems are still occurring ask for support. For all these reasons, the default timeout for each request is **10 Seconds** and any request that creates or modifies data (**POST**, **PATCH** and **PUT**) will have a timeout of **60 Seconds**. Specific timeouts may exist for specific requests, these can be found in the request's documentation. # Callbacks A callback is an asynchronous API request that originates from the API server and is sent to the client in response to a previous request sent by that client. The API will make a **POST** request to the address defined in the URL with the information regarding the event of interest and share data related to that event. <a href='/usecases/callbacks/' target='_blank'>[Go to callbacks documentation]</a> ***Note:*** Only http or https protocols are supported in the Url parameter. <security-definitions/> # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import unittest
import egoi_api
from egoi_api.model.contact_inside_base_with_id import ContactInsideBaseWithId
from egoi_api import configuration
class TestContactInsideBaseWithId(unittest.TestCase):
    """ContactInsideBaseWithId unit test stubs"""
    # Generated scaffold: no test_* methods are defined yet, so running this
    # case only exercises module import and Configuration construction.
    _configuration = configuration.Configuration()
# Allow executing this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
f9799d5d427629227541426eb2f76d39b9c7be55 | 06e2dd0aa78241edbe647a8b5ef075a90ee4a8b6 | /97/holidays.py | 719a5166b67dda66aa92ab9dfadacfa9422f242d | [] | no_license | StefanKaeser/pybites | a6a78b51039ab4792deb285dc799c6abf7bea6d5 | 9f839af4ef400786b7c28701c2241f310bb4422c | refs/heads/master | 2020-08-23T11:28:03.172666 | 2020-06-15T15:37:14 | 2020-06-15T15:37:14 | 216,606,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | from collections import defaultdict
import os
from urllib.request import urlretrieve
from bs4 import BeautifulSoup
# prep data
# tmp = os.getenv("TMP", "/tmp")
# Download the fixture HTML into the current directory (the commented-out
# line above shows the original temp-dir variant) and read it once at import
# time so get_us_bank_holidays() can use it as its default argument.
tmp = os.path.curdir
page = "us_holidays.html"
holidays_page = os.path.join(tmp, page)
urlretrieve(f"https://bites-data.s3.us-east-2.amazonaws.com/{page}", holidays_page)
with open(holidays_page) as f:
    content = f.read()
def get_us_bank_holidays(content=content):
    """Group US bank holidays by month from scraped HTML.

    Parses ``content`` with BeautifulSoup, locates the holiday table
    (css class = list-table) and returns a mapping of
    month number (str, taken from each <time> tag) -> list of holiday
    names (taken from each <a> tag).
    """
    table = BeautifulSoup(content, "html.parser").find(
        "table", {"class": "list-table"})
    time_tags = table.find_all("time")
    link_tags = table.find_all("a")
    grouped = defaultdict(list)
    for time_tag, link_tag in zip(time_tags, link_tags):
        month = time_tag.string.split("-")[1]
        grouped[month].append(link_tag.string.strip())
    return grouped
| [
"[email protected]"
] | |
0381c9188731c7b9a643d7d35757c09a22da7724 | f2ebfb99b0e6a07afba7e583f820737511a1a98e | /code/models/listener.py | 53325f3801b8e3ba46209f53072932ee06991b75 | [
"MIT"
] | permissive | jayelm/emergent-generalization | 211c065d4829322792396ad6605dc51024e913cd | 35b1d97a940826008cde13498aa75c233a7f454a | refs/heads/master | 2023-08-27T00:25:32.988514 | 2021-09-29T04:47:22 | 2021-09-29T04:47:22 | 373,944,198 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,517 | py | """
Listener models
"""
import torch
import torch.nn as nn
from . import rnn
class CopyListener(nn.Module):
    """Scores candidate feature sets against a message vector.

    Candidates are embedded with ``feat_model``; each candidate's score is
    the dot product between its embedding and a (bilinearly projected)
    message. If ``message_size`` is None, a learned linear probe scores
    the candidates directly without any message.
    """
    def __init__(self, feat_model, message_size=100, dropout=0.2):
        super().__init__()
        self.feat_model = feat_model
        self.feat_size = feat_model.final_feat_dim
        self.dropout = nn.Dropout(p=dropout)
        self.message_size = message_size
        if self.message_size is None:
            # No message: one scalar score per candidate embedding.
            self.bilinear = nn.Linear(self.feat_size, 1, bias=False)
        else:
            # Project messages into the feature-embedding space.
            self.bilinear = nn.Linear(self.message_size, self.feat_size, bias=False)
    def embed_features(self, feats):
        """Embed a (batch, n_obj, *dims) tensor of candidate features."""
        batch_size, n_obj = feats.shape[0], feats.shape[1]
        flat = feats.view(batch_size * n_obj, *feats.shape[2:])
        emb = self.feat_model(flat)
        emb = emb.unsqueeze(1).view(batch_size, n_obj, -1)
        return self.dropout(emb)
    def compare(self, feats_emb, message_enc):
        """Dot product of every candidate embedding with the message encoding."""
        return torch.einsum("ijh,ih->ij", (feats_emb, message_enc))
    def forward(self, feats, message):
        feats_emb = self.embed_features(feats)
        if self.message_size is None:
            return self.bilinear(feats_emb).squeeze(2)
        return self.compare(feats_emb, self.bilinear(message))
    def reset_parameters(self):
        self.feat_model.reset_parameters()
        self.bilinear.reset_parameters()
class Listener(CopyListener):
    """Listener that encodes token sequences with an RNN before scoring.

    Replaces CopyListener's raw message vector with the encoding produced
    by an RNN run over an embedded language sequence.
    """
    def __init__(self, feat_model, embedding_module, **kwargs):
        super().__init__(feat_model, **kwargs)
        self.embedding = embedding_module
        self.lang_model = rnn.RNNEncoder(self.embedding, hidden_size=self.message_size)
        self.vocab_size = embedding_module.num_embeddings
    def forward(self, feats, lang, lang_length):
        candidates = self.embed_features(feats)
        # Encode language, then map it into the feature-embedding space.
        encoded = self.lang_model(lang, lang_length)
        projected = self.bilinear(encoded)
        return self.compare(candidates, projected)
    def reset_parameters(self):
        super().reset_parameters()
        self.embedding.reset_parameters()
        self.lang_model.reset_parameters()
| [
"[email protected]"
] | |
de99f451e82714b1c4644394330ee7044c740365 | 62cbf8dcd921feb309d79ad66767405ea27623ba | /python/boj/2667_boj_danji.py | 34b79935b3fb77392d1a17aad2eb8c53ba322477 | [] | no_license | woodg1207/APS | 1d20f8b7c6d7a7f41e9920ec41ad0c435a881519 | e49a6fb01e1a51213963cd58f13a1364833482f8 | refs/heads/master | 2023-06-13T21:41:28.114299 | 2021-06-21T04:17:19 | 2021-06-21T04:17:19 | 288,982,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | import sys; sys.stdin=open('s2667.txt','r')
from collections import deque
# BOJ 2667: count the connected "house" complexes in an N x N grid using a
# 4-neighbour BFS, then print the number of complexes followed by their
# sizes in ascending order.
N = int(input())
grid = [list(map(int, input())) for _ in range(N)]
DIRECTIONS = ((1, 0), (-1, 0), (0, 1), (0, -1))
sizes = []
for row in range(N):
    for col in range(N):
        if not grid[row][col]:
            continue
        # New complex found: flood-fill it, clearing cells as they are
        # queued so no cell is ever counted twice.
        grid[row][col] = 0
        size = 1
        queue = deque([(row, col)])
        while queue:
            r, c = queue.popleft()
            for step_r, step_c in DIRECTIONS:
                nr, nc = r + step_r, c + step_c
                if 0 <= nr < N and 0 <= nc < N and grid[nr][nc]:
                    grid[nr][nc] = 0
                    size += 1
                    queue.append((nr, nc))
        sizes.append(size)
print(len(sizes))
for size in sorted(sizes):
    print(size)
"[email protected]"
] | |
77ceff54bf05ef0ec310abe67194e4c272925c48 | a7b66311c2ce113789933ec3162f1128b2862f13 | /app/closeLoop/ForcastDiffFactor.py | 08170df61a83a596a2f558df9d51de995411210f | [
"MIT"
] | permissive | ChanJeunlam/geolearn | 214b2c42359ea1164b39117fad2d7470adeb6d35 | 791caa54eb70920823ea7d46714dc8a3e7fa7445 | refs/heads/master | 2023-07-16T04:13:15.526364 | 2021-08-16T05:24:18 | 2021-08-16T05:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,847 | py | from hydroDL import pathSMAP, master, utils
from hydroDL.master import default
from hydroDL.post import plot, stat
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import pandas as pd
# Select which pipeline stages to run; 'train' stays commented out because
# the models are assumed to be trained already.
doLst = list()
# doLst.append('train')
doLst.append('test')
doLst.append('post')
saveDir = os.path.join(pathSMAP['dirResult'], 'DA')
# test
if 'test' in doLst:
    torch.cuda.set_device(2)
    subset = 'CONUSv2f1'
    tRange = [20150402, 20180401]
    yrStrLst = ['2015', '2016', '2017']
    yfLst = list()
    ypLst = list()
    # For each training year, evaluate two models over the full window:
    # the DA model (forecast, 'yf') and the plain LSTM (projection, 'yp').
    for yrStr in yrStrLst:
        out = os.path.join(pathSMAP['Out_L3_NA'], 'DA', 'CONUSv2f1_DA' + yrStr)
        df, yf, obs = master.test(
            out, tRange=tRange, subset=subset, batchSize=100)
        out = os.path.join(pathSMAP['Out_L3_NA'], 'DA',
                           'CONUSv2f1_LSTM' + yrStr)
        df, yp, obs = master.test(out, tRange=tRange, subset=subset)
        yf = yf.squeeze()
        yp = yp.squeeze()
        yfLst.append(yf)
        ypLst.append(yp)
    # NOTE(review): everything below this `if` uses obs/yf/yp/df defined only
    # when the 'test' stage runs; running 'post' alone would NameError.
    obs = obs.squeeze()
# figure out how many days observation lead
# maskDay[j, i] = number of days since the last observation at grid cell j
# (0 means no observation has been seen yet along the series).
maskObs = 1 * ~np.isnan(obs.squeeze())
maskDay = np.zeros(maskObs.shape).astype(int)
ngrid, nt = maskObs.shape
for j in range(ngrid):
    temp = 0
    for i in range(nt):
        maskDay[j, i] = temp
        if maskObs[j, i] == 1:
            temp = 1
        else:
            if temp != 0:
                temp = temp + 1
ind = np.random.randint(0, ngrid)
maskObsDay = maskObs * maskDay
unique, counts = np.unique(maskObsDay, return_counts=True)
# Evaluate only on days that fall 1-3 days after an observation.
maskF = (maskDay >= 1) & (maskDay <= 3)
statPLst = list()
statFLst = list()
for k in range(3):
    statP = stat.statError(
        utils.fillNan(ypLst[k], maskF), utils.fillNan(obs, maskF))
    statF = stat.statError(
        utils.fillNan(yfLst[k], maskF), utils.fillNan(obs, maskF))
    statPLst.append(statP)
    statFLst.append(statF)
cropFile = r'/mnt/sdb/Data/Crop/cropRate_CONUSv2f1.csv'
# NOTE(review): np.float is deprecated (removed in NumPy >= 1.24); replace
# with plain `float` when upgrading NumPy.
cropRate = pd.read_csv(cropFile, dtype=np.float, header=None).values
# croprate - 0 corn, 4 soybean, 22 spring wheat, 23 winter wheat
# Relative RMSE improvement of the forecast over the projection, per year.
dataGrid = [(statPLst[0]['RMSE'] - statFLst[0]['RMSE']) / statPLst[0]['RMSE'],
            (statPLst[1]['RMSE'] - statFLst[1]['RMSE']) / statPLst[1]['RMSE'],
            (statPLst[2]['RMSE'] - statFLst[2]['RMSE']) / statPLst[2]['RMSE'],
            ]
prcp = df.getDataTs('APCP_FORA').squeeze()
dataTs = [[obs, ypLst[0], yfLst[0]], [obs, ypLst[1], yfLst[1]],
          [obs, ypLst[2], yfLst[2]], [prcp]]
crd = df.getGeo()
t = df.getT()
mapNameLst = ['dRMSE 2015', 'dRMSE 2016', 'dRMSE 2017']
tsNameLst = ['obs', 'prj', 'fore']
tBar = [utils.time.t2dt(20160401), utils.time.t2dt(20170401)]
#plt.tight_layout()
plot.plotTsMap(
    dataGrid,
    dataTs,
    lat=crd[0],
    lon=crd[1],
    t=t,
    mapNameLst=mapNameLst,
    isGrid=True,
    multiTS=True,
    linewidth=1,
    figsize=(10, 10),
    tBar=tBar)
# see result for different seasons
# 24 month-long windows: indices 0-11 and 12-23 cover the same calendar
# month in two different years, so the loop below merges each pair.
tRangeLst = [[20180101, 20180201], [20180201, 20180301], [20180301, 20180401],
             [20160401, 20160501], [20160501, 20160601], [20160601, 20160701],
             [20160701, 20160801], [20160801, 20160901], [20160901, 20161001],
             [20161001, 20161101], [20161101, 20161201], [20161201, 20170101],
             [20170101, 20170201], [20170201, 20170301], [20170301, 20170401],
             [20170401, 20170501], [20170501, 20170601], [20170601, 20170701],
             [20170701, 20170801], [20170801, 20170901], [20170901, 20171001],
             [20171001, 20171101], [20171101, 20171201], [20171201, 20180101]]
tAllR = [20150402, 20180401]
tAllA = utils.time.tRange2Array(tAllR)
statPLst = list()
statFLst = list()
# NOTE(review): this loop uses `yf`/`yp` left over from the last iteration
# of the year loop above (the '2017' models), not the per-year lists.
for k in range(12):
    tRLst = [tRangeLst[k], tRangeLst[k + 12]]
    temp = list()
    for tR in tRLst:
        tA = utils.time.tRange2Array(tR)
        # ind0 appears unused below.
        ind0 = np.array(range(nt))
        ind1, ind2 = utils.time.intersect(tAllA, tA)
        temp.append(ind1)
    indT = np.concatenate(temp)
    yfTemp = utils.fillNan(yf, maskF)[:, indT]
    ypTemp = utils.fillNan(yp, maskF)[:, indT]
    obsTemp = utils.fillNan(obs, maskF)[:, indT]
    statPLst.append(stat.statError(ypTemp, obsTemp))
    statFLst.append(stat.statError(yfTemp, obsTemp))
import matplotlib
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 6})
labCrop = ['Corn', 'Spring wheat', 'Winter wheat']
# Column indices into cropRate for the crops above (see the legend comment
# next to the earlier read: 0 corn, 22 spring wheat, 23 winter wheat).
indCrop = [0, 22, 23]
cropFile = r'/mnt/sdb/Data/Crop/cropRate_CONUSv2f1.csv'
cropRate = pd.read_csv(cropFile, dtype=np.float, header=None).values
key = 'RMSE'
[lat, lon] = df.getGeo()
# Figure 1: map of the planting percentage of each crop.
fig, axes = plt.subplots(1, 3, figsize=[12, 5])
for k in range(3):
    grid, uy, ux = utils.grid.array2grid(
        cropRate[:, indCrop[k]], lat=lat, lon=lon)
    plot.plotMap(
        grid, ax=axes[k], lat=uy, lon=ux, title=labCrop[k] + ' percentage')
plt.tight_layout()
fig.show()
import matplotlib
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 6})
# Grid cells dominated by each crop (percentage thresholds per crop).
indLst = [cropRate[:, 0] > 30, cropRate[:, 22] > 5, cropRate[:, 23] > 10]
# NOTE(review): 'Agu' is a typo for 'Aug' in the plot labels; left as-is
# here because changing it would alter the figure output.
labMonth = [
    'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Agu', 'Sep', 'Oct',
    'Nov', 'Dec'
]
labCrop = ['Corn', 'Spring wheat', 'Winter wheat']
cLst = 'rgb'
# This outer dataBox is immediately shadowed by the one inside the loop.
dataBox = list()
# Figure 2: per-crop boxplots of the monthly RMSE improvement; months with
# fewer than 20 valid cells are blanked out.
for iC in range(len(indLst)):
    dataBox = list()
    for k in range(12):
        data = statPLst[k]['RMSE'][indLst[iC]] - statFLst[k]['RMSE'][
            indLst[iC]]
        if len(data[~np.isnan(data)]) < 20:
            data = []
        dataBox.append(data)
    fig = plot.plotBoxFig(
        dataBox,
        label1=labMonth,
        label2=[labCrop[iC]],
        sharey=True,
        figsize=[8, 3],
        colorLst=cLst[iC])
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.ylim(-0.02, 0.04)
    fig.show()
| [
"[email protected]"
] | |
0a606a1b05d67e2bbd7b110945393561282c0ba4 | 430db754af2a7481358df2dcd7f74919c4ecddcf | /prob_tools/tools.py | 6acebfcfbb108e282202e948521f5579880f9c75 | [
"MIT"
] | permissive | arruda/exercicios_probabilidade | 567eb318ff137bcce155142d3a951cf6b1c40515 | dca3503a0b4d982e63795b775bf30b9b95440bcd | refs/heads/master | 2021-01-19T19:35:24.552403 | 2014-10-22T18:31:42 | 2014-10-22T18:31:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import math
def combinacao(n, p):
    """
    Combinação de N, P à P
    C(n, p) = n! / (p! * (n - p)!), devolvido como float
    (divisão verdadeira via ``from __future__ import division``).
    """
    numerador = math.factorial(n)
    denominador = math.factorial(p) * math.factorial(n - p)
    return numerador / denominador
def bernuille():
    # TODO: unimplemented stub (name is a misspelling of "Bernoulli";
    # kept as-is because renaming would break any existing callers).
    pass
def distribuicao_binomial(n, p, X):
    """
    Binomial:
    n = Total de elementos
    p = probabilidade de sucesso
    X = variavel aleatoria
    Retorna P(X) = C(n, X) * p**X * (1 - p)**(n - X).
    """
    # Bug fix: the body was a bare ``return`` (always None) even though the
    # docstring promises the binomial distribution; compute the PMF using
    # the module's combinacao() helper.
    return combinacao(n, X) * (p ** X) * ((1 - p) ** (n - X))
| [
"[email protected]"
] | |
6060cea90a85849c7a6e237732ba3d0a8e87983d | ec0e1779383bec96de803ba893de5096c563158f | /tensorflow/python/estimator/inputs/pandas_io.py | a1e418f487c5b7da6907fa945fba0165334432cf | [] | no_license | DengZhuangSouthRd/simple_tensorflow | 45d8fc7c2ef9da947f11f876aff7c1e169dc457c | 83d742219c4a04c61822935487626890bc735301 | refs/heads/master | 2021-01-18T19:05:36.414639 | 2017-04-01T15:06:16 | 2017-04-01T15:06:16 | 86,887,616 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,137 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
# Guarded pandas import: the module must still be importable when pandas is
# absent or broken; pandas_input_fn raises at call time instead (HAS_PANDAS).
try:
  # pylint: disable=g-import-not-at-top
  # pylint: disable=unused-import
  import pandas as pd
  HAS_PANDAS = True
except IOError:
  # Pandas writes a temporary file during import. If it fails, don't use pandas.
  HAS_PANDAS = False
except ImportError:
  HAS_PANDAS = False
def pandas_input_fn(x,
                    y=None,
                    batch_size=128,
                    num_epochs=1,
                    shuffle=True,
                    queue_capacity=1000,
                    num_threads=1,
                    target_column='target'):
  """Returns input function that would feed Pandas DataFrame into the model.
  Note: `y`'s index must match `x`'s index.
  Args:
    x: pandas `DataFrame` object.
    y: pandas `Series` object.
    batch_size: int, size of batches to return.
    num_epochs: int, number of epochs to iterate over data. If not `None`,
      read attempts that would exceed this value will raise `OutOfRangeError`.
    shuffle: bool, whether to read the records in random order.
    queue_capacity: int, size of the read queue. If `None`, it will be set
      roughly to the size of `x`.
    num_threads: int, number of threads used for reading and enqueueing.
    target_column: str, name to give the target column `y`.
  Returns:
    Function, that has signature of ()->(dict of `features`, `target`)
  Raises:
    ValueError: if `x` already contains a column with the same name as `y`, or
      if the indexes of `x` and `y` don't match.
  """
  if not HAS_PANDAS:
    raise TypeError(
        'pandas_input_fn should not be called without pandas installed')
  # Copy so that appending the target column below never mutates the
  # caller's DataFrame.
  x = x.copy()
  if y is not None:
    if target_column in x:
      raise ValueError(
          'Cannot use name %s for target column: DataFrame already has a '
          'column with that name: %s' % (target_column, x.columns))
    if not np.array_equal(x.index, y.index):
      raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
                       'Index for y: %s\n' % (x.index, y.index))
    x[target_column] = y
  # TODO(mdan): These are memory copies. We probably don't need 4x slack space.
  # The sizes below are consistent with what I've seen elsewhere.
  if queue_capacity is None:
    if shuffle:
      queue_capacity = 4 * len(x)
    else:
      queue_capacity = len(x)
  # NOTE(review): with `from __future__ import division` this is a float
  # (e.g. 250.0) — presumably tolerated by the queue implementation; confirm.
  min_after_dequeue = max(queue_capacity / 4, 1)
  def input_fn():
    """Pandas input function."""
    queue = feeding_functions._enqueue_data(  # pylint: disable=protected-access
        x,
        queue_capacity,
        shuffle=shuffle,
        min_after_dequeue=min_after_dequeue,
        num_threads=num_threads,
        enqueue_size=batch_size,
        num_epochs=num_epochs)
    if num_epochs is None:
      features = queue.dequeue_many(batch_size)
    else:
      # dequeue_up_to allows a short final batch at the end of the last epoch.
      features = queue.dequeue_up_to(batch_size)
    assert len(features) == len(x.columns) + 1, ('Features should have one '
                                                 'extra element for the index.')
    # The first dequeued tensor is the DataFrame index; drop it before
    # zipping the remaining tensors with the column names.
    features = features[1:]
    features = dict(zip(list(x.columns), features))
    if y is not None:
      target = features.pop(target_column)
      return features, target
    return features
  return input_fn
| [
"[email protected]"
] | |
b2c8ee7450114fe41b23728d52ab158c5be37155 | 37c38ef8ead53739b3128147da9a24c44cfccccb | /froide/helper/search.py | 096eeb8474bc34da53009caf07af0861a887d59b | [
"MIT"
] | permissive | zlodej/pekel | ff102dc1c05180dfcff6a30bd944252d128e0fb5 | b1114618ef032503ab49476e738e90952c4da71a | refs/heads/master | 2021-05-06T15:38:28.301853 | 2017-12-02T11:33:19 | 2017-12-02T11:33:19 | 102,724,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,627 | py | from haystack.fields import NgramField
from haystack.exceptions import MissingDependency
class SuggestField(NgramField):
    # Fallback definition used when the Elasticsearch backend cannot be
    # imported: behaves exactly like a plain NgramField. (It is redefined
    # below, with field_type 'suggest', when the backend is available.)
    pass
# If the Elasticsearch haystack backend is installed, register a custom
# 'suggest' field type and a backend/engine pair with an autocomplete
# analyzer; otherwise silently keep the plain NgramField fallback above.
try:
    from haystack.backends.elasticsearch_backend import (
        ElasticsearchSearchEngine, ElasticsearchSearchBackend, FIELD_MAPPINGS
    )
except (ImportError, MissingDependency):
    pass
else:
    class SuggestField(NgramField):  # noqa
        field_type = 'suggest'
    FIELD_MAPPINGS['suggest'] = {'type': 'string', 'analyzer': 'suggest_analyzer'}
    class FroideElasticsearchSearchBackend(ElasticsearchSearchBackend):
        # Settings to add an custom suggest analyzer
        # suggest_analyzer: lowercased, ascii-folded edge n-grams (1-15)
        # over letters/digits, so user-typed prefixes match while typing.
        DEFAULT_SETTINGS = {
            'settings': {
                "analysis": {
                    "analyzer": {
                        "ngram_analyzer": {
                            "type": "custom",
                            "tokenizer": "standard",
                            "filter": ["haystack_ngram", "lowercase"]
                        },
                        "edgengram_analyzer": {
                            "type": "custom",
                            "tokenizer": "standard",
                            "filter": ["haystack_edgengram", "lowercase"]
                        },
                        "suggest_analyzer": {
                            "filter": ["lowercase", "asciifolding"],
                            "type": "custom",
                            "tokenizer": "froide_autocomplete_ngram"
                        }
                    },
                    "tokenizer": {
                        "haystack_ngram_tokenizer": {
                            "type": "nGram",
                            "min_gram": 3,
                            "max_gram": 15,
                        },
                        "haystack_edgengram_tokenizer": {
                            "type": "edgeNGram",
                            "min_gram": 2,
                            "max_gram": 15,
                            "side": "front"
                        },
                        "froide_autocomplete_ngram": {
                            "type": "edgeNGram",
                            "min_gram": 1,
                            "max_gram": 15,
                            "token_chars": ["letter", "digit"]
                        }
                    },
                    "filter": {
                        "haystack_ngram": {
                            "type": "nGram",
                            "min_gram": 3,
                            "max_gram": 15
                        },
                        "haystack_edgengram": {
                            "type": "edgeNGram",
                            "min_gram": 2,
                            "max_gram": 15
                        }
                    }
                }
            }
        }
    class FroideElasticsearchSearchEngine(ElasticsearchSearchEngine):
        backend = FroideElasticsearchSearchBackend
class SearchQuerySetWrapper(object):
    """
    Decorates a SearchQuerySet so that iteration and indexing yield the
    underlying database objects (``result.object``) instead of the raw
    search results. Iteration uses a generator for efficiency.
    """
    def __init__(self, qs, model):
        self.qs = qs
        self.model = model
    def count(self):
        return self.qs.count()
    def __iter__(self):
        # Generator: avoids materialising the whole result set at once.
        for result in self.qs:
            yield result.object
    def __getitem__(self, key):
        # Bug fix: the previous guard `key >= 0 or key < self.count()` was a
        # tautology for ints (every int satisfies at least one side), so it
        # never restricted anything. Any integer index simply delegates to
        # the queryset, which raises IndexError for out-of-range values.
        if isinstance(key, int):
            # Return the object at the specified position.
            return self.qs[key].object
        # Pass the slice/range on to the delegate.
        return SearchQuerySetWrapper(self.qs[key], self.model)
| [
"[email protected]"
] | |
a23d5a870a4d32a4c8c889089b00f6e56ee3dd50 | 73c2ec3edf0f6eaea4ce1f73e910f02592119a42 | /mmdet/models/utils/fpn_utils.py | ab7558ff1ecd6d699cd8724dd5260b276affb28f | [
"Apache-2.0"
] | permissive | violet998/video_class_agnostic_segmentation | ab9b496415857678979a70890cb68e92fa014061 | c4614fe675e8a5352012f603c15bc24fb43d690c | refs/heads/main | 2023-04-20T15:31:37.699645 | 2021-04-22T16:41:26 | 2021-04-22T16:41:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | import torch
import torch.nn.functional as F
def merge_fpn(x, average=True):
    """Merge FPN levels into a single tensor at the finest resolution.

    Every level in ``x`` (a sequence of (B, C, H, W) tensors; ``x[0]``
    defines the target spatial size) is resized to ``x[0]``'s (H, W).

    Args:
        x: sequence of feature maps, all with the same (B, C).
        average: if True, return the element-wise mean over levels with
            shape (B, C, H, W); otherwise concatenate the levels along the
            channel dimension, returning (B, len(x) * C, H, W).

    Returns:
        torch.Tensor with the merged features.
    """
    target_hw = x[0].shape[-2:]
    # Idiom cleanup: iterate the levels directly (the enumerate index was
    # unused) and use the explicit `size=` keyword for interpolate.
    resized = [F.interpolate(level, size=target_hw) for level in x]
    stacked = torch.stack(resized)  # (L, B, C, H, W)
    if average:
        return stacked.mean(dim=0)
    # (L, B, C, H, W) -> (B, L, C, H, W) -> (B, L*C, H, W)
    return stacked.permute(1, 0, 2, 3, 4).reshape(
        stacked.shape[1], -1, *stacked.shape[-2:])
| [
"[email protected]"
] | |
7099ddfcc80ff50780eeb6bea1817b88a9fa94f2 | 70d929497cbd70bb40ed939f3aa0ce56c3f7d816 | /pandas/core/groupby/ops.pyi | 58a34d0c2081a8e25b9bb1f4bab161d88906e3f0 | [
"MIT"
] | permissive | matangover/python-type-stubs | abd4bc46f9841d0a2c44e1597055019d21f9ee70 | 15285c6b88dc684d9de9bfdaf8b72d4eb7c3e257 | refs/heads/main | 2023-07-13T04:19:03.481222 | 2021-08-05T20:26:18 | 2021-08-05T20:26:18 | 399,435,272 | 0 | 0 | MIT | 2021-08-24T11:11:57 | 2021-08-24T11:11:56 | null | UTF-8 | Python | false | false | 2,824 | pyi | import numpy as np
from pandas._typing import FrameOrSeries as FrameOrSeries
from pandas.core.groupby import grouper as grouper
from pandas.core.indexes.api import Index as Index
from pandas.core.series import Series as Series
from typing import List, Optional, Sequence, Tuple
class BaseGrouper:
    # Type stub for the internal pandas helper that maps axis labels to
    # group codes and drives split-apply-combine for groupby operations.
    axis = ...
    sort = ...
    group_keys = ...
    mutated = ...
    indexer = ...
    def __init__(self, axis: Index, groupings: Sequence[grouper.Grouping], sort: bool=..., group_keys: bool=..., mutated: bool=..., indexer: Optional[np.ndarray]=...) -> None: ...
    @property
    def groupings(self) -> List[grouper.Grouping]: ...
    @property
    def shape(self): ...
    def __iter__(self) : ...
    @property
    def nkeys(self) -> int: ...
    def get_iterator(self, data: FrameOrSeries, axis: int=...) : ...
    def apply(self, f, data: FrameOrSeries, axis: int=...) : ...
    def indices(self): ...
    @property
    def codes(self) -> List[np.ndarray]: ...
    @property
    def levels(self) -> List[Index]: ...
    @property
    def names(self): ...
    def size(self) -> Series: ...
    def groups(self): ...
    def is_monotonic(self) -> bool: ...
    def group_info(self): ...
    def codes_info(self) -> np.ndarray: ...
    def ngroups(self) -> int: ...
    @property
    def reconstructed_codes(self) -> List[np.ndarray]: ...
    def result_index(self) -> Index: ...
    def get_group_levels(self): ...
    # Aggregation/transformation entry points dispatched by `how` name.
    def aggregate(self, values, how: str, axis: int=..., min_count: int=...) -> Tuple[np.ndarray, Optional[List[str]]]: ...
    def transform(self, values, how: str, axis: int=..., **kwargs) : ...
    def agg_series(self, obj: Series, func) : ...
class BinGrouper(BaseGrouper):
    # Stub for the grouper driven by precomputed bin boundaries (``bins``)
    # with ``binlabels`` as the resulting group labels.
    bins = ...
    binlabels = ...
    mutated = ...
    indexer = ...
    def __init__(self, bins, binlabels, filter_empty: bool=..., mutated: bool=..., indexer=...) -> None: ...
    def groups(self): ...
    @property
    def nkeys(self) -> int: ...
    def get_iterator(self, data: FrameOrSeries, axis: int=...) : ...
    def indices(self): ...
    def group_info(self): ...
    def reconstructed_codes(self) -> List[np.ndarray]: ...
    def result_index(self): ...
    @property
    def levels(self): ...
    @property
    def names(self): ...
    @property
    def groupings(self) -> List[grouper.Grouping]: ...
    def agg_series(self, obj: Series, func) : ...
class DataSplitter:
    # Stub for the helper that iterates per-group chunks of ``data``
    # according to ``labels``; iteration yields one chunk per group.
    data = ...
    labels = ...
    ngroups = ...
    axis = ...
    def __init__(self, data: FrameOrSeries, labels, ngroups: int, axis: int=...) -> None: ...
    def slabels(self): ...
    def sort_idx(self): ...
    def __iter__(self) : ...
# Series specialization of DataSplitter; inherits all stubbed behavior.
class SeriesSplitter(DataSplitter): ...
class FrameSplitter(DataSplitter):
    # DataFrame specialization; adds a fast apply path.
    def fast_apply(self, f, names): ...
# Factory returning the appropriate DataSplitter subclass for *data* (stub).
def get_splitter(data: FrameOrSeries, *args, **kwargs) -> DataSplitter: ...
| [
"[email protected]"
] | |
f3ce2b0bddb87af3a6913e654894d0f19c5a9fe7 | 436166fda7a671805b8fa6fc06e68bf7d42f9be6 | /test/boost.py | f977bb0860e75e9f0cee74eff9cad1e18a818acf | [] | no_license | VitalyVorobyev/libLinAl | 1e4872feb0e5deb70d9c3acc2e31e676e27818c2 | b815efeee109922ffd28d97b9253f636403aa999 | refs/heads/master | 2021-01-22T08:39:29.897025 | 2020-08-28T04:06:52 | 2020-08-28T04:06:52 | 81,914,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | """ Boost """
import numpy as np
import matplotlib.pyplot as plt
from liblinal import vect, lvect
def boost_test():
    """Plot t' and x' of the unit four-vector under x-boosts beta in (0, 0.9)."""
    betas = np.linspace(0.0001, 0.9, 500)
    # First two components (t', x') of the boosted four-vector per beta.
    boosted = np.array([lvect(1., 0, 0, 0).boost(vect(beta, 0, 0)).as_list[:2]
                        for beta in betas])
    t_boosted = boosted[:, 0]
    x_boosted = boosted[:, 1]

    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    plt.rc('font', size=22)
    plt.style.use('seaborn-white')
    label_size = 28

    plt.figure(num=1, figsize=(6, 4), dpi=100)
    plt.plot(betas, t_boosted, 'b-', markersize=12)
    plt.ylabel(r'$t^{\prime}$', fontsize=label_size)
    plt.xlabel(r'$\beta$', fontsize=label_size)
    plt.tight_layout(pad=.2)

    plt.figure(num=2, figsize=(6, 4), dpi=100)
    # Alternative axis scalings kept from the original for experimentation:
    # plt.semilogy(betas, x_boosted, 'b-', markersize=12)
    # plt.loglog(betas, x_boosted, 'b-', markersize=12)
    plt.plot(betas, x_boosted, 'b-', markersize=12)
    plt.ylabel(r'$x^{\prime}$', fontsize=label_size)
    plt.xlabel(r'$\beta$', fontsize=label_size)
    plt.tight_layout(pad=.2)

    plt.show()
| [
"[email protected]"
] | |
0099d67f95506a2f31cda7626005a332d94f78ee | 2aaa58e7a83c4c8a4a2aa8b4a70df95a1ca10f19 | /s_full1.py | de1275e660df53dc31509a7dc24c722912520572 | [] | no_license | federico0712/elitepro_astm | 84cd8b1c3095f24a1cfded573debcd12894d60eb | 07c2cc8dd3db58b966eeb138484a1fd073e65dde | refs/heads/master | 2022-04-14T16:15:31.370068 | 2020-03-17T03:49:41 | 2020-03-17T03:49:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | #!/usr/bin/python3
import sys
import signal
import datetime
import serial
import logging
'''
This program reads all bytes and writes them to a file in /root/elite folder
first byte is ENQ last one is EOT
This help in capturing everything between ENQ and EOT and learn equipment specific need
'''
# Destination directory for capture files; the trailing separator is
# required because get_filename() does plain string concatenation.
output_folder='/root/elite/' #remember ending/
# Serial device to read the instrument's byte stream from.
input_tty='/dev/ttyUSB0'
def get_filename(folder=None):
    """Return a unique, timestamped capture-file path.

    Args:
        folder: Destination directory, including the trailing separator.
            Defaults to the module-level ``output_folder`` so existing
            callers are unaffected.

    Returns:
        ``folder`` + a "%Y-%m-%d-%H-%M-%S-%f" timestamp (26 characters),
        unique down to the microsecond.
    """
    if folder is None:
        folder = output_folder
    dt = datetime.datetime.now()
    return folder + dt.strftime("%Y-%m-%d-%H-%M-%S-%f")
#Globals############################
byte_array=[]
byte=b'd'  # non-empty sentinel so the first loop test passes
#main loop##########################
port = serial.Serial(input_tty, baudrate=9600)
# Protocol: <ENQ> opens a session (start a new capture file), each <LF>
# flushes the buffered bytes to disk, <EOT> flushes and closes the file.
while byte!=b'':  # port.read() returning b'' ends the loop
    byte=port.read(1)
    byte_array=byte_array+[chr(ord(byte))] #add everything read to array
    if(byte==b'\x05'):  # ENQ: sender wants to start a transfer
        port.write(b'\x06');  # reply with ACK
        cur_file=get_filename() #get name of file to open
        x=open(cur_file,'w') #open file
        # NOTE(review): if an <LF> or <EOT> arrives before any <ENQ>, ``x``
        # is undefined and the writes below raise NameError — assumes the
        # device always leads with ENQ; confirm against the instrument.
        print("<ENQ>")
    elif(byte==b'\x0a'):  # LF: end of a frame line
        print("<LF>")
        port.write(b'\x06');  # ACK the frame
        x.write(''.join(byte_array)) #write to file everytime LF received, to prevent big data memory problem
        byte_array=[] #empty after writing
    elif(byte==b'\x04'):  # EOT: sender finished the transfer
        print("<EOF>")
        x.write(''.join(byte_array)) #write last byte(EOF) to file
        byte_array=[] #empty array
        x.close() #close file
    #else:
        #byte_array=byte_array+[chr(ord(byte))]
| [
"root@debian"
] | root@debian |
28707766a97f29fb0ccf49800aa19a65d89a6697 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_355/ch117_2020_03_30_19_48_40_877571.py | 985e837a3949e7e8f671c81eba218bc0348f2386 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | import math
def snell_descartes(o2, o1, n1, n2):
    """Return the refraction angle, in degrees, given by Snell's law.

    Fixes the original body, which called the undefined names ``arcsin``
    and ``degrees``, applied arcsin twice, and computed an unused value.

    Args:
        o2: Ignored; kept so the existing call signature is unchanged.
        o1: Angle of incidence, in radians.
        n1: Refractive index of the incident medium.
        n2: Refractive index of the refracting medium.

    Returns:
        The refraction angle, in degrees.

    Raises:
        ValueError: On total internal reflection (|sin(o1) * n1 / n2| > 1).
    """
    o2 = math.asin(math.sin(o1) * (n1 / n2))
    return math.degrees(o2)
| [
"[email protected]"
] | |
7e94b07d17aaa223c1697a14ed1951afe126ebe0 | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /home/pi/GrovePi/Software/Python/others/temboo/Library/Tumblr/Post/CreateLinkPost.py | ac3b9be49dc57291e0f74fc746a2da6b46aca1b7 | [
"MIT",
"Apache-2.0"
] | permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 7,045 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# CreateLinkPost
# Creates a new link post for a specified Tumblr blog.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CreateLinkPost(Choreography):
    # Machine-generated wrapper binding this Choreo to its Temboo library
    # path and to the matching InputSet/ResultSet/Execution classes below.

    def __init__(self, temboo_session):
        """
        Create a new instance of the CreateLinkPost Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(CreateLinkPost, self).__init__(temboo_session, '/Library/Tumblr/Post/CreateLinkPost')

    def new_input_set(self):
        # Factory for the input container used to parameterize an execution.
        return CreateLinkPostInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed ResultSet below.
        return CreateLinkPostResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Track an in-flight execution of this Choreo.
        return CreateLinkPostChoreographyExecution(session, exec_id, path)
class CreateLinkPostInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the CreateLinkPost
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Machine-generated setters: each forwards a named Choreo input to the
    # Temboo InputSet base class; docstrings carry the wire-level types.

    def set_URL(self, value):
        """
        Set the value of the URL input for this Choreo. ((required, string) The link you want to post.)
        """
        super(CreateLinkPostInputSet, self)._set_input('URL', value)

    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Tumblr (AKA the OAuth Consumer Key).)
        """
        super(CreateLinkPostInputSet, self)._set_input('APIKey', value)

    def set_AccessTokenSecret(self, value):
        """
        Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
        """
        super(CreateLinkPostInputSet, self)._set_input('AccessTokenSecret', value)

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
        """
        super(CreateLinkPostInputSet, self)._set_input('AccessToken', value)

    def set_BaseHostname(self, value):
        """
        Set the value of the BaseHostname input for this Choreo. ((required, string) The standard or custom blog hostname (i.e. temboo.tumblr.com).)
        """
        super(CreateLinkPostInputSet, self)._set_input('BaseHostname', value)

    def set_Date(self, value):
        """
        Set the value of the Date input for this Choreo. ((optional, date) The GMT date and time of the post. Can be an epoch timestamp in milliseconds or formatted like: Dec 8th, 2011 4:03pm. Defaults to NOW().)
        """
        super(CreateLinkPostInputSet, self)._set_input('Date', value)

    def set_Description(self, value):
        """
        Set the value of the Description input for this Choreo. ((optional, string) A user-supplied description. HTML is allowed.)
        """
        super(CreateLinkPostInputSet, self)._set_input('Description', value)

    def set_Markdown(self, value):
        """
        Set the value of the Markdown input for this Choreo. ((optional, boolean) Indicates whether the post uses markdown syntax. Defaults to false. Set to 1 to indicate true.)
        """
        super(CreateLinkPostInputSet, self)._set_input('Markdown', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
        """
        super(CreateLinkPostInputSet, self)._set_input('ResponseFormat', value)

    def set_SecretKey(self, value):
        """
        Set the value of the SecretKey input for this Choreo. ((required, string) The Secret Key provided by Tumblr (AKA the OAuth Consumer Secret).)
        """
        super(CreateLinkPostInputSet, self)._set_input('SecretKey', value)

    def set_Slug(self, value):
        """
        Set the value of the Slug input for this Choreo. ((optional, string) Adds a short text summary to the end of the post URL.)
        """
        super(CreateLinkPostInputSet, self)._set_input('Slug', value)

    def set_State(self, value):
        """
        Set the value of the State input for this Choreo. ((optional, string) The state of the post. Specify one of the following: published, draft, queue. Defaults to published.)
        """
        super(CreateLinkPostInputSet, self)._set_input('State', value)

    def set_Tags(self, value):
        """
        Set the value of the Tags input for this Choreo. ((optional, string) Comma-separated tags for this post.)
        """
        super(CreateLinkPostInputSet, self)._set_input('Tags', value)

    def set_Title(self, value):
        """
        Set the value of the Title input for this Choreo. ((optional, string) The title of the page the link points to. HTML entities should be escaped.)
        """
        super(CreateLinkPostInputSet, self)._set_input('Title', value)

    def set_Tweet(self, value):
        """
        Set the value of the Tweet input for this Choreo. ((optional, string) Manages the autotweet (if enabled) for this post. Set to "off" for no tweet. Enter text to override the default tweet.)
        """
        super(CreateLinkPostInputSet, self)._set_input('Tweet', value)
class CreateLinkPostResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the CreateLinkPost Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter shadows the builtin ``str``; kept
        # unchanged because this file is machine-generated.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Tumblr. Default is JSON, can be set to XML by entering 'xml' in ResponseFormat.)
        """
        # Returns None when the execution produced no "Response" output.
        return self._output.get('Response', None)
class CreateLinkPostChoreographyExecution(ChoreographyExecution):
    # Pairs an in-flight execution with the ResultSet type used to read it.

    def _make_result_set(self, response, path):
        return CreateLinkPostResultSet(response, path)
| [
"[email protected]"
] | |
1de081acd200b031145320d70d79be19ae3a8312 | 9510cd7f96e2cd6b8751fab988038228fe0568c7 | /python/0343.Integer Break.py | fb6b957010a895615056dfe72d6838cee56b60b0 | [] | no_license | juechen-zzz/LeetCode | 2df2e7efe2efe22dc1016447761a629a0da65eda | b5926a3d40ca4a9939e1d604887e0ad7e9501f16 | refs/heads/master | 2021-08-11T00:37:18.891256 | 2021-06-13T10:26:31 | 2021-06-13T10:26:31 | 180,496,839 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | """
给定一个正整数 n,将其拆分为至少两个正整数的和,并使这些整数的乘积最大化。 返回你可以获得的最大乘积。
"""
class Solution:
    def integerBreak(self, n: int) -> int:
        """Split n (n >= 2) into at least two positive ints, maximizing the product.

        Bottom-up DP: best[t] holds the largest product obtainable by
        splitting t, where each side of a split may either stay whole or be
        split further.
        """
        best = [1] * (n + 1)
        for total in range(3, n + 1):
            # By symmetry only the lower half of split points is needed.
            for left in range(1, total // 2 + 1):
                right = total - left
                candidate = max(best[left], left) * max(best[right], right)
                if candidate > best[total]:
                    best[total] = candidate
        return best[n]
| [
"[email protected]"
] | |
1e31bced9e56926fe0f486d8dd135d8d6c560de0 | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/app/ext_runtimes/fingerprinting.py | 083b9008e3b6aa13e21c03c748a2c8a5680106dc | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 3,153 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functionality to support source fingerprinting."""
from googlecloudsdk.core import properties
_PROMPTS_DISABLED_ERROR_MESSAGE = (
'("disable_prompts" set to true, run "gcloud config set disable_prompts '
'False" to fix this)')
class Params(object):
"""Parameters passed to the the runtime module Fingerprint() methods.
Attributes:
appinfo: (apphosting.api.appinfo.AppInfoExternal or None) The parsed
app.yaml file for the module if it exists.
custom: (bool) True if the Configurator should generate a custom runtime.
runtime (str or None) Runtime (alias allowed) that should be enforced.
deploy: (bool) True if this is happening from deployment.
"""
def __init__(self, appinfo=None, custom=False, runtime=None, deploy=False):
self.appinfo = appinfo
self.custom = custom
self.runtime = runtime
self.deploy = deploy
def ToDict(self):
"""Returns the object converted to a dictionary.
Returns:
({str: object}) A dictionary that can be converted to json using
json.dump().
"""
return {'appinfo': self.appinfo and self.appinfo.ToDict(),
'custom': self.custom,
'runtime': self.runtime,
'deploy': self.deploy}
class Configurator(object):
"""Base configurator class.
Configurators generate config files for specific classes of runtimes. They
are returned by the Fingerprint functions in the runtimes sub-package after
a successful match of the runtime's heuristics.
"""
def GenerateConfigs(self):
"""Generate all configuration files for the module.
Generates config files in the current working directory.
Returns:
(callable()) Function that will delete all of the generated files.
"""
raise NotImplementedError()
def GetNonInteractiveErrorMessage():
"""Returns useful instructions when running non-interactive.
Certain fingerprinting modules require interactive functionality. It isn't
always obvious why gcloud is running in non-interactive mode (e.g. when
"disable_prompts" is set) so this returns an appropriate addition to the
error message in these circumstances.
Returns:
(str) The appropriate error message snippet.
"""
if properties.VALUES.core.disable_prompts.GetBool():
# We add a leading space to the raw message so that it meshes well with
# its display context.
return ' ' + _PROMPTS_DISABLED_ERROR_MESSAGE
else:
# The other case for non-interactivity (running detached from a terminal)
# should be obvious.
return ''
| [
"[email protected]"
] | |
1c2c7f77375d17cca4387f409b1116c826cd6c24 | 7f593761058b96792e51023e3b42af740f2006d7 | /pkg/ampcor/dom/Raster.py | 1366bf502713ca054646e88ecfdcc8ecdc06a924 | [
"BSD-2-Clause"
] | permissive | isce-framework/ampcor | 2b3769e579ceaf993c9ea17f836553057a52ad6a | eafadcbe4380a85320d8c7e884ebe4d6d279770e | refs/heads/master | 2020-05-07T16:06:06.364458 | 2019-04-10T22:09:45 | 2019-04-10T22:09:45 | 180,668,210 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <[email protected]>
# parasim
# (c) 1998-2019 all rights reserved
#
# framework
import ampcor
# declaration
class Raster(ampcor.protocol, family="ampcor.dom.rasters"):
    """
    The base class for all pixel based data products
    """

    # public data
    # Declarative trait specifications consumed by the ampcor framework.
    shape = ampcor.properties.tuple(schema=ampcor.properties.int())
    shape.doc = "the shape of the raster in pixels"

    data = ampcor.properties.path()
    data.doc = "the path to my binary data"

    # requirements
    # Obligations that concrete raster implementations must provide.
    @ampcor.provides
    def size(self):
        """
        Compute my memory footprint
        """

    @ampcor.provides
    def slice(self, begin, end):
        """
        Grant access to a slice of my data bound by the index pair {begin} and {end}
        """

    @ampcor.provides
    def open(self, filename, mode="r"):
        """
        Map me over the contents of {filename}
        """
# end of file
| [
"[email protected]"
] | |
def isLucky(n, c=2):
    """Return 1 if n is a "lucky number" under the iterative sieve, else 0.

    At step c (starting from 2) the sieve removes every c-th surviving
    element; n survives step c iff n % c != 0, and its new position is
    n - n // c.

    Args:
        n: 1-based position being tested.
        c: Current sieve step, threaded through the recursion. Callers use
           the default. (Replaces the old module-level global ``c`` that
           every caller had to remember to reset to 2 before each call.)

    Returns:
        1 if n survives every step (lucky), 0 otherwise.
    """
    if c > n:
        return 1  # survived every removal step
    if n % c == 0:
        return 0  # eliminated at step c
    return isLucky(n - n // c, c + 1)
c=2  # global step counter consumed (and mutated) by isLucky()
if __name__ == '__main__':
    t = int(input('Enter the number of test cases:- '))
    for tcs in range(t):
        c=2  # reset before every query: isLucky starts dividing at 2
        n = int(input('Enter a number:- '))
        print(isLucky(n))
| [
"[email protected]"
] | |
5780cc90cf158da08e08e1ce41888a4a1a87c818 | a39ecd4dce4b14f5d17416233fa16c76d2d3f165 | /Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/UnitTests/Process_UnitTest.py | 9ea28ebb7b3352eaf881e69ecb187d70d541205f | [
"BSL-1.0",
"Python-2.0",
"OpenSSL",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] | permissive | davidbrownell/Common_Environment_v3 | 8e6bbed15004a38a4c6e6f337d78eb2339484d64 | 2981ad1566e6d3c00fd390a67dbc1277ef40aaba | refs/heads/master | 2022-09-03T19:04:57.270890 | 2022-06-28T01:33:31 | 2022-06-28T01:33:31 | 132,171,665 | 0 | 0 | BSL-1.0 | 2021-08-13T21:19:48 | 2018-05-04T17:47:30 | Python | UTF-8 | Python | false | false | 1,527 | py | # ----------------------------------------------------------------------
# |
# | Process_UnitTest.py
# |
# | David Brownell <[email protected]>
# | 2018-08-21 07:38:01
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Unit test for Process.py"""
import os
import sys
import unittest
import CommonEnvironment
from CommonEnvironment.Process import *
# ----------------------------------------------------------------------
# Absolute path of this test file and its directory/name components.
# NOTE(review): unused in the visible code; presumably part of the
# project's standard test-file prologue.
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
class StandardSuite(unittest.TestCase):
    # Placeholder suite for Process.py; the only test is skipped until
    # real coverage is written.

    @unittest.skip("Not implemented")
    def test_Placeholder(self):
        # Deliberately failing assertion so the test cannot silently pass
        # if the skip decorator is removed without adding a real body.
        self.assertTrue(False)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
    # Exit with unittest's status code; swallow Ctrl-C for a clean abort.
    try: sys.exit(unittest.main(verbosity=2))
    except KeyboardInterrupt: pass
| [
"[email protected]"
] | |
10aa036fce0bea713f5078ed34e900100942a4dd | 9d733284e31476d85a42e2e2614c7a4cfed5aed1 | /test/test_payment_setup_response_initiation.py | 17f9b8d526229a97464b21c8929c57a54c440858 | [
"MIT"
] | permissive | roksela/openbanking-payment-client | be138ff7403989b8bce7ad7e95e885c676e7ad29 | a17b3ded257e71be1dbf6bde6e206dd2f2abddd8 | refs/heads/master | 2021-04-29T17:17:27.102309 | 2018-02-17T08:07:40 | 2018-02-17T08:07:40 | 121,665,228 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | # coding: utf-8
"""
Python client for Payment Initiation API
Based on https://github.com/OpenBankingUK/payment-initiation-api-spec
OpenAPI spec version: v1.1.1
Spec: https://www.openbanking.org.uk/read-write-apis/
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import openbanking_payment_client
from openbanking_payment_client.model.payment_setup_response_initiation import PaymentSetupResponseInitiation # noqa: E501
from openbanking_payment_client.rest import ApiException
class TestPaymentSetupResponseInitiation(unittest.TestCase):
    """PaymentSetupResponseInitiation unit test stubs"""

    def setUp(self):
        # No fixtures required yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testPaymentSetupResponseInitiation(self):
        """Test PaymentSetupResponseInitiation"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openbanking_payment_client.models.payment_setup_response_initiation.PaymentSetupResponseInitiation()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this stub test module directly.
    unittest.main()
| [
"[email protected]"
] | |
345a712888b96f9f8a07c642769a53db19af8b02 | 4a48593a04284ef997f377abee8db61d6332c322 | /python/matplotlib/imshow_plt.py | f5fb22bf13f8310cc833ec64bc15b398c0e3947a | [
"MIT"
] | permissive | jeremiedecock/snippets | 8feaed5a8d873d67932ef798e16cb6d2c47609f0 | b90a444041c42d176d096fed14852d20d19adaa7 | refs/heads/master | 2023-08-31T04:28:09.302968 | 2023-08-21T07:22:38 | 2023-08-21T07:22:38 | 36,926,494 | 26 | 9 | MIT | 2023-06-06T02:17:44 | 2015-06-05T10:19:09 | Python | UTF-8 | Python | false | false | 1,081 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Display data as an image (via pyplot)
See: http://matplotlib.org/examples/pylab_examples/image_demo.html
See also:
- http://matplotlib.org/examples/color/colormaps_reference.html (the list of all colormaps)
- http://matplotlib.org/users/colormaps.html?highlight=colormap#mycarta-banding (what is the right colormap to choose for a given plot)
"""
import numpy as np
import matplotlib.pyplot as plt
# MAKE DATAS ##################################################################
# Outer product of [0..49] with itself: z_matrix[yi, xi] == xi * yi.
z_matrix = np.outer(np.arange(50), np.arange(50))

# PLOT ########################################################################
#interp_mode = 'nearest'   # "raw" (non smooth) map
interp_mode = 'bilinear'   # "smooth" map

plt.imshow(z_matrix, interpolation=interp_mode, origin='lower')
plt.colorbar()             # draw colorbar

# SAVE AND SHOW ###############################################################
plt.savefig("imshow_plt.png")
plt.show()
plt.show()
| [
"[email protected]"
] | |
de4c2a2948208b3b66fa65fdcb3f6e3e5189fced | cc9cf69b1534dc0d9530b4ff485084162a404e34 | /create_date/creat_idcare.py | 8f82e64de6c858a560cdd206f051e34b51942b1d | [] | no_license | NASA2333/study | 99a58b2c9979201e9a4fae0c797391a538de6f45 | ba63bc18f3c788090e43406315497329b00ec0a5 | refs/heads/master | 2021-05-03T22:26:52.541760 | 2018-02-07T02:24:55 | 2018-02-07T02:24:55 | 104,988,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | import random
# Weights and check characters for the 17-digit ID-number checksum.
MULT = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
CHECK = [1, 0, 'X', 9, 8, 7, 6, 5, 4, 3, 2]


def make_id():
    """Return one random 18-character ID: a 17-digit body + check character.

    Body layout: 6-digit region code, 4-digit year, 2-digit month,
    2-digit day, 3-digit sequence number.
    """
    addr = str(random.choice(range(110000, 999999))).zfill(6)  # region code
    year = random.randint(1960, 2017)
    month = random.choice(range(1, 13))
    # BUG FIX: the original compared the zero-padded month *string* against
    # ints, so the February and 31-day branches never ran — every month got
    # days 1-30 (February could get an invalid 29th/30th, and day 31 was
    # never generated). Compare as ints before formatting.
    if month == 2:
        day = random.choice(range(1, 29))
    elif month in (1, 3, 5, 7, 8, 10, 12):
        day = random.choice(range(1, 32))
    else:
        day = random.choice(range(1, 31))
    order = str(random.choice(range(0, 999))).zfill(3)  # sequence number
    body = addr + str(year) + str(month).zfill(2) + str(day).zfill(2) + order
    # Checksum: weighted digit sum modulo 11 picks the check character.
    total = 0  # renamed from ``sum``, which shadowed the builtin
    for digit, weight in zip(body, MULT):
        total += int(digit) * weight
    return body + str(CHECK[total % 11])


if __name__ == '__main__':
    # Open the output once (the original opened a new handle on every
    # iteration and never closed any of them).
    with open(r'E:\idcare.txt', 'a') as out:
        for _ in range(1001):
            out.write(make_id() + '\n')
| [
"[email protected]"
] | |
12ae0c821148f139226df73096fb51e6335684bf | 8839746b043af422d919d554088e99591defc4eb | /tensor2tensor/trax/rl/simulated_env_problem.py | a31b8df7626a26952ce3236ab478d602b35ae024 | [
"Apache-2.0"
] | permissive | jlebar/tensor2tensor | c460df42d595139586c21c29f031a2e44fcd4ea9 | a41ffb598eddf65ec31b41aa8afa56bc59694173 | refs/heads/master | 2020-07-18T01:41:50.901543 | 2019-09-03T18:21:01 | 2019-09-03T18:21:36 | 206,145,748 | 0 | 0 | Apache-2.0 | 2019-09-03T18:29:43 | 2019-09-03T18:29:42 | null | UTF-8 | Python | false | false | 17,492 | py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EnvProblem for environments simulated by a TRAX model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import random
import numpy as np
from tensor2tensor.envs import env_problem
from tensor2tensor.trax import backend
from tensor2tensor.trax import trax
from tensor2tensor.trax.backend import random as jax_random
from tensor2tensor.trax.rl import space_serializer
class SimulatedEnvProblem(env_problem.EnvProblem):
"""EnvProblem base class for environments simulated by TRAX models.
The initial observations to start the model are taken from
initial_observation_stream. This iterator in incremented in every reset().
A checkpoint saved by the TRAX trainer should be available in output_dir.
"""
def __init__(self, model, batch_size, observation_space, action_space,
reward_range, discrete_rewards, history_stream, output_dir):
"""Initializes the env.
Args:
model: TRAX model.
batch_size: (int) Number of simulated environments run in parallel.
observation_space: (gym.Space) Observation space.
action_space: (gym.Space) Action space.
reward_range: (tuple) Pair (min_reward, max_reward).
discrete_rewards: (bool) Whether to discretize the rewards.
history_stream: Iterator yielding batches of initial input data for the
model. The format is implementation-specific.
output_dir: (str) Output dir.
"""
# TODO(pkozakowski): At some point we will have a "predict" mode which we
# should use here. When this happens, change the mode.
self._model = model
self._model_predict = backend.jit(self._model(mode="eval"))
self._observation_space = observation_space
self._action_space = action_space
self._reward_range = reward_range
self._output_dir = output_dir
self._predict_fn = None
self._rng = None
self._model_state = None
self._history_stream = None
# Call the super's ctor. It will use some of the member fields, so we call
# it in the end.
super(SimulatedEnvProblem, self).__init__(
batch_size=batch_size,
discrete_rewards=discrete_rewards,
history_stream=history_stream,
)
self.seed()
def initialize_environments(self,
history_stream,
batch_size=1,
parallelism=1):
"""Initializes the environments.
Args:
history_stream: Iterator yielding batches of initial input data for the
model. The format is implementation-specific.
batch_size: (int) Number of environments in a batch.
parallelism: (int) Unused.
"""
del parallelism
model_state = trax.restore_state(self._output_dir)
model_params = model_state.opt_state.params
self._model_state = model_state.model_state
self._predict_fn = functools.partial(
self._model_predict,
params=model_params,
)
self._history_stream = history_stream
self._steps = np.zeros(batch_size, dtype=np.int32)
@property
def observation_space(self):
return self._observation_space
@property
def action_space(self):
return self._action_space
@property
def reward_range(self):
return self._reward_range
def seed(self, seed=None):
if seed is None:
seed = random.randint(0, 2**31 - 1)
self._rng = jax_random.get_prng(seed)
return super(SimulatedEnvProblem, self).seed(seed=seed)
def _reset_model(self, predict_fn, indices, history, rng):
"""Resets the environments at the given indices.
Should be implemented in subclasses.
Args:
predict_fn: Function running prediction with the model.
indices: List of indices of underlying envs to call reset on.
history: Initial input data for the model.
rng: Jax RNG.
Returns:
np.ndarray of batched observations from the reset envs.
"""
raise NotImplementedError
def _step_model(self, predict_fn, actions, rng):
"""Takes a step in all environments.
Should be implemented in subclasses.
Args:
predict_fn: Function running prediction with the model.
actions: (np.ndarray) with first dimension equal to the batch size.
rng: Jax RNG.
Returns:
a tuple of batched raw observations, rewards and dones.
"""
raise NotImplementedError
def trajectory_to_training_examples(self, trajectory):
raise NotImplementedError
@property
def model_input_shape(self):
raise NotImplementedError
@property
def model_input_dtype(self):
raise NotImplementedError
def _reset(self, indices):
"""Resets environments at the given indices.
Args:
indices: list of indices of underlying envs to call reset on.
Returns:
np.ndarray of batched observations from the reset envs.
"""
history = next(self._history_stream)
(subrng, self._rng) = jax_random.split(self._rng)
return self._reset_model(self._predict_fn, indices, history, subrng)
def _step(self, actions):
"""Takes a step in all environments.
Args:
actions: (np.ndarray) with first dimension equal to the batch size.
Returns:
a tuple of batched raw observations, raw rewards, dones and infos.
"""
# Predict the next observation.
(subrng, self._rng) = jax_random.split(self._rng)
(observation, reward, done) = self._step_model(
self._predict_fn, actions, subrng)
return (observation, reward, done, {})
@property
def model(self):
return self._model
class RawSimulatedEnvProblem(SimulatedEnvProblem):
"""SimulatedEnvProblem running a model operating on raw tensors.
Wraps an autoregressive TRAX model of signature
(observation_history, action) -> (observation, reward) in an EnvProblem.
The model is assumed to take a fixed number of last observations as input
and produce a single observation, which is fed back into the model in the
next environment step.
Shape requirements (without the batch dimension):
observation: Consistent with observation_space.
observation_history: (history_length,) + observation.shape.
action: Consistent with action_space.
reward: (1,). The singleton dimension is removed in step().
"""
def __init__(self, history_length, trajectory_length, *args, **kwargs):
"""Initializes the env.
Args:
history_length: (int) Number of last observations fed into the model.
trajectory_length: (int) Length of each trajectory unrolled from the
model.
*args: (tuple) Positional arguments passed to the base class.
**kwargs: (dict) Keyword arguments passed to the base class.
"""
self._history_length = history_length
self._trajectory_length = trajectory_length
self._history = None
self._steps = None
super(RawSimulatedEnvProblem, self).__init__(*args, **kwargs)
def initialize_environments(self, batch_size=1, **kwargs):
"""Initializes the environments."""
self._history = None
self._steps = np.zeros(batch_size)
return super(RawSimulatedEnvProblem, self).initialize_environments(
batch_size=batch_size, **kwargs)
def _reset_model(self, predict_fn, indices, history, rng):
del predict_fn
del rng
assert history.shape == ((self._batch_size, self._history_length) +
self.observation_space.shape)
if self._history is None:
# At the first reset, all indices should be triggered.
assert set(indices) == set(range(self._batch_size))
self._history = np.array(history)
else:
history = history[indices, ...]
self._history[indices, ...] = history
# Reset the step counters.
self._steps[indices] = 0
# Return just the last timestep at the given indices.
return history[:, -1, ...]
def _step_model(self, predict_fn, actions, rng):
(observation, reward), self._model_state = predict_fn(
(self._history, actions), state=self._model_state, rng=rng)
# Roll the history one timestep back and append the new observation.
self._history = np.roll(self._history, shift=-1, axis=1)
self._history[:, -1, ...] = observation
# Increment the step counters and determine which envs are done.
self._steps += 1
done = self._steps == self._trajectory_length
# Call copy() to get the data as numpy arrays.
observation = observation.copy()
# Reshape the rewards to get rid of the extra dimension.
reward = np.squeeze(reward.copy(), axis=1)
return (observation, reward, done)
def index_range_2d(begin_indices, length):
# Take all indices along the first dimension. Add another axis that'll
# broadcast along the second one.
first_dim = np.arange(len(begin_indices))[:, None]
# Take a range of indices along the second dimension. Offset it by
# begin_indices.
# TODO(pkozakowski): This materializes all indices of elements along the
# second dimension. Do it more efficiently if needed.
second_dim = np.arange(length)[None, :] + begin_indices[:, None]
return (first_dim, second_dim)
def index_slice(indices):
first_dim = np.arange(len(indices))[:, None]
second_dim = indices[:, None]
return (first_dim, second_dim)
class SerializedSequenceSimulatedEnvProblem(SimulatedEnvProblem):
"""SimulatedEnvProblem running a model operating on sequences of symbols.
Wraps an autoregressive TRAX model of signature past_symbols -> symbol_probs
in an EnvProblem. The model is assumed to take a sequence of symbols as input
and produce distributions over all symbols in the sequence. The next symbol
is sampled and fed back to the model in the next decoding step.
Shape requirements (without the batch dimension):
past_symbols: (max_trajectory_length * L,)
symbol_probs: (max_trajectory_length * L, vocab_size)
where L is the representation length of one environment step.
Observations, actions, rewards and done flags are (de)serialized from/to
sequences of symbols using an EnvSerializer passed to the constructor.
"""
def __init__(self, model, reward_fn, done_fn, vocab_size,
max_trajectory_length, observation_space, action_space,
*args, **kwargs):
"""Initializes the env.
Args:
model: TRAX model to use for simulation. It's assumed to take keyword
arguments vocab_size and mode, where vocab_size is the number of symbols
in the vocabulary and mode is either "train" or "eval".
reward_fn: Function (previous_observation, current_observation) -> reward.
done_fn: Function (previous_observation, current_observation) -> done.
vocab_size: (int) Number of symbols in the vocabulary.
max_trajectory_length: (int) Maximum length of a trajectory unrolled from
the model.
observation_space: (gym.Space) Observation space.
action_space: (gym.Space) Action space.
*args: (tuple) Positional arguments passed to the base class.
**kwargs: (dict) Keyword arguments passed to the base class.
"""
self._reward_fn = reward_fn
self._done_fn = done_fn
self._vocab_size = vocab_size
self._max_trajectory_length = max_trajectory_length
self._history = None
self._steps = None
self._observation_space = None
self._action_space = None
self._last_observations = None
self._obs_serializer = space_serializer.create(
observation_space, self._vocab_size)
self._action_serializer = space_serializer.create(
action_space, self._vocab_size)
self._obs_repr_length = self._obs_serializer.representation_length
self._action_repr_length = self._action_serializer.representation_length
self._step_repr_length = self._obs_repr_length + self._action_repr_length
# We assume that the model takes vocab_size as an argument (e.g.
# TransformerLM).
model = functools.partial(model, vocab_size=vocab_size)
super(SerializedSequenceSimulatedEnvProblem, self).__init__(
*args,
model=model,
observation_space=observation_space,
action_space=action_space,
**kwargs
)
def initialize_environments(self, batch_size=1, **kwargs):
"""Initializes the environments."""
self._history = np.zeros((
batch_size,
self._max_trajectory_length * self._step_repr_length
), dtype=np.int32)
self._steps = np.zeros(batch_size, dtype=np.int32)
self._last_observations = np.full(
(batch_size,) + self._observation_space.shape, np.nan)
super(SerializedSequenceSimulatedEnvProblem, self).initialize_environments(
batch_size=batch_size, **kwargs)
@property
def _obs_repr_indices(self):
begin_indices = self._step_repr_length * self._steps
return index_range_2d(begin_indices, self._obs_repr_length)
@property
def _action_repr_indices(self):
begin_indices = self._step_repr_length * self._steps + self._obs_repr_length
return index_range_2d(begin_indices, self._action_repr_length)
def _predict_obs(self, predict_fn, rng):
def gumbel_sample(log_probs):
u = np.random.uniform(low=1e-6, high=1.0 - 1e-6, size=log_probs.shape)
g = -np.log(-np.log(u))
return np.argmax(log_probs + g, axis=-1)
for (i, subrng) in enumerate(jax_random.split(rng, self._obs_repr_length)):
symbol_index = self._steps * self._step_repr_length + i
log_probs, self._model_state = predict_fn(self._history,
state=self._model_state,
rng=subrng)
log_probs = log_probs[:, symbol_index, :]
self._history[:, symbol_index] = gumbel_sample(log_probs)
obs_repr = self._history[self._obs_repr_indices]
return self._obs_serializer.deserialize(obs_repr)
def _reset_model(self, predict_fn, indices, history, rng):
# TODO(pkozakowski): Random starts.
del history
self._steps[indices] = 0
observation = self._predict_obs(predict_fn, rng)[indices]
self._last_observations[indices] = observation
return observation
def _step_model(self, predict_fn, actions, rng):
action_repr = self._action_serializer.serialize(actions)
self._history[self._action_repr_indices] = action_repr
self._steps += 1
observation = self._predict_obs(predict_fn, rng)
reward = self._reward_fn(self._last_observations, observation)
done = self._done_fn(self._last_observations, observation)
self._last_observations = observation
done = np.logical_or(done, self._steps == self._max_trajectory_length - 1)
return (observation, reward, done)
def trajectory_to_training_examples(self, trajectory):
reprs = []
weights = []
for time_step in trajectory.time_steps:
# Serializers work on batches.
obs_repr = self._obs_serializer.serialize(
np.array([time_step.observation]))[0]
reprs.append(obs_repr)
# TODO(pkozakowski): Digit weighting.
weights.append(np.ones_like(obs_repr))
if time_step.action is not None:
action_repr = self._action_serializer.serialize(
np.array([time_step.action]))[0]
reprs.append(action_repr)
weights.append(np.zeros_like(action_repr))
def concat_and_pad(arrays):
(desired_length,) = self.model_input_shape
flat_array = np.concatenate(arrays, axis=0)
(actual_length,) = flat_array.shape
assert actual_length <= desired_length
return np.pad(
flat_array,
pad_width=((0, desired_length - actual_length),),
mode="constant",
)
(reprs, weights) = map(concat_and_pad, (reprs, weights))
reprs = reprs.astype(self.model_input_dtype)
return [(reprs, reprs, weights)] # (inputs, targets, weights)
@property
def model_input_shape(self):
return (self._max_trajectory_length * self._step_repr_length,)
@property
def model_input_dtype(self):
return np.int32
def cartpole_done_fn(previous_observation, current_observation):
del previous_observation
x_threshold = 2.4
theta_threshold = 12 * 2 * np.pi / 360
x = current_observation[:, 0]
theta = current_observation[:, 2]
return np.logical_or(np.abs(x) > x_threshold, np.abs(theta) > theta_threshold)
def cartpole_reward_fn(previous_observation, current_observation):
done = cartpole_done_fn(previous_observation, current_observation)
return 1.0 - done # Unit reward for every timestep until the end.
def acrobot_done_fn(previous_observation, current_observation):
del previous_observation
theta1 = current_observation[:, 0]
theta2 = current_observation[:, 1]
return -np.cos(theta1) - np.cos(theta2 + theta1) > 1.0
def acrobot_reward_fn(previous_observation, current_observation):
done = acrobot_done_fn(previous_observation, current_observation)
return -1.0 + done # -1 reward for every timestep until the end.
| [
"[email protected]"
] | |
433b7cebbab4fb45f7bac8264fea88827e505dba | d5581fe82bbce4ae206bbfc8c6251cb19c87a5bf | /leetcode/python/065-validNumber.py | 4d493885d458fa2b0746574e13fecccacdf96b70 | [] | no_license | yi-guo/coding-interview | 23f2a422b69a84d648ba9d74ea1b05e42b8689af | fd22a407f096d7e0c4eefbeb4ed37e07043f185c | refs/heads/master | 2016-09-06T11:15:41.133160 | 2015-03-28T22:30:52 | 2015-03-28T22:30:52 | 28,378,778 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,931 | py | #!/usr/bin/python
# Validate if a given string is numeric.
# Some examples:
# "0" => true
# " 0.1 " => true
# "abc" => false
# "1 a" => false
# "2e10" => true
# Note: It is intended for the problem statement to be ambiguous.
# You should gather all requirements up front before implementing one.
import sys
# An automata solution. O(n).
def isNumber(s):
return s0(s.strip(), 0)
def s0(s, i):
if i == len(s):
return False
if s[i] == '.':
return s1(s, i + 1)
elif s[i] == '+' or s[i] == '-':
return s2(s, i + 1)
elif s[i].isdigit():
return s3(s, i + 1)
else:
return False
def s1(s, i):
if i == len(s):
return False
if s[i].isdigit():
return s4(s, i + 1)
else:
return False
def s2(s, i):
if i == len(s):
return False
if s[i] == '.':
return s1(s, i + 1)
elif s[i].isdigit():
return s3(s, i + 1)
else:
return False
def s3(s, i):
if i == len(s):
return True
if s[i] == '.':
return s4(s, i + 1)
elif s[i] == 'e':
return s5(s, i + 1)
elif s[i].isdigit():
return s3(s, i + 1)
else:
return False
def s4(s, i):
if i == len(s):
return True
if s[i] == 'e':
return s5(s, i + 1)
elif s[i].isdigit():
return s4(s, i + 1)
else:
return False
def s5(s, i):
if i == len(s):
return False
if s[i] == '+' or s[i] == '-':
return s6(s, i + 1)
elif s[i].isdigit():
return s7(s, i + 1)
else:
return False
def s6(s, i):
if i == len(s):
return False
if s[i].isdigit():
return s7(s, i + 1)
else:
return False
def s7(s, i):
if i == len(s):
return True
if s[i].isdigit():
return s7(s, i + 1)
else:
return False
def main():
print isNumber(sys.argv[1])
main()
| [
"[email protected]"
] | |
eff3f6c40e851c6068020b6fbbb0f2563e8ce039 | 12967293f285decb1568bd56af38b1df4e5c533d | /.eggs/boto-2.48.0-py2.7.egg/boto/pyami/installers/ubuntu/mysql.py | d844aaf25ce8a16b5337783fbd6f58d6feaf9a88 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | martbhell/git-bigstore | 36cd16276379833fbade252a77c73cf3644aa30f | 960e9ea64d4d5646af3ce411adf46f3236b64d7e | refs/heads/master | 2020-05-16T17:51:52.011171 | 2019-03-12T20:54:42 | 2019-03-12T20:54:42 | 183,206,409 | 0 | 0 | Apache-2.0 | 2019-04-24T10:29:48 | 2019-04-24T10:29:47 | null | UTF-8 | Python | false | false | 4,856 | py | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This installer will install mysql-server on an Ubuntu machine.
In addition to the normal installation done by apt-get, it will
also configure the new MySQL server to store it's data files in
a different location. By default, this is /mnt but that can be
configured in the [MySQL] section of the boto config file passed
to the instance.
"""
from boto.pyami.installers.ubuntu.installer import Installer
import os
import boto
from boto.utils import ShellCommand
from boto.compat import ConfigParser
import time
ConfigSection = """
[MySQL]
root_password = <will be used as MySQL root password, default none>
data_dir = <new data dir for MySQL, default is /mnt>
"""
class MySQL(Installer):
def install(self):
self.run('apt-get update')
self.run('apt-get -y install mysql-server', notify=True, exit_on_error=True)
# def set_root_password(self, password=None):
# if not password:
# password = boto.config.get('MySQL', 'root_password')
# if password:
# self.run('mysqladmin -u root password %s' % password)
# return password
def change_data_dir(self, password=None):
data_dir = boto.config.get('MySQL', 'data_dir', '/mnt')
fresh_install = False
is_mysql_running_command = ShellCommand('mysqladmin ping') # exit status 0 if mysql is running
is_mysql_running_command.run()
if is_mysql_running_command.getStatus() == 0:
# mysql is running. This is the state apt-get will leave it in. If it isn't running,
# that means mysql was already installed on the AMI and there's no need to stop it,
# saving 40 seconds on instance startup.
time.sleep(10) #trying to stop mysql immediately after installing it fails
# We need to wait until mysql creates the root account before we kill it
# or bad things will happen
i = 0
while self.run("echo 'quit' | mysql -u root") != 0 and i < 5:
time.sleep(5)
i = i + 1
self.run('/etc/init.d/mysql stop')
self.run("pkill -9 mysql")
mysql_path = os.path.join(data_dir, 'mysql')
if not os.path.exists(mysql_path):
self.run('mkdir %s' % mysql_path)
fresh_install = True
self.run('chown -R mysql:mysql %s' % mysql_path)
fp = open('/etc/mysql/conf.d/use_mnt.cnf', 'w')
fp.write('# created by pyami\n')
fp.write('# use the %s volume for data\n' % data_dir)
fp.write('[mysqld]\n')
fp.write('datadir = %s\n' % mysql_path)
fp.write('log_bin = %s\n' % os.path.join(mysql_path, 'mysql-bin.log'))
fp.close()
if fresh_install:
self.run('cp -pr /var/lib/mysql/* %s/' % mysql_path)
self.start('mysql')
else:
#get the password ubuntu expects to use:
config_parser = ConfigParser()
config_parser.read('/etc/mysql/debian.cnf')
password = config_parser.get('client', 'password')
# start the mysql deamon, then mysql with the required grant statement piped into it:
self.start('mysql')
time.sleep(10) #time for mysql to start
grant_command = "echo \"GRANT ALL PRIVILEGES ON *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION;\" | mysql" % password
while self.run(grant_command) != 0:
time.sleep(5)
# leave mysqld running
def main(self):
self.install()
# change_data_dir runs 'mysql -u root' which assumes there is no mysql password, i
# and changing that is too ugly to be worth it:
#self.set_root_password()
self.change_data_dir()
| [
"[email protected]"
] | |
c7d078ea821744e6c63f7379835300785d3e2926 | 1297634c6641ec62c31cf30b8fabe1886aa8d9ea | /products_and_services_client/models/maximum_price.py | dc83ab42d1c9d4be19edaa13186ee8a99e0b5357 | [
"MIT"
] | permissive | pitzer42/opbk-br-quickstart | d77f19743fcc264bed7af28a3d956dbc2d20ac1a | b3f86b2e5f82a6090aaefb563614e174a452383c | refs/heads/main | 2023-03-04T13:06:34.205003 | 2021-02-21T23:41:56 | 2021-02-21T23:41:56 | 336,898,721 | 2 | 0 | MIT | 2021-02-07T22:03:15 | 2021-02-07T21:57:06 | null | UTF-8 | Python | false | false | 4,175 | py | # coding: utf-8
"""
API's OpenData do Open Banking Brasil
As API's descritas neste documento são referentes as API's da fase OpenData do Open Banking Brasil. # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MaximumPrice(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'value': 'str',
'currency': 'Currency'
}
attribute_map = {
'value': 'value',
'currency': 'currency'
}
def __init__(self, value=None, currency=None): # noqa: E501
"""MaximumPrice - a model defined in Swagger""" # noqa: E501
self._value = None
self._currency = None
self.discriminator = None
self.value = value
self.currency = currency
@property
def value(self):
"""Gets the value of this MaximumPrice. # noqa: E501
Valor máximo apurado para a tarifa de serviços sobre a base de clientes no mês de referência # noqa: E501
:return: The value of this MaximumPrice. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this MaximumPrice.
Valor máximo apurado para a tarifa de serviços sobre a base de clientes no mês de referência # noqa: E501
:param value: The value of this MaximumPrice. # noqa: E501
:type: str
"""
if value is None:
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
@property
def currency(self):
"""Gets the currency of this MaximumPrice. # noqa: E501
:return: The currency of this MaximumPrice. # noqa: E501
:rtype: Currency
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this MaximumPrice.
:param currency: The currency of this MaximumPrice. # noqa: E501
:type: Currency
"""
if currency is None:
raise ValueError("Invalid value for `currency`, must not be `None`") # noqa: E501
self._currency = currency
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MaximumPrice, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MaximumPrice):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
bd0db0476b6cc129b3c31b4d96ece4cc877d60e3 | bc441bb06b8948288f110af63feda4e798f30225 | /micro_app_sdk/model/flow/flow_instance_pb2.pyi | de041c92f19005312841d154f0f892f846d3c5fd | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,726 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from google.protobuf.struct_pb2 import (
Struct as google___protobuf___struct_pb2___Struct,
Value as google___protobuf___struct_pb2___Value,
)
from micro_app_sdk.model.flow.flow_execute_step_pb2 import (
FlowExecuteStep as micro_app_sdk___model___flow___flow_execute_step_pb2___FlowExecuteStep,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class FlowInstance(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class Metadata(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
type = ... # type: typing___Text
desc = ... # type: typing___Text
def __init__(self,
*,
type : typing___Optional[typing___Text] = None,
desc : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> FlowInstance.Metadata: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> FlowInstance.Metadata: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"desc",b"desc",u"type",b"type"]) -> None: ...
class FlowOutputs(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class Columns(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
type = ... # type: typing___Text
id = ... # type: typing___Text
name = ... # type: typing___Text
def __init__(self,
*,
type : typing___Optional[typing___Text] = None,
id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> FlowInstance.FlowOutputs.Columns: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> FlowInstance.FlowOutputs.Columns: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"id",b"id",u"name",b"name",u"type",b"type"]) -> None: ...
@property
def columns(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[FlowInstance.FlowOutputs.Columns]: ...
def __init__(self,
*,
columns : typing___Optional[typing___Iterable[FlowInstance.FlowOutputs.Columns]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> FlowInstance.FlowOutputs: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> FlowInstance.FlowOutputs: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"columns",b"columns"]) -> None: ...
class OutputDefs(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
type = ... # type: typing___Text
id = ... # type: typing___Text
name = ... # type: typing___Text
def __init__(self,
*,
type : typing___Optional[typing___Text] = None,
id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> FlowInstance.OutputDefs: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> FlowInstance.OutputDefs: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"id",b"id",u"name",b"name",u"type",b"type"]) -> None: ...
class TableDefs(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class Dimensions(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
id = ... # type: typing___Text
name = ... # type: typing___Text
def __init__(self,
*,
id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> FlowInstance.TableDefs.Dimensions: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> FlowInstance.TableDefs.Dimensions: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"id",b"id",u"name",b"name"]) -> None: ...
class Columns(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
id = ... # type: typing___Text
name = ... # type: typing___Text
def __init__(self,
*,
id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> FlowInstance.TableDefs.Columns: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> FlowInstance.TableDefs.Columns: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"id",b"id",u"name",b"name"]) -> None: ...
id = ... # type: typing___Text
name = ... # type: typing___Text
@property
def dimensions(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[FlowInstance.TableDefs.Dimensions]: ...
@property
def columns(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[FlowInstance.TableDefs.Columns]: ...
def __init__(self,
*,
id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
dimensions : typing___Optional[typing___Iterable[FlowInstance.TableDefs.Dimensions]] = None,
columns : typing___Optional[typing___Iterable[FlowInstance.TableDefs.Columns]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> FlowInstance.TableDefs: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> FlowInstance.TableDefs: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"columns",b"columns",u"dimensions",b"dimensions",u"id",b"id",u"name",b"name"]) -> None: ...
taskId = ... # type: typing___Text
needNotify = ... # type: builtin___bool
startTime = ... # type: builtin___int
endTime = ... # type: builtin___int
currentTime = ... # type: builtin___int
totalStatus = ... # type: typing___Text
message = ... # type: typing___Text
taskCounter = ... # type: builtin___int
flowId = ... # type: typing___Text
version = ... # type: builtin___int
name = ... # type: typing___Text
org = ... # type: builtin___int
creator = ... # type: typing___Text
category = ... # type: typing___Text
updateTime = ... # type: typing___Text
createTime = ... # type: typing___Text
@property
def stepList(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[micro_app_sdk___model___flow___flow_execute_step_pb2___FlowExecuteStep]: ...
@property
def instanceMap(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[google___protobuf___struct_pb2___Struct]: ...
@property
def outputs(self) -> google___protobuf___struct_pb2___Value: ...
@property
def runningSteps(self) -> google___protobuf___struct_pb2___Value: ...
@property
def flowOutputsData(self) -> google___protobuf___struct_pb2___Value: ...
@property
def tableData(self) -> google___protobuf___struct_pb2___Value: ...
@property
def standardOutputs(self) -> google___protobuf___struct_pb2___Value: ...
@property
def agentData(self) -> google___protobuf___struct_pb2___Value: ...
@property
def flowInputs(self) -> google___protobuf___struct_pb2___Value: ...
@property
def flowEnv(self) -> google___protobuf___struct_pb2___Value: ...
@property
def metadata(self) -> FlowInstance.Metadata: ...
@property
def flowOutputs(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[FlowInstance.FlowOutputs]: ...
@property
def outputDefs(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[FlowInstance.OutputDefs]: ...
@property
def tableDefs(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[FlowInstance.TableDefs]: ...
def __init__(self,
*,
stepList : typing___Optional[typing___Iterable[micro_app_sdk___model___flow___flow_execute_step_pb2___FlowExecuteStep]] = None,
taskId : typing___Optional[typing___Text] = None,
instanceMap : typing___Optional[typing___Iterable[google___protobuf___struct_pb2___Struct]] = None,
outputs : typing___Optional[google___protobuf___struct_pb2___Value] = None,
runningSteps : typing___Optional[google___protobuf___struct_pb2___Value] = None,
needNotify : typing___Optional[builtin___bool] = None,
startTime : typing___Optional[builtin___int] = None,
endTime : typing___Optional[builtin___int] = None,
currentTime : typing___Optional[builtin___int] = None,
totalStatus : typing___Optional[typing___Text] = None,
message : typing___Optional[typing___Text] = None,
taskCounter : typing___Optional[builtin___int] = None,
flowOutputsData : typing___Optional[google___protobuf___struct_pb2___Value] = None,
tableData : typing___Optional[google___protobuf___struct_pb2___Value] = None,
standardOutputs : typing___Optional[google___protobuf___struct_pb2___Value] = None,
agentData : typing___Optional[google___protobuf___struct_pb2___Value] = None,
flowId : typing___Optional[typing___Text] = None,
version : typing___Optional[builtin___int] = None,
flowInputs : typing___Optional[google___protobuf___struct_pb2___Value] = None,
flowEnv : typing___Optional[google___protobuf___struct_pb2___Value] = None,
metadata : typing___Optional[FlowInstance.Metadata] = None,
name : typing___Optional[typing___Text] = None,
org : typing___Optional[builtin___int] = None,
flowOutputs : typing___Optional[typing___Iterable[FlowInstance.FlowOutputs]] = None,
outputDefs : typing___Optional[typing___Iterable[FlowInstance.OutputDefs]] = None,
tableDefs : typing___Optional[typing___Iterable[FlowInstance.TableDefs]] = None,
creator : typing___Optional[typing___Text] = None,
category : typing___Optional[typing___Text] = None,
updateTime : typing___Optional[typing___Text] = None,
createTime : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> FlowInstance: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> FlowInstance: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"agentData",b"agentData",u"flowEnv",b"flowEnv",u"flowInputs",b"flowInputs",u"flowOutputsData",b"flowOutputsData",u"metadata",b"metadata",u"outputs",b"outputs",u"runningSteps",b"runningSteps",u"standardOutputs",b"standardOutputs",u"tableData",b"tableData"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"agentData",b"agentData",u"category",b"category",u"createTime",b"createTime",u"creator",b"creator",u"currentTime",b"currentTime",u"endTime",b"endTime",u"flowEnv",b"flowEnv",u"flowId",b"flowId",u"flowInputs",b"flowInputs",u"flowOutputs",b"flowOutputs",u"flowOutputsData",b"flowOutputsData",u"instanceMap",b"instanceMap",u"message",b"message",u"metadata",b"metadata",u"name",b"name",u"needNotify",b"needNotify",u"org",b"org",u"outputDefs",b"outputDefs",u"outputs",b"outputs",u"runningSteps",b"runningSteps",u"standardOutputs",b"standardOutputs",u"startTime",b"startTime",u"stepList",b"stepList",u"tableData",b"tableData",u"tableDefs",b"tableDefs",u"taskCounter",b"taskCounter",u"taskId",b"taskId",u"totalStatus",b"totalStatus",u"updateTime",b"updateTime",u"version",b"version"]) -> None: ...
| [
"[email protected]"
] | |
a79d3bac4fb73d81f7b0d44129ef3e962424d788 | 79bb7105223895235263fd391906144f9f9645fd | /models/image/cifar10/cifar10_train.py | 84f71d7a8274b757458f397c449a5c59eaea179a | [
"Apache-2.0"
] | permissive | ml-lab/imcl-tensorflow | f863a81bfebe91af7919fb45036aa05304fd7cda | 54ab3ec2e32087ce70ecae2f36b56a8a92f2ba89 | refs/heads/master | 2021-01-22T06:37:18.129405 | 2016-06-08T15:53:28 | 2016-06-08T15:53:28 | 63,518,098 | 1 | 2 | null | 2016-07-17T06:29:14 | 2016-07-17T06:29:13 | null | UTF-8 | Python | false | false | 4,700 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
  """Train CIFAR-10 for a number of steps.

  Builds the input pipeline, inference graph, loss and train op, then runs
  FLAGS.max_steps optimization steps in one session, periodically logging
  throughput, writing summaries, and checkpointing to FLAGS.train_dir.
  Uses the pre-1.0 TensorFlow API (tf.all_variables, SummaryWriter, etc.).
  """
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False)
    # Get images and labels for CIFAR-10.
    images, labels = cifar10.distorted_inputs()
    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)
    # Calculate loss.
    loss = cifar10.loss(logits, labels)
    # Build a Graph that trains the model with one batch of examples and
    # updates the model parameters.
    train_op = cifar10.train(loss, global_step)
    # Create a saver.
    saver = tf.train.Saver(tf.all_variables())
    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()
    # Build an initialization operation to run below.
    init = tf.initialize_all_variables()
    # Start running operations on the Graph.
    sess = tf.Session(config=tf.ConfigProto(
        log_device_placement=FLAGS.log_device_placement))
    sess.run(init)
    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
    # Main loop: each sess.run executes one forward/backward pass + update.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      _, loss_value = sess.run([train_op, loss])
      duration = time.time() - start_time
      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
      # Log loss and throughput every 10 steps.
      if step % 10 == 0:
        num_examples_per_step = FLAGS.batch_size
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = float(duration)
        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print (format_str % (datetime.now(), step, loss_value,
                             examples_per_sec, sec_per_batch))
      # Write TensorBoard summaries every 100 steps.
      if step % 100 == 0:
        summary_str = sess.run(summary_op)
        summary_writer.add_summary(summary_str, step)
      # Save the model checkpoint periodically.
      if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None):  # pylint: disable=unused-argument
  """Entry point: fetch the dataset, reset the train dir, and train."""
  cifar10.maybe_download_and_extract()
  # Start each run from a clean event-log/checkpoint directory.
  if tf.gfile.Exists(FLAGS.train_dir):
    tf.gfile.DeleteRecursively(FLAGS.train_dir)
  tf.gfile.MakeDirs(FLAGS.train_dir)
  train()
if __name__ == '__main__':
  # tf.app.run() parses FLAGS and then calls main().
  tf.app.run()
| [
"[email protected]"
] | |
4ab0d9b5dbf4bad4ce4e985cef21b404c9ddd7ec | 30a3fe4623bda3cf271cf8ef24f87948b89de019 | /app/utils.py | 0ded1cb9dd3c89790b56992c698e0b14097d1fb3 | [] | no_license | kiminh/semantic-search-faiss | 84e520e1a8a29a79d0ed313d815704ff7e2e5ddf | bc448f4839e3f0835b711cf7769421d64d11c909 | refs/heads/master | 2022-10-03T17:40:12.010550 | 2020-06-04T09:36:02 | 2020-06-04T09:36:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,001 | py | import os
import json
import faiss
import numpy as np
OUTPUT_DIR = "output"
def normalize(sent: str):
    """Normalize a sentence for indexing.

    Replaces typographic (curly) quotes and em-dashes with their plain
    ASCII equivalents and removes newline characters. A single
    ``str.translate`` pass replaces the original chain of per-character
    ``str.replace`` calls; behavior is unchanged.

    Args:
        sent: the raw sentence text.

    Returns:
        The normalized sentence as a new string.
    """
    # Mapping to None deletes the character (used for '\n').
    table = str.maketrans({
        '“': '"',
        '”': '"',
        '’': "'",
        '‘': "'",
        '—': '-',
        '\n': None,
    })
    return sent.translate(table)
def load_dataset(f_input: str):
    """Read ``<f_input>.json`` and return the normalized title of each entry."""
    with open(f"{f_input}.json", "r", encoding="utf-8") as fp:
        records = json.loads(fp.read())
    return [normalize(record["title"]) for record in records]
def es_search(es, index: str, query: str, k: int=3):
    """Run a full-text match on ``title`` and return the top-k hit titles.

    Args:
        es: an Elasticsearch client exposing ``search(index=..., body=...)``.
        index: name of the index to query.
        query: free-text query matched against the ``title`` field.
        k: number of hits to request (default 3).

    Returns:
        List of the matching documents' ``title`` values.
    """
    body = {
        "from": 0,
        "size": k,
        "query": {
            "match": {
                "title": query
            }
        }
    }
    response = es.search(index=index, body=body)
    return [hit["_source"]["title"] for hit in response["hits"]["hits"]]
def create_es_index(es, index: str):
    """Create the index with a Korean (nori) analyzer and load the corpus.

    No-op when the index already exists; otherwise the index is created and
    every normalized corpus title is indexed into it one document at a time.
    """
    if es.indices.exists(index=index):
        return
    settings = {
        "analysis": {
            "analyzer": {
                "nori": {
                    "tokenizer": "nori_tokenizer"
                }
            }
        }
    }
    mappings = {
        "properties": {
            "title": {
                "type": "text",
                "analyzer": "nori"
            }
        }
    }
    es.indices.create(index=index, body={"settings": settings, "mappings": mappings})
    for title in load_dataset("corpus"):
        es.index(index=index, body={"title": normalize(title)})
def faiss_search(encoder, indices, query: str, k: int=3):
    """Encode ``query`` and return the k nearest corpus titles via FAISS.

    ``indices.search`` returns (distances, ids); the ids of the single query
    row are used to look the titles back up in the corpus file.
    """
    query_vec = encoder.encode(query)
    top_ids = indices.search(query_vec, k)[-1].tolist()[0]
    corpus = load_dataset("corpus")
    return [corpus[i] for i in top_ids]
def create_faiss_index(encoder):
    """Build an inner-product FAISS index over the corpus, cached on disk.

    If a serialized index already exists under OUTPUT_DIR it is loaded and
    returned; otherwise the corpus is encoded, indexed with sequential ids,
    written to disk, and returned.
    """
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    index_path = os.path.join(OUTPUT_DIR, "faiss.index")
    if os.path.exists(index_path):
        return faiss.read_index(index_path)
    corpus = load_dataset("corpus")
    vectors = np.array([encoder.encode(text) for text in corpus])
    index = faiss.IndexIDMap(faiss.IndexFlatIP(encoder.dimension))
    # Ids are simply the positions in the corpus list.
    index.add_with_ids(vectors, np.array(range(len(corpus))))
    faiss.write_index(index, index_path)
    return index
| [
"[email protected]"
] | |
ba27a0f3a80330eb4a3161bdba920d0f0031b4d9 | 70896c105c9a3cc2e7316883e50395fc0638fd25 | /site/search-index/graph.py | 40d6c8f8249c42cce1edc96d873af2fed57b6bd8 | [
"MIT"
] | permissive | chituma110/neupy | 87e8c9c14b1eeb43012b214952460a86c7fb05ab | 15de13b7d7018369a8d788c0d0ccf9a5c8176320 | refs/heads/master | 2021-01-13T12:18:54.597174 | 2017-01-03T13:09:57 | 2017-01-03T13:09:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | from collections import OrderedDict
class DirectedGraph(object):
    """A directed graph backed by an insertion-ordered adjacency mapping.

    Each node maps to the list of its outgoing neighbours. Iterating the
    graph yields ``(node, neighbours)`` pairs in insertion order.
    """

    def __init__(self):
        self.graph = OrderedDict()

    def add_node(self, node):
        # setdefault keeps an existing node's adjacency list intact; the
        # previous implementation reset it to [], silently dropping edges
        # when a node was added twice.
        self.graph.setdefault(node, [])

    def add_edge(self, node_1, node_2):
        # add_node is now idempotent, so no membership pre-checks needed.
        self.add_node(node_1)
        self.add_node(node_2)
        self.graph[node_1].append(node_2)

    def __iter__(self):
        for from_node, to_nodes in self.graph.items():
            yield from_node, to_nodes

    def __len__(self):
        return len(self.graph)

    @property
    def edges(self):
        # NOTE: despite the name, this returns the graph's *nodes*
        # (the mapping keys), preserved for interface compatibility.
        return list(self.graph.keys())
| [
"[email protected]"
] | |
7454062d94e2d4f5e5d3ce9c4e44d05140f93ec7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02659/s860662999.py | 7dba4b6afad56f6f9235eec17059451af95857f9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | import decimal
# Read A and B as exact Decimal values so fractional inputs (e.g. "0.01")
# do not pick up binary floating-point rounding error before multiplying.
A,B = map(decimal.Decimal,input().split())
num = A*B
# int() truncates the Decimal product toward zero; the trailing //1 is a
# no-op on the already-integral value and keeps the printed result an int.
print(int(num)//1)
"[email protected]"
] | |
1a35cb77bc842254dc8b6691abc06fc3ea3fc946 | c8c629cc6c133dd3d3f7919bb24e63675a2643b7 | /ionyweb/plugin_app/plugin_website_title/migrations/0003_auto__add_field_plugin_websitetitle_link_enabled__add_field_plugin_web.py | a8220ea7d8cc82f6137ceeaf274609cde749953d | [
"BSD-3-Clause"
] | permissive | ionyse/ionyweb | cf32629f908b622c6e043825c79722c74a30ab9a | 00947315b5bca4977f1de40ddb951f843c345532 | refs/heads/master | 2021-07-11T16:26:26.843580 | 2013-04-07T08:23:50 | 2013-04-07T08:23:50 | 5,911,728 | 4 | 1 | NOASSERTION | 2021-03-19T21:41:34 | 2012-09-22T09:08:53 | Python | UTF-8 | Python | false | false | 12,392 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Plugin_WebsiteTitle.link_enabled'
db.add_column('plugin_website_title_plugin_websitetitle', 'link_enabled',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Adding field 'Plugin_WebsiteTitle.target_link'
db.add_column('plugin_website_title_plugin_websitetitle', 'target_link',
self.gf('django.db.models.fields.CharField')(default='/', max_length=200, blank=True),
keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop both columns (their data is lost)."""
        # Deleting field 'Plugin_WebsiteTitle.link_enabled'
        db.delete_column('plugin_website_title_plugin_websitetitle', 'link_enabled')
        # Deleting field 'Plugin_WebsiteTitle.target_link'
        db.delete_column('plugin_website_title_plugin_websitetitle', 'target_link')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'file_manager.directory': {
'Meta': {'object_name': 'Directory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['file_manager.Directory']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'file_manager.filemanager': {
'Meta': {'object_name': 'FileManager'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'root': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'filemanager'", 'null': 'True', 'blank': 'True', 'to': "orm['file_manager.Directory']"})
},
'page.page': {
'Meta': {'object_name': 'Page'},
'app_page_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'app_page_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'default_template': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_diplayed_in_menu': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_modif': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['page.Page']"}),
'placeholder_slug': ('django.db.models.fields.SlugField', [], {'default': "'content-placeholder-1'", 'max_length': '50'}),
'plugin_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'sha1': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': "orm['website.WebSite']"})
},
'plugin.pluginrelation': {
'Meta': {'ordering': "['plugin_order']", 'object_name': 'PluginRelation'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'display_on_new_pages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'pages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'plugins'", 'symmetrical': 'False', 'to': "orm['page.Page']"}),
'placeholder_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'plugin_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'plugin_website_title.plugin_websitetitle': {
'Meta': {'object_name': 'Plugin_WebsiteTitle'},
'baseline': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_link': ('django.db.models.fields.CharField', [], {'default': "'/'", 'max_length': '200', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'title_rule': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'website.website': {
'Meta': {'object_name': 'WebSite'},
'analytics_key': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'default_layout': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'default_template': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'website_set'", 'unique': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['sites.Site']"}),
'files_library': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'website'", 'null': 'True', 'to': "orm['file_manager.FileManager']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_maintenance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'main_menu_levels': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'meta_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'blank': 'True'}),
'ndds': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'website'", 'symmetrical': 'False', 'to': "orm['sites.Site']"}),
'owners': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'through': "orm['website.WebSiteOwner']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'theme': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'website.websiteowner': {
'Meta': {'object_name': 'WebSiteOwner'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'websites_owned'", 'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'websites_owned'", 'to': "orm['website.WebSite']"})
}
}
complete_apps = ['plugin_website_title'] | [
"[email protected]"
] | |
5028e51bfd002a3d27ab6417f43c7ce234de56bc | 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c | /nodes/Bisong19Building/C_PartII/C_Chapter10/F_MatrixOperations/B_ElementWiseOperations/index.py | 0e50f350af322d3b72ea3073845fe22ebb12e2a8 | [] | no_license | nimra/module_gen | 8749c8d29beb700cac57132232861eba4eb82331 | 2e0a4452548af4fefd4cb30ab9d08d7662122cf4 | refs/heads/master | 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,323 | py | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Element-Wise Operations
# Element-wise matrix operations involve matrices operating on themselves in an
# element-wise fashion. The action can be an addition, subtraction, division, or
# multiplication (which is commonly called the Hadamard product). The matrices must be
# of the same shape. Please note that while a matrix is of shape n × n, a vector is of shape
# n × 1. These concepts easily apply to vectors as well. See Figure 10-2.
#
#
#
#
# Figure 10-2. Element-wise matrix operations
# Let’s have some examples.
#
# # Hadamard multiplication of A and B
# A * B
# 'Output':
# array([[ 570, 928, 528],
# [ 160, 690, 1196],
# [ 990, 658, 1056]])
# # add A and B
# A + B
# 'Output':
# array([[53, 61, 46],
# [37, 53, 72],
# [63, 61, 68]])
# # subtract A from B
# B - A
# 'Output':
# array([[ 23, 3, -2],
# [ 27, 7, 20],
# [ 3, 33, -20]])
# # divide A with B
# A / B
# 'Output':
# array([[ 0.39473684, 0.90625 , 1.09090909],
# [ 0.15625 , 0.76666667, 0.56521739],
# [ 0.90909091, 0.29787234, 1.83333333]])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
    """Leaf node holding the 'Element-Wise Operations' section content.

    The commented-out Stage values are alternative pipeline stages kept
    for reference; only REMOVE_EXTRANEOUS is currently active.
    """
    def __init__(self):
        super().__init__(
            "Element-Wise Operations",
            Stage.REMOVE_EXTRANEOUS,
            # Stage.ORIG_BLOCKS,
            # Stage.CUSTOM_BLOCKS,
            # Stage.ORIG_FIGURES,
            # Stage.CUSTOM_FIGURES,
            # Stage.CUSTOM_EXERCISES,
        )
        # Section heading rendered as a markdown block.
        self.add(mbk("# Element-Wise Operations"))
class ElementWiseOperations(HierNode):
def __init__(self):
super().__init__("Element-Wise Operations")
self.add(Content())
# eof
| [
"[email protected]"
] | |
e66a6383de8c689572ca2d04c0adb3a49595775e | 05c395df76d494d8239de86515a3b57cd08231c4 | /test/lmp/tokenizer/_base_tokenizer/__init__.py | 5d2fe53c608ba24438119b9611d5a2ab2595657f | [] | no_license | SiuYingCheng/language-model-playground | 61b74a28abea5707bc1c9d0a2280d2f24d959ae4 | 6bca79baceacf85c5c3683bbfdf586a00484ed19 | refs/heads/master | 2022-12-16T23:04:25.895156 | 2020-09-10T16:26:03 | 2020-09-10T16:26:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,099 | py | r"""Test `lmp.tokenizer._base_tokenizer.py`.
Usage:
python -m unittest test.lmp.tokenizer._base_tokenizer.__init__
"""
# built-in modules
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import unittest
class TestBaseTokenizer(unittest.TestCase):
    r"""Test case for `lmp.tokenizer._base_tokenizer.py`."""

    def test_signature(self):
        r"""Ensure signature consistency."""
        msg = 'Inconsistent module signature.'
        try:
            import lmp
            import lmp.tokenizer
            import lmp.tokenizer._base_tokenizer
        except ImportError:
            self.fail(msg=msg)
        else:
            # The imported target must be an actual module object.
            self.assertTrue(
                inspect.ismodule(lmp.tokenizer._base_tokenizer),
                msg=msg
            )

    def test_module_attributes(self):
        r"""Declare required module attributes."""
        msg1 = 'Missing module attribute `{}`.'
        msg2 = 'Module attribute `{}` must be a class.'
        msg3 = 'Inconsistent module signature.'
        examples = ('BaseTokenizer',)

        try:
            import lmp
            import lmp.tokenizer
            import lmp.tokenizer._base_tokenizer
        except ImportError:
            self.fail(msg=msg3)
        else:
            # Every required attribute must exist and be a class.
            for attr in examples:
                self.assertTrue(
                    hasattr(lmp.tokenizer._base_tokenizer, attr),
                    msg=msg1.format(attr)
                )
                self.assertTrue(
                    inspect.isclass(
                        getattr(lmp.tokenizer._base_tokenizer, attr)
                    ),
                    msg=msg2.format(attr)
                )
# Allow running this file directly: python -m unittest ...
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
27bf3ef14146afe4d99288cc61e3f95623475769 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2290/60627/252911.py | dd6e631f0e74990df90b56f6a7b3a0c769388514 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | # 1
n = int(input())
# The next n input lines are read and discarded (presumably per-case
# headers such as array sizes -- verify against the judge's input format).
for i in range(n):
    input()
# Only this final line of space-separated integers is actually processed.
num = input().split()
for i in range(len(num)):
    num[i] = int(num[i])
# For each element except the last: emit the next element when it is
# strictly smaller than the current one, otherwise emit -1. The last
# element always maps to -1. Results are space-joined and printed once.
l = ''
for i in range(len(num)):
    if i < len(num)-1:
        if num[i] > num[i+1]:
            l += str(num[i+1]) + ' '
        else:
            l += '-1 '
l += '-1'
print(l)
"[email protected]"
] | |
2202083b47c93ec48c1625a519c96443a05b8997 | 02e4920166051129d1ca28a0da80405a982f1cfe | /exercícios_fixação/094.py | c12172adc3319a6db6eb420f79689481c4229452 | [] | no_license | felipeonf/Exercises_Python | 1ab40cea2466d6bb5459b5384a1dde8e1066b3b4 | 8eb2d17a35a6352fd5268a5fa43b834443171c70 | refs/heads/main | 2023-07-23T22:30:13.567469 | 2021-08-25T03:34:33 | 2021-08-25T03:34:33 | 397,062,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | '''[DESAFIO] Desenvolva um aplicativo que tenha um procedimento chamado
Fibonacci() que recebe um único valor inteiro como parâmetro, indicando quantos
termos da sequência serão mostrados na tela. O seu procedimento deve receber
esse valor e mostrar a quantidade de elementos solicitados.
Obs: Use os exercícios 70 e 75 para te ajudar na solução
Ex:
Fibonacci(5) vai gerar 1 >> 1 >> 2 >> 3 >> 5 >> FIM
Fibonacci(9) vai gerar 1 >> 1 >> 2 >> 3 >> 5 >> 8 >> 13 >> 21 >> 34 >> FIM'''
def Fibonacci(termos):
    """Print the first `termos` Fibonacci terms, per the exercise spec.

    The sequence starts at 1, 1, 2, 3, 5, ... Terms are separated by
    ' >> ' and the output is terminated with 'FIM', e.g.:

        Fibonacci(5) -> 1 >> 1 >> 2 >> 3 >> 5 >> FIM

    The previous implementation started the sequence at 0 and stopped one
    term early, so Fibonacci(5) printed '0 >> 1 >> 1 >> 2 >> 3 >> FIM',
    contradicting the examples in the module docstring.

    Args:
        termos: how many terms to print (0 prints only 'FIM').
    """
    anterior, atual = 0, 1
    for _ in range(termos):
        print(atual, end=' >> ')
        # Advance the pair: (F(n-1), F(n)) -> (F(n), F(n+1)).
        anterior, atual = atual, anterior + atual
    print('FIM')
# Prompt (in Portuguese) for how many terms to show, then print the sequence.
termos = int(input('Digite a quantidade de termos que quer ver: '))
Fibonacci(termos)
| [
"[email protected]"
] | |
8c26bce3eb81c76ff44837fa243a825da36108a0 | c7ea36544ae5f7a8e34bf95b8c38240ca6ebda83 | /app/schema/answers/month_year_date_answer.py | bfca6e0fa0eb15b62fed5d38aacf62564fa094de | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | qateam123/eq | 40daa6ea214e61b572affd0b5ab9990371c70be4 | 704757952323647d659c49a71975c56406ff4047 | refs/heads/master | 2023-01-11T01:46:11.174792 | 2017-02-09T13:46:56 | 2017-02-09T13:46:56 | 80,821,577 | 0 | 0 | MIT | 2023-01-04T14:31:08 | 2017-02-03T11:02:24 | JavaScript | UTF-8 | Python | false | false | 900 | py | from app.schema.answer import Answer
from app.schema.exceptions import TypeCheckingException
from app.schema.widgets.month_year_date_widget import MonthYearDateWidget
from app.validation.month_year_date_type_check import MonthYearDateTypeCheck
class MonthYearDateAnswer(Answer):
    """Answer type for month/year date questions.

    Wires a month/year widget and type checker onto the base Answer;
    raw form input is validated against every registered checker before
    being cast to the typed value.
    """
    def __init__(self, answer_id=None):
        super().__init__(answer_id)
        self.type_checkers.append(MonthYearDateTypeCheck())
        self.widget = MonthYearDateWidget(self.id)
    def get_typed_value(self, post_data):
        """Validate the posted input and return it cast to the typed value.

        Raises TypeCheckingException with the first validation error if
        any registered checker rejects the input.
        """
        user_input = self.get_user_input(post_data)
        for checker in self.type_checkers:
            result = checker.validate(user_input)
            if not result.is_valid:
                raise TypeCheckingException(result.errors[0])
        return self._cast_user_input(user_input)
    def get_user_input(self, post_vars):
        # Delegates extraction of the raw value to the widget.
        return self.widget.get_user_input(post_vars)
| [
"[email protected]"
] | |
90fddcec4e7c01bc4e7411795fd3da100f7f7d65 | 09dd58f46b1e914278067a69142230c7af0165c2 | /blackmamba/system.py | 986fcef2f17be701ceb50130892f4a8a6e7dcc74 | [
"MIT"
] | permissive | zrzka/blackmamba | 4e70262fbe3702553bf5d285a81b33eb6b3025ea | b298bc5d59e5aea9d494282910faf522c08ebba9 | refs/heads/master | 2021-01-01T18:43:19.490953 | 2020-01-20T08:26:33 | 2020-01-20T08:26:33 | 98,410,391 | 72 | 12 | MIT | 2020-01-20T08:26:35 | 2017-07-26T10:21:15 | Python | UTF-8 | Python | false | false | 5,903 | py | #!python3
"""System info and decorators.
.. warning:: This module must not introduce dependency on any other Black Mamba
modules and must be importable on any other platform as well.
"""
import sys
import traceback
import functools
try:
import console
except ImportError:
console = None
# Known Pythonista version -> CFBundleVersion examples:
# 3.1 -> 301016, 3.1.1 beta -> 311008.
PYTHONISTA = sys.platform == 'ios'
"""bool: True if we're running within Pythonista or False."""
PYTHONISTA_VERSION = None
"""str: Pythonista version or `None` if we're not within Pythonista."""
PYTHONISTA_BUNDLE_VERSION = None
"""int: Pythonista bundle version or `None` if we're not within Pythonista."""
PYTHONISTA_VERSION_TUPLE = None
"""tuple(int): Pythonista version tuple (3, 1, 1) or `None` if we're not within Pythonista."""
IOS = sys.platform == 'ios'
"""bool: `True` if we're running within iOS or `False`."""
IOS_VERSION = None
"""str: iOS version or `None` if we're not within iOS."""
IOS_VERSION_TUPLE = None
"""tuple(int): iOS version tuple (11, 0) or `None` if we're not within iOS."""
def _version_tuple(version):
if not version:
return None
return tuple(map(int, (version.split('.'))))
if PYTHONISTA:
    import plistlib
    import os
    try:
        # Best effort: read Pythonista's bundle metadata. On any failure the
        # version constants simply stay None.
        plist_path = os.path.abspath(os.path.join(sys.executable, '..', 'Info.plist'))
        # plistlib.readPlist was deprecated in Python 3.4 and removed in 3.9;
        # plistlib.load is the supported replacement.
        with open(plist_path, 'rb') as plist_file:
            plist = plistlib.load(plist_file)
        PYTHONISTA_VERSION = plist['CFBundleShortVersionString']
        PYTHONISTA_BUNDLE_VERSION = int(plist['CFBundleVersion'])
        PYTHONISTA_VERSION_TUPLE = _version_tuple(PYTHONISTA_VERSION)
    except Exception:
        pass
if IOS:
    try:
        # objc_util only exists inside Pythonista on iOS.
        from objc_util import ObjCClass
        IOS_VERSION = str(ObjCClass('UIDevice').currentDevice().systemVersion())
        IOS_VERSION_TUPLE = _version_tuple(IOS_VERSION)
    except Exception:
        pass
class _Available:
    """Base class for version-gated decorators.

    Subclasses implement :meth:`version`, returning the current version as a
    tuple of ints (or None when unknown). An instance decorates a function so
    that the call runs only when the version check passes; otherwise the call
    returns None.
    """

    def __init__(self, from_version=None, to_version=None):
        # The bounds are mutually exclusive: the public API expresses either a
        # minimum or a maximum version, never a closed range.
        if from_version and to_version:
            raise ValueError('Either from_version or to_version can be provided, not both')
        self._from_version = _version_tuple(from_version)
        self._to_version = _version_tuple(to_version)

    def version(self):
        """Return the current version as tuple(int), or None if unknown."""
        raise Exception('Not implemented, return version as tuple(int)')

    def _available(self):
        """Return True when the current version satisfies the configured bound."""
        current_version = self.version()
        if not current_version:
            return False
        if self._to_version:
            return current_version <= self._to_version
        if self._from_version:
            return current_version >= self._from_version
        return True

    def __call__(self, fn, *args, **kwargs):
        # functools.wraps preserves the wrapped function's name, docstring and
        # other metadata (the original wrapper lost them).
        @functools.wraps(fn)
        def func(*args, **kwargs):
            if self._available():
                return fn(*args, **kwargs)
            return None
        return func
class iOS(_Available):
    """Decorator to execute function under specific iOS versions.
    Return value is return value of decorated function or `None`
    if iOS condition isn't met.
    Examples:
        Run function only within any iOS version::
            @iOS()
            def run_me():
                pass
        Run function only within iOS >= 11.0::
            @iOS('11.0') # or @iOS(from_version='11.0')
            def run_me():
                pass
        Run function only within iOS <= 11.0::
            @iOS(None, '11.0') # or @iOS(to_version='11.0')
            def run_me():
                pass
    """
    def version(self):
        # iOS version detected once at import time; None when not on-device.
        return IOS_VERSION_TUPLE
class Pythonista(_Available):
    """Decorator to execute function under specific Pythonista versions.
    By default, function is not executed under application extension.
    You have to pass ``appex=True`` if you'd like to run some function
    under appex as well.
    Return value is return value of decorated function or `None`
    if Pythonista condition isn't met.
    Examples:
        Run function only within any Pythonista version::
            @Pythonista()
            def run_me():
                pass
        Run function only within any Pythonista version and allow appex::
            @Pythonista(appex=True)
            def run_me():
                pass
        Run function only within any Pythonista version and disallow appex::
            @Pythonista(appex=False)
            def run_me():
                pass
        Run function only within Pythonista >= 3.1.1::
            @Pythonista('3.1.1') # or @Pythonista(from_version='3.1.1')
            def run_me():
                pass
        Run function only within Pythonista <= 3.2::
            @Pythonista(None, '3.2') # or @Pythonista(to_version='3.2')
            def run_me():
                pass
    """
    def __init__(self, from_version=None, to_version=None, appex=None):
        # appex: True/False restricts to (non-)extension context; None means
        # "run in either context".
        super().__init__(from_version, to_version)
        self._appex = appex
    def _available(self):
        available = super()._available()
        if available and self._appex is not None:
            import appex  # deferred import: only available inside Pythonista
            available = appex.is_running_extension() == self._appex
        return available
    def version(self):
        # Pythonista version detected once at import time; None elsewhere.
        return PYTHONISTA_VERSION_TUPLE
def catch_exceptions(func):
    """Decorator catching every exception and reporting it to the console.

    Intended for functions handling keyboard shortcuts, keyboard events,
    etc., where an uncaught exception would crash Pythonista.

    Args:
        func: Function to decorate

    Returns:
        Return value of the decorated function (None when an exception
        was caught).
    """
    def _report_failure():
        # Switch the Pythonista console to red while printing, when available.
        if console:
            console.set_color(1, 0, 0)
        print(traceback.format_exc())
        print('Please, file an issue at {}'.format('https://github.com/zrzka/blackmamba/issues'))
        if console:
            console.set_color()

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            _report_failure()

    return wrapper
| [
"[email protected]"
] | |
ab914d8b6829a0927aaf88e8e92d8c9f5c659ec9 | 6a9231236de04b7d0ccd9b1cb2b2a81dd3e9c96c | /65.py | 8427495a5a8610ab2f2e42ed11155dc21e586079 | [] | no_license | ikekou/python-exercise-100-book | b3a7fc92a2b8bfb250a799178738956840735117 | 9e44c1ce6252ecbd90abe1054b89aa1bb651a283 | refs/heads/master | 2023-03-21T14:19:37.141400 | 2021-02-28T21:56:28 | 2021-02-28T21:56:28 | 343,223,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | d = {'A':111,'B':222,'C':333}
d2 = {'A':111,'B':222,'C':333}
d['D'] = 444
d.update({'D': 444})
print(d)
print(d2) | [
"[email protected]"
] | |
d3f65edf1238593288b49ad5a0fc56308ecf426e | 85f0d7f7255222ce5f8c7fda080edefc8b84fbf6 | /manage.py | 8d08c9977f464e84f200059a0bad41d3847fb953 | [] | no_license | rushi-jagdale/Studentform | 9101bfc670d1eea75d77971632443511520a0ce9 | 90f9255ac7ed0415900b7ff05179b238734b1783 | refs/heads/master | 2023-06-13T03:30:23.072760 | 2021-07-07T16:56:25 | 2021-07-07T16:56:25 | 383,867,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module unless the caller has
    # already configured one in the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'StudentForm.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the sub-command from argv (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
58c130b79a1188152e08dda276e3ffec20ed373c | 0dae97b2205ef5d8ce884ec2af4bf99ad2baec43 | /drf_admin/apps/cmdb/views/servers.py | d82da8146f097cad58b325c18915d1920c94e506 | [
"MIT"
] | permissive | 15051882416/drf_admin | 2520affacd0345d042b499c3e9a56a112cc235d5 | 0b31fa5248afb6fc20e6ef425b2dcc4d39977d81 | refs/heads/master | 2022-12-31T04:57:27.017134 | 2020-10-24T01:09:58 | 2020-10-24T01:09:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,461 | py | # -*- coding: utf-8 -*-
"""
@author : Wang Meng
@github : https://github.com/tianpangji
@software : PyCharm
@file : servers.py
@create : 2020/10/17 18:45
"""
from django.contrib.auth.models import AnonymousUser
from django.db.models import Q
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.response import Response
from rest_framework.views import APIView
from cmdb.models import Assets, Servers
from cmdb.serializers.servers import ServersAssetsSerializers
from drf_admin.common.departments import get_departments_id
from drf_admin.utils.views import AdminViewSet
from system.models import Departments
class ServersViewSet(AdminViewSet):
    """
    create:
    Servers -- create
    Create a server asset, status: 201 (success), return: the created server's data
    destroy:
    Servers -- delete
    Delete a server asset, status: 204 (success), return: None
    multiple_delete:
    Servers -- bulk delete
    Delete several server assets at once, status: 204 (success), return: None
    update:
    Servers -- update
    Update a server asset, status: 200 (success), return: the updated server's data
    partial_update:
    Servers -- partial update
    Partially update a server asset, status: 200 (success), return: the updated server's data
    list:
    Servers -- list
    Server list, status: 200 (success), return: a list of servers
    retrieve:
    Servers -- detail
    Server detail, status: 200 (success), return: a single server's details
    """
    serializer_class = ServersAssetsSerializers
    filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
    filter_fields = ['asset_status']
    search_fields = ('name', 'sn', 'manage_ip')
    ordering_fields = ('id', 'name', 'sn')
    def get_queryset(self):
        # Work around a drf-yasg loading error: schema generation calls this
        # view with an AnonymousUser, so return an empty queryset there.
        if isinstance(self.request.user, AnonymousUser):
            return Assets.objects.none()
        # (1) Users holding the 'admin' role can see every server.
        if {'name': 'admin'} in self.request.user.roles.values('name'):
            return Assets.objects.filter(asset_type='server')
        # (2) Other users see only servers in their own department (and its
        # sub-departments) plus the servers they administer themselves.
        if self.request.user.department:
            departments = get_departments_id(self.request.user.department.id)
            return (Assets.objects.filter(asset_type='server').filter(
                Q(department__in=departments) | Q(admin=self.request.user))).distinct()
        else:
            return Assets.objects.filter(asset_type='server', admin=self.request.user)
class ServersSystemTypeAPIView(APIView):
    """
    get:
    Servers -- model system-type choices
    System-type choices declared on the Servers model, status: 200 (success), return: the choice list
    """
    def get(self, request):
        # Expose the model's choice tuples as {'value': ..., 'label': ...} items.
        options = [{'value': choice[0], 'label': choice[1]}
                   for choice in Servers.server_system_type_choice]
        return Response(data={'results': options})
class ServersTypeAPIView(APIView):
    """
    get:
    Servers -- model type choices
    Type choices declared on the Servers model, status: 200 (success), return: the choice list
    """
    def get(self, request):
        # Expose the model's choice tuples as {'value': ..., 'label': ...} items.
        options = [{'value': choice[0], 'label': choice[1]}
                   for choice in Servers.server_type_choice]
        return Response(data={'results': options})
| [
"[email protected]"
] | |
67c8e5844d793fadfff5a1668ac5b73628504133 | 73de523bde0c9e8398c63a924b44aadc46d11202 | /isi_sdk/models/smb_settings_global_extended.py | 29687d406097319e864eb3f30c87df231c9dbd62 | [
"MIT",
"Apache-2.0"
] | permissive | Feyd-Aran/isilon_sdk_python | 1c2fae306c1a95a99024dd13dc0fc3b120f9c1de | 24e85a5577d15ac3db06862d07d5a261658c67b7 | refs/heads/v8.0.0 | 2020-09-23T00:16:36.684270 | 2019-12-02T13:45:12 | 2019-12-02T13:45:12 | 225,351,700 | 0 | 0 | MIT | 2019-12-02T10:51:54 | 2019-12-02T10:51:53 | null | UTF-8 | Python | false | false | 27,211 | py | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_0.models.smb_settings_global_settings_audit_global_sacl_item import SmbSettingsGlobalSettingsAuditGlobalSaclItem # noqa: F401,E501
class SmbSettingsGlobalExtended(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'access_based_share_enum': 'bool',
'audit_fileshare': 'str',
'audit_global_sacl': 'list[SmbSettingsGlobalSettingsAuditGlobalSaclItem]',
'audit_logon': 'str',
'dot_snap_accessible_child': 'bool',
'dot_snap_accessible_root': 'bool',
'dot_snap_visible_child': 'bool',
'dot_snap_visible_root': 'bool',
'enable_security_signatures': 'bool',
'guest_user': 'str',
'ignore_eas': 'bool',
'onefs_cpu_multiplier': 'int',
'onefs_num_workers': 'int',
'require_security_signatures': 'bool',
'server_side_copy': 'bool',
'server_string': 'str',
'service': 'bool',
'srv_cpu_multiplier': 'int',
'srv_num_workers': 'int',
'support_multichannel': 'bool',
'support_netbios': 'bool',
'support_smb2': 'bool'
}
attribute_map = {
'access_based_share_enum': 'access_based_share_enum',
'audit_fileshare': 'audit_fileshare',
'audit_global_sacl': 'audit_global_sacl',
'audit_logon': 'audit_logon',
'dot_snap_accessible_child': 'dot_snap_accessible_child',
'dot_snap_accessible_root': 'dot_snap_accessible_root',
'dot_snap_visible_child': 'dot_snap_visible_child',
'dot_snap_visible_root': 'dot_snap_visible_root',
'enable_security_signatures': 'enable_security_signatures',
'guest_user': 'guest_user',
'ignore_eas': 'ignore_eas',
'onefs_cpu_multiplier': 'onefs_cpu_multiplier',
'onefs_num_workers': 'onefs_num_workers',
'require_security_signatures': 'require_security_signatures',
'server_side_copy': 'server_side_copy',
'server_string': 'server_string',
'service': 'service',
'srv_cpu_multiplier': 'srv_cpu_multiplier',
'srv_num_workers': 'srv_num_workers',
'support_multichannel': 'support_multichannel',
'support_netbios': 'support_netbios',
'support_smb2': 'support_smb2'
}
def __init__(self, access_based_share_enum=None, audit_fileshare=None, audit_global_sacl=None, audit_logon=None, dot_snap_accessible_child=None, dot_snap_accessible_root=None, dot_snap_visible_child=None, dot_snap_visible_root=None, enable_security_signatures=None, guest_user=None, ignore_eas=None, onefs_cpu_multiplier=None, onefs_num_workers=None, require_security_signatures=None, server_side_copy=None, server_string=None, service=None, srv_cpu_multiplier=None, srv_num_workers=None, support_multichannel=None, support_netbios=None, support_smb2=None): # noqa: E501
"""SmbSettingsGlobalExtended - a model defined in Swagger""" # noqa: E501
self._access_based_share_enum = None
self._audit_fileshare = None
self._audit_global_sacl = None
self._audit_logon = None
self._dot_snap_accessible_child = None
self._dot_snap_accessible_root = None
self._dot_snap_visible_child = None
self._dot_snap_visible_root = None
self._enable_security_signatures = None
self._guest_user = None
self._ignore_eas = None
self._onefs_cpu_multiplier = None
self._onefs_num_workers = None
self._require_security_signatures = None
self._server_side_copy = None
self._server_string = None
self._service = None
self._srv_cpu_multiplier = None
self._srv_num_workers = None
self._support_multichannel = None
self._support_netbios = None
self._support_smb2 = None
self.discriminator = None
if access_based_share_enum is not None:
self.access_based_share_enum = access_based_share_enum
if audit_fileshare is not None:
self.audit_fileshare = audit_fileshare
if audit_global_sacl is not None:
self.audit_global_sacl = audit_global_sacl
if audit_logon is not None:
self.audit_logon = audit_logon
if dot_snap_accessible_child is not None:
self.dot_snap_accessible_child = dot_snap_accessible_child
if dot_snap_accessible_root is not None:
self.dot_snap_accessible_root = dot_snap_accessible_root
if dot_snap_visible_child is not None:
self.dot_snap_visible_child = dot_snap_visible_child
if dot_snap_visible_root is not None:
self.dot_snap_visible_root = dot_snap_visible_root
if enable_security_signatures is not None:
self.enable_security_signatures = enable_security_signatures
if guest_user is not None:
self.guest_user = guest_user
if ignore_eas is not None:
self.ignore_eas = ignore_eas
if onefs_cpu_multiplier is not None:
self.onefs_cpu_multiplier = onefs_cpu_multiplier
if onefs_num_workers is not None:
self.onefs_num_workers = onefs_num_workers
if require_security_signatures is not None:
self.require_security_signatures = require_security_signatures
if server_side_copy is not None:
self.server_side_copy = server_side_copy
if server_string is not None:
self.server_string = server_string
if service is not None:
self.service = service
if srv_cpu_multiplier is not None:
self.srv_cpu_multiplier = srv_cpu_multiplier
if srv_num_workers is not None:
self.srv_num_workers = srv_num_workers
if support_multichannel is not None:
self.support_multichannel = support_multichannel
if support_netbios is not None:
self.support_netbios = support_netbios
if support_smb2 is not None:
self.support_smb2 = support_smb2
@property
def access_based_share_enum(self):
"""Gets the access_based_share_enum of this SmbSettingsGlobalExtended. # noqa: E501
Only enumerate files and folders the requesting user has access to. # noqa: E501
:return: The access_based_share_enum of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._access_based_share_enum
@access_based_share_enum.setter
def access_based_share_enum(self, access_based_share_enum):
"""Sets the access_based_share_enum of this SmbSettingsGlobalExtended.
Only enumerate files and folders the requesting user has access to. # noqa: E501
:param access_based_share_enum: The access_based_share_enum of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._access_based_share_enum = access_based_share_enum
@property
def audit_fileshare(self):
"""Gets the audit_fileshare of this SmbSettingsGlobalExtended. # noqa: E501
Specify level of file share audit events to log. # noqa: E501
:return: The audit_fileshare of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: str
"""
return self._audit_fileshare
@audit_fileshare.setter
def audit_fileshare(self, audit_fileshare):
"""Sets the audit_fileshare of this SmbSettingsGlobalExtended.
Specify level of file share audit events to log. # noqa: E501
:param audit_fileshare: The audit_fileshare of this SmbSettingsGlobalExtended. # noqa: E501
:type: str
"""
self._audit_fileshare = audit_fileshare
@property
def audit_global_sacl(self):
"""Gets the audit_global_sacl of this SmbSettingsGlobalExtended. # noqa: E501
Specifies a list of permissions to audit. # noqa: E501
:return: The audit_global_sacl of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: list[SmbSettingsGlobalSettingsAuditGlobalSaclItem]
"""
return self._audit_global_sacl
@audit_global_sacl.setter
def audit_global_sacl(self, audit_global_sacl):
"""Sets the audit_global_sacl of this SmbSettingsGlobalExtended.
Specifies a list of permissions to audit. # noqa: E501
:param audit_global_sacl: The audit_global_sacl of this SmbSettingsGlobalExtended. # noqa: E501
:type: list[SmbSettingsGlobalSettingsAuditGlobalSaclItem]
"""
self._audit_global_sacl = audit_global_sacl
@property
def audit_logon(self):
"""Gets the audit_logon of this SmbSettingsGlobalExtended. # noqa: E501
Specify the level of logon audit events to log. # noqa: E501
:return: The audit_logon of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: str
"""
return self._audit_logon
@audit_logon.setter
def audit_logon(self, audit_logon):
"""Sets the audit_logon of this SmbSettingsGlobalExtended.
Specify the level of logon audit events to log. # noqa: E501
:param audit_logon: The audit_logon of this SmbSettingsGlobalExtended. # noqa: E501
:type: str
"""
self._audit_logon = audit_logon
@property
def dot_snap_accessible_child(self):
"""Gets the dot_snap_accessible_child of this SmbSettingsGlobalExtended. # noqa: E501
Allow access to .snapshot directories in share subdirectories. # noqa: E501
:return: The dot_snap_accessible_child of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._dot_snap_accessible_child
@dot_snap_accessible_child.setter
def dot_snap_accessible_child(self, dot_snap_accessible_child):
"""Sets the dot_snap_accessible_child of this SmbSettingsGlobalExtended.
Allow access to .snapshot directories in share subdirectories. # noqa: E501
:param dot_snap_accessible_child: The dot_snap_accessible_child of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._dot_snap_accessible_child = dot_snap_accessible_child
@property
def dot_snap_accessible_root(self):
"""Gets the dot_snap_accessible_root of this SmbSettingsGlobalExtended. # noqa: E501
Allow access to the .snapshot directory in the root of the share. # noqa: E501
:return: The dot_snap_accessible_root of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._dot_snap_accessible_root
@dot_snap_accessible_root.setter
def dot_snap_accessible_root(self, dot_snap_accessible_root):
"""Sets the dot_snap_accessible_root of this SmbSettingsGlobalExtended.
Allow access to the .snapshot directory in the root of the share. # noqa: E501
:param dot_snap_accessible_root: The dot_snap_accessible_root of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._dot_snap_accessible_root = dot_snap_accessible_root
@property
def dot_snap_visible_child(self):
"""Gets the dot_snap_visible_child of this SmbSettingsGlobalExtended. # noqa: E501
Show .snapshot directories in share subdirectories. # noqa: E501
:return: The dot_snap_visible_child of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._dot_snap_visible_child
@dot_snap_visible_child.setter
def dot_snap_visible_child(self, dot_snap_visible_child):
"""Sets the dot_snap_visible_child of this SmbSettingsGlobalExtended.
Show .snapshot directories in share subdirectories. # noqa: E501
:param dot_snap_visible_child: The dot_snap_visible_child of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._dot_snap_visible_child = dot_snap_visible_child
@property
def dot_snap_visible_root(self):
"""Gets the dot_snap_visible_root of this SmbSettingsGlobalExtended. # noqa: E501
Show the .snapshot directory in the root of a share. # noqa: E501
:return: The dot_snap_visible_root of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._dot_snap_visible_root
@dot_snap_visible_root.setter
def dot_snap_visible_root(self, dot_snap_visible_root):
"""Sets the dot_snap_visible_root of this SmbSettingsGlobalExtended.
Show the .snapshot directory in the root of a share. # noqa: E501
:param dot_snap_visible_root: The dot_snap_visible_root of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._dot_snap_visible_root = dot_snap_visible_root
@property
def enable_security_signatures(self):
"""Gets the enable_security_signatures of this SmbSettingsGlobalExtended. # noqa: E501
Indicates whether the server supports signed SMB packets. # noqa: E501
:return: The enable_security_signatures of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._enable_security_signatures
@enable_security_signatures.setter
def enable_security_signatures(self, enable_security_signatures):
"""Sets the enable_security_signatures of this SmbSettingsGlobalExtended.
Indicates whether the server supports signed SMB packets. # noqa: E501
:param enable_security_signatures: The enable_security_signatures of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._enable_security_signatures = enable_security_signatures
@property
def guest_user(self):
"""Gets the guest_user of this SmbSettingsGlobalExtended. # noqa: E501
Specifies the fully-qualified user to use for guest access. # noqa: E501
:return: The guest_user of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: str
"""
return self._guest_user
@guest_user.setter
def guest_user(self, guest_user):
"""Sets the guest_user of this SmbSettingsGlobalExtended.
Specifies the fully-qualified user to use for guest access. # noqa: E501
:param guest_user: The guest_user of this SmbSettingsGlobalExtended. # noqa: E501
:type: str
"""
self._guest_user = guest_user
@property
def ignore_eas(self):
"""Gets the ignore_eas of this SmbSettingsGlobalExtended. # noqa: E501
Specify whether to ignore EAs on files. # noqa: E501
:return: The ignore_eas of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._ignore_eas
@ignore_eas.setter
def ignore_eas(self, ignore_eas):
"""Sets the ignore_eas of this SmbSettingsGlobalExtended.
Specify whether to ignore EAs on files. # noqa: E501
:param ignore_eas: The ignore_eas of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._ignore_eas = ignore_eas
@property
def onefs_cpu_multiplier(self):
"""Gets the onefs_cpu_multiplier of this SmbSettingsGlobalExtended. # noqa: E501
Specify the number of OneFS driver worker threads per CPU. # noqa: E501
:return: The onefs_cpu_multiplier of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: int
"""
return self._onefs_cpu_multiplier
@onefs_cpu_multiplier.setter
def onefs_cpu_multiplier(self, onefs_cpu_multiplier):
"""Sets the onefs_cpu_multiplier of this SmbSettingsGlobalExtended.
Specify the number of OneFS driver worker threads per CPU. # noqa: E501
:param onefs_cpu_multiplier: The onefs_cpu_multiplier of this SmbSettingsGlobalExtended. # noqa: E501
:type: int
"""
if onefs_cpu_multiplier is not None and onefs_cpu_multiplier > 4: # noqa: E501
raise ValueError("Invalid value for `onefs_cpu_multiplier`, must be a value less than or equal to `4`") # noqa: E501
if onefs_cpu_multiplier is not None and onefs_cpu_multiplier < 1: # noqa: E501
raise ValueError("Invalid value for `onefs_cpu_multiplier`, must be a value greater than or equal to `1`") # noqa: E501
self._onefs_cpu_multiplier = onefs_cpu_multiplier
@property
def onefs_num_workers(self):
"""Gets the onefs_num_workers of this SmbSettingsGlobalExtended. # noqa: E501
Set the maximum number of OneFS driver worker threads. # noqa: E501
:return: The onefs_num_workers of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: int
"""
return self._onefs_num_workers
@onefs_num_workers.setter
def onefs_num_workers(self, onefs_num_workers):
"""Sets the onefs_num_workers of this SmbSettingsGlobalExtended.
Set the maximum number of OneFS driver worker threads. # noqa: E501
:param onefs_num_workers: The onefs_num_workers of this SmbSettingsGlobalExtended. # noqa: E501
:type: int
"""
if onefs_num_workers is not None and onefs_num_workers > 1024: # noqa: E501
raise ValueError("Invalid value for `onefs_num_workers`, must be a value less than or equal to `1024`") # noqa: E501
if onefs_num_workers is not None and onefs_num_workers < 0: # noqa: E501
raise ValueError("Invalid value for `onefs_num_workers`, must be a value greater than or equal to `0`") # noqa: E501
self._onefs_num_workers = onefs_num_workers
@property
def require_security_signatures(self):
"""Gets the require_security_signatures of this SmbSettingsGlobalExtended. # noqa: E501
Indicates whether the server requires signed SMB packets. # noqa: E501
:return: The require_security_signatures of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._require_security_signatures
@require_security_signatures.setter
def require_security_signatures(self, require_security_signatures):
"""Sets the require_security_signatures of this SmbSettingsGlobalExtended.
Indicates whether the server requires signed SMB packets. # noqa: E501
:param require_security_signatures: The require_security_signatures of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._require_security_signatures = require_security_signatures
@property
def server_side_copy(self):
"""Gets the server_side_copy of this SmbSettingsGlobalExtended. # noqa: E501
Enable Server Side Copy. # noqa: E501
:return: The server_side_copy of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._server_side_copy
@server_side_copy.setter
def server_side_copy(self, server_side_copy):
"""Sets the server_side_copy of this SmbSettingsGlobalExtended.
Enable Server Side Copy. # noqa: E501
:param server_side_copy: The server_side_copy of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._server_side_copy = server_side_copy
@property
def server_string(self):
"""Gets the server_string of this SmbSettingsGlobalExtended. # noqa: E501
Provides a description of the server. # noqa: E501
:return: The server_string of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: str
"""
return self._server_string
@server_string.setter
def server_string(self, server_string):
"""Sets the server_string of this SmbSettingsGlobalExtended.
Provides a description of the server. # noqa: E501
:param server_string: The server_string of this SmbSettingsGlobalExtended. # noqa: E501
:type: str
"""
self._server_string = server_string
@property
def service(self):
"""Gets the service of this SmbSettingsGlobalExtended. # noqa: E501
Specify whether service is enabled. # noqa: E501
:return: The service of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._service
@service.setter
def service(self, service):
"""Sets the service of this SmbSettingsGlobalExtended.
Specify whether service is enabled. # noqa: E501
:param service: The service of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._service = service
@property
def srv_cpu_multiplier(self):
"""Gets the srv_cpu_multiplier of this SmbSettingsGlobalExtended. # noqa: E501
Specify the number of SRV service worker threads per CPU. # noqa: E501
:return: The srv_cpu_multiplier of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: int
"""
return self._srv_cpu_multiplier
@srv_cpu_multiplier.setter
def srv_cpu_multiplier(self, srv_cpu_multiplier):
"""Sets the srv_cpu_multiplier of this SmbSettingsGlobalExtended.
Specify the number of SRV service worker threads per CPU. # noqa: E501
:param srv_cpu_multiplier: The srv_cpu_multiplier of this SmbSettingsGlobalExtended. # noqa: E501
:type: int
"""
if srv_cpu_multiplier is not None and srv_cpu_multiplier > 8: # noqa: E501
raise ValueError("Invalid value for `srv_cpu_multiplier`, must be a value less than or equal to `8`") # noqa: E501
if srv_cpu_multiplier is not None and srv_cpu_multiplier < 1: # noqa: E501
raise ValueError("Invalid value for `srv_cpu_multiplier`, must be a value greater than or equal to `1`") # noqa: E501
self._srv_cpu_multiplier = srv_cpu_multiplier
@property
def srv_num_workers(self):
"""Gets the srv_num_workers of this SmbSettingsGlobalExtended. # noqa: E501
Set the maximum number of SRV service worker threads. # noqa: E501
:return: The srv_num_workers of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: int
"""
return self._srv_num_workers
@srv_num_workers.setter
def srv_num_workers(self, srv_num_workers):
"""Sets the srv_num_workers of this SmbSettingsGlobalExtended.
Set the maximum number of SRV service worker threads. # noqa: E501
:param srv_num_workers: The srv_num_workers of this SmbSettingsGlobalExtended. # noqa: E501
:type: int
"""
if srv_num_workers is not None and srv_num_workers > 1024: # noqa: E501
raise ValueError("Invalid value for `srv_num_workers`, must be a value less than or equal to `1024`") # noqa: E501
if srv_num_workers is not None and srv_num_workers < 0: # noqa: E501
raise ValueError("Invalid value for `srv_num_workers`, must be a value greater than or equal to `0`") # noqa: E501
self._srv_num_workers = srv_num_workers
@property
def support_multichannel(self):
"""Gets the support_multichannel of this SmbSettingsGlobalExtended. # noqa: E501
Support multichannel. # noqa: E501
:return: The support_multichannel of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._support_multichannel
@support_multichannel.setter
def support_multichannel(self, support_multichannel):
"""Sets the support_multichannel of this SmbSettingsGlobalExtended.
Support multichannel. # noqa: E501
:param support_multichannel: The support_multichannel of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._support_multichannel = support_multichannel
@property
def support_netbios(self):
"""Gets the support_netbios of this SmbSettingsGlobalExtended. # noqa: E501
Support NetBIOS. # noqa: E501
:return: The support_netbios of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._support_netbios
@support_netbios.setter
def support_netbios(self, support_netbios):
"""Sets the support_netbios of this SmbSettingsGlobalExtended.
Support NetBIOS. # noqa: E501
:param support_netbios: The support_netbios of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._support_netbios = support_netbios
@property
def support_smb2(self):
"""Gets the support_smb2 of this SmbSettingsGlobalExtended. # noqa: E501
Support the SMB2 protocol on the server. # noqa: E501
:return: The support_smb2 of this SmbSettingsGlobalExtended. # noqa: E501
:rtype: bool
"""
return self._support_smb2
@support_smb2.setter
def support_smb2(self, support_smb2):
"""Sets the support_smb2 of this SmbSettingsGlobalExtended.
Support the SMB2 protocol on the server. # noqa: E501
:param support_smb2: The support_smb2 of this SmbSettingsGlobalExtended. # noqa: E501
:type: bool
"""
self._support_smb2 = support_smb2
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SmbSettingsGlobalExtended):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
4021d5d523973384ebf72c8ba41a80e66b87be23 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /raw_scripts/132.230.102.123-10.21.12.4/1569578039.py | 747b1029c89ff144a79977e858b82b6acd08b556 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,522 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teile 1. und 2.
class Vigenere:
def __init__(self, schluesselwort):
raise len(schluesselwort) == 0
self.__key = schluesselwort
def encrypt(self, w):
test = {1:"A",2:"B",3:"C",4:"D"}
result = ""
for letter in w:
for letter2 in test:
if letter == letter2[1]:
result += letter2[0]
return result
def decryp(self,w):
test = {1:"A",2:"B",3:"C",4:"D"}
result = ""
for letter in w:
for letter2 in test:
if letter == letter2[0]:
result += letter2[1]
return result
######################################################################
## hidden code
def mk_coverage():
covered = set()
target = set(range(6))
count = 0
def coverage(func):
nonlocal covered, target, count
def wrapper(key):
nonlocal covered, count
if key == "A":
covered.add(0)
elif key != "":
covered.add(1)
if len (key) > 1:
covered.add(2)
if key == key[0] * len (key):
covered.add(4)
else:
covered.add(5)
if len (key) > 2:
covered.add (3)
r = func (key)
count += 1
return r
if func == "achieved": return len(covered)
if func == "required": return len(target)
if func == "count" : return count
functools.update_wrapper (wrapper, func)
return wrapper
return coverage
coverage = mk_coverage ()
try:
Vigenere = coverage (Vigenere)
except:
pass
## Lösung Teil 3. (Tests)
assert Vigenere("ABCD").encrypt() == "1234"
assert Vigenere("1234").encrypt() == "ABCD"
######################################################################
## hidden tests
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_Vigenere (self):
assert Vigenere
def test_encrypt(self):
assert Vigenere.encrypt
assert 'w' in getfullargspec(Vigenere.encrypt).args
def test_decrypt(self):
assert Vigenere.decrypt
assert 'w' in getfullargspec(Vigenere.decrypt).args
class TestGrades:
def test_coverage(self):
assert coverage("achieved") == coverage("required")
def test_Vigenere_is_a_class(self):
assert "class" in repr (Vigenere.__wrapped__)
def test_docstring_present(self):
assert Vigenere.__doc__ is not None
assert Vigenere.encrypt.__doc__ is not None
assert Vigenere.decrypt.__doc__ is not None
def test_empty_key (self):
with pytest.raises (Exception):
assert Vigenere ("")
def test_has_key(self):
k = "asfdg"
v = Vigenere(k)
assert v.key == k
def test_has_methods(self):
v = Vigenere("")
assert v.encrypt
assert v.decrypt
def test_identity(self):
charset = string.ascii_uppercase
v = Vigenere ("A")
for i in range (100):
s = ''.join(random.choice (charset) for j in range (100))
assert v.encrypt(s) == s
assert v.decrypt(s) == s
def test_inverses(self):
charset = string.ascii_uppercase
for i in range (100):
k = ''.join(random.choice (charset) for j in range (random.randrange (1,20)))
v = Vigenere (k)
for n in range (10):
s = ''.join(random.choice (charset) for j in range (100))
assert v.decrypt(v.encrypt(s)) == s
def test_shift (self):
charset = string.ascii_uppercase
for i in range (100):
k = random.choice (charset)
ok = ord (k) - ord ('A')
v = Vigenere (k * random.randrange (1, 100))
s = ''.join(random.choice (charset) for j in range (100))
se = v.encrypt (s)
assert len (se) == len (s)
for x, xe in zip (s, se):
d = (26 + ord (xe) - ord (x)) % 26
assert d == ok
sd = v.decrypt (s)
assert len (sd) == len (s)
for x, xd in zip (s, sd):
d = (26 + ord (x) - ord (xd)) % 26
assert d == ok
| [
"[email protected]"
] | |
ca66a5d5cbd1c66360f905b66c50a3a3a78e69fc | 07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8 | /lib/python3.6/site-packages/pygments/lexers/ampl.py | 88d9a335a05d787080d511248d6fb7102cf51e27 | [] | no_license | cronos91/ML-exercise | 39c5cd7f94bb90c57450f9a85d40c2f014900ea4 | 3b7afeeb6a7c87384049a9b87cac1fe4c294e415 | refs/heads/master | 2021-05-09T22:02:55.131977 | 2017-12-14T13:50:44 | 2017-12-14T13:50:44 | 118,736,043 | 0 | 0 | null | 2018-01-24T08:30:23 | 2018-01-24T08:30:22 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e80400498fbe866617b60933b2d859ca644ce1bcd0fd1419642e56bb84491be0
size 4120
| [
"[email protected]"
] | |
b84bc5c1ca3a1559be68059e22c3ad0797fb3b4e | 5e0737c75087c2bb631f760979b9afe13ba1e9b5 | /labs/Lab7_helper.py | 4e5018db80a65f8d2930abeb89e3a9c69a6f2fc4 | [] | no_license | anderson-github-classroom/csc-369-student | f54bf89ec84f58405b5ea39f13cd431becf8d796 | e8654e5656fdfc5e5c81022a6e1e0bca8ab97e04 | refs/heads/main | 2023-03-21T14:50:04.475831 | 2021-03-09T17:08:49 | 2021-03-09T17:08:49 | 325,129,939 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | from bson.objectid import ObjectId
from bson.code import Code
def exercise_1(col):
result = None
# Your solution here
return result
def exercise_2(col):
result = None
# Your solution here
return result
def process_exercise_3(result):
process_result = {}
for record in result:
process_result[record['_id']['state']] = record['sum']/record['count']
return process_result
def exercise_3(col,date1,date2):
result = None
# partial solution
# Your solution here
return result
def process_exercise_4(result):
process_result = {}
for record in result:
state,identifier = record['_id'].split(": ")
value = record['value']
if state not in process_result:
process_result[state] = 1.
if identifier == "sum":
process_result[state] *= value
elif identifier == "count":
process_result[state] *= 1/value
return process_result
def exercise_4(col,date1,date2):
result = None
# partial solution
# Your solution here
return result
| [
"[email protected]"
] | |
3735ecb70313723eec2dcb5a74478775404b1862 | d6e65aa23ff8b2344dacac93fe00fcfcd64cc414 | /ac_kth_excluded.py | 0562543eaa475be4c06d1338eda33ac95cdd5e8e | [] | no_license | diwadd/sport | c4b0ec3547cde882c549fa7b89e0132fdaf0c8fb | 220dfaf1329b4feea5b5ca490ffc17ef7fe76cae | refs/heads/master | 2023-05-29T13:17:23.516230 | 2023-05-20T22:08:28 | 2023-05-20T22:08:28 | 223,636,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | import bisect
nq = input().split(" ")
n = int(nq[0])
q = int(nq[1])
a_vec = input().split(" ")
a_vec = [int(a) for a in a_vec]
ranges_list = []
if a_vec[0] != 1:
ranges_list.append([0, a_vec[0]])
for i in range(1, len(a_vec)):
if a_vec[i] - a_vec[i-1] == 1:
continue
else:
ranges_list.append([a_vec[i-1], a_vec[i]])
if a_vec[-1] != 10*18:
ranges_list.append([a_vec[-1], 10**19])
numbers = [0 for _ in range(len(ranges_list))]
for i in range(len(ranges_list)):
numbers[i] = ranges_list[i][1] - ranges_list[i][0] - 1
prefix_sum = [0 for _ in range(len(numbers))]
prefix_sum[0] = numbers[0]
for i in range(1, len(numbers)):
prefix_sum[i] = prefix_sum[i-1] + numbers[i]
for _ in range(q):
k = int(input())
pos = bisect.bisect_left(prefix_sum, k)
res = None
if pos == len(ranges_list) - 1:
if pos - 1 >= 0:
res = ranges_list[pos][0] + k - prefix_sum[pos-1]
else:
res = ranges_list[pos][0] + k
elif pos == 0:
res = ranges_list[pos][0] + k
else:
if pos - 1 >= 0:
res = ranges_list[pos][0] + k - prefix_sum[pos-1]
else:
res = ranges_list[pos][0] + k
print(f"{res}") | [
"[email protected]"
] | |
232fb2bc3b437ecc04e52293372acc262f8fc569 | 58f095f52d58afa9e8041c69fa903c5a9e4fa424 | /examples/example10.py | d1559702fb592fcbbb38d226465818239f4e3b58 | [
"BSD-3-Clause"
] | permissive | cdeil/mystic | e41b397e9113aee1843bc78b5b4ca30bd0168114 | bb30994987f36168b8f09431cb9c3823afd892cd | refs/heads/master | 2020-12-25T23:18:52.086894 | 2014-08-13T14:36:09 | 2014-08-13T14:36:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,013 | py | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2014 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
Example:
- Solve 8th-order Chebyshev polynomial coefficients with DE.
- Plot (x2) of convergence to Chebyshev polynomial.
- Monitor (x2) Chi-Squared for Chebyshev polynomial.
Demonstrates:
- standard models
- expanded solver interface
- built-in random initial guess
- customized monitors and termination conditions
- customized DE mutation strategies
- use of solver members to retrieve results information
"""
# Differential Evolution solver
from mystic.solvers import DifferentialEvolutionSolver2
# Chebyshev polynomial and cost function
from mystic.models.poly import chebyshev8, chebyshev8cost
from mystic.models.poly import chebyshev8coeffs
# tools
from mystic.termination import VTR
from mystic.strategy import Best1Exp
from mystic.monitors import VerboseMonitor, Monitor
from mystic.tools import getch, random_seed
from mystic.math import poly1d
import pylab
pylab.ion()
# draw the plot
def plot_frame(label=None):
pylab.close()
pylab.title("8th-order Chebyshev coefficient convergence")
pylab.xlabel("Differential Evolution %s" % label)
pylab.ylabel("Chi-Squared")
pylab.draw()
return
# plot the polynomial trajectories
def plot_params(monitor):
x = range(len(monitor.y))
pylab.plot(x,monitor.y,'b-')
pylab.axis([1,0.5*x[-1],0,monitor.y[1]],'k-')
pylab.draw()
return
if __name__ == '__main__':
print "Differential Evolution"
print "======================"
# set range for random initial guess
ndim = 9
x0 = [(-100,100)]*ndim
random_seed(123)
# configure monitors
stepmon = VerboseMonitor(50)
evalmon = Monitor()
# use DE to solve 8th-order Chebyshev coefficients
npop = 10*ndim
solver = DifferentialEvolutionSolver2(ndim,npop)
solver.SetRandomInitialPoints(min=[-100]*ndim, max=[100]*ndim)
solver.SetEvaluationLimits(generations=999)
solver.SetEvaluationMonitor(evalmon)
solver.SetGenerationMonitor(stepmon)
solver.enable_signal_handler()
solver.Solve(chebyshev8cost, termination=VTR(0.01), strategy=Best1Exp, \
CrossProbability=1.0, ScalingFactor=0.9)
solution = solver.bestSolution
# get solved coefficients and Chi-Squared (from solver members)
iterations = solver.generations
cost = solver.bestEnergy
print "Generation %d has best Chi-Squared: %f" % (iterations, cost)
print "Solved Coefficients:\n %s\n" % poly1d(solver.bestSolution)
# plot convergence of coefficients per iteration
plot_frame('iterations')
plot_params(stepmon)
getch()
# plot convergence of coefficients per function call
plot_frame('function calls')
plot_params(evalmon)
getch()
# end of file
| [
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] | mmckerns@968178ea-60bd-409e-af13-df8a517b6005 |
250c99544b764054cbb51329107670b550aac94e | e4aab0a71dc5c047d8b1576380b16364e03e7c0d | /backup.py | 8c72d7628780821364635e912abc49be03239e3f | [
"Apache-2.0"
] | permissive | Joecastra/Watcher3 | 8ca66c44846030f0eb771d9d6ddeb9c37f637a4e | ce25d475f83ed36d6772f0cc35ef020d5e47c94b | refs/heads/master | 2021-01-19T11:05:55.454351 | 2017-04-10T20:17:24 | 2017-04-10T20:17:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,478 | py | import argparse
import os
import shutil
import sys
import zipfile
tmpdir = 'backup_tmp'
posterpath = os.path.join('static', 'images', 'posters')
def backup(require_confirm=True):
# check for files and paths
if not os.path.isfile('watcher.sqlite'):
if require_confirm is True:
if input('Database watcher.sqlite not found. Continue? (y/N): ').lower() != 'y':
return
database = False
else:
database = True
if not os.path.isfile('config.cfg'):
if require_confirm is True:
if input('Config config.cfg not found. Continue? (y/N): ').lower() != 'y':
return
config = False
else:
config = True
if not os.path.isdir(posterpath):
if require_confirm is True:
if input('Config config.cfg not found. Continue? (y/N): ').lower() != 'y':
return
posters = False
else:
posters = True
# make temp dir
if os.path.isdir(tmpdir):
print('Old temporary directory found. Removing.')
shutil.rmtree(tmpdir)
print('Creating temporary backup directory.')
os.mkdir('backup_tmp')
if database:
print('Copying database.')
shutil.copy2('watcher.sqlite', tmpdir)
if config:
print('Copying config.')
shutil.copy2('config.cfg', tmpdir)
if posters:
print('Copying posters.')
dst = os.path.join(tmpdir, 'posters/')
os.mkdir(dst)
for file in os.listdir(posterpath):
src = os.path.join(posterpath, file)
shutil.copy2(src, dst)
# create backup zip
print('Creating watcher.zip')
shutil.make_archive('watcher', 'zip', tmpdir)
print('Removing temporary backup directory.')
shutil.rmtree(tmpdir)
print('**############################################################**')
print('**##################### Backup finished ######################**')
print('**################# Zip backup: watcher.zip ##################**')
print('**############################################################**')
return
def restore(require_confirm=True):
cwd = os.getcwd()
if not os.path.isfile('watcher.zip'):
print('watcher.zip not found. Place watcher.zip in same directory as backup script.')
return
if require_confirm is True:
ans = input('Restoring backup. This will overwrite existing '
'database, config, and posters. Continue? (y/N): ')
if ans.lower() != 'y':
return
# make temp dir
if os.path.isdir(tmpdir):
print('Old temporary directory found. Removing.')
shutil.rmtree(tmpdir)
print('Creating temporary extraction directory.')
os.mkdir('backup_tmp')
print('Extracting zip.')
zipf = zipfile.ZipFile('watcher.zip')
zipf.extractall(tmpdir)
files = os.listdir(tmpdir)
if 'watcher.sqlite' in files:
print('Restoring database.')
src = os.path.join(tmpdir, 'watcher.sqlite')
if os.path.isfile('watcher.sqlite'):
os.remove('watcher.sqlite')
shutil.copy(src, cwd)
if 'config.cfg' in files:
print('Restoring config.')
src = os.path.join(tmpdir, 'config.cfg')
if os.path.isfile('config.cfg'):
os.remove('config.cfg')
shutil.copy(src, cwd)
if 'posters' in files:
print('Restoring posters.')
tmp_posters = os.path.join(tmpdir, 'posters')
if not os.path.isdir(tmp_posters):
print('Error restoring posters. Not a dir.')
# remove existing posters folder and contents
if os.path.isdir(posterpath):
shutil.rmtree(posterpath)
# make new poster dir
os.mkdir(posterpath)
for poster in os.listdir(tmp_posters):
src = os.path.join(tmp_posters, poster)
shutil.copy2(src, posterpath)
print('Removing temporary directory.')
shutil.rmtree(tmpdir)
print('**############################################################**')
print('**##################### Backup finished ######################**')
print('**################# Zip backup: watcher.zip ##################**')
print('**############################################################**')
return
if __name__ == '__main__':
print('**############################################################**')
print('**############### Watcher backup/restore tool ################**')
print('** Confirm that Watcher is not running while restoring backup **')
print('**############################################################**')
os.chdir(os.path.dirname(os.path.realpath(__file__)))
cwd = os.getcwd()
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('-b', '--backup', help='Back up to watcher.zip.', action="store_true")
group.add_argument('-r', '--restore', help='Restore from watcher.zip.', action="store_true")
group.add_argument('-y', '--confirm', help='Ignore warnings and answer Y to prompts.', action="store_true")
args = parser.parse_args()
if args.confirm:
require_confirm = False
else:
require_confirm = True
if args.backup:
backup(require_confirm)
sys.exit(0)
elif args.restore:
restore(require_confirm)
sys.exit(0)
else:
print('Invalid arguments.')
sys.exit(0)
| [
"[email protected]"
] | |
a43cfa67569d76d94811a81eaf8ca5a7c3499126 | cce63dc8bf66718746019c18df6355cabe34471a | /site_scons/ackward/constructor.py | 9ff18482422125bf7a2ab234594f46856c5f0b1b | [
"MIT"
] | permissive | abingham/ackward | 1592495c31fff5812b484719d93a0c140d26c127 | f1a45293de570f4b4429d9eaeb3f6c4da7d245bf | refs/heads/master | 2016-08-12T08:28:07.824253 | 2012-04-29T13:24:50 | 2012-04-29T13:24:50 | 49,885,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | from .element import SigTemplateElement
from .include import ImplInclude
from .trace import trace
header_template = '${class_name}($header_signature);'
impl_template = '''
${class_name}::${class_name}($impl_signature) try :
core::Object (
${class_name}::cls()($parameters) )
$constructor_initializers
{
}
TRANSLATE_PYTHON_EXCEPTION()
'''
class Constructor(SigTemplateElement):
'''A template for class constructors.
'''
@trace
def __init__(self,
signature=[],
parent=None,
doc=None):
'''
Args:
* cls: The class to which this contructor belongs.
* signature: A sequence of parameter descriptions.
'''
SigTemplateElement.__init__(
self,
open_templates={
'header': header_template,
'impl': impl_template,
},
symbols = {
'signature' : signature,
},
parent=parent,
doc=doc)
self.add_child(
ImplInclude(
('ackward', 'core', 'ExceptionTranslation.hpp')))
| [
"[email protected]"
] | |
74c5b17fcf4cbff3689ddd9ddff7b7894f1efbfe | 395cabaa64a3a823a74e0dc52dd801cb7846d6df | /fluids/two_phase_voidage.pyi | dbe07a821817c30716c2c99cbc6ad42fb5565190 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | CalebBell/fluids | 883e28aae944e0f55cdc4e759edf9868714afebb | 837acc99075f65060dfab4e209a72dff4c4fe479 | refs/heads/master | 2023-09-01T07:53:27.386513 | 2023-08-19T23:49:01 | 2023-08-19T23:49:01 | 48,924,523 | 298 | 85 | MIT | 2023-06-06T05:11:12 | 2016-01-02T21:31:10 | Python | UTF-8 | Python | false | false | 5,192 | pyi | # DO NOT EDIT - AUTOMATICALLY GENERATED BY tests/make_test_stubs.py!
from __future__ import annotations
from typing import List
from typing import (
List,
Optional,
)
def Armand(x: float, rhol: float, rhog: float) -> float: ...
def Baroczy(x: float, rhol: float, rhog: float, mul: float, mug: float) -> float: ...
def Beattie_Whalley(x: float, mul: float, mug: float, rhol: float, rhog: float) -> float: ...
def Chisholm_Armand(x: float, rhol: float, rhog: float) -> float: ...
def Chisholm_voidage(x: float, rhol: float, rhog: float) -> float: ...
def Cicchitti(x: float, mul: float, mug: float) -> float: ...
def Dix(x: float, rhol: float, rhog: float, sigma: float, m: float, D: float, g: float = ...) -> float: ...
def Domanski_Didion(x: float, rhol: float, rhog: float, mul: float, mug: float) -> float: ...
def Duckler(x: float, mul: float, mug: float, rhol: float, rhog: float) -> float: ...
def Fauske(x: float, rhol: float, rhog: float) -> float: ...
def Fourar_Bories(x: float, mul: float, mug: float, rhol: float, rhog: float) -> float: ...
def Graham(
x: float,
rhol: float,
rhog: float,
mul: float,
mug: float,
m: float,
D: float,
g: float = ...
) -> float: ...
def Gregory_Scott(x: float, rhol: float, rhog: float) -> float: ...
def Guzhov(x: float, rhol: float, rhog: float, m: float, D: float) -> float: ...
def Harms(
x: float,
rhol: float,
rhog: float,
mul: float,
mug: float,
m: float,
D: float
) -> float: ...
def Huq_Loth(x: float, rhol: float, rhog: float) -> float: ...
def Kawahara(x: float, rhol: float, rhog: float, D: float) -> float: ...
def Kopte_Newell_Chato(
x: float,
rhol: float,
rhog: float,
mul: float,
mug: float,
m: float,
D: float,
g: float = ...
) -> float: ...
def Lin_Kwok(x: float, mul: float, mug: float) -> float: ...
def Lockhart_Martinelli_Xtt(
x: float,
rhol: float,
rhog: float,
mul: float,
mug: float,
pow_x: float = ...,
pow_rho: float = ...,
pow_mu: float = ...,
n: Optional[float] = ...
) -> float: ...
def McAdams(x: float, mul: float, mug: float) -> float: ...
def Nicklin_Wilkes_Davidson(x: float, rhol: float, rhog: float, m: float, D: float, g: float = ...) -> float: ...
def Nishino_Yamazaki(x: float, rhol: float, rhog: float) -> float: ...
def Rouhani_1(x: float, rhol: float, rhog: float, sigma: float, m: float, D: float, g: float = ...) -> float: ...
def Rouhani_2(x: float, rhol: float, rhog: float, sigma: float, m: float, D: float, g: float = ...) -> float: ...
def Smith(x: float, rhol: float, rhog: float) -> float: ...
def Steiner(x: float, rhol: float, rhog: float, sigma: float, m: float, D: float, g: float = ...) -> float: ...
def Sun_Duffey_Peng(
x: float,
rhol: float,
rhog: float,
sigma: float,
m: float,
D: float,
P: float,
Pc: float,
g: float = ...
) -> float: ...
def Tandon_Varma_Gupta(
x: float,
rhol: float,
rhog: float,
mul: float,
mug: float,
m: float,
D: float
) -> float: ...
def Thom(x: float, rhol: float, rhog: float, mul: float, mug: float) -> float: ...
def Turner_Wallis(x: float, rhol: float, rhog: float, mul: float, mug: float) -> float: ...
def Woldesemayat_Ghajar(
x: float,
rhol: float,
rhog: float,
sigma: float,
m: float,
D: float,
P: float,
angle: float = ...,
g: float = ...
) -> float: ...
def Xu_Fang_voidage(x: float, rhol: float, rhog: float, m: float, D: float, g: float = ...) -> float: ...
def Yashar(
x: float,
rhol: float,
rhog: float,
mul: float,
mug: float,
m: float,
D: float,
g: float = ...
) -> float: ...
def Zivi(x: float, rhol: float, rhog: float) -> float: ...
def density_two_phase(alpha: float, rhol: float, rhog: float) -> float: ...
def gas_liquid_viscosity(
x: float,
mul: float,
mug: float,
rhol: Optional[float] = ...,
rhog: Optional[float] = ...,
Method: Optional[str] = ...
) -> float: ...
def gas_liquid_viscosity_methods(
rhol: Optional[float] = ...,
rhog: Optional[float] = ...,
check_ranges: bool = ...
) -> List[str]: ...
def homogeneous(x: float, rhol: float, rhog: float) -> float: ...
def liquid_gas_voidage(
x: float,
rhol: float,
rhog: float,
D: Optional[float] = ...,
m: Optional[float] = ...,
mul: Optional[float] = ...,
mug: Optional[float] = ...,
sigma: Optional[float] = ...,
P: Optional[float] = ...,
Pc: Optional[float] = ...,
angle: int = ...,
g: float = ...,
Method: Optional[str] = ...
) -> float: ...
def liquid_gas_voidage_methods(
x: float,
rhol: float,
rhog: float,
D: Optional[float] = ...,
m: Optional[float] = ...,
mul: Optional[float] = ...,
mug: Optional[float] = ...,
sigma: Optional[float] = ...,
P: Optional[float] = ...,
Pc: Optional[float] = ...,
angle: float = ...,
g: float = ...,
check_ranges: bool = ...
) -> List[str]: ...
def two_phase_voidage_experimental(rho_lg: float, rhol: float, rhog: float) -> float: ...
__all__: List[str] | [
"[email protected]"
] | |
b358377477d8140adb098edf7df754b378f8c110 | 95133906bd7b95359080386ea7570afd26364882 | /publishconf.py | 2f4900957f007ee04033474b0248a67fbc928a37 | [] | no_license | jzuhone/jzuhone.com | 94325e3afab4ce75d7b7a8268645597c6a4c80a8 | 3e3c3774701ed6a2251e71dc11a7a5e7596bdc92 | refs/heads/master | 2020-05-17T19:13:09.102206 | 2016-08-15T14:42:31 | 2016-08-15T14:42:31 | 14,403,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
SITEURL = 'http://hea-www.cfa.harvard.edu/~jzuhone/'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
| [
"[email protected]"
] | |
d9d9dbcd006304cb5a1f7a453809467d03b37be1 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/actrl/rulehitaghist1qtr.py | 005543e4e73f4c84e03bd58f42e6196a3a87d08e | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 30,420 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RuleHitAgHist1qtr(Mo):
"""
A class that represents historical aggregated statistics for rule hits in a 1 quarter sampling interval. This class updates every day.
"""
meta = StatsClassMeta("cobra.model.actrl.RuleHitAgHist1qtr", "rule hits")
counter = CounterMeta("revPkts", CounterCategory.COUNTER, "packets", "reverse hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "revPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "revPktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "revPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "revPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "revPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "revPktsRate"
meta._counters.append(counter)
counter = CounterMeta("pkts", CounterCategory.COUNTER, "packets", "hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "pktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "pktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "pktsRate"
meta._counters.append(counter)
counter = CounterMeta("egrPkts", CounterCategory.COUNTER, "packets", "egress hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "egrPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "egrPktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "egrPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "egrPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "egrPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "egrPktsRate"
meta._counters.append(counter)
counter = CounterMeta("ingrPkts", CounterCategory.COUNTER, "packets", "ingress hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "ingrPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "ingrPktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "ingrPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "ingrPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "ingrPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "ingrPktsRate"
meta._counters.append(counter)
meta.moClassName = "actrlRuleHitAgHist1qtr"
meta.rnFormat = "HDactrlRuleHitAg1qtr-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical aggregated rule hits stats in 1 quarter"
meta.writeAccessMask = 0x601
meta.readAccessMask = 0x601
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.fv.RInfoHolder")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.actrl.RuleHitAgHist")
meta.rnPrefixes = [
('HDactrlRuleHitAg1qtr-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "egrPktsCum", "egrPktsCum", 7483, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "egress hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsCum", prop)
prop = PropMeta("str", "egrPktsPer", "egrPktsPer", 7484, PropCategory.IMPLICIT_PERIODIC)
prop.label = "egress hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsPer", prop)
prop = PropMeta("str", "egrPktsRate", "egrPktsRate", 7488, PropCategory.IMPLICIT_RATE)
prop.label = "egress hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsRate", prop)
prop = PropMeta("str", "egrPktsSpct", "egrPktsSpct", 7485, PropCategory.IMPLICIT_SUSPECT)
prop.label = "egress hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsSpct", prop)
prop = PropMeta("str", "egrPktsThr", "egrPktsThr", 7486, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "egress hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("egrPktsThr", prop)
prop = PropMeta("str", "egrPktsTr", "egrPktsTr", 7487, PropCategory.IMPLICIT_TREND)
prop.label = "egress hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsTr", prop)
prop = PropMeta("str", "index", "index", 5819, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "ingrPktsCum", "ingrPktsCum", 7544, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "ingress hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsCum", prop)
prop = PropMeta("str", "ingrPktsPer", "ingrPktsPer", 7545, PropCategory.IMPLICIT_PERIODIC)
prop.label = "ingress hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsPer", prop)
prop = PropMeta("str", "ingrPktsRate", "ingrPktsRate", 7549, PropCategory.IMPLICIT_RATE)
prop.label = "ingress hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsRate", prop)
prop = PropMeta("str", "ingrPktsSpct", "ingrPktsSpct", 7546, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ingress hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsSpct", prop)
prop = PropMeta("str", "ingrPktsThr", "ingrPktsThr", 7547, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ingress hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("ingrPktsThr", prop)
prop = PropMeta("str", "ingrPktsTr", "ingrPktsTr", 7548, PropCategory.IMPLICIT_TREND)
prop.label = "ingress hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsTr", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "pktsCum", "pktsCum", 24188, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsCum", prop)
prop = PropMeta("str", "pktsPer", "pktsPer", 24189, PropCategory.IMPLICIT_PERIODIC)
prop.label = "hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsPer", prop)
prop = PropMeta("str", "pktsRate", "pktsRate", 24193, PropCategory.IMPLICIT_RATE)
prop.label = "hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRate", prop)
prop = PropMeta("str", "pktsSpct", "pktsSpct", 24190, PropCategory.IMPLICIT_SUSPECT)
prop.label = "hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsSpct", prop)
prop = PropMeta("str", "pktsThr", "pktsThr", 24191, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsThr", prop)
prop = PropMeta("str", "pktsTr", "pktsTr", 24192, PropCategory.IMPLICIT_TREND)
prop.label = "hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "revPktsCum", "revPktsCum", 24243, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "reverse hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsCum", prop)
prop = PropMeta("str", "revPktsPer", "revPktsPer", 24244, PropCategory.IMPLICIT_PERIODIC)
prop.label = "reverse hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsPer", prop)
prop = PropMeta("str", "revPktsRate", "revPktsRate", 24248, PropCategory.IMPLICIT_RATE)
prop.label = "reverse hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsRate", prop)
prop = PropMeta("str", "revPktsSpct", "revPktsSpct", 24245, PropCategory.IMPLICIT_SUSPECT)
prop.label = "reverse hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsSpct", prop)
prop = PropMeta("str", "revPktsThr", "revPktsThr", 24246, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "reverse hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("revPktsThr", prop)
prop = PropMeta("str", "revPktsTr", "revPktsTr", 24247, PropCategory.IMPLICIT_TREND)
prop.label = "reverse hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsTr", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("ATgToGraphInst", "Graph Instances", "cobra.model.vns.GraphInst"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AEPgToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("MgmtInstPToNode", "External Management Network EPG to Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("OoBToNode", "Out-of-band Management EPG to Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("InBToNode", "Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("EPgToNwIf", "Interface", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CtxToNwIf", "Private Network to Interface", "cobra.model.nw.If"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
01950fd457b021ccfdfbf35c8e3b03ab29f0d828 | 010279e2ba272d09e9d2c4e903722e5faba2cf7a | /contrib/python/ipywidgets/py2/ipywidgets/widgets/widget_layout.py | 0b2d202761fa230489f593b60254cca4fb71ab8d | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | catboost/catboost | 854c1a1f439a96f1ae6b48e16644be20aa04dba2 | f5042e35b945aded77b23470ead62d7eacefde92 | refs/heads/master | 2023-09-01T12:14:14.174108 | 2023-09-01T10:01:01 | 2023-09-01T10:22:12 | 97,556,265 | 8,012 | 1,425 | Apache-2.0 | 2023-09-11T03:32:32 | 2017-07-18T05:29:04 | Python | UTF-8 | Python | false | false | 6,462 | py | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Contains the Layout class"""
from traitlets import Unicode, Instance, CaselessStrEnum, validate
from .widget import Widget, register
from .._version import __jupyter_widgets_base_version__
CSS_PROPERTIES=['inherit', 'initial', 'unset']
@register
class Layout(Widget):
"""Layout specification
Defines a layout that can be expressed using CSS. Supports a subset of
https://developer.mozilla.org/en-US/docs/Web/CSS/Reference
When a property is also accessible via a shorthand property, we only
expose the shorthand.
For example:
- ``flex-grow``, ``flex-shrink`` and ``flex-basis`` are bound to ``flex``.
- ``flex-wrap`` and ``flex-direction`` are bound to ``flex-flow``.
- ``margin-[top/bottom/left/right]`` values are bound to ``margin``, etc.
"""
_view_name = Unicode('LayoutView').tag(sync=True)
_view_module = Unicode('@jupyter-widgets/base').tag(sync=True)
_view_module_version = Unicode(__jupyter_widgets_base_version__).tag(sync=True)
_model_name = Unicode('LayoutModel').tag(sync=True)
# Keys
align_content = CaselessStrEnum(['flex-start', 'flex-end', 'center', 'space-between',
'space-around', 'space-evenly', 'stretch'] + CSS_PROPERTIES, allow_none=True, help="The align-content CSS attribute.").tag(sync=True)
align_items = CaselessStrEnum(['flex-start', 'flex-end', 'center',
'baseline', 'stretch'] + CSS_PROPERTIES, allow_none=True, help="The align-items CSS attribute.").tag(sync=True)
align_self = CaselessStrEnum(['auto', 'flex-start', 'flex-end',
'center', 'baseline', 'stretch'] + CSS_PROPERTIES, allow_none=True, help="The align-self CSS attribute.").tag(sync=True)
bottom = Unicode(None, allow_none=True, help="The bottom CSS attribute.").tag(sync=True)
border = Unicode(None, allow_none=True, help="The border CSS attribute.").tag(sync=True)
display = Unicode(None, allow_none=True, help="The display CSS attribute.").tag(sync=True)
flex = Unicode(None, allow_none=True, help="The flex CSS attribute.").tag(sync=True)
flex_flow = Unicode(None, allow_none=True, help="The flex-flow CSS attribute.").tag(sync=True)
height = Unicode(None, allow_none=True, help="The height CSS attribute.").tag(sync=True)
justify_content = CaselessStrEnum(['flex-start', 'flex-end', 'center',
'space-between', 'space-around'] + CSS_PROPERTIES, allow_none=True, help="The justify-content CSS attribute.").tag(sync=True)
justify_items = CaselessStrEnum(['flex-start', 'flex-end', 'center'] + CSS_PROPERTIES,
allow_none=True, help="The justify-items CSS attribute.").tag(sync=True)
left = Unicode(None, allow_none=True, help="The left CSS attribute.").tag(sync=True)
margin = Unicode(None, allow_none=True, help="The margin CSS attribute.").tag(sync=True)
max_height = Unicode(None, allow_none=True, help="The max-height CSS attribute.").tag(sync=True)
max_width = Unicode(None, allow_none=True, help="The max-width CSS attribute.").tag(sync=True)
min_height = Unicode(None, allow_none=True, help="The min-height CSS attribute.").tag(sync=True)
min_width = Unicode(None, allow_none=True, help="The min-width CSS attribute.").tag(sync=True)
overflow = Unicode(None, allow_none=True, help="The overflow CSS attribute.").tag(sync=True)
overflow_x = CaselessStrEnum(['visible', 'hidden', 'scroll', 'auto'] + CSS_PROPERTIES, allow_none=True, help="The overflow-x CSS attribute (deprecated).").tag(sync=True)
overflow_y = CaselessStrEnum(['visible', 'hidden', 'scroll', 'auto'] + CSS_PROPERTIES, allow_none=True, help="The overflow-y CSS attribute (deprecated).").tag(sync=True)
order = Unicode(None, allow_none=True, help="The order CSS attribute.").tag(sync=True)
padding = Unicode(None, allow_none=True, help="The padding CSS attribute.").tag(sync=True)
right = Unicode(None, allow_none=True, help="The right CSS attribute.").tag(sync=True)
top = Unicode(None, allow_none=True, help="The top CSS attribute.").tag(sync=True)
visibility = CaselessStrEnum(['visible', 'hidden']+CSS_PROPERTIES, allow_none=True, help="The visibility CSS attribute.").tag(sync=True)
width = Unicode(None, allow_none=True, help="The width CSS attribute.").tag(sync=True)
object_fit = CaselessStrEnum(['contain', 'cover', 'fill', 'scale-down', 'none'], allow_none=True, help="The object-fit CSS attribute.").tag(sync=True)
object_position = Unicode(None, allow_none=True, help="The object-position CSS attribute.").tag(sync=True)
grid_auto_columns = Unicode(None, allow_none=True, help="The grid-auto-columns CSS attribute.").tag(sync=True)
grid_auto_flow = CaselessStrEnum(['column','row','row dense','column dense']+ CSS_PROPERTIES, allow_none=True, help="The grid-auto-flow CSS attribute.").tag(sync=True)
grid_auto_rows = Unicode(None, allow_none=True, help="The grid-auto-rows CSS attribute.").tag(sync=True)
grid_gap = Unicode(None, allow_none=True, help="The grid-gap CSS attribute.").tag(sync=True)
grid_template_rows = Unicode(None, allow_none=True, help="The grid-template-rows CSS attribute.").tag(sync=True)
grid_template_columns = Unicode(None, allow_none=True, help="The grid-template-columns CSS attribute.").tag(sync=True)
grid_template_areas = Unicode(None, allow_none=True, help="The grid-template-areas CSS attribute.").tag(sync=True)
grid_row = Unicode(None, allow_none=True, help="The grid-row CSS attribute.").tag(sync=True)
grid_column = Unicode(None, allow_none=True, help="The grid-column CSS attribute.").tag(sync=True)
grid_area = Unicode(None, allow_none=True, help="The grid-area CSS attribute.").tag(sync=True)
@validate('overflow_x', 'overflow_y')
def _validate_overflows(self, proposal):
if proposal.value is not None:
import warnings
warnings.warn("Layout properties overflow_x and overflow_y have been deprecated and will be dropped in a future release. Please use the overflow shorthand property instead", DeprecationWarning)
return proposal.value
class LayoutTraitType(Instance):
klass = Layout
def validate(self, obj, value):
if isinstance(value, dict):
return super(LayoutTraitType, self).validate(obj, self.klass(**value))
else:
return super(LayoutTraitType, self).validate(obj, value)
| [
"[email protected]"
] | |
68b76bbb367a0b66083de4dbb90308c8e0069ab5 | 4f4f7b28b4c50c8df4381f8a9e68ae515d747424 | /examples/ndviz/0_basic_1d_signal.py | 2ebc08601ab124c106cad1d172e557b81922ff85 | [
"BSD-3-Clause"
] | permissive | christian-oreilly/visbrain | 6cd82c22c33039f5adfac1112ceba016c5a75a32 | b5f480a16555a10b0032465699a0c371e2be31db | refs/heads/master | 2020-06-01T12:24:57.810735 | 2017-09-09T12:44:25 | 2017-09-09T12:44:25 | 94,073,149 | 0 | 0 | null | 2017-06-12T08:29:51 | 2017-06-12T08:29:51 | null | UTF-8 | Python | false | false | 2,692 | py | """
Plot a 1d signal
================
This example show how to display and control simple 1d signal.
.. image:: ../../picture/picndviz/ex_basic_signal.png
"""
import numpy as np
from visbrain import Ndviz
# Create an empty dictionary :
kw = {}
# Sampling frequency :
sf = 1024.
# Create a 10hz cardinal sinus :
time = np.arange(-1000.1, 1000.1) / 1024.
y = np.sinc(2 * 10 * time).astype(np.float32)
kw['sf'] = sf
# ===================================================================
# Nd-plot configuration :
# -----------------------
"""
The Nd-plot can be used to display a large number of signals. I this example,
we defined above a row signal. In that case, the Nd-plot is not really usefull
be we just illustrate some of the possible inputs.
"""
# ===================================================================
# Display the Nd-plot panel and display the grid :
kw['nd_visible'] = True
kw['nd_grid'] = True
# Add a title / xlabel / ylabel :
kw['nd_title'] = 'Press 0 to reset camera / <space> for playing / r to reset'
kw['nd_xlabel'] = 'Configure using the "Nd-plot" tab of quick settings panel'
kw['nd_ylabel'] = 'Display quick settings using CTRL + d'
# Use a dynamic color (across time) :
kw['nd_color'] = 'dyn_time'
kw['nd_cmap'] = 'Spectral_r'
# Set the linewidth :
kw['nd_lw'] = 2
# ===================================================================
# 1d-plot configuration :
# -----------------------
"""
The 1d-plot can be used to inspect signal by signal. The signal can be display
in several forms cad (press the shortcut in parenthesis):
- As a continuous line (l)
- As a markers cloud (m)
- As a histogram (h)
- As a spectrogram (s)
- As an image (i - not available in this exemple -)
"""
# ===================================================================
# Display the Nd-plot panel and display the grid :
kw['od_visible'] = True
kw['od_grid'] = True
kw['od_title'] = 'Press m (marker), h (histogram), s (spectrogram), l (line)'
kw['od_xlabel'] = 'Switch between different plotting types'
kw['od_ylabel'] = 'Configure using the "Inspect" tab of quick settings panel'
# Marker size :
kw['od_msize'] = 20
# Number of bins in the histogram :
kw['od_bins'] = 100
# Number of fft points / step / starting and ending frequency :
kw['od_nfft'] = 512.
kw['od_step'] = 10.
kw['od_fstart'] = 0.
kw['od_fend'] = 50
# The color dynamically change with the amplitude of the signal :
kw['od_cmap'] = 'viridis'
kw['od_color'] = 'dyn_minmax'
# Every values under 0 will be set to red :
kw['od_vmin'], kw['od_under'] = 0., '#ab4642'
# Every values over 0.9 will be set to gay :
kw['od_vmax'], kw['od_over'] = 0.9, 'gray'
# Linewidth :
kw['od_lw'] = 2
Ndviz(y, **kw).show()
| [
"[email protected]"
] | |
52ab19697e8c0bdd1412fc80b69662d2dd44def8 | ce9d475cebeaec9cf10c467c577cb05c3b431fad | /code/chapter_12_example_05.py | 66860a252dfe2b04999c9862d06c8c8188745f3b | [] | no_license | Sundarmax/two-scoops-of-django-2.0-code-examples | 9c8f98d145aaa5498bb558fc5125379cd39003e5 | a15b2d4c240e879c03d2facf8592a644e27eb348 | refs/heads/master | 2022-04-19T10:14:53.795688 | 2020-03-04T15:16:25 | 2020-03-04T15:16:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,183 | py | """
Using This Code Example
=========================
The code examples provided are provided by Daniel Greenfeld and Audrey Roy of
Two Scoops Press to help you reference Two Scoops of Django: Best Practices
for Django 1.11 for Django 2.0 projects. Code Samples follow PEP-0008, with exceptions made for the
purposes of improving book formatting. Example code is provided "as is", and
is not intended to be, and should not be considered or labeled as "tutorial
code".
Permissions
============
In general, you may use the code we've provided with this book in your
programs and documentation. You do not need to contact us for permission
unless you're reproducing a significant portion of the code or using it in
commercial distributions. Examples:
* Writing a program that uses several chunks of code from this course does
not require permission.
* Selling or distributing a digital package from material taken from this
book does require permission.
* Answering a question by citing this book and quoting example code does not
require permission.
* Incorporating a significant amount of example code from this book into your
product's documentation does require permission.
Attributions usually include the title, author, publisher and an ISBN. For
example, "Two Scoops of Django: Best Practices for Django 1.11, by Daniel
Roy Greenfeld and Audrey Roy Greenfeld. Copyright 2017 Two Scoops Press
(978-0-692-91572-1)."
If you feel your use of code examples falls outside fair use of the permission
given here, please contact us at [email protected].
"""
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import UpdateView
from .forms import TasterForm
from .models import Taster
class TasterUpdateView(LoginRequiredMixin, UpdateView):
model = Taster
form_class = TasterForm
success_url = '/someplace/'
def get_form_kwargs(self):
"""This method is what injects forms with keyword arguments."""
# grab the current set of form #kwargs
kwargs = super().get_form_kwargs()
# Update the kwargs with the user_id
kwargs['user'] = self.request.user
return kwargs
| [
"[email protected]"
] | |
2a23248df2d772216b33169abee1e6bddb4c1062 | 8637ef9b14db2d54199cc189ff1b500c2731a3d3 | /analyze/scripts/contours.py | f0acf978bb6d70e09788f32110abe1fdc0800cd3 | [] | no_license | Vayel/MPD | 76610b9380364154608aafc43c0aed433b2ccc16 | 80381367b4963ff0f3c3eeefbf648fd02b675b8d | refs/heads/master | 2016-09-10T01:35:42.222213 | 2014-07-03T08:49:25 | 2014-07-03T08:49:25 | 20,148,560 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | # -*- coding: utf-8 -*-
# Python 2
# OpenCV required
import os
import sys
import numpy as np
import cv2
from cv_functions import loadImg
from global_functions import ensureDir
def main(path):
src = loadImg(path)
src = cv2.resize(src, (0,0), fx=0.7, fy=0.7)
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 127, 255, 0)[1]
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contoursLen = len(contours)
plantsNumber = 0
colorStep = int(200.0/contoursLen)
PERIMETER_LIMIT = 30
LINE_WIDTH = 2
for i in range(contoursLen):
perimeter = cv2.arcLength(contours[i], True)
if perimeter > PERIMETER_LIMIT:
plantsNumber += 1
val = (i+1) * colorStep
cv2.drawContours(src, [contours[i]], -1, (val,val,val), LINE_WIDTH)
print "(" + str(val) + "," + str(val) + "," + str(val) + ") : " + str(perimeter)
print "\n" + str(plantsNumber) + " plants."
cv2.imshow("Contours", src)
cv2.waitKey()
cv2.destroyAllWindows()
def printUsage():
print """
USAGE:
python contours.py <img-path>
e.g.: python contours.py bar/foo.jpg
"""
if __name__ == "__main__":
if len(sys.argv) > 1:
src = sys.argv[1]
main(src)
else:
printUsage()
| [
"[email protected]"
] | |
f296b5c06d5efae221488145545599ef45b172bd | 200eea364c07a2ae5d2533ce66cd0b046ae929f4 | /gca-cat | 17c28e78b9824b53b59ab4ce4579fc9217a8bac9 | [
"BSD-3-Clause"
] | permissive | dalinhuang/GCA-Python | f024e7810f0ccc8010bf6726bddb1e8f47383ff2 | 135107cb98c697d20f929cf9db6358a7c403d685 | refs/heads/master | 2020-04-24T14:02:01.200668 | 2018-01-16T18:05:21 | 2018-01-16T18:05:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import codecs
import sys
from gca.core import Abstract
from collections import OrderedDict
class LastUpdatedOrderedDict(OrderedDict):
'Store items in the order the keys were last added'
def __setitem__(self, key, value):
if key in self:
del self[key]
OrderedDict.__setitem__(self, key, value)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GCA Filter - filter list of abstract by files')
parser.add_argument('files', type=str, nargs='+')
args = parser.parse_args()
abstracts = LastUpdatedOrderedDict()
for f in args.files:
fd = codecs.open(f, 'r', encoding='utf-8') if f != '-' else sys.stdin
for a in Abstract.from_data(fd.read()):
abstracts[a.uuid] = a
fd.close()
abstracts = [a for a in abstracts.itervalues()]
data = Abstract.to_json(abstracts)
sys.stdout.write(data.encode('utf-8')) | [
"[email protected]"
] | ||
2d85b53a201f4797832bff154c5446bfa2f8676e | f3e1814436faac544cf9d56182b6658af257300a | /GOOGLE COMPETETIONS/codejam_Q_2_2021.py | c955dbe2b50bc045608df87b6c296a2553dfdf9a | [] | no_license | preetmodh/COMPETETIVE_CODING_QUESTIONS | 36961b8b75c9f34e127731eb4ffb5742e577e8a2 | c73afb87d197e30d801d628a9db261adfd701be9 | refs/heads/master | 2023-07-15T03:19:15.330633 | 2021-05-16T10:17:20 | 2021-05-16T10:17:20 | 279,030,727 | 2 | 1 | null | 2021-01-12T03:55:26 | 2020-07-12T09:18:57 | Python | UTF-8 | Python | false | false | 837 | py | def change(string):
for i in range(len(string)):
if string[i]=="?":
if i==0:
string[i]=string[i+1]
elif i==len(string)-1:
string[i]=string[len(string)-2]
else:
string[i]=string[i-1]
else:
continue
string=''.join(string)
return string
testcase=int(input())
for t in range(testcase):
a=input().split()
X=int(a[0])
Y=int(a[1])
string=a[2]
string=list(string)
ans=0
if len(string)==1:
print("Case #"+str(t+1)+": "+str(abs(ans)))
else:
string =change(string)
ans=X*string.count("CJ")
ans+=Y*string.count("JC")
print("Case #"+str(testcase+1)+": "+str(ans)) | [
"[email protected]"
] | |
7b40dc66da5a51767209659cb82ca97ea7bd57e6 | 08e76791377ef548e37982d2060b1f0a6dd89970 | /nuclear reacter.py | e1908dff710a0d2435f871f7b70effd59463b7d3 | [] | no_license | GuhanSGCIT/Trees-and-Graphs-problem | f975090b9118cf74c0c2efe271c010e9410fc0a7 | b1b0eec7c7e44f62fa0eff28a8379127f4a66f4e | refs/heads/master | 2023-02-12T21:43:28.362986 | 2021-01-18T16:34:48 | 2021-01-18T16:34:48 | 272,145,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,283 | py | """
Question:-
There are K nuclear reactor chambers labelled from 0 to K-1. Particles are bombarded onto chamber 0.The particles keep
collecting in the chamber 0. However if at any time, there are more than N particles in a chamber,a reaction will cause 1
particle to move to the immediate next chamber,and all the particles in the current chamber will be be destroyed and same
continues till no chamber has number of particles greater than N.
Given K,N and the total number of particles bombarded (A), find the final distribution of particles in the K chambers.
Particles are bombarded one at a time. After one particle is bombarded, the set of reactions, as described, take place.
After all reactions are over, the next particle is bombarded. If a particle is going out from the last chamber, it has
nowhere to go and is lost.
Input Desription:
The input will consist of one line containing three numbers A,N and K separated by spaces. A will be between
0 and 1000000000 inclusive. N will be between 0 and 100 inclusive. K will be between 1 and 100 inclusive.
All chambers start off with zero particles initially.
Output Desription:
Consists of K numbers on one line followed by a newline. The first number is the number of particles in chamber 0,
the second number is the number of particles in chamber 1 and so on.
Testases:
Input:
3 1 3
Output:
1 1.5 0.75
Explanation:
Total of 3 particles are bombarded. After particle 1 is bombarded, the chambers have particle distribution as "1 0 0".
After second particle is bombarded, number of particles in chamber 0 becomes 2 which is greater than 1. So, num of particles
in chamber 0 becomes 0 and in chamber 1 becomes 1. So now distribution is "0 1 0". After the 3rd particle is bombarded,
chamber 0 gets 1 particle and so distribution is "1 1 0" after all particles are bombarded one by one.t:
Input:
1 2 3
Output:
1 0.33 0.11
Input:
5 21 2
Output:
5 0.22
Input:
0 1 1
Output:
0
Input:
2 1 0
Output:
2 0.66
Solution:
"""
a ,n ,k = list(map(int,input().split()))
A = []
i = 0
while i < k and a != 0:
A.append(a % (n + 1))
#print(a % (n + 1))
a = a / (n + 1)
i = i + 1
while i < k:
A.append(0)
i = i + 1
print (' '.join(map(str,A)))
| [
"[email protected]"
] | |
6283a10e0d1bc367a0882b43b97ba5d9e23c2969 | 501615c82801733e69c7447ab9fd68d3883ed947 | /hotfix/.svn/pristine/62/6283a10e0d1bc367a0882b43b97ba5d9e23c2969.svn-base | 434ae5e6bf5a96cac5d9effc5c073fc8842df836 | [] | no_license | az0ne/python | b2e1cc1e925d1fcdb269e7dd4c48e24665deeeee | aec5d23bb412f7dfca374fb5c5b9988c1b817347 | refs/heads/master | 2021-07-18T02:08:46.314972 | 2017-10-27T06:23:36 | 2017-10-27T06:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | # -*- coding: utf-8 -*-
"""
@version: 2016/6/12
@author: Jackie
@contact: [email protected]
@file: urls.py
@time: 2016/6/12 16:13
@note: ??
"""
from django.conf.urls import patterns, url, include
urlpatterns = patterns(
'',
# add by jackie 20160612 488免费试学引导
# 课程大纲引导页
url(r'^(?P<course_short_name>.*?)/syllabus/$',
"mz_lps3_free.student.views.syllabus_index", name='syllabus'),
# 课程预约页
url(r'^(?P<course_short_name>.*?)/appointment/$',
"mz_lps3_free.student.views.appointment_index", name='appointment'),
# 学生端
url(r'^s/', include('mz_lps3_free.student.urls', namespace='student')),
# 老师端
url(r'^t/', include('mz_lps3_free.teacher.urls', namespace='teacher')),
)
| [
"[email protected]"
] | ||
a737110b9a871f6e38e01d0309a80f741a69a80d | b66ca95d947ebc80713f8857c50dd7ed81e96812 | /py/escher/static_site.py | 4167a4e13d613001c660d421fd3fd302440979b6 | [
"MIT"
] | permissive | clusterinnovationcentre/escher | 53b767a0e5358100342aab350aa4d6397c41bd40 | db4212226cb35fd1ca63e49929e60433353bc6d8 | refs/heads/master | 2021-01-19T12:15:17.772388 | 2016-09-14T18:21:24 | 2016-09-14T18:21:24 | 69,043,308 | 1 | 0 | null | 2016-09-23T16:44:07 | 2016-09-23T16:44:06 | null | UTF-8 | Python | false | false | 2,663 | py | from __future__ import print_function, unicode_literals
from escher.plots import Builder
from escher.urls import get_url
from escher.version import __version__
from escher.urls import top_directory, root_directory
from os.path import join, dirname, realpath
from jinja2 import Environment, PackageLoader
import shutil
# set up jinja2 template location
env = Environment(loader=PackageLoader('escher', 'templates'))
def generate_static_site():
print('Generating static site at %s' % top_directory)
# index file
template = env.get_template('homepage.html')
def static_rel(path):
return 'py/' + path
data = template.render(d3=static_rel(get_url('d3', 'local')),
boot_css=static_rel(get_url('boot_css', 'local')),
homepage_css=static_rel(get_url('homepage_css', 'local')),
favicon=static_rel(get_url('favicon', 'local')),
logo=static_rel(get_url('logo', 'local')),
documentation=get_url('documentation', protocol='https'),
github=get_url('github', protocol='https'),
github_releases=get_url('github_releases', protocol='https'),
homepage_js=static_rel(get_url('homepage_js', 'local')),
version=__version__,
map_download_url=get_url('map_download', 'local'),
web_version=True,
server_index_url=static_rel(get_url('server_index', 'local')))
with open(join(top_directory, 'index.html'), 'wb') as f:
f.write(data.encode('utf-8'))
# viewer and builder
# make the builder
builder = Builder(safe=True, id='static_map')
filepath = join(top_directory, 'builder')
with open(join(root_directory, get_url('server_index', source='local')), 'r') as f:
index_json = f.read()
html = builder.save_html(filepath=filepath,
overwrite=True,
js_source='local',
protocol=None,
minified_js=True,
static_site_index_json=index_json)
# copy over the source maps
escher_map = get_url('escher_min', 'local') + '.map'
builder_css_map = get_url('builder_css_min', 'local') + '.map'
shutil.copy(join(root_directory, escher_map), join(top_directory, 'builder', escher_map))
shutil.copy(join(root_directory, builder_css_map), join(top_directory, 'builder', builder_css_map))
if __name__=='__main__':
generate_static_site()
| [
"[email protected]"
] | |
e2e46aec0376fcdbcaa39b666abb78b1c447c79e | 116a4a2fcd3e9c3d216f96103006c707daa6001a | /HelloDjango/apps/awards/__init__.py | 9cbbbf85efbf866e2a69aa1f549ed4c7c701344b | [] | no_license | Eldar1988/a_white_birds | 22d743ed1fa651062f070c0e81b7ac665be7a72a | 0430d5322b3a55b6f55e9541675d6670f5d8a518 | refs/heads/master | 2022-12-18T20:23:26.293059 | 2020-09-15T04:27:59 | 2020-09-15T04:27:59 | 283,169,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | # default_app_config = 'apps.awards.app.AwardsAppConfig'
| [
"[email protected]"
] | |
91ada56a8de2120490cdfea95028b3a323a3ccec | 5f86944bdf1b810a84c63adc6ed01bbb48d2c59a | /kubernetes/client/models/v1_endpoints.py | 09876472e90030b902b980502a694c788125d712 | [
"Apache-2.0"
] | permissive | m4ttshaw/client-python | 384c721ba57b7ccc824d5eca25834d0288b211e2 | 4eac56a8b65d56eb23d738ceb90d3afb6dbd96c1 | refs/heads/master | 2021-01-13T06:05:51.564765 | 2017-06-21T08:31:03 | 2017-06-21T08:31:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,097 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1Endpoints(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, kind=None, metadata=None, subsets=None):
"""
V1Endpoints - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'subsets': 'list[V1EndpointSubset]'
}
self.attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'subsets': 'subsets'
}
self._api_version = api_version
self._kind = kind
self._metadata = metadata
self._subsets = subsets
@property
def api_version(self):
"""
Gets the api_version of this V1Endpoints.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:return: The api_version of this V1Endpoints.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1Endpoints.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1Endpoints.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1Endpoints.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1Endpoints.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1Endpoints.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1Endpoints.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1Endpoints.
Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:return: The metadata of this V1Endpoints.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1Endpoints.
Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1Endpoints.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def subsets(self):
"""
Gets the subsets of this V1Endpoints.
The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.
:return: The subsets of this V1Endpoints.
:rtype: list[V1EndpointSubset]
"""
return self._subsets
@subsets.setter
def subsets(self, subsets):
"""
Sets the subsets of this V1Endpoints.
The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.
:param subsets: The subsets of this V1Endpoints.
:type: list[V1EndpointSubset]
"""
if subsets is None:
raise ValueError("Invalid value for `subsets`, must not be `None`")
self._subsets = subsets
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1Endpoints):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
8546ebd8b135b63f7bb7d860baa493f7f5f385a1 | 15023745402e98c0a7bcc2fcc8cb8ea01a10139f | /Homework/venv/bin/pip3 | 07316c758d9ed7a1465f67e39ff8b99960bfa92f | [] | no_license | GarrettMatthews/CS_2300 | 7633cea579e358e0fceda70681e3a3b6b1f61a32 | e47c5b8ff3c8d79d721b334354ca605f748cff29 | refs/heads/master | 2020-12-14T14:49:16.771918 | 2020-01-30T23:19:43 | 2020-01-30T23:19:43 | 234,774,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | #!/home/garrett/Desktop/Git_Repositories/CS_2300/Homework/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | ||
8c497cc002f4ef01eeff79c9cdbd5164f0b56620 | 192874fd96861ceb1864a71bf6f13932cc017d63 | /hue/tools/app_reg/common.py | 7c95d79fa30c878ca0bbface55ce0ecc1f9b104d | [
"Apache-2.0"
] | permissive | OpenPOWER-BigData/HDP-hue | 1de3efc0ac773f1e7b1acd03675f11b65c6f477d | 23719febdaae26c916bdc9d0712645987ae7e0e4 | refs/heads/master | 2021-01-17T17:19:31.157051 | 2016-07-18T19:44:10 | 2016-07-18T19:44:10 | 63,631,863 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,657 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import logging
import os
import sys
from posixpath import curdir, sep, pardir, join
# The root of the Hue installation
INSTALL_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))
# The Hue config directory
HUE_CONF_DIR = os.path.join(INSTALL_ROOT, 'desktop', 'conf')
# Virtual env
VIRTUAL_ENV = os.path.join(INSTALL_ROOT, 'build', 'env')
# The Python executable in virtualenv
ENV_PYTHON = os.path.join(VIRTUAL_ENV, 'bin', 'python')
def cmp_version(ver1, ver2):
"""Compare two version strings in the form of 1.2.34"""
return cmp(ver1.split('.'), ver2.split('.'))
def _get_python_lib_dir():
glob_path = os.path.join(VIRTUAL_ENV, 'lib', 'python*')
res = glob.glob(glob_path)
if len(res) == 0:
raise SystemError("Cannot find a Python installation in %s. "
"Did you do `make hue'?" % glob_path)
elif len(res) > 1:
raise SystemError("Found multiple Python installations in %s. "
"Please `make clean' first." % glob_path)
return res[0]
def _get_python_site_packages_dir():
return os.path.join(_get_python_lib_dir(), 'site-packages')
# Creates os.path.relpath for Python 2.4 and 2.5
if not hasattr(os.path, 'relpath'):
# default to posixpath definition
# no windows support
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
os.path.relpath = relpath | [
"[email protected]"
] | |
bfef24a076e66dd7c4f8a3fff2a9944ce352f75b | e609a2e68426edc025e196fc87474d8b0c154286 | /triage/model_results_generators.py | 0fc4e27041fd8f5a5fb6570191e5f6bfab6f46fd | [
"MIT"
] | permissive | pvdb2178/triage | 0119db238a6ee6529ec9cdcdb9e6111b8d6d12fa | 1fea55cea3165d4f8af0ae49fa225ea0366484be | refs/heads/master | 2021-01-25T06:55:22.826502 | 2017-06-05T16:54:37 | 2017-06-05T16:54:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | class ModelResultsGenerator(object):
def __init__(self, trained_model_path, model_ids):
self.trained_model_path = trained_model_path
self.model_ids = model_ids
def generate(self):
"""TODO: for each trained model,
create metrics and write to Tyra-compatible database"""
pass
| [
"[email protected]"
] | |
853170d6ebcfcfa63ebccf1d5e418b943b3e0df1 | ee8577b0d70ae783e42032fdf38bd4ec6c210ccf | /barddo/notifications/admin.py | 5613ee702d99c99878bd3027f539e47e8511d172 | [] | no_license | bruno-ortiz/barddo | 9192cfd5aff9ca0f296f476877b468919bdc5636 | 4f7aa41fd0697af61539efd1aba2062addb63009 | refs/heads/master | 2023-05-19T11:15:05.251642 | 2014-09-21T18:36:44 | 2014-09-21T18:36:44 | 374,435,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Notification
class NotificationAdmin(admin.ModelAdmin):
list_display = ('recipient', 'actor',
'target', 'unread')
list_filter = ('unread', 'timestamp', )
admin.site.register(Notification, NotificationAdmin)
| [
"devnull@localhost"
] | devnull@localhost |
0e18c5faf91f310952478e4fef2f0be98535c706 | d7069bc46ab265f5787055a771886aba9af7949d | /zabbix/files/iops/check_io.py | 22fd768a93489b59d7c93c200ea91bf495e4b003 | [] | no_license | lizhenfen/work | a82951abee20dc3b7f998ec880ca8323cb71c5f6 | abf0f558a6a3a36931ea7bd95255db100b6d249f | refs/heads/master | 2021-01-12T05:34:39.677154 | 2017-03-21T09:28:02 | 2017-03-21T09:28:02 | 77,131,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | #!/usr/bin/env python
#-*- coding: utf8 -*-
import json
import sys
PY2 = sys.version_info[0] == 2
def getDEVInfo():
res = {}
data = []
with open('/proc/mounts') as fp:
for line in fp:
dev_dict = {}
if '/boot' in line: continue
if line.startswith('/dev'):
line = line.split()
dev_name, mount_point = line[0], line[1]
if 'docker' in dev_name: continue
if 'var' in mount_point: continue
dev_dict['{#DEVNAME}'] = dev_name.split('/')[-1]
dev_dict['{#MOUNTNAME}'] = mount_point
data.append(dev_dict)
res['data'] = data
return json.dumps(res, sort_keys=True, indent=4)
def getDEVStatis(devName, item):
data = {}
with open('/proc/diskstats') as fp:
for line in fp:
if devName in line:
line = line.strip().split()
dev_read_counts = line[3]
dev_read_ms = line[6]
dev_write_counts = line[7]
dev_write_ms = line[8]
dev_io_ms = line[12]
dev_read_sector = line[5]
dev_write_sector = line[9]
data = {
'read.ops' : dev_read_counts,
'read.ms' : dev_read_ms,
'write.ops': dev_write_counts,
'write.ms' : dev_write_ms,
'io.ms' : dev_io_ms,
'read.sector' : dev_read_sector,
'write_sector': dev_write_sector
}
if PY2:
print data.get(item)
else:
print(data.get(item))
if __name__ == "__main__":
if sys.argv[1] == 'discovery':
print getDEVInfo()
elif sys.argv[1] == 'status':
getDEVStatis(sys.argv[2],sys.argv[3])
else:
print "ERROR: argument error"
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.