blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
967efae6c9d94e25fd7eb7084f429081dab4fc8e | 00a9295409b78a53ce790f7ab44931939f42c0e0 | /FPGA/apio/iCEBreaker/FIR_Filter/sympy/venv/lib/python3.8/site-packages/sympy/polys/tests/test_polyclasses.py | 3a54ba5077ea1d734af10e8346b7b1e2094db916 | ["Apache-2.0"] | permissive | klei22/Tech-OnBoarding-Class | c21f0762d2d640d5e9cb124659cded5c865b32d4 | 960e962322c37be9117e0523641f8b582a2beceb | refs/heads/master | 2022-11-10T13:17:39.128342 | 2022-10-25T08:59:48 | 2022-10-25T08:59:48 | 172,292,871 | 2 | 3 | Apache-2.0 | 2019-05-19T00:26:32 | 2019-02-24T03:50:35 | C | UTF-8 | Python | false | false | 12,998 | py |
"""Tests for OO layer of several polynomial representations. """
from sympy.polys.domains import ZZ, QQ
from sympy.polys.polyclasses import DMP, DMF, ANP
from sympy.polys.polyerrors import ExactQuotientFailed, NotInvertible
from sympy.polys.specialpolys import f_polys
from sympy.testing.pytest import raises
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = [ f.to_dense() for f in f_polys() ]
def test_DMP___init__():
f = DMP([[0], [], [0, 1, 2], [3]], ZZ)
assert f.rep == [[1, 2], [3]]
assert f.dom == ZZ
assert f.lev == 1
f = DMP([[1, 2], [3]], ZZ, 1)
assert f.rep == [[1, 2], [3]]
assert f.dom == ZZ
assert f.lev == 1
f = DMP({(1, 1): 1, (0, 0): 2}, ZZ, 1)
assert f.rep == [[1, 0], [2]]
assert f.dom == ZZ
assert f.lev == 1
def test_DMP___eq__():
assert DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ) == \
DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ)
assert DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ) == \
DMP([[QQ(1), QQ(2)], [QQ(3)]], QQ)
assert DMP([[QQ(1), QQ(2)], [QQ(3)]], QQ) == \
DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ)
assert DMP([[[ZZ(1)]]], ZZ) != DMP([[ZZ(1)]], ZZ)
assert DMP([[ZZ(1)]], ZZ) != DMP([[[ZZ(1)]]], ZZ)
def test_DMP___bool__():
assert bool(DMP([[]], ZZ)) is False
assert bool(DMP([[1]], ZZ)) is True
def test_DMP_to_dict():
f = DMP([[3], [], [2], [], [8]], ZZ)
assert f.to_dict() == \
{(4, 0): 3, (2, 0): 2, (0, 0): 8}
assert f.to_sympy_dict() == \
{(4, 0): ZZ.to_sympy(3), (2, 0): ZZ.to_sympy(2), (0, 0):
ZZ.to_sympy(8)}
def test_DMP_properties():
assert DMP([[]], ZZ).is_zero is True
assert DMP([[1]], ZZ).is_zero is False
assert DMP([[1]], ZZ).is_one is True
assert DMP([[2]], ZZ).is_one is False
assert DMP([[1]], ZZ).is_ground is True
assert DMP([[1], [2], [1]], ZZ).is_ground is False
assert DMP([[1], [2, 0], [1, 0]], ZZ).is_sqf is True
assert DMP([[1], [2, 0], [1, 0, 0]], ZZ).is_sqf is False
assert DMP([[1, 2], [3]], ZZ).is_monic is True
assert DMP([[2, 2], [3]], ZZ).is_monic is False
assert DMP([[1, 2], [3]], ZZ).is_primitive is True
assert DMP([[2, 4], [6]], ZZ).is_primitive is False
def test_DMP_arithmetics():
f = DMP([[2], [2, 0]], ZZ)
assert f.mul_ground(2) == DMP([[4], [4, 0]], ZZ)
assert f.quo_ground(2) == DMP([[1], [1, 0]], ZZ)
raises(ExactQuotientFailed, lambda: f.exquo_ground(3))
f = DMP([[-5]], ZZ)
g = DMP([[5]], ZZ)
assert f.abs() == g
assert abs(f) == g
assert g.neg() == f
assert -g == f
h = DMP([[]], ZZ)
assert f.add(g) == h
assert f + g == h
assert g + f == h
assert f + 5 == h
assert 5 + f == h
h = DMP([[-10]], ZZ)
assert f.sub(g) == h
assert f - g == h
assert g - f == -h
assert f - 5 == h
assert 5 - f == -h
h = DMP([[-25]], ZZ)
assert f.mul(g) == h
assert f * g == h
assert g * f == h
assert f * 5 == h
assert 5 * f == h
h = DMP([[25]], ZZ)
assert f.sqr() == h
assert f.pow(2) == h
assert f**2 == h
raises(TypeError, lambda: f.pow('x'))
f = DMP([[1], [], [1, 0, 0]], ZZ)
g = DMP([[2], [-2, 0]], ZZ)
q = DMP([[2], [2, 0]], ZZ)
r = DMP([[8, 0, 0]], ZZ)
assert f.pdiv(g) == (q, r)
assert f.pquo(g) == q
assert f.prem(g) == r
raises(ExactQuotientFailed, lambda: f.pexquo(g))
f = DMP([[1], [], [1, 0, 0]], ZZ)
g = DMP([[1], [-1, 0]], ZZ)
q = DMP([[1], [1, 0]], ZZ)
r = DMP([[2, 0, 0]], ZZ)
assert f.div(g) == (q, r)
assert f.quo(g) == q
assert f.rem(g) == r
assert divmod(f, g) == (q, r)
assert f // g == q
assert f % g == r
raises(ExactQuotientFailed, lambda: f.exquo(g))
def test_DMP_functionality():
f = DMP([[1], [2, 0], [1, 0, 0]], ZZ)
g = DMP([[1], [1, 0]], ZZ)
h = DMP([[1]], ZZ)
assert f.degree() == 2
assert f.degree_list() == (2, 2)
assert f.total_degree() == 2
assert f.LC() == ZZ(1)
assert f.TC() == ZZ(0)
assert f.nth(1, 1) == ZZ(2)
raises(TypeError, lambda: f.nth(0, 'x'))
assert f.max_norm() == 2
assert f.l1_norm() == 4
u = DMP([[2], [2, 0]], ZZ)
assert f.diff(m=1, j=0) == u
assert f.diff(m=1, j=1) == u
raises(TypeError, lambda: f.diff(m='x', j=0))
u = DMP([1, 2, 1], ZZ)
v = DMP([1, 2, 1], ZZ)
assert f.eval(a=1, j=0) == u
assert f.eval(a=1, j=1) == v
assert f.eval(1).eval(1) == ZZ(4)
assert f.cofactors(g) == (g, g, h)
assert f.gcd(g) == g
assert f.lcm(g) == f
u = DMP([[QQ(45), QQ(30), QQ(5)]], QQ)
v = DMP([[QQ(1), QQ(2, 3), QQ(1, 9)]], QQ)
assert u.monic() == v
assert (4*f).content() == ZZ(4)
assert (4*f).primitive() == (ZZ(4), f)
f = DMP([[1], [2], [3], [4], [5], [6]], ZZ)
assert f.trunc(3) == DMP([[1], [-1], [], [1], [-1], []], ZZ)
f = DMP(f_4, ZZ)
assert f.sqf_part() == -f
assert f.sqf_list() == (ZZ(-1), [(-f, 1)])
f = DMP([[-1], [], [], [5]], ZZ)
g = DMP([[3, 1], [], []], ZZ)
h = DMP([[45, 30, 5]], ZZ)
r = DMP([675, 675, 225, 25], ZZ)
assert f.subresultants(g) == [f, g, h]
assert f.resultant(g) == r
f = DMP([1, 3, 9, -13], ZZ)
assert f.discriminant() == -11664
f = DMP([QQ(2), QQ(0)], QQ)
g = DMP([QQ(1), QQ(0), QQ(-16)], QQ)
s = DMP([QQ(1, 32), QQ(0)], QQ)
t = DMP([QQ(-1, 16)], QQ)
h = DMP([QQ(1)], QQ)
assert f.half_gcdex(g) == (s, h)
assert f.gcdex(g) == (s, t, h)
assert f.invert(g) == s
f = DMP([[1], [2], [3]], QQ)
raises(ValueError, lambda: f.half_gcdex(f))
raises(ValueError, lambda: f.gcdex(f))
raises(ValueError, lambda: f.invert(f))
f = DMP([1, 0, 20, 0, 150, 0, 500, 0, 625, -2, 0, -10, 9], ZZ)
g = DMP([1, 0, 0, -2, 9], ZZ)
h = DMP([1, 0, 5, 0], ZZ)
assert g.compose(h) == f
assert f.decompose() == [g, h]
f = DMP([[1], [2], [3]], QQ)
raises(ValueError, lambda: f.decompose())
raises(ValueError, lambda: f.sturm())
def test_DMP_exclude():
f = [[[[[[[[[[[[[[[[[[[[[[[[[[1]], [[]]]]]]]]]]]]]]]]]]]]]]]]]]
J = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 24, 25]
assert DMP(f, ZZ).exclude() == (J, DMP([1, 0], ZZ))
assert DMP([[1], [1, 0]], ZZ).exclude() == ([], DMP([[1], [1, 0]], ZZ))
def test_DMF__init__():
f = DMF(([[0], [], [0, 1, 2], [3]], [[1, 2, 3]]), ZZ)
assert f.num == [[1, 2], [3]]
assert f.den == [[1, 2, 3]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1, 2], [3]], [[1, 2, 3]]), ZZ, 1)
assert f.num == [[1, 2], [3]]
assert f.den == [[1, 2, 3]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[-1], [-2]], [[3], [-4]]), ZZ)
assert f.num == [[-1], [-2]]
assert f.den == [[3], [-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1], [2]], [[-3], [4]]), ZZ)
assert f.num == [[-1], [-2]]
assert f.den == [[3], [-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1], [2]], [[-3], [4]]), ZZ)
assert f.num == [[-1], [-2]]
assert f.den == [[3], [-4]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[]], [[-3], [4]]), ZZ)
assert f.num == [[]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(17, ZZ, 1)
assert f.num == [[17]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[1], [2]]), ZZ)
assert f.num == [[1], [2]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF([[0], [], [0, 1, 2], [3]], ZZ)
assert f.num == [[1, 2], [3]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF({(1, 1): 1, (0, 0): 2}, ZZ, 1)
assert f.num == [[1, 0], [2]]
assert f.den == [[1]]
assert f.lev == 1
assert f.dom == ZZ
f = DMF(([[QQ(1)], [QQ(2)]], [[-QQ(3)], [QQ(4)]]), QQ)
assert f.num == [[-QQ(1)], [-QQ(2)]]
assert f.den == [[QQ(3)], [-QQ(4)]]
assert f.lev == 1
assert f.dom == QQ
f = DMF(([[QQ(1, 5)], [QQ(2, 5)]], [[-QQ(3, 7)], [QQ(4, 7)]]), QQ)
assert f.num == [[-QQ(7)], [-QQ(14)]]
assert f.den == [[QQ(15)], [-QQ(20)]]
assert f.lev == 1
assert f.dom == QQ
raises(ValueError, lambda: DMF(([1], [[1]]), ZZ))
raises(ZeroDivisionError, lambda: DMF(([1], []), ZZ))
def test_DMF__bool__():
assert bool(DMF([[]], ZZ)) is False
assert bool(DMF([[1]], ZZ)) is True
def test_DMF_properties():
assert DMF([[]], ZZ).is_zero is True
assert DMF([[]], ZZ).is_one is False
assert DMF([[1]], ZZ).is_zero is False
assert DMF([[1]], ZZ).is_one is True
assert DMF(([[1]], [[2]]), ZZ).is_one is False
def test_DMF_arithmetics():
f = DMF([[7], [-9]], ZZ)
g = DMF([[-7], [9]], ZZ)
assert f.neg() == -f == g
f = DMF(([[1]], [[1], []]), ZZ)
g = DMF(([[1]], [[1, 0]]), ZZ)
h = DMF(([[1], [1, 0]], [[1, 0], []]), ZZ)
assert f.add(g) == f + g == h
assert g.add(f) == g + f == h
h = DMF(([[-1], [1, 0]], [[1, 0], []]), ZZ)
assert f.sub(g) == f - g == h
h = DMF(([[1]], [[1, 0], []]), ZZ)
assert f.mul(g) == f*g == h
assert g.mul(f) == g*f == h
h = DMF(([[1, 0]], [[1], []]), ZZ)
assert f.quo(g) == f/g == h
h = DMF(([[1]], [[1], [], [], []]), ZZ)
assert f.pow(3) == f**3 == h
h = DMF(([[1]], [[1, 0, 0, 0]]), ZZ)
assert g.pow(3) == g**3 == h
def test_ANP___init__():
rep = [QQ(1), QQ(1)]
mod = [QQ(1), QQ(0), QQ(1)]
f = ANP(rep, mod, QQ)
assert f.rep == [QQ(1), QQ(1)]
assert f.mod == [QQ(1), QQ(0), QQ(1)]
assert f.dom == QQ
rep = {1: QQ(1), 0: QQ(1)}
mod = {2: QQ(1), 0: QQ(1)}
f = ANP(rep, mod, QQ)
assert f.rep == [QQ(1), QQ(1)]
assert f.mod == [QQ(1), QQ(0), QQ(1)]
assert f.dom == QQ
f = ANP(1, mod, QQ)
assert f.rep == [QQ(1)]
assert f.mod == [QQ(1), QQ(0), QQ(1)]
assert f.dom == QQ
def test_ANP___eq__():
a = ANP([QQ(1), QQ(1)], [QQ(1), QQ(0), QQ(1)], QQ)
b = ANP([QQ(1), QQ(1)], [QQ(1), QQ(0), QQ(2)], QQ)
assert (a == a) is True
assert (a != a) is False
assert (a == b) is False
assert (a != b) is True
b = ANP([QQ(1), QQ(2)], [QQ(1), QQ(0), QQ(1)], QQ)
assert (a == b) is False
assert (a != b) is True
def test_ANP___bool__():
assert bool(ANP([], [QQ(1), QQ(0), QQ(1)], QQ)) is False
assert bool(ANP([QQ(1)], [QQ(1), QQ(0), QQ(1)], QQ)) is True
def test_ANP_properties():
mod = [QQ(1), QQ(0), QQ(1)]
assert ANP([QQ(0)], mod, QQ).is_zero is True
assert ANP([QQ(1)], mod, QQ).is_zero is False
assert ANP([QQ(1)], mod, QQ).is_one is True
assert ANP([QQ(2)], mod, QQ).is_one is False
def test_ANP_arithmetics():
mod = [QQ(1), QQ(0), QQ(0), QQ(-2)]
a = ANP([QQ(2), QQ(-1), QQ(1)], mod, QQ)
b = ANP([QQ(1), QQ(2)], mod, QQ)
c = ANP([QQ(-2), QQ(1), QQ(-1)], mod, QQ)
assert a.neg() == -a == c
c = ANP([QQ(2), QQ(0), QQ(3)], mod, QQ)
assert a.add(b) == a + b == c
assert b.add(a) == b + a == c
c = ANP([QQ(2), QQ(-2), QQ(-1)], mod, QQ)
assert a.sub(b) == a - b == c
c = ANP([QQ(-2), QQ(2), QQ(1)], mod, QQ)
assert b.sub(a) == b - a == c
c = ANP([QQ(3), QQ(-1), QQ(6)], mod, QQ)
assert a.mul(b) == a*b == c
assert b.mul(a) == b*a == c
c = ANP([QQ(-1, 43), QQ(9, 43), QQ(5, 43)], mod, QQ)
assert a.pow(0) == a**(0) == ANP(1, mod, QQ)
assert a.pow(1) == a**(1) == a
assert a.pow(-1) == a**(-1) == c
assert a.quo(a) == a.mul(a.pow(-1)) == a*a**(-1) == ANP(1, mod, QQ)
c = ANP([], [1, 0, 0, -2], QQ)
r1 = a.rem(b)
(q, r2) = a.div(b)
assert r1 == r2 == c == a % b
raises(NotInvertible, lambda: a.div(c))
raises(NotInvertible, lambda: a.rem(c))
# Comparison with "hard-coded" value fails despite looking identical
# from sympy import Rational
# c = ANP([Rational(11, 10), Rational(-1, 5), Rational(-3, 5)], [1, 0, 0, -2], QQ)
assert q == a/b # == c
def test_ANP_unify():
mod = [QQ(1), QQ(0), QQ(-2)]
a = ANP([QQ(1)], mod, QQ)
b = ANP([ZZ(1)], mod, ZZ)
assert a.unify(b)[0] == QQ
assert b.unify(a)[0] == QQ
assert a.unify(a)[0] == QQ
assert b.unify(b)[0] == ZZ
def test___hash__():
# issue 5571
# Make sure int vs. long doesn't affect hashing with Python ground types
assert DMP([[1, 2], [3]], ZZ) == DMP([[int(1), int(2)], [int(3)]], ZZ)
assert hash(DMP([[1, 2], [3]], ZZ)) == hash(DMP([[int(1), int(2)], [int(3)]], ZZ))
assert DMF(
([[1, 2], [3]], [[1]]), ZZ) == DMF(([[int(1), int(2)], [int(3)]], [[int(1)]]), ZZ)
assert hash(DMF(([[1, 2], [3]], [[1]]), ZZ)) == hash(DMF(([[int(1),
int(2)], [int(3)]], [[int(1)]]), ZZ))
assert ANP([1, 1], [1, 0, 1], ZZ) == ANP([int(1), int(1)], [int(1), int(0), int(1)], ZZ)
assert hash(
ANP([1, 1], [1, 0, 1], ZZ)) == hash(ANP([int(1), int(1)], [int(1), int(0), int(1)], ZZ))
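# A minimal sketch (not part of the original test suite) of the dense
# recursive representation the tests above exercise: the outer list indexes
# descending powers of x, each inner list is a polynomial in y, so
# [[1], [2, 0], [1, 0, 0]] encodes (x + y)**2.
def example_dense_representation():
    f = DMP([[1], [2, 0], [1, 0, 0]], ZZ)
    assert f.to_dict() == {(2, 0): 1, (1, 1): 2, (0, 2): 1}  # x**2 + 2*x*y + y**2
    assert f.degree() == 2  # degree in the first variable, x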
| ["[email protected]"] | |
c2e5409388839109b35e896d83768e706bc1fbb0 | 84c36e9067476a730d88f5ec799deabd5a46a44d | /XXXXXXXXX/PyQt5-Mini-Projects-master/TextBook/Checkboxes/mainUi.py | 5d2a7f141613b3abcba677e9b3e6e424e146addf | []
| no_license | AGou-ops/myPyQT5-StudyNote | 8bbce76b778a55c31773313d137682c77d246aad | 7e5eb426b6f30c301d040f6bc08f8a3c41d4a232 | refs/heads/master | 2022-11-06T00:05:25.603798 | 2020-06-20T01:59:45 | 2020-06-20T01:59:45 | 261,099,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,545 | py |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(584, 511)
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(230, 30, 91, 16))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(70, 110, 51, 16))
self.label_2.setObjectName("label_2")
self.brisk = QtWidgets.QCheckBox(Dialog)
self.brisk.setGeometry(QtCore.QRect(370, 110, 70, 17))
self.brisk.setObjectName("brisk")
self.appertizers = QtWidgets.QButtonGroup(Dialog)
self.appertizers.setObjectName("appertizers")
self.appertizers.addButton(self.brisk)
self.stoup = QtWidgets.QCheckBox(Dialog)
self.stoup.setGeometry(QtCore.QRect(370, 140, 70, 17))
self.stoup.setObjectName("stoup")
self.appertizers.addButton(self.stoup)
self.rochin = QtWidgets.QCheckBox(Dialog)
self.rochin.setGeometry(QtCore.QRect(370, 170, 70, 17))
self.rochin.setObjectName("rochin")
self.appertizers.addButton(self.rochin)
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setGeometry(QtCore.QRect(70, 240, 71, 16))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(Dialog)
self.label_4.setGeometry(QtCore.QRect(70, 350, 47, 13))
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(Dialog)
self.label_5.setGeometry(QtCore.QRect(80, 450, 47, 13))
self.label_5.setObjectName("label_5")
self.chicken = QtWidgets.QCheckBox(Dialog)
self.chicken.setGeometry(QtCore.QRect(370, 240, 211, 17))
self.chicken.setObjectName("chicken")
self.main = QtWidgets.QButtonGroup(Dialog)
self.main.setObjectName("main")
self.main.addButton(self.chicken)
self.rice = QtWidgets.QCheckBox(Dialog)
self.rice.setGeometry(QtCore.QRect(370, 270, 141, 17))
self.rice.setObjectName("rice")
self.main.addButton(self.rice)
self.potato = QtWidgets.QCheckBox(Dialog)
self.potato.setGeometry(QtCore.QRect(370, 300, 161, 17))
self.potato.setObjectName("potato")
self.main.addButton(self.potato)
self.cake = QtWidgets.QCheckBox(Dialog)
self.cake.setGeometry(QtCore.QRect(370, 350, 70, 17))
self.cake.setObjectName("cake")
self.Dessert = QtWidgets.QButtonGroup(Dialog)
self.Dessert.setObjectName("Dessert")
self.Dessert.addButton(self.cake)
self.ice_cream = QtWidgets.QCheckBox(Dialog)
self.ice_cream.setGeometry(QtCore.QRect(370, 380, 91, 17))
self.ice_cream.setObjectName("ice_cream")
self.Dessert.addButton(self.ice_cream)
self.pie = QtWidgets.QCheckBox(Dialog)
self.pie.setGeometry(QtCore.QRect(370, 410, 70, 17))
self.pie.setObjectName("pie")
self.Dessert.addButton(self.pie)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "<html><head/><body><p align=\"center\"><span style=\" font-size:10pt; font-weight:600;\">Menu</span></p></body></html>"))
self.label_2.setText(_translate("Dialog", "Appetizer"))
self.brisk.setText(_translate("Dialog", "$12 brisk"))
self.stoup.setText(_translate("Dialog", "$10 stoup"))
self.rochin.setText(_translate("Dialog", "$11 rochin"))
self.label_3.setText(_translate("Dialog", "Main Course"))
self.label_4.setText(_translate("Dialog", "Dessert"))
self.label_5.setText(_translate("Dialog", "Bill: "))
self.chicken.setText(_translate("Dialog", "$32 Chicken soup with plantain freckles"))
self.rice.setText(_translate("Dialog", "$26 Fried rice and Brisket"))
self.potato.setText(_translate("Dialog", "$20 Potato soup and steak"))
self.cake.setText(_translate("Dialog", "$5 Cake"))
self.ice_cream.setText(_translate("Dialog", "$6 Ice Cream"))
self.pie.setText(_translate("Dialog", "$5 Pie"))
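# A minimal driver sketch for the generated class, in the style of the footer
# pyuic5 emits with its -x option; none of this comes from the original file.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())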
| ["[email protected]"] | |
29919d9fb02730fee196407970876918996c26db | 5c94e032b2d43ac347f6383d0a8f0c03ec3a0485 | /MiniLab_mkII/__init__.py | 92e7d550c23bd2cf4b051b5bcc8d23df5cbf1906 | []
| no_license | Elton47/Ableton-MRS-10.1.13 | 997f99a51157bd2a2bd1d2dc303e76b45b1eb93d | 54bb64ba5e6be52dd6b9f87678ee3462cc224c8a | refs/heads/master | 2022-07-04T01:35:27.447979 | 2020-05-14T19:02:09 | 2020-05-14T19:02:09 | 263,990,585 | 0 | 0 | null | 2020-05-14T18:12:04 | 2020-05-14T18:12:03 | null | UTF-8 | Python | false | false | 916 | py |
# uncompyle6 version 3.6.7
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.17 (default, Dec 23 2019, 21:25:33)
# [GCC 4.2.1 Compatible Apple LLVM 11.0.0 (clang-1100.0.33.16)]
# Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/MiniLab_mkII/__init__.py
# Compiled at: 2020-01-09 15:21:34
from __future__ import absolute_import, print_function, unicode_literals
import _Framework.Capabilities as caps
from .MiniLabMk2 import MiniLabMk2
def get_capabilities():
return {caps.CONTROLLER_ID_KEY: caps.controller_id(vendor_id=7285, product_ids=[649], model_name=['Arturia MiniLab mkII']),
caps.PORTS_KEY: [
caps.inport(props=[caps.NOTES_CC, caps.SCRIPT, caps.REMOTE]),
caps.outport(props=[caps.SCRIPT])]}
def create_instance(c_instance):
return MiniLabMk2(c_instance=c_instance)
| ["[email protected]"] | |
e488b3d086433aa8834313e177c946dd9b0641b3 | da2d53e8021b539db006fa31f02d1c2ae46bed3b | /March Long Challenge 2021/8. Consecutive Adding.py | e2e92af1f63e5d65d0a481da2b09867f3e03e022 | []
| no_license | srajsonu/CodeChef | 0723ee4975808e2f4d101d2034771d868ae3b7f7 | a39cd5886a5f108dcd46f70922d5637dd29849ce | refs/heads/main | 2023-04-22T08:33:06.376698 | 2021-05-16T05:48:17 | 2021-05-16T05:48:17 | 327,030,437 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,397 | py |
from collections import deque
class Solution:
def isValid(self, A, i, j):
if i < 0 or i >= len(A) or j < 0 or j >= len(A[0]):
return False
return True
def dfs(self, A, B, i, j, vis):
vis.add((i, j))  # a set holds one item per call; the coordinate must be a tuple
row = [-1, 0, 1, 0]
col = [0, -1, 0, 1]
for r, c in zip(row, col):
nRow = i + r
nCol = j + c
if self.isValid(A, nRow, nCol) and (nRow, nCol) not in vis:
self.dfs(A, B, nRow, nCol, vis)
def bfs(self, A, B, X, i, j, vis):
q = deque()
q.append((i, j, B[i][j] - A[i][j]))
row = [-1, 0, 1, 0]
col = [0, -1, 0, 1]
while q:
i, j, v = q.popleft()
A[i][j] += v
for r, c in zip(row, col):
nRow = i + r
nCol = j + c
if self.isValid(A, nRow, nCol) and A[nRow][nCol] != B[nRow][nCol]:
while self.isValid(A, nRow, nCol):
A[nRow][nCol] += v
nRow += r
nCol += c
X -= 1
# print(nRow-r, nCol-c, A)
if self.isValid(A, nRow - r, nCol - c) and A[nRow - r][nCol - c] != B[nRow - r][nCol - c] and (
nRow - r, nCol - c) not in vis and X > 0:
#print(nRow - r, nCol - c, A, v)
vis.add((nRow - r, nCol - c))
v = B[nRow - r][nCol - c] - A[nRow - r][nCol - c]
q.append((nRow - r, nCol - c, v))
if X <= 0:
break
def solve(self, A, B, R, C, X):
vis = set()
for i in range(R):
for j in range(C):
if A[i][j] != B[i][j] and (i, j) not in vis:
self.bfs(A, B, X, i, j, vis)
for i in range(R):
for j in range(C):
if A[i][j] != B[i][j]:
return 'No'
return A
if __name__ == '__main__':
S = Solution()
T = int(input())
for _ in range(T):
R, C, X = map(int, input().split())
A = []
for _ in range(R):
row = list(map(int, input().split()))
A.append(row)
B = []
for _ in range(R):
row = list(map(int, input().split()))
B.append(row)
print(S.solve(A, B, R, C, X))
| ["[email protected]"] | |
10cbdbc9540fe4bef60a125001d0fa4cbad72202 | 86393bd0d16c69363aa1afb4c4841fff6314493c | /examples/models/azure_aks_deep_mnist/DeepMnist.py | 0e45b33d8653c9f0dd43b7961eecc19e3f48cda6 | ["Apache-2.0"] | permissive | SeldonIO/seldon-core | 0179fc490c439dbc04f2b8e6157f39291cb11aac | 6652d080ea10cfca082be7090d12b9e776d96d7a | refs/heads/master | 2023-08-19T08:32:10.714354 | 2023-08-15T12:55:57 | 2023-08-15T12:55:57 | 114,898,943 | 3,947 | 885 | Apache-2.0 | 2023-09-13T11:29:37 | 2017-12-20T14:51:54 | HTML | UTF-8 | Python | false | false | 641 | py |
import tensorflow as tf
import numpy as np
class DeepMnist(object):
def __init__(self):
self.class_names = ["class:{}".format(str(i)) for i in range(10)]
self.sess = tf.Session()
saver = tf.train.import_meta_graph("model/deep_mnist_model.meta")
saver.restore(self.sess,tf.train.latest_checkpoint("./model/"))
graph = tf.get_default_graph()
self.x = graph.get_tensor_by_name("x:0")
self.y = graph.get_tensor_by_name("y:0")
def predict(self,X,feature_names):
predictions = self.sess.run(self.y,feed_dict={self.x:X})
return predictions.astype(np.float64)
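# A hedged local smoke test, assuming the ./model/ checkpoint files exist and
# that the "x:0" placeholder accepts flattened 28x28 batches; the (1, 10)
# output shape is likewise an assumption, not taken from the original example.
if __name__ == "__main__":
    model = DeepMnist()
    X = np.zeros((1, 784), dtype=np.float32)  # one blank MNIST image
    print(model.predict(X, feature_names=None).shape)  # expected: (1, 10)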
| ["[email protected]"] | |
26327b7c71992dbaeac22e71ebb678a503d4dd13 | acff427a36d6340486ff747ae9e52f05a4b027f2 | /main/desktop/font/amiri-fonts/actions.py | 8bd7964bbed99a2bbd3ba4b3361b254a7b92e377 | []
| no_license | jeremie1112/pisilinux | 8f5a03212de0c1b2453132dd879d8c1556bb4ff7 | d0643b537d78208174a4eeb5effeb9cb63c2ef4f | refs/heads/master | 2020-03-31T10:12:21.253540 | 2018-10-08T18:53:50 | 2018-10-08T18:53:50 | 152,126,584 | 2 | 1 | null | 2018-10-08T18:24:17 | 2018-10-08T18:24:17 | null | UTF-8 | Python | false | false | 391 | py |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import pisitools
def install():
pisitools.insinto("/usr/share/fonts/amiri", "Amiri*.ttf")
pisitools.dodoc("OFL.txt", "README-Arabic", "README", )
pisitools.insinto("/usr/share/amiri-fonts", "*.pdf")
| ["[email protected]"] | |
39b66b351172d94092faf826e4ea94d4c0bf074a | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/R/ronanmchugh/get_loc_relator_codes.py | 4c12ef47be97a8541377ce37e91bef4939da90fa | []
| no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py |
import scraperwiki
import lxml.html
# Blank Python
html = scraperwiki.scrape("http://www.loc.gov/marc/relators/relaterm.html")
root = lxml.html.fromstring(html)
authorizedList = root.find_class("authorized")
codeList = root.find_class('relator-code')
codeDict = dict()
for i in range(len(authorizedList)):
codeDict = {
'type' : authorizedList[i].text_content().lower(),
'code' : codeList[i].text_content().replace('[','').replace(']','')
}
scraperwiki.sqlite.save(unique_keys=['code'], data=codeDict)
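# The index-based loop above could equally be written with zip(); a sketch
# with the same behaviour:
# for term, code in zip(authorizedList, codeList):
#     scraperwiki.sqlite.save(
#         unique_keys=['code'],
#         data={'type': term.text_content().lower(),
#               'code': code.text_content().replace('[', '').replace(']', '')})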
| ["[email protected]"] | |
e0f3253e98d1541eaae5376a562ce8c211245599 | 76a8ea60480331f0f61aeb61de55be9a6270e733 | /downloadable-site-packages/astropy/coordinates/tests/test_earth.py | 4b7a1192b9b2f3114fb3b9368a167a1ee6bf7e33 | ["MIT"] | permissive | bhagyas/Pyto | cd2ec3f35bec703db4ac29b56d17abc4bf03e375 | 907024a9b3e04a2a9de54976778c0e1a56b7b83c | refs/heads/master | 2022-11-19T13:05:07.392454 | 2020-07-21T17:33:39 | 2020-07-21T17:33:39 | 281,886,535 | 2 | 0 | MIT | 2020-07-23T07:48:03 | 2020-07-23T07:48:02 | null | UTF-8 | Python | false | false | 17,040 | py |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initialization of angles not already covered by the API tests"""
import pickle
import pytest
import numpy as np
from astropy.coordinates.earth import EarthLocation, ELLIPSOIDS
from astropy.coordinates.angles import Longitude, Latitude
from astropy.units import allclose as quantity_allclose
from astropy import units as u
from astropy.time import Time
from astropy import constants
from astropy.coordinates.name_resolve import NameResolveError
def allclose_m14(a, b, rtol=1.e-14, atol=None):
if atol is None:
atol = 1.e-14 * getattr(a, 'unit', 1)
return quantity_allclose(a, b, rtol, atol)
def allclose_m8(a, b, rtol=1.e-8, atol=None):
if atol is None:
atol = 1.e-8 * getattr(a, 'unit', 1)
return quantity_allclose(a, b, rtol, atol)
def isclose_m14(val, ref):
return np.array([allclose_m14(v, r) for (v, r) in zip(val, ref)])
def isclose_m8(val, ref):
return np.array([allclose_m8(v, r) for (v, r) in zip(val, ref)])
def vvd(val, valok, dval, func, test, status):
"""Mimic routine of erfa/src/t_erfa_c.c (to help copy & paste)"""
assert quantity_allclose(val, valok * val.unit, atol=dval * val.unit)
def test_gc2gd():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
x, y, z = (2e6, 3e6, 5.244e6)
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geocentric(x, y, z, u.m)
e, p, h = location.to_geodetic('WGS84')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic('GRS80')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic('WGS72')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status)
vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status)
vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status)
def test_gd2gc():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gd2gc"""
e = 3.1 * u.rad
p = -0.5 * u.rad
h = 2500.0 * u.m
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS84')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status)
vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status)
vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='GRS80')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status)
vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status)
vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS72')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status)
vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status)
vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status)
class TestInput():
def setup(self):
self.lon = Longitude([0., 45., 90., 135., 180., -180, -90, -45], u.deg,
wrap_angle=180*u.deg)
self.lat = Latitude([+0., 30., 60., +90., -90., -60., -30., 0.], u.deg)
self.h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11., -.1], u.m)
self.location = EarthLocation.from_geodetic(self.lon, self.lat, self.h)
self.x, self.y, self.z = self.location.to_geocentric()
def test_default_ellipsoid(self):
assert self.location.ellipsoid == EarthLocation._ellipsoid
def test_geo_attributes(self):
assert all(np.all(_1 == _2)
for _1, _2 in zip(self.location.geodetic,
self.location.to_geodetic()))
assert all(np.all(_1 == _2)
for _1, _2 in zip(self.location.geocentric,
self.location.to_geocentric()))
def test_attribute_classes(self):
"""Test that attribute classes are correct (and not EarthLocation)"""
assert type(self.location.x) is u.Quantity
assert type(self.location.y) is u.Quantity
assert type(self.location.z) is u.Quantity
assert type(self.location.lon) is Longitude
assert type(self.location.lat) is Latitude
assert type(self.location.height) is u.Quantity
def test_input(self):
"""Check input is parsed correctly"""
# units of length should be assumed geocentric
geocentric = EarthLocation(self.x, self.y, self.z)
assert np.all(geocentric == self.location)
geocentric2 = EarthLocation(self.x.value, self.y.value, self.z.value,
self.x.unit)
assert np.all(geocentric2 == self.location)
geodetic = EarthLocation(self.lon, self.lat, self.h)
assert np.all(geodetic == self.location)
geodetic2 = EarthLocation(self.lon.to_value(u.degree),
self.lat.to_value(u.degree),
self.h.to_value(u.m))
assert np.all(geodetic2 == self.location)
geodetic3 = EarthLocation(self.lon, self.lat)
assert allclose_m14(geodetic3.lon.value,
self.location.lon.value)
assert allclose_m14(geodetic3.lat.value,
self.location.lat.value)
assert not np.any(isclose_m14(geodetic3.height.value,
self.location.height.value))
geodetic4 = EarthLocation(self.lon, self.lat, self.h[-1])
assert allclose_m14(geodetic4.lon.value,
self.location.lon.value)
assert allclose_m14(geodetic4.lat.value,
self.location.lat.value)
assert allclose_m14(geodetic4.height[-1].value,
self.location.height[-1].value)
assert not np.any(isclose_m14(geodetic4.height[:-1].value,
self.location.height[:-1].value))
# check length unit preservation
geocentric5 = EarthLocation(self.x, self.y, self.z, u.pc)
assert geocentric5.unit is u.pc
assert geocentric5.x.unit is u.pc
assert geocentric5.height.unit is u.pc
assert allclose_m14(geocentric5.x.to_value(self.x.unit), self.x.value)
geodetic5 = EarthLocation(self.lon, self.lat, self.h.to(u.pc))
assert geodetic5.unit is u.pc
assert geodetic5.x.unit is u.pc
assert geodetic5.height.unit is u.pc
assert allclose_m14(geodetic5.x.to_value(self.x.unit), self.x.value)
def test_invalid_input(self):
"""Check invalid input raises exception"""
# incomprehensible by either raises TypeError
with pytest.raises(TypeError):
EarthLocation(self.lon, self.y, self.z)
# wrong units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.lon, self.lat, self.lat)
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.h, self.lon, self.lat)
# floats without a unit
with pytest.raises(TypeError):
EarthLocation.from_geocentric(self.x.value, self.y.value,
self.z.value)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geocentric(self.x, self.y, self.z[:5])
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geodetic(self.x, self.y, self.z)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h[:5])
def test_slicing(self):
# test on WGS72 location, so we can check the ellipsoid is passed on
locwgs72 = EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid='WGS72')
loc_slice1 = locwgs72[4]
assert isinstance(loc_slice1, EarthLocation)
assert loc_slice1.unit is locwgs72.unit
assert loc_slice1.ellipsoid == locwgs72.ellipsoid == 'WGS72'
assert not loc_slice1.shape
with pytest.raises(TypeError):
loc_slice1[0]
with pytest.raises(IndexError):
len(loc_slice1)
loc_slice2 = locwgs72[4:6]
assert isinstance(loc_slice2, EarthLocation)
assert len(loc_slice2) == 2
assert loc_slice2.unit is locwgs72.unit
assert loc_slice2.ellipsoid == locwgs72.ellipsoid
assert loc_slice2.shape == (2,)
loc_x = locwgs72['x']
assert type(loc_x) is u.Quantity
assert loc_x.shape == locwgs72.shape
assert loc_x.unit is locwgs72.unit
def test_invalid_ellipsoid(self):
# unknown ellipsoid
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid='foo')
with pytest.raises(TypeError):
EarthLocation(self.lon, self.lat, self.h, ellipsoid='foo')
with pytest.raises(ValueError):
self.location.ellipsoid = 'foo'
with pytest.raises(ValueError):
self.location.to_geodetic('foo')
@pytest.mark.parametrize('ellipsoid', ELLIPSOIDS)
def test_ellipsoid(self, ellipsoid):
"""Test that different ellipsoids are understood, and differ"""
# check that heights differ for different ellipsoids
# need different tolerance, since heights are relative to ~6000 km
lon, lat, h = self.location.to_geodetic(ellipsoid)
if ellipsoid == self.location.ellipsoid:
assert allclose_m8(h.value, self.h.value)
else:
# Some heights are very similar for some; some lon, lat identical.
assert not np.all(isclose_m8(h.value, self.h.value))
# given lon, lat, height, check that x,y,z differ
location = EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid=ellipsoid)
if ellipsoid == self.location.ellipsoid:
assert allclose_m14(location.z.value, self.z.value)
else:
assert not np.all(isclose_m14(location.z.value, self.z.value))
def test_to_value(self):
loc = self.location
loc_ndarray = loc.view(np.ndarray)
assert np.all(loc.value == loc_ndarray)
loc2 = self.location.to(u.km)
loc2_ndarray = np.empty_like(loc_ndarray)
for coo in 'x', 'y', 'z':
loc2_ndarray[coo] = loc_ndarray[coo] / 1000.
assert np.all(loc2.value == loc2_ndarray)
loc2_value = self.location.to_value(u.km)
assert np.all(loc2_value == loc2_ndarray)
def test_pickling():
"""Regression test against #4304."""
el = EarthLocation(0.*u.m, 6000*u.km, 6000*u.km)
s = pickle.dumps(el)
el2 = pickle.loads(s)
assert el == el2
def test_repr_latex():
"""
Regression test for issue #4542
"""
somelocation = EarthLocation(lon='149:3:57.9', lat='-31:16:37.3')
somelocation._repr_latex_()
somelocation2 = EarthLocation(lon=[1., 2.]*u.deg, lat=[-1., 9.]*u.deg)
somelocation2._repr_latex_()
@pytest.mark.remote_data
# TODO: this parametrize should include a second option with a valid Google API
# key. For example, we should make an API key for Astropy, and add it to Travis
# as an environment variable (for security).
@pytest.mark.parametrize('google_api_key', [None])
def test_of_address(google_api_key):
NYC_lon = -74.0 * u.deg
NYC_lat = 40.7 * u.deg
# ~10 km tolerance to address difference between OpenStreetMap and Google
# for "New York, NY". This doesn't matter in practice because this test is
# only used to verify that the query succeeded, not that the returned
# position is precise.
NYC_tol = 0.1 * u.deg
# just a location
try:
loc = EarthLocation.of_address("New York, NY")
except NameResolveError as e:
# API limit might surface even here in Travis CI.
if 'unknown failure with' not in str(e):
pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol)
assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol)
assert np.allclose(loc.height.value, 0.)
# Put this one here as buffer to get around Google map API limit per sec.
# no match: This always raises NameResolveError
with pytest.raises(NameResolveError):
EarthLocation.of_address("lkjasdflkja")
if google_api_key is not None:
# a location and height
try:
loc = EarthLocation.of_address("New York, NY", get_height=True)
except NameResolveError as e:
# Buffer above sometimes insufficient to get around API limit but
# we also do not want to drag things out with time.sleep(0.195),
# where 0.195 was empirically determined on some physical machine.
pytest.xfail(str(e.value))
else:
assert quantity_allclose(loc.lat, NYC_lat, atol=NYC_tol)
assert quantity_allclose(loc.lon, NYC_lon, atol=NYC_tol)
assert quantity_allclose(loc.height, 10.438*u.meter, atol=1.*u.cm)
def test_geodetic_tuple():
lat = 2*u.deg
lon = 10*u.deg
height = 100*u.m
el = EarthLocation.from_geodetic(lat=lat, lon=lon, height=height)
res1 = el.to_geodetic()
res2 = el.geodetic
assert res1.lat == res2.lat and quantity_allclose(res1.lat, lat)
assert res1.lon == res2.lon and quantity_allclose(res1.lon, lon)
assert res1.height == res2.height and quantity_allclose(res1.height, height)
def test_gravitational_redshift():
someloc = EarthLocation(lon=-87.7*u.deg, lat=37*u.deg)
sometime = Time('2017-8-21 18:26:40')
zg0 = someloc.gravitational_redshift(sometime)
# should be of order ~few mm/s change per week
zg_week = someloc.gravitational_redshift(sometime + 7 * u.day)
assert 1.*u.mm/u.s < abs(zg_week - zg0) < 1*u.cm/u.s
# ~cm/s over a half-year
zg_halfyear = someloc.gravitational_redshift(sometime + 0.5 * u.yr)
assert 1*u.cm/u.s < abs(zg_halfyear - zg0) < 1*u.dm/u.s
# but when back to the same time in a year, should be tenths of mm
# even over decades
zg_year = someloc.gravitational_redshift(sometime - 20 * u.year)
assert .1*u.mm/u.s < abs(zg_year - zg0) < 1*u.mm/u.s
# Check mass adjustments.
# If Jupiter and the moon are ignored, effect should be off by ~ .5 mm/s
masses = {'sun': constants.G*constants.M_sun,
'jupiter': 0*constants.G*u.kg,
'moon': 0*constants.G*u.kg}
zg_moonjup = someloc.gravitational_redshift(sometime, masses=masses)
assert .1*u.mm/u.s < abs(zg_moonjup - zg0) < 1*u.mm/u.s
# Check that simply not including the bodies gives the same result.
assert zg_moonjup == someloc.gravitational_redshift(sometime,
bodies=('sun',))
# And that earth can be given, even not as last argument
assert zg_moonjup == someloc.gravitational_redshift(
sometime, bodies=('earth', 'sun',))
# If the earth is also ignored, effect should be off by ~ 20 cm/s
# This also tests the conversion of kg to gravitational units.
masses['earth'] = 0*u.kg
zg_moonjupearth = someloc.gravitational_redshift(sometime, masses=masses)
assert 1*u.dm/u.s < abs(zg_moonjupearth - zg0) < 1*u.m/u.s
# If all masses are zero, redshift should be 0 as well.
masses['sun'] = 0*u.kg
assert someloc.gravitational_redshift(sometime, masses=masses) == 0
with pytest.raises(KeyError):
someloc.gravitational_redshift(sometime, bodies=('saturn',))
with pytest.raises(u.UnitsError):
masses = {'sun': constants.G*constants.M_sun,
'jupiter': constants.G*constants.M_jup,
'moon': 1*u.km, # wrong units!
'earth': constants.G*constants.M_earth}
someloc.gravitational_redshift(sometime, masses=masses)
def test_read_only_input():
lon = np.array([80., 440.]) * u.deg
lat = np.array([45.]) * u.deg
lon.flags.writeable = lat.flags.writeable = False
loc = EarthLocation.from_geodetic(lon=lon, lat=lat)
assert quantity_allclose(loc[1].x, loc[0].x)
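# A short illustrative use of the API under test, separate from the fixtures
# above; the coordinates are approximate values for Greenwich and serve only
# as an example.
def example_earthlocation_roundtrip():
    loc = EarthLocation.from_geodetic(lon=0.0*u.deg, lat=51.4769*u.deg,
                                      height=46*u.m)
    x, y, z = loc.to_geocentric()  # ITRS Cartesian position, as Quantities
    return loc.lat, loc.lon, (x, y, z)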
| ["[email protected]"] | |
eef7f6927ab37c17b81eb7c3eadc5d6378468186 | b1bf615bfa1ee2065e3adfe90310814c3b27c61d | /2020-12-24/maximum-size-subarray-sum-equals-k.py | 0a1d2dff638d1ae034e5195d35def2cc9ce245b9 | []
| no_license | Huajiecheng/leetcode | 73b09a88e61ea3b16ca3bf440fadd1470652ccf2 | 4becf814a2a06611ee909ec700380ab83ac8ab99 | refs/heads/main | 2023-03-19T21:54:20.952909 | 2021-03-06T03:34:52 | 2021-03-06T03:34:52 | 320,959,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py |
from typing import List  # needed for the List[int] annotations below

class Solution:
def maxSubArrayLen(self, nums: List[int], k: int) -> int:
pref = {0:-1}
result = 0
temp = 0
for i in range(len(nums)):
temp = temp + nums[i]
if (temp - k) in pref:
if result < (i - pref[temp - k]):
result = i - pref[temp - k]
if temp not in pref:
pref[temp] = i
return result
| ["[email protected]"] | |
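# A quick sanity check of the Solution class above, run outside the LeetCode
# harness; the two cases are the well-known examples for this problem.
if __name__ == "__main__":
    assert Solution().maxSubArrayLen([1, -1, 5, -2, 3], 3) == 4  # [1, -1, 5, -2]
    assert Solution().maxSubArrayLen([-2, -1, 2, 1], 1) == 2     # [-1, 2]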
8adc722f9de9ebada8d6df6f420830cc62b39845 | db8b429062538f418207277127ad4ada09ef7a1b | /Glyphs Import.py | 9dda18263bb840c212a909ca848ecdb383ea1573 | []
| no_license | mariovinicius/Glyphs-Scripts | ace45d4b46e041ae6b05c4f7ab43abe6dcaea90e | 80ca45b7b6623bae71a16e4cc93583f674602943 | refs/heads/master | 2021-01-10T18:23:02.376018 | 2013-04-27T18:04:48 | 2013-04-27T18:04:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,875 | py |
#FLM: Glyphs Import
# -*- coding: utf8 -*-
# Version 0.2
# copyright Georg Seifert 2012, schriftgestaltung.de
#
# The script will read a .glyphs file and import it into FontLab.
# It requires FontLab 5.1 on Mac
# if you find any bugs, please report to [email protected]
from FL import *
from Foundation import *
from objc import *
import os.path
import math, time
import Nav
import MacOS
import Carbon.File
from plistlib import *
Nice2Legacy = {}
Name2Category = {}
Name2SubCategory = {}
shortStyleList = {"Extra": "Ex", "Condensed": "Cond", "Extended": "Extd", "Semi":"Sm", "Italic": "It", "Bold":"Bd", " Sans":"", " Mono":""}
weightCodes = {}
def NotNiceName(Name):
Suffix = ""
if "." in Name:
Name, Suffix = Name.split(".", 1)
if Name in Nice2Legacy:
Name = Nice2Legacy[Name]
else:
Name = Name.replace("-", "")
if len(Suffix) > 0:
Name = Name + "." + Suffix
return Name
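# Illustration with a hypothetical table entry: if Nice2Legacy contained
# {"a-cy": "afii10065"}, then NotNiceName("a-cy.ss01") would return
# "afii10065.ss01"; a name missing from the table, e.g. "Alpha-latin",
# simply loses its hyphen and becomes "Alphalatin".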
def setInstanceStyleNames(Font, Dict):
_Familie = str(Dict['familyName'])
try:
_Instance = Dict['instances'][0]
except:
_Instance = {"name":"Regular"}
_Schnitt = str(_Instance['name'])
if "linkStyle" in _Instance:
_FamSchnitt = str(_Instance["linkStyle"]) # for style linking
else:
_FamSchnitt = _Schnitt
try:
_isBold = bool(_Instance["isBold"])
except:
_isBold = False
try:
_isItalic = bool(_Instance["isItalic"])
except:
_isItalic = False
_Weight = "Regular"
_Width = "Medium (normal)"
_Weightcode = 400
#_Widthcode =
if "weightClass" in _Instance:
_Weight = str(_Instance["weightClass"])
if "widthClass" in _Instance:
_Width = str(_Instance["widthClass"])
if _Weight in weightCodes:
_Weightcode = int(weightCodes[_Weight])
if "customParameters" in _Instance:
_CustomParameters = _Instance["customParameters"]
for Parameter in _CustomParameters:
if Parameter["name"] == "openTypeOS2WeightClass":
_Weightcode = int(Parameter["value"])
if Parameter["name"] == "openTypeOS2WidthClass":
_Widthcode = int(Parameter["value"])
Font.weight = _Weight
Font.weight_code = _Weightcode
Font.width = _Width
#Font.width_code = _Widthcode
_Flag = 0
if _isBold:
_Flag = 32
if _isItalic:
_Flag = _Flag + 1
Font.font_style = _Flag
if _Flag == 1:
_WinStyle = "Italic"
elif _Flag == 32:
_WinStyle = "Bold"
elif _Flag == 33:
_WinStyle = "Bold Italic"
else:
_WinStyle = "Regular"
_WinFamily = _Schnitt.replace(_WinStyle, "")
if len(_WinFamily) > 0 :
_WinFamily = " " + _WinFamily
if _WinFamily[-1] == " ":
_WinFamily = _WinFamily[0:-1]
_shortStyle = _Schnitt
for any in shortStyleList:
if len(_Familie + " " + _shortStyle) <= 28:
break
_shortStyle = _shortStyle.replace(any, shortStyleList[any])
_postscriptName = _Familie + "-" + _shortStyle
_postscriptName = _postscriptName.replace(" ", "")
print _postscriptName
Font.family_name = _Familie
Font.style_name = _WinStyle
Font.full_name = _Familie + " " + _Schnitt
Font.font_name = _postscriptName
Font.menu_name = _Familie + " " + _Schnitt
Font.apple_name = _postscriptName
Font.pref_style_name = _Schnitt
Font.pref_family_name = _Familie
Font.mac_compatible = ""
Font.menu_name = ""
Font.fontnames.clean()
try:
Font.fontnames.append(NameRecord( 0,1,0,0, Font.copyright))
Font.fontnames.append(NameRecord( 0,3,1,1033, Font.copyright))
except:
print "Copyright-Angabe fehlt"
Font.fontnames.append(NameRecord( 1,1,0,0, _Familie))
Font.fontnames.append(NameRecord( 1,3,1,1033, _Familie + _WinFamily))
Font.fontnames.append(NameRecord( 2,1,0,0, _Schnitt))
Font.fontnames.append(NameRecord( 2,3,1,1033, _WinStyle))
Font.fontnames.append(NameRecord( 3,1,0,0, "%s: %s %s, %d" % (Font.designer, _Familie, _Schnitt, Font.year)))
Font.fontnames.append(NameRecord( 3,3,1,1033, "%s: %s %s, %d" % (Font.designer, _Familie, _Schnitt, Font.year)))
if _Schnitt == "Regular":
Font.fontnames.append(NameRecord( 4,1,0,0, _Familie))
else:
Font.fontnames.append(NameRecord( 4,1,0,0, _Familie + " " + _Schnitt))
Font.fontnames.append(NameRecord( 4,3,1,1033, _Familie + " " + _Schnitt))
try:
Font.fontnames.append(NameRecord( 5,1,0,0, Font.version))
Font.fontnames.append(NameRecord( 5,3,1,1033, Font.version))
except:
print "Version-Angabe fehlt"
Font.fontnames.append(NameRecord( 6,1,0,0, _postscriptName))
Font.fontnames.append(NameRecord( 6,3,1,1033, _postscriptName))
try:
Font.fontnames.append(NameRecord( 7,1,0,0, Font.trademark))
Font.fontnames.append(NameRecord( 7,3,1,1033, Font.trademark))
except:
print "Trademark-Angabe fehlt"
try:
Font.fontnames.append(NameRecord( 9,1,0,0, Font.designer))
Font.fontnames.append(NameRecord( 9,3,1,1033, Font.designer))
except:
print "Trademark-Angabe fehlt"
try:
Font.fontnames.append(NameRecord( 11,1,0,0, Font.vendor_url))
Font.fontnames.append(NameRecord( 11,3,1,1033, Font.vendor_url))
except:
print "Vendor-URL-Angabe fehlt"
try:
Font.fontnames.append(NameRecord( 12,1,0,0, Font.designer_url))
Font.fontnames.append(NameRecord( 12,3,1,1033, Font.designer_url))
except:
print "Trademark-Angabe fehlt"
Font.fontnames.append(NameRecord( 16,3,1,1033, _Familie))
Font.fontnames.append(NameRecord( 17,3,1,1033, _Schnitt))
if len(_Familie + " " + _Schnitt) >= 28:
Font.fontnames.append(NameRecord( 18,1,0,0, _Familie + " " + _shortStyle))
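# The numeric IDs above are the standard OpenType name-table IDs
# (0 copyright, 1/2 legacy family/style, 3 unique ID, 4 full name, 5 version,
# 6 PostScript name, 7 trademark, 9 designer, 11/12 vendor and designer URL,
# 16/17 typographic family/style, 18 Mac compatible full name); the platform
# triplets (1,0,0) and (3,1,1033) target Mac Roman and Windows/US English.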
def setFontInfo(Font, Dict):
KeyTranslate = {
"familyName" : ("family_name", str),
"versionMajor" : ("version_major", int),
"versionMinor": ("version_minor", int),
"unitsPerEm" : ("upm", int),
"copyright" : ("copyright", unicode),
"designer" : ("designer", unicode),
"designerURL" : ("designer_url", unicode),
"manufacturer" : ("vendor", unicode),
"manufacturerURL" : ("vendor_url", unicode),
}
for Key in KeyTranslate:
if Key in Dict.allKeys():
FlKey, FlType = KeyTranslate[Key]
if FlType == unicode:
setattr(Font, FlKey, unicode(Dict[Key]).encode("utf-8"))
elif FlType == str:
setattr(Font, FlKey, str(Dict[Key]))
elif FlType == int:
setattr(Font, FlKey, int(Dict[Key]))
if "date" in Dict:
try:
import datetime
Date = datetime.datetime.strptime(Dict["date"][:-6], "%Y-%m-%d %H:%M:%S") #2004-03-02 15:27:47 +0100
Font.year = int(Date.year)
except:
Font.year = int(Dict["date"][:4])
if "versionMajor" in Dict and "versionMinor" in Dict:
Font.version = "%d.%03d" % (Font.version_major, Font.version_minor)
FontMasters = Dict["fontMaster"]
if len(FontMasters) == 1:
setInstanceStyleNames(Font, Dict)
else:
Font.weight = "All"
MasterCount = len(FontMasters)
for FontMaster in FontMasters:
if "weight" not in FontMaster.keys():
FontMaster["weight"] = "Regular"
if MasterCount == 1:
pass
elif MasterCount == 2:
if FontMasters[0]["weight"] != FontMasters[1]["weight"]:
Font.DefineAxis("Weight", "Weight", "Wt")
else:
Font.DefineAxis("Width", "Width", "Wd")
elif MasterCount == 4:
if FontMasters[0]["weight"] != FontMasters[1]["weight"]:
Font.DefineAxis("Weight", "Weight", "Wt")
Font.DefineAxis("Width", "Width", "Wd")
else:
Font.DefineAxis("Width", "Width", "Wd")
Font.DefineAxis("Weight", "Weight", "Wt")
print "Please check the arrangement of the axis and masters. The association of the Glyphs masters might not fit."
else:
print "Fonts with a master count of %d are not supported" % MasterCount
return False
KeyTranslate = {
"postscriptIsFixedPitch" : ("is_fixed_pitch", bool),
"postscriptUnderlinePosition" : ("underline_position", int),
"postscriptUnderlineThickness" : ("underline_thickness", int),
#"openTypeOS2StrikeoutSize" : ("ttinfo.os2_y_strikeout_size", int),
#"openTypeOS2StrikeoutPosition" : ("ttinfo.os2_y_strikeout_position", int),
"openTypeNameLicense" : ("license", unicode),
"openTypeNameLicenseURL" : ("license_url", unicode),
"openTypeOS2Type" : ("ttinfo.os2_fs_type", "fsType")
}
if "customParameters" in Dict:
for Parameter in Dict["customParameters"]:
Name = Parameter["name"]
Value = Parameter["value"]
try:
FlKey, FlType = KeyTranslate[Name]
if Name in KeyTranslate:
if FlType == str:
setattr(Font, FlKey, str(Value))
elif FlType == int:
setattr(Font, FlKey, int(Value))
elif FlType == unicode:
setattr(Font, FlKey, unicode(Value).encode("utf-8"))
elif FlType == bool:
setattr(Font, FlKey, bool(Value))
elif FlType == "fsType":
fs_type = 0
for Bit in Value:
fs_type = fs_type + 2**int(Bit)
Font.ttinfo.os2_fs_type = int(fs_type)
except:
pass
for i in range(MasterCount):
Font.ascender[i] = int(FontMasters[i]["ascender"])
Font.descender[i] = int(FontMasters[i]["descender"])
Font.cap_height[i] = int(FontMasters[i]["capHeight"])
Font.x_height[i] = int(FontMasters[i]["xHeight"])
if "italicAngle" in FontMasters[i]:
Font.italic_angle = float(FontMasters[i]["italicAngle"])
if "horizontalStems" in FontMasters[i]:
if i == 0:
Font.stem_snap_h_num = len(FontMasters[i]["horizontalStems"])
for j in range(len(FontMasters[i]["horizontalStems"])):
Font.stem_snap_h[i][j] = int(FontMasters[i]["horizontalStems"][j])
if "verticalStems" in FontMasters[i]:
if i == 0:
Font.stem_snap_v_num = len(FontMasters[i]["verticalStems"])
for j in range(len(FontMasters[i]["verticalStems"])):
Font.stem_snap_v[i][j] = int(FontMasters[i]["verticalStems"][j])
if "alignmentZones" in FontMasters[i]:
BlueZones = []
OtherZones = []
for ZoneString in FontMasters[i]["alignmentZones"]:
Zone = str(ZoneString)[1:-1].split(", ")
Zone = map(int, Zone)
if Zone[1] < 0 and Zone[0] != 0:
OtherZones.append(Zone[0])
OtherZones.append(Zone[0] + Zone[1])
else:
BlueZones.append(Zone[0])
BlueZones.append(Zone[0] + Zone[1])
if len(BlueZones) > 14:
BlueZones = BlueZones[:14]
print "Warning: There are to many Blue Zones."
if i == 0:
Font.blue_values_num = len(BlueZones)
Font.other_blues_num = len(OtherZones)
for j in range(Font.blue_values_num):
Font.blue_values[i][j] = BlueZones[j]
for j in range(Font.other_blues_num):
Font.other_blues[i][j] = OtherZones[j]
return True
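# A worked example of the alignment-zone conversion above (hypothetical
# master data): "{800, 16}" yields the blue-zone pair 800/816; "{-176, -16}"
# (negative size, nonzero origin) yields the other-blue pair -176/-192;
# "{0, -16}" keeps the baseline in the blue zones because its origin is 0.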
def loadGlyphsInfo():
try:
GlyphsPath = NSWorkspace.sharedWorkspace().URLForApplicationWithBundleIdentifier_("com.GeorgSeifert.Glyphs")
if GlyphsPath is None:
GlyphsPath = NSWorkspace.sharedWorkspace().URLForApplicationWithBundleIdentifier_("com.schriftgestaltung.Glyphs")
if GlyphsPath is None:
GlyphsPath = NSWorkspace.sharedWorkspace().URLForApplicationWithBundleIdentifier_("com.schriftgestaltung.GlyphsMini")
GlyphsPath = GlyphsPath.path()
except:
return
if GlyphsPath is not None:
GlyphsInfoPath = GlyphsPath+"/Contents/Frameworks/GlyphsCore.framework/Versions/A/Resources/GlyphData.xml"
WeightCodesPath = GlyphsPath+"/Contents/Frameworks/GlyphsCore.framework/Versions/A/Resources/weights.plist"
from xml.etree import ElementTree as ET
element = ET.parse(GlyphsInfoPath)
for subelement in element.getiterator():
Attribs = subelement.attrib
if "legacy" in Attribs:
Nice2Legacy[Attribs["name"]] = Attribs["legacy"]
if "category" in Attribs:
Name2Category[Attribs["name"]] = Attribs["category"]
if "subCategory" in Attribs:
Name2SubCategory[Attribs["name"]] = Attribs["subCategory"]
global weightCodes
weightCodes = NSDictionary.alloc().initWithContentsOfFile_(WeightCodesPath)
def fixNodes(Nodes):
while "OFFCURVE" in Nodes[-1]:
Node = Nodes[-1]
Nodes.insert(0, Nodes.pop(Nodes.index(Node)))
return Nodes
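# fixNodes() rotates trailing off-curve handles to the front, e.g.
# ["P LINE", "h1 OFFCURVE", "h2 OFFCURVE"] becomes
# ["h1 OFFCURVE", "h2 OFFCURVE", "P LINE"], so the reader below always
# collects a curve's handles before the on-curve node that ends the segment.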
def readGlyphs(Font, Dict):
Glyphs = Dict["glyphs"]
GlyphsCount = len(Glyphs)
FontMasters = Dict["fontMaster"]
MasterCount = len(FontMasters)
GlyphIndexes = {}
for i in range(GlyphsCount):
GlyphDict = Glyphs[i]
glyph = Glyph(MasterCount)
glyph.name = str(GlyphDict["glyphname"])
if "unicode" in GlyphDict.keys():
glyph.unicode = int(GlyphDict["unicode"], 16)
if "export" in GlyphDict.keys() and str(GlyphDict["export"]) == "0":
glyph.customdata = "Not Exported"
glyph.mark = 2
isNonSpacingMark = False
try:
isNonSpacingMark = Name2Category[glyph.name] == "Mark" and Name2SubCategory[glyph.name] == "Nonspacing"
except:
pass
for masterIndex in range(MasterCount):
FontMaster = FontMasters[masterIndex]
Layer = None
try:
for Layer in GlyphDict["layers"]:
if Layer["layerId"] == FontMaster["id"]:
break
except:
continue
ShiftNodes = 0
if isNonSpacingMark:
ShiftNodes = round(float(Layer["width"]))
glyph.SetMetrics(Point(0, 0), masterIndex)
else:
glyph.SetMetrics(Point(round(float(Layer["width"])), 0), masterIndex)
if "paths" not in Layer.keys():
continue
nodeIndex = 0
lastMoveNodeIndex = 0
for PathIndex in range(len(Layer["paths"])):
Path = Layer["paths"][PathIndex]
Nodes = Path["nodes"]
Nodes = fixNodes(Nodes)
NodeString = Nodes[-1]
NodeList = NodeString.split(" ")
Position = Point(round(float(NodeList[0])) - ShiftNodes, round(float(NodeList[1])))
if masterIndex == 0:
node = Node(nMOVE, Position)
glyph.Insert(node, len(glyph))
else:
Index = nodeIndex
if Index > len(glyph):
Index = Index - len(glyph)
try:
glyph.nodes[Index].Layer(masterIndex)[0].x = Position.x
glyph.nodes[Index].Layer(masterIndex)[0].y = Position.y
except:
continue # if the master has more paths then the first master
firstPoint = Position
firstNodeIndex = nodeIndex
nodeIndex = nodeIndex + 1
OffcurveNodes = []
for NodeString in Nodes:
NodeList = NodeString.split(" ")
Position = Point(round(float(NodeList[0])) - ShiftNodes, round(float(NodeList[1])))
node = None
if NodeList[2] == "LINE":
if masterIndex == 0:
node = Node(nLINE, Position)
try:
if NodeList[3] == "SMOOTH":
node.alignment = nSMOOTH
except:
pass
glyph.Insert(node, len(glyph))
else:
Index = nodeIndex
if Index >= len(glyph):
Index = Index - len(glyph)
glyph.nodes[Index].Layer(masterIndex)[0].x = Position.x
glyph.nodes[Index].Layer(masterIndex)[0].y = Position.y
nodeIndex = nodeIndex + 1
elif NodeList[2] == "CURVE":
if len(OffcurveNodes) == 2:
if masterIndex == 0:
node = Node(nCURVE, Position)
try:
if NodeList[3] == "SMOOTH":
node.alignment = nSMOOTH
except:
pass
node.points[1].x = OffcurveNodes[0].x
node.points[1].y = OffcurveNodes[0].y
node.points[2].x = OffcurveNodes[1].x
node.points[2].y = OffcurveNodes[1].y
glyph.Insert(node, len(glyph))
else:
Index = nodeIndex
if Index >= len(glyph):
Index = Index - len(glyph) + 1
Points = glyph.nodes[Index].Layer(masterIndex)
if len(Points) == 3:
Points[0].x = Position.x
Points[0].y = Position.y
Points[1].x = OffcurveNodes[0].x
Points[1].y = OffcurveNodes[0].y
Points[2].x = OffcurveNodes[1].x
Points[2].y = OffcurveNodes[1].y
nodeIndex = nodeIndex + 1
OffcurveNodes = []
elif NodeList[2] == "OFFCURVE":
OffcurveNodes.append(Point(round(float(NodeList[0])) - ShiftNodes, round(float(NodeList[1]))))
if "closed" in Path and masterIndex == MasterCount-1:
# we may have output a node too much
node = glyph[nodeIndex-1]
firstNode = glyph[firstNodeIndex]
if node is not None and firstNodeIndex is not None:
if node.x == firstNode.x and node.y == firstNode.y:
if node.type == nLINE:
glyph.DeleteNode(nodeIndex-1)
nodeIndex = nodeIndex - 1
elif node.type == nCURVE and glyph[firstNodeIndex+1].type != nCURVE:
glyph.DeleteNode(firstNodeIndex)
nodeIndex = nodeIndex - 1
else:
print "There was a problem with the outline in the glyph: \"%s\". Probably because the outlines are not compatible." % glyph.name
glyph.mark = 34
if "hints" in Layer.keys():
vHintIndex = 0
hHintIndex = 0
for HintIndex in range(len(Layer["hints"])):
HintDict = Layer["hints"][HintIndex]
Horizontal = "horizontal" in HintDict
if "target" in HintDict and "origin" in HintDict: # add Links
if masterIndex > 0:
continue
FlNodeIndex1 = None
FlNodeIndex2 = None
PathIndex, NodeIndex = HintDict["origin"][1:-1].split(", ")
PathIndex = int(PathIndex)
NodeIndex = int(NodeIndex)
PathCounter = -1
NodeCounter = 0
for i in range(len(glyph)):
node = glyph[i]
if node.type == nMOVE:
PathCounter = PathCounter + 1
if PathCounter >= PathIndex:
NodeCounter = NodeCounter + node.count
if NodeCounter > NodeIndex:
FlNodeIndex1 = i
break
if HintDict["target"][0] == "{":
PathIndex, NodeIndex = HintDict["target"][1:-1].split(", ")
PathIndex = int(PathIndex)
NodeIndex = int(NodeIndex)
PathCounter = -1
NodeCounter = 0
for i in range(len(glyph)):
node = glyph[i]
if node.type == nMOVE:
PathCounter = PathCounter + 1
if PathCounter >= PathIndex:
NodeCounter = NodeCounter + node.count
if NodeCounter > NodeIndex:
FlNodeIndex2 = i
break
elif HintDict["target"] == "down":
FlNodeIndex2 = -2
elif HintDict["target"] == "up":
FlNodeIndex2 = -1
if FlNodeIndex1 != None and FlNodeIndex2 != None:
link = Link(FlNodeIndex1, FlNodeIndex2)
if Horizontal:
glyph.hlinks.append(link)
else:
glyph.vlinks.append(link)
elif "place" in HintDict:
Origin, Size = HintDict["place"][1:-1].split(", ")
Origin = int(round(float(Origin)))
Size = int(round(float(Size)))
if masterIndex == 0:
if Horizontal:
hint = Hint(Origin, Size)
glyph.hhints.append(hint)
else:
Origin = Origin - ShiftNodes
hint = Hint(Origin, Size)
glyph.vhints.append(hint)
else:
if Horizontal:
hint = glyph.hhints[hHintIndex]
hint.positions[masterIndex] = Origin
hint.widths[masterIndex] = Size
hHintIndex = hHintIndex + 1
else:
hint = glyph.vhints[vHintIndex]
hint.positions[masterIndex] = Origin
hint.widths[masterIndex] = Size
vHintIndex = vHintIndex + 1
if "anchors" in Layer.keys():
for AnchorIndex in range(len(Layer["anchors"])):
# print "__nodeIndex:", nodeIndex
AnchorDict = Layer["anchors"][AnchorIndex]
Name = str(AnchorDict["name"])
X, Y = AnchorDict["position"][1:-1].split(", ")
X = round(float(X)) - ShiftNodes
Y = round(float(Y))
if masterIndex == 0:
anchor = Anchor(Name, X, Y)
# print "Move __node", node
glyph.anchors.append(anchor)
else:
Index = nodeIndex
#print "_set move point", Index, Position, glyph.nodes #, glyph.nodes[Index].Layer(masterIndex)
try:
glyph.anchors[AnchorIndex].Layer(masterIndex).x = X
glyph.anchors[AnchorIndex].Layer(masterIndex).y = Y
except:
continue
Font.glyphs.append(glyph)
GlyphIndexes[glyph.name] = len(Font.glyphs)-1
# Read the components.
for i in range(GlyphsCount):
glyph = Font.glyphs[i]
GlyphDict = Glyphs[i]
for masterIndex in range(MasterCount):
FontMaster = FontMasters[masterIndex]
try:
for Layer in GlyphDict["layers"]:
if Layer["layerId"] == FontMaster["id"]:
break
except:
continue
try:
if "components" in Layer.keys():
for componentIndex in range(len(Layer["components"])):
try:
componentDict = Layer["components"][componentIndex]
except:
continue
ShiftNodes = 0
                    # reconstruct the correct positioning of Nonspacing marks. They were set to zero width on outline import.
try:
isNonSpacingMark = Name2Category[componentDict['name']] == "Mark" and Name2SubCategory[componentDict['name']] == "Nonspacing"
if isNonSpacingMark:
ComponentIndex = GlyphIndexes[componentDict['name']]
ComponentGlyphDict = Glyphs[ComponentIndex]
#print "__componentDict['name']", componentDict['name'], ComponentGlyphDict['layers'][masterIndex]["width"]
ShiftNodes = float(str(ComponentGlyphDict['layers'][masterIndex]["width"]))
except:
pass
componentTransformString = componentDict["transform"][1:-1]
componentTransformList = componentTransformString.split(", ")
if masterIndex == 0:
ComponentIndex = GlyphIndexes[componentDict['name']]
Delta = Point(round(float(str(componentTransformList[4]))) + ShiftNodes, round(float(str(componentTransformList[5]))))
Scale = Point(float(str(componentTransformList[0])), float(str(componentTransformList[3])))
component = Component(ComponentIndex, Delta, Scale)
glyph.components.append(component)
else:
component = glyph.components[componentIndex]
component.scales[masterIndex].x = float(str(componentTransformList[0]))
component.scales[masterIndex].y = float(str(componentTransformList[3]))
component.deltas[masterIndex].x = round(float(str(componentTransformList[4])) + ShiftNodes)
component.deltas[masterIndex].y = round(float(str(componentTransformList[5])))
except:
print "There was a problem reading the components for glyph:", glyph.name
# Resolve nested components.
    GlyphsWithNestedComponents = []
for glyph in Font.glyphs:
ComponentCount = len(glyph.components)
for ComponentIndex in range(ComponentCount-1, -1, -1):
component = glyph.components[ComponentIndex]
ComponentGlyph = Font.glyphs[component.index]
if ComponentGlyph.customdata == "Not Exported":
for ComponentGlyphComponent in ComponentGlyph.components:
CopyComponent = Component(ComponentGlyphComponent)
for masterIndex in range(MasterCount):
CopyComponent.scales[masterIndex].x = CopyComponent.scales[masterIndex].x * component.scales[masterIndex].x
CopyComponent.scales[masterIndex].y = CopyComponent.scales[masterIndex].y * component.scales[masterIndex].y
CopyComponent.deltas[masterIndex].x = (CopyComponent.deltas[masterIndex].x * component.scales[masterIndex].x) + component.deltas[masterIndex].x
CopyComponent.deltas[masterIndex].y = (CopyComponent.deltas[masterIndex].y * component.scales[masterIndex].y) + component.deltas[masterIndex].y
glyph.components.append(CopyComponent)
del(glyph.components[ComponentIndex])
ComponentGlyphPath = ComponentGlyph.nodes
NewPath = []
for node in ComponentGlyphPath:
NewPath.append(Node(node))
for ComponentNode in ComponentGlyph.nodes:
node = Node(ComponentNode)
for masterIndex in range(MasterCount):
for pointIndex in range(node.count):
node.Layer(masterIndex)[pointIndex].x = (node.Layer(masterIndex)[pointIndex].x * component.scales[masterIndex].x) + component.deltas[masterIndex].x
node.Layer(masterIndex)[pointIndex].y = (node.Layer(masterIndex)[pointIndex].y * component.scales[masterIndex].y) + component.deltas[masterIndex].y
glyph.Insert(node, len(glyph))
for glyph in Font.glyphs:
ComponentCount = len(glyph.components)
for ComponentIndex in range(ComponentCount-1, -1, -1):
component = glyph.components[ComponentIndex]
ComponentGlyph = Font.glyphs[component.index]
if len(ComponentGlyph.components) > 0:
                GlyphsWithNestedComponents.append(glyph.name)
for ComponentGlyphComponent in ComponentGlyph.components:
CopyComponent = Component(ComponentGlyphComponent)
for masterIndex in range(MasterCount):
CopyComponent.scales[masterIndex].x = CopyComponent.scales[masterIndex].x * component.scales[masterIndex].x
CopyComponent.scales[masterIndex].y = CopyComponent.scales[masterIndex].y * component.scales[masterIndex].y
CopyComponent.deltas[masterIndex].x = (CopyComponent.deltas[masterIndex].x * component.scales[masterIndex].x) + component.deltas[masterIndex].x
CopyComponent.deltas[masterIndex].y = (CopyComponent.deltas[masterIndex].y * component.scales[masterIndex].y) + component.deltas[masterIndex].y
glyph.components.append(CopyComponent)
del(glyph.components[ComponentIndex])
ComponentGlyphPath = ComponentGlyph.nodes
NewPath = []
for node in ComponentGlyphPath:
NewPath.append(Node(node))
for ComponentNode in ComponentGlyph.nodes:
node = Node(ComponentNode)
for masterIndex in range(MasterCount):
for pointIndex in range(node.count):
node.Layer(masterIndex)[pointIndex].x = (node.Layer(masterIndex)[pointIndex].x * component.scales[masterIndex].x) + component.deltas[masterIndex].x
node.Layer(masterIndex)[pointIndex].y = (node.Layer(masterIndex)[pointIndex].y * component.scales[masterIndex].y) + component.deltas[masterIndex].y
glyph.Insert(node, len(glyph))
    if len(GlyphsWithNestedComponents) > 0:
        print "The font has nested components. They are not supported in FontLab and were decomposed.\n(%s)" % ", ".join(GlyphsWithNestedComponents)
fl.UpdateFont()
def readKerning(Font, Dict):
Glyphs = Dict["glyphs"]
GlyphsCount = len(Glyphs)
GlyphIndexes = {}
LeftClasses = {}
RightClasses = {}
for i in range(GlyphsCount):
GlyphDict = Glyphs[i]
LeftGroup = None
try:
LeftGroup = str(GlyphDict["leftKerningGroup"])
except:
pass
RightGroup = None
try:
RightGroup = str(GlyphDict["rightKerningGroup"])
except:
pass
if LeftGroup is not None:
if LeftGroup in RightClasses.keys():
RightClasses[LeftGroup].append(str(GlyphDict["glyphname"]))
else:
RightClasses[LeftGroup] = [str(GlyphDict["glyphname"])]
if RightGroup is not None:
if RightGroup in LeftClasses.keys():
LeftClasses[RightGroup].append(str(GlyphDict["glyphname"]))
else:
LeftClasses[RightGroup] = [str(GlyphDict["glyphname"])]
AllKeys = LeftClasses.keys()
AllKeys.sort()
Classes = []
for Key in AllKeys:
Members = LeftClasses[Key]
Key = Key.replace(".", "_")
Key = Key.replace("-", "_")
Member = NotNiceName(Members[0])
ClassString = "_%s_l: %s'" % (Key, Member)
for Member in Members[1:]:
Member = NotNiceName(Member)
ClassString = ClassString+" "+Member
Classes.append(ClassString)
AllKeys = RightClasses.keys()
AllKeys.sort()
for Key in AllKeys:
Members = RightClasses[Key]
Key = Key.replace(".", "_")
Key = Key.replace("-", "_")
Member = NotNiceName(Members[0])
ClassString = "_%s_r: %s'" % (Key, Member)
for Member in Members[1:]:
Member = NotNiceName(Member)
ClassString = ClassString+" "+Member
Classes.append(ClassString)
Font.classes = Classes
for i in range(len(Classes)):
if Classes[i][0] == "_":
if "_l: " in Classes[i]:
Font.SetClassFlags(i, True, False)
if "_r: " in Classes[i]:
Font.SetClassFlags(i, False, True)
FontMasters = Dict["fontMaster"]
if "kerning" in Dict.keys():
Kerning = Dict["kerning"]
allLeftKeys = set()
for LeftKeys in Kerning.values():
allLeftKeys = set.union(allLeftKeys, set(LeftKeys.keys()))
for LeftKey in allLeftKeys:
LeftGlyph = None
if LeftKey[0] == "@":
#@MMK_L_
try:
ClassKey = LeftKey[7:]
GlyphName = LeftClasses[ClassKey][0]
LeftGlyph = Font[GlyphName]
except:
continue
else:
LeftGlyph = Font[str(LeftKey)]
allRightKeys = set()
for FontMaster in FontMasters:
try:
RightKeys = Kerning[FontMaster["id"]][LeftKey].keys()
allRightKeys = set.union(allRightKeys, set(RightKeys))
except:
pass
for RightKey in allRightKeys:
RightGlyph = None
if RightKey[0] == "@":
#@MMK_R_
try:
ClassKey = RightKey[7:]
GlyphName = RightClasses[ClassKey][0]
RightGlyph = Font[GlyphName]
except:
continue
else:
RightGlyph = Font[str(RightKey)]
KernPair = KerningPair(RightGlyph.index)
for j in range(len(FontMasters)):
FontMaster = FontMasters[j]
value = 0
try:
value = int(Kerning[FontMaster["id"]][LeftKey][RightKey])
except:
pass
KernPair.values[j] = value
LeftGlyph.kerning.append(KernPair)
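# Note on the class strings built above: they follow FontLab's kerning-class
# syntax "_<key>_l: <keyglyph>' <member> <member> ...", where the trailing
# apostrophe marks the key glyph of the class; SetClassFlags() then marks each
# class as left- or right-sided based on its "_l: "/"_r: " infix.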
def readFeatures(Font, Dict):
Font.ot_classes = ""
try:
for FeatureDict in Dict["featurePrefixes"]:
if "name" in FeatureDict.keys() and "code" in FeatureDict.keys():
Font.ot_classes = Font.ot_classes + "# " + str(FeatureDict["name"]) + "\n" + str(FeatureDict["code"]) + "\n"
except:
pass
try:
Classes = Font.classes
if "classes" in Dict.keys():
for FeatureDict in Dict["classes"]:
if "name" in FeatureDict.keys() and "code" in FeatureDict.keys():
CleanCode = str(FeatureDict["code"])
CleanCodeList = CleanCode.split(" ")
CleanCodeList = map(NotNiceName, CleanCodeList)
CleanCode = " ".join(CleanCodeList)
Classes.append(str(FeatureDict["name"]) + ": " + CleanCode)
Font.classes = Classes
else:
print "the font has no Classes."
except:
print "__ Error in Classes:", sys.exc_info()[0]
pass
try:
if "features" in Dict.keys():
for FeatureDict in Dict["features"]:
if "name" in FeatureDict.keys() and "code" in FeatureDict.keys():
Name = str(FeatureDict["name"])
try:
CleanCode = str(unicode(FeatureDict["code"]).encode("utf-8"))
CleanCode = CleanCode.replace("'", " ~~'")
CleanCode = CleanCode.replace("]", " ~~]")
CleanCode = CleanCode.replace("[", "[~~ ")
CleanCode = CleanCode.replace(";", " ~~;")
CleanCodeList = CleanCode.split(" ")
CleanCodeList = map(NotNiceName, CleanCodeList)
CleanCode = " ".join(CleanCodeList)
CleanCode = CleanCode.replace(" ~~'", "'")
CleanCode = CleanCode.replace(" ~~]", "]")
CleanCode = CleanCode.replace("[~~ ", "[")
CleanCode = CleanCode.replace(" ~~;", ";")
CleanCode = CleanCode.replace("\n", "\n ")
feature = Feature(Name, "feature %s {\n %s\n} %s;" % (Name, CleanCode, Name))
Font.features.append(feature)
except:
print "__ Error in Feature[%s]: %s" % (Name, sys.exc_info()[0])
pass
else:
print "The font has no Feature."
except:
print "__ Error in Feature:", sys.exc_info()[0]
def setLegacyNames(Font):
for glyph in Font.glyphs:
Name = glyph.name
NewName = NotNiceName(Name)
if NewName != Name:
glyph.name = NewName
def readGlyphsFile(filePath):
print "Import Glyphs File"
pool = NSAutoreleasePool.alloc().init()
GlyphsDoc = NSDictionary.alloc().initWithContentsOfFile_(filePath)
loadGlyphsInfo()
from FL import fl, Font
folder, base = os.path.split(filePath)
base = base.replace(".glyphs", ".vfb")
dest = os.path.join(folder, base)
f = Font( )
fl.Add(f)
if not setFontInfo(f, GlyphsDoc):
return False
readGlyphs(f, GlyphsDoc)
readKerning(f, GlyphsDoc)
setLegacyNames(f)
readFeatures(f, GlyphsDoc)
fl.UpdateFont()
f.modified = 0
pool.drain()
def GetFile(message=None, filetypes = None, selectFolders = True, selectFiles = True):
assert(filetypes)
Panel = NSOpenPanel.openPanel()
if message != None:
Panel.setMessage_(message)
Panel.setCanChooseFiles_(selectFiles)
Panel.setCanChooseDirectories_(selectFolders)
Panel.setAllowsMultipleSelection_(False)
pressedButton = Panel.runModalForTypes_(filetypes)
if pressedButton == 1:
return Panel.filename()
return None
def main():
fl.output = ""
path = GetFile(message="Please select a .glyphs file", filetypes=["glyphs"], selectFolders=False, selectFiles=True)
StartTime = time.clock()
if path is None:
return
readGlyphsFile(path)
print "import Time:", (time.clock() - StartTime), "s."
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
8a4135a1f95afd0a5df5a1771cda5e752d1d4c71 | bec68f492fbc6d08e16d1cfd3fb115b5e3348271 | /apps/core/utils.py | b5d48ef009d264528fe414a4609e1fc05c1a2516 | [
"Apache-2.0"
]
| permissive | vitorh45/avaliacao | c6c88c31ed5a7d9ec7ca3d66c80735a0ec0a9774 | 0ea5405c559b657e1d8cd11d51455295993e1f99 | refs/heads/master | 2021-01-25T05:22:04.421851 | 2015-07-24T19:07:02 | 2015-07-24T19:07:02 | 39,533,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | __author__ = 'vitor'
from django.conf import settings
from django.core.mail import send_mail
from collections import defaultdict
import string
def enviar_email(obj):
email_assunto = settings.EMAIL_SUBJECT
habilidades = get_usuario_habilidades(obj)
if habilidades:
for habilidade in habilidades:
mensagem = get_mensagem(habilidade=habilidade)
send_mail(email_assunto, mensagem, settings.EMAIL_HOST_USER, [obj.email,])
else:
mensagem = get_mensagem()
send_mail(email_assunto, mensagem, settings.EMAIL_HOST_USER, [obj.email,])
def get_usuario_habilidades(obj):
habilidades = []
if obj.html_c > 6 and obj.css_c > 6 and obj.javascript_c > 6:
habilidades.append('Front End ')
if obj.python_c > 6 and obj.django_c > 6:
habilidades.append('Back End ')
if obj.ios_c > 6 and obj.android_c > 6:
habilidades.append('Mobile ')
return habilidades
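# Illustrative check of the threshold logic above (the candidate object is
# hypothetical, not part of the app):
#
# class _Candidato(object):
#     html_c = css_c = javascript_c = 7
#     python_c = django_c = 3
#     ios_c = android_c = 2
# get_usuario_habilidades(_Candidato()) # -> ['Front End ']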
def get_mensagem(**kwargs):
    return string.Formatter().vformat(settings.EMAIL_MESSAGE, (), defaultdict(str, **kwargs))
| [
"[email protected]"
]
| |
a09d4120f937f3d55f8f5d3b7dab9cc02428ff45 | 4b0f97f809d7126e9fb846c3182d978f7f9cf975 | /web_dynamic/2-hbnb.py | cabcee7a77eec28f2055f632e0bd769a6e9401c3 | [
"LicenseRef-scancode-public-domain"
]
| permissive | sebastianchc/AirBnB_clone_v4 | 9778d09cfa96a722e94da4c3dd8037aeb5c9463b | ebc862eece3f3ee809e8ecf7c4f7057b5f819aed | refs/heads/master | 2022-08-14T17:54:31.326916 | 2020-05-22T18:21:42 | 2020-05-22T18:21:42 | 265,034,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | #!/usr/bin/python3
""" Starts a Flash Web Application """
from models import storage
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from os import environ
from flask import Flask, render_template
from uuid import uuid4
app = Flask(__name__)
# app.jinja_env.trim_blocks = True
# app.jinja_env.lstrip_blocks = True
@app.teardown_appcontext
def close_db(error):
""" Remove the current SQLAlchemy Session """
storage.close()
@app.route('/2-hbnb', strict_slashes=False)
def hbnb():
""" HBNB is alive! """
states = storage.all(State).values()
states = sorted(states, key=lambda k: k.name)
st_ct = []
for state in states:
st_ct.append([state, sorted(state.cities, key=lambda k: k.name)])
amenities = storage.all(Amenity).values()
amenities = sorted(amenities, key=lambda k: k.name)
places = storage.all(Place).values()
places = sorted(places, key=lambda k: k.name)
return render_template('2-hbnb.html',
states=st_ct,
amenities=amenities,
places=places,
cache_id=uuid4())
if __name__ == "__main__":
""" Main Function """
app.run(host='0.0.0.0', port=5000)
| [
"[email protected]"
]
| |
cb118141f00618a366afed0c4c1b1e13f70cf228 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/product_group_view_service/transports/grpc.py | f7300d686fbd54e10906d765308f92aae39a815e | [
"Apache-2.0"
]
| permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,429 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v7.resources.types import product_group_view
from google.ads.googleads.v7.services.types import product_group_view_service
from .base import ProductGroupViewServiceTransport, DEFAULT_CLIENT_INFO
class ProductGroupViewServiceGrpcTransport(ProductGroupViewServiceTransport):
"""gRPC backend transport for ProductGroupViewService.
Service to manage product group views.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_product_group_view(self) -> Callable[
[product_group_view_service.GetProductGroupViewRequest],
product_group_view.ProductGroupView]:
r"""Return a callable for the get product group view method over gRPC.
Returns the requested product group view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetProductGroupViewRequest],
~.ProductGroupView]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_product_group_view' not in self._stubs:
self._stubs['get_product_group_view'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v7.services.ProductGroupViewService/GetProductGroupView',
request_serializer=product_group_view_service.GetProductGroupViewRequest.serialize,
response_deserializer=product_group_view.ProductGroupView.deserialize,
)
return self._stubs['get_product_group_view']
__all__ = (
'ProductGroupViewServiceGrpcTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
4b44111f4094084a912b33a69846ba0791ca3bf2 | 07d707328cd2a641a68bd508f3f4d2ca8fb7cdef | /games/connectx/util/base64_file.py | 63b97e575d5a79d741c82ef92587245540a215ee | []
| no_license | JamesMcGuigan/ai-games | 8ba8af30f58519081bef973d428694f1a40dfea2 | eac436d23e03624c2245838138e9608cb13d2f1f | refs/heads/master | 2023-07-17T11:02:02.062853 | 2021-02-01T23:03:45 | 2021-02-01T23:03:45 | 271,096,829 | 19 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,017 | py | import base64
import gzip
import os
import re
import time
from typing import Any
from typing import Union
import dill
import humanize
# _base64_file__test_base64_static_import = """
# H4sIAPx9LF8C/2tgri1k0IjgYGBgKCxNLS7JzM8rZIwtZNLwZvBm8mYEkjAI4jFB2KkRbED1iXnF
# 5alFhczeWqV6AEGfwmBHAAAA
# """
def base64_file_varname(filename: str) -> str:
# ../data/AntColonyTreeSearchNode.dill.zip.base64 -> _base64_file__AntColonyTreeSearchNode__dill__zip__base64
varname = re.sub(r'^.*/', '', filename) # remove directories
varname = re.sub(r'[.\W]+', '__', varname) # convert dots and non-ascii to __
varname = f"_base64_file__{varname}"
return varname
def base64_file_var_wrap(base64_data: Union[str,bytes], varname: str) -> str:
return f'{varname} = """\n{base64_data.strip()}\n"""' # add varname = """\n\n""" wrapper
def base64_file_var_unwrap(base64_data: str) -> str:
output = base64_data.strip()
output = re.sub(r'^\w+ = """|"""$', '', output) # remove varname = """ """ wrapper
output = output.strip()
return output
def base64_file_encode(data: Any) -> str:
encoded = dill.dumps(data)
encoded = gzip.compress(encoded)
encoded = base64.encodebytes(encoded).decode('utf8').strip()
return encoded
def base64_file_decode(encoded: str) -> Any:
data = base64.b64decode(encoded)
data = gzip.decompress(data)
data = dill.loads(data)
return data
def base64_file_save(data: Any, filename: str, verbose=True) -> float:
"""
Saves a base64 encoded version of data into filename, with a varname wrapper for importing via kaggle_compile.py
# Doesn't create/update global variable.
Returns filesize in bytes
"""
varname = base64_file_varname(filename)
start_time = time.perf_counter()
try:
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'wb') as file:
encoded = base64_file_encode(data)
output = base64_file_var_wrap(encoded, varname)
output = output.encode('utf8')
file.write(output)
file.close()
        if varname in globals(): globals()[varname] = encoded # globals not shared between modules, but update for safety
filesize = os.path.getsize(filename)
        if verbose:
time_taken = time.perf_counter() - start_time
print(f"base64_file_save(): {filename:40s} | {humanize.naturalsize(filesize)} in {time_taken:4.1f}s")
return filesize
except Exception as exception:
pass
return 0.0
def base64_file_load(filename: str, verbose=True) -> Union[Any,None]:
"""
    Performs a lookup to see if the global variable for this file already exists
If not, reads the base64 encoded file from filename, with an optional varname wrapper
# Doesn't create/update global variable.
Returns decoded data
"""
varname = base64_file_varname(filename)
start_time = time.perf_counter()
try:
# Hard-coding PyTorch weights into a script - https://www.kaggle.com/c/connectx/discussion/126678
encoded = None
if varname in globals():
encoded = globals()[varname]
if encoded is None and os.path.exists(filename):
with open(filename, 'rb') as file:
encoded = file.read().decode('utf8')
encoded = base64_file_var_unwrap(encoded)
# globals()[varname] = encoded # globals are not shared between modules
if encoded is not None:
data = base64_file_decode(encoded)
            if verbose:
filesize = os.path.getsize(filename)
time_taken = time.perf_counter() - start_time
print(f"base64_file_load(): {filename:40s} | {humanize.naturalsize(filesize)} in {time_taken:4.1f}s")
return data
except Exception as exception:
print(f'base64_file_load({filename}): Exception:', exception)
return None
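# Round-trip sketch using only this module's API (the path is illustrative):
#
# payload = {"weights": [0.1, 0.2, 0.3]}
# base64_file_save(payload, "./data/example.dill.zip.base64")
# assert base64_file_load("./data/example.dill.zip.base64") == payload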
| [
"[email protected]"
]
| |
242dd82bd73845a1ef827e319a1c6f19081e8c3e | 4a43dc3e8465c66dcce55027586f0b1fe1e74c99 | /service/generated_flatbuffers/tflite/LessEqualOptions.py | cbc0cce9823e58843efe49a3acf6d471818a5c11 | [
"Apache-2.0"
]
| permissive | stewartmiles/falken | e1613d0d83edfd4485c1b78f54734e9b33b51fa5 | 26ab377a6853463b2efce40970e54d44b91e79ca | refs/heads/main | 2023-06-05T12:01:13.099531 | 2021-06-17T22:22:16 | 2021-06-17T22:22:16 | 377,912,626 | 1 | 0 | Apache-2.0 | 2021-06-17T17:34:21 | 2021-06-17T17:34:20 | null | UTF-8 | Python | false | false | 2,218 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class LessEqualOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsLessEqualOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = LessEqualOptions()
x.Init(buf, n + offset)
return x
@classmethod
def LessEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# LessEqualOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def LessEqualOptionsStart(builder): builder.StartObject(0)
def LessEqualOptionsEnd(builder): return builder.EndObject()
class LessEqualOptionsT(object):
# LessEqualOptionsT
def __init__(self):
pass
@classmethod
def InitFromBuf(cls, buf, pos):
lessEqualOptions = LessEqualOptions()
lessEqualOptions.Init(buf, pos)
return cls.InitFromObj(lessEqualOptions)
@classmethod
def InitFromObj(cls, lessEqualOptions):
x = LessEqualOptionsT()
x._UnPack(lessEqualOptions)
return x
# LessEqualOptionsT
def _UnPack(self, lessEqualOptions):
if lessEqualOptions is None:
return
# LessEqualOptionsT
def Pack(self, builder):
LessEqualOptionsStart(builder)
lessEqualOptions = LessEqualOptionsEnd(builder)
return lessEqualOptions
| [
"[email protected]"
]
| |
d6961a6e25812611eee3434fa89c418a921d2b98 | 88ba19b3303c112a424720106a7f7fde615757b5 | /02-intermediate_python/3-logic,_control_flow_and_filtering/driving_right.py | bb18e3df827cb3ebe9735bc5cdc9b4b32fe54d56 | []
| no_license | mitchisrael88/Data_Camp | 4100f5904c62055f619281a424a580b5b2b0cbc1 | 14356e221f614424a332bbc46459917bb6f99d8a | refs/heads/master | 2022-10-22T18:35:39.163613 | 2020-06-16T23:37:41 | 2020-06-16T23:37:41 | 263,859,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | # Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Extract drives_right column as Series: dr
dr = cars["drives_right"]
# Use dr to subset cars: sel
sel = cars[dr]
# Print sel
print(sel)
| [
"[email protected]"
]
| |
5e916e443b7e695235533d74e20739764066288c | 725ac5a0bf72829be627bf8dc82fdc51ba0f94ae | /Text_Generation/GPT2_SummaryGen/transformers/modeling_encoder_decoder.py | fb11fd935081f3b90dc05d70fdfa59928e81a8cb | []
| no_license | shawroad/NLP_pytorch_project | fa14b6e4a156229765e1d552901d0492d8e1def3 | 1272fed2dc8fef78a9ded0f1ae1644d613a3b57b | refs/heads/master | 2023-06-25T02:37:35.503251 | 2023-06-12T10:57:11 | 2023-06-12T10:57:11 | 229,694,655 | 530 | 104 | null | 2020-12-08T09:21:47 | 2019-12-23T06:54:29 | Python | UTF-8 | Python | false | false | 18,296 | py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support Encoder-Decoder architectures """
import logging
import os
import torch
from torch import nn
from .modeling_auto import AutoModel, AutoModelWithLMHead
logger = logging.getLogger(__name__)
class PreTrainedEncoderDecoder(nn.Module):
r"""
:class:`~transformers.PreTrainedEncoderDecoder` is a generic model class that will be
instantiated as a transformer architecture with one of the base model
classes of the library as encoder and (optionally) another one as
decoder when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)`
class method.
"""
def __init__(self, encoder, decoder):
super(PreTrainedEncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
@classmethod
def from_pretrained(
cls,
encoder_pretrained_model_name_or_path=None,
decoder_pretrained_model_name_or_path=None,
*model_args,
**kwargs
):
r""" Instantiates an encoder and a decoder from one or two base classes of the library from pre-trained model checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you need to first set it back in training mode with `model.train()`
Params:
encoder_pretrained_model_name_or_path: information necessary to initiate the encoder. Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/encoder``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
decoder_pretrained_model_name_or_path: information necessary to initiate the decoder. Either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/decoder``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments.
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
                You can specify kwargs specific to the encoder and decoder by prefixing the key with `encoder_` and `decoder_` respectively. (e.g. ``decoder_output_attention=True``). The remaining kwargs will be passed to both encoders and decoders.
Examples::
            model = PreTrainedEncoderDecoder.from_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert
"""
# keyword arguments come in 3 flavors: encoder-specific (prefixed by
# `encoder_`), decoder-specific (prefixed by `decoder_`) and those
# that apply to the model as a whole.
# We let the specific kwargs override the common ones in case of conflict.
kwargs_common = {
argument: value
for argument, value in kwargs.items()
if not argument.startswith("encoder_") and not argument.startswith("decoder_")
}
kwargs_decoder = kwargs_common.copy()
kwargs_encoder = kwargs_common.copy()
kwargs_encoder.update(
{
argument[len("encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("encoder_")
}
)
kwargs_decoder.update(
{
argument[len("decoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("decoder_")
}
)
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
encoder.config.is_decoder = False
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
decoder = AutoModelWithLMHead.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
decoder.config.is_decoder = True
model = cls(encoder, decoder)
return model
def save_pretrained(self, save_directory):
""" Save a Seq2Seq model and its configuration file in a format such
that it can be loaded using `:func:`~transformers.PreTrainedEncoderDecoder.from_pretrained`
We save the encoder' and decoder's parameters in two separate directories.
"""
# If the root output directory does not exist, create it
if not os.path.exists(save_directory):
os.mkdir(save_directory)
# Check whether the output directory is empty or not
sub_directories = [
directory
for directory in os.listdir(save_directory)
if os.path.isdir(os.path.join(save_directory, directory))
]
if len(sub_directories) > 0:
if "encoder" in sub_directories and "decoder" in sub_directories:
print(
"WARNING: there is an older version of encoder-decoder saved in"
+ " the output directory. The default behaviour is to overwrite them."
)
# Empty the output directory
for directory_to_remove in sub_directories:
# Remove all files into the subdirectory
files_to_remove = os.listdir(os.path.join(save_directory, directory_to_remove))
for file_to_remove in files_to_remove:
os.remove(os.path.join(save_directory, directory_to_remove, file_to_remove))
# Remove the subdirectory itself
os.rmdir(os.path.join(save_directory, directory_to_remove))
assert len(os.listdir(save_directory)) == 0 # sanity check
# Create the "encoder" directory inside the output directory and save the encoder into it
if not os.path.exists(os.path.join(save_directory, "encoder")):
os.mkdir(os.path.join(save_directory, "encoder"))
self.encoder.save_pretrained(os.path.join(save_directory, "encoder"))
# Create the "encoder" directory inside the output directory and save the decoder into it
if not os.path.exists(os.path.join(save_directory, "decoder")):
os.mkdir(os.path.join(save_directory, "decoder"))
self.decoder.save_pretrained(os.path.join(save_directory, "decoder"))
def forward(self, encoder_input_ids, decoder_input_ids, **kwargs):
""" The forward pass on a seq2eq depends what we are performing:
- During training we perform one forward pass through both the encoder
and decoder;
- During prediction, we perform one forward pass through the encoder,
and then perform several forward passes with the encoder's hidden
state through the decoder to decode a full sequence.
Therefore, we skip the forward pass on the encoder if an argument named
        `encoder_hidden_states` is passed to this function.
Params:
encoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``
Indices of encoder input sequence tokens in the vocabulary.
decoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``
Indices of decoder input sequence tokens in the vocabulary.
kwargs: (`optional`) Remaining dictionary of keyword arguments.
"""
kwargs_encoder, kwargs_decoder = self.prepare_model_kwargs(**kwargs)
# Encode if needed (training, first prediction pass)
encoder_hidden_states = kwargs_encoder.pop("hidden_states", None)
if encoder_hidden_states is None:
encoder_outputs = self.encoder(encoder_input_ids, **kwargs_encoder)
encoder_hidden_states = encoder_outputs[0]
else:
encoder_outputs = ()
kwargs_decoder["encoder_hidden_states"] = encoder_hidden_states
decoder_outputs = self.decoder(decoder_input_ids, encoder_hidden_states, **kwargs_decoder)
return decoder_outputs + encoder_outputs
@staticmethod
def prepare_model_kwargs(**kwargs):
""" Prepare the encoder and decoder's keyword arguments.
Keyword arguments come in 3 flavors:
- encoder-specific (prefixed by `encoder_`)
- decoder-specific (prefixed by `decoder_`)
- those that apply to the model as whole.
We let the specific kwargs override the common ones in case of
conflict.
"""
kwargs_common = {
argument: value
for argument, value in kwargs.items()
if not argument.startswith("encoder_") and not argument.startswith("decoder_")
}
decoder_kwargs = kwargs_common.copy()
encoder_kwargs = kwargs_common.copy()
encoder_kwargs.update(
{
argument[len("encoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("encoder_")
}
)
decoder_kwargs.update(
{
argument[len("decoder_") :]: value
for argument, value in kwargs.items()
if argument.startswith("decoder_")
}
)
decoder_kwargs["encoder_attention_mask"] = encoder_kwargs.get("attention_mask", None)
return encoder_kwargs, decoder_kwargs
class Model2Model(PreTrainedEncoderDecoder):
r"""
    :class:`~transformers.Model2Model` instantiates a Seq2Seq model
where both of the encoder and decoder are of the same family. If the
name of or that path to a pretrained model is specified the encoder and
the decoder will be initialized with the pretrained weight (the
cross-attention will be intialized randomly if its weights are not
present).
It is possible to override this behavior and initialize, say, the decoder randomly
by creating it beforehand as follows
config = BertConfig.from_pretrained()
decoder = BertForMaskedLM(config)
model = Model2Model.from_pretrained('bert-base-uncased', decoder_model=decoder)
"""
def __init__(self, *args, **kwargs):
super(Model2Model, self).__init__(*args, **kwargs)
self.tie_weights()
def tie_weights(self):
""" Tying the encoder and decoders' embeddings together.
We need for each to get down to the embedding weights. However the
different model classes are inconsistent to that respect:
- BertModel: embeddings.word_embeddings
- RoBERTa: embeddings.word_embeddings
- XLMModel: embeddings
- GPT2: wte
- BertForMaskedLM: bert.embeddings.word_embeddings
- RobertaForMaskedLM: roberta.embeddings.word_embeddings
argument of the XEmbedding layer for each model, but it is "blocked"
by a model-specific keyword (bert, )...
"""
# self._tie_or_clone_weights(self.encoder, self.decoder)
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
if (
"bert" not in pretrained_model_name_or_path
or "roberta" in pretrained_model_name_or_path
or "distilbert" in pretrained_model_name_or_path
):
raise ValueError("Only the Bert model is currently supported.")
model = super(Model2Model, cls).from_pretrained(
encoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
decoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
*args,
**kwargs
)
return model
class Model2LSTM(PreTrainedEncoderDecoder):
@classmethod
def from_pretrained(cls, *args, **kwargs):
if kwargs.get("decoder_model", None) is None:
            # We will create a randomly initialized LSTM model as decoder
if "decoder_config" not in kwargs:
raise ValueError(
"To load an LSTM in Encoder-Decoder model, please supply either: "
" - a torch.nn.LSTM model as `decoder_model` parameter (`decoder_model=lstm_model`), or"
" - a dictionary of configuration parameters that will be used to initialize a"
" torch.nn.LSTM model as `decoder_config` keyword argument. "
" E.g. `decoder_config={'input_size': 768, 'hidden_size': 768, 'num_layers': 2}`"
)
kwargs["decoder_model"] = torch.nn.LSTM(kwargs.pop("decoder_config"))
model = super(Model2LSTM, cls).from_pretrained(*args, **kwargs)
return model
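# Minimal wiring sketch (checkpoint name and input tensors are assumptions, not
# from this file); both inputs are torch.LongTensor batches of token indices:
#
# model = Model2Model.from_pretrained('bert-base-uncased')
# decoder_outputs = model(encoder_input_ids, decoder_input_ids)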
| [
"[email protected]"
]
| |
f100f3909357cdaf2a274f0fea14ca8c993114f0 | f620403443b2c0affaed53505c002f35dc68020c | /DCTM/GetClusterNumber.py | 9de8d61d5fc44f32a00a8dfec12c3d9bfaf62ec5 | []
| no_license | ZhuJiahui/CTMTS | c552b3026deb47879f9aa5bde4b002cf6283858d | 9f8981f6e61900a68a38ae0392e01771beee9651 | refs/heads/master | 2021-01-12T10:18:27.579697 | 2016-12-14T02:23:29 | 2016-12-14T02:23:29 | 76,416,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | # -*- coding: utf-8 -*-
'''
Created on July 27, 2014
@author: ZhuJiahui506
'''
import os
import numpy as np
import time
def get_cluster_number(X):
'''
    Estimate the number of clusters.
    :param X: similarity matrix between data points (dimension below 1000)
'''
D = np.zeros((len(X), len(X)))
for i in range(len(X)):
D[i, i] = 1.0 / np.sqrt(np.sum(X[i]))
L = np.dot(np.dot(D, X), D)
eigen_values, eigen_vectors = np.linalg.eig(L)
    # sort eigenvalues in descending order
idx = eigen_values.argsort()
idx = idx[::-1]
eigen_values = eigen_values[idx]
tao = 0.55
container = np.sum(eigen_values) * tao
now_sum = 0.0
cluster_number = 0
for i in range(len(eigen_values)):
now_sum += eigen_values[i]
cluster_number += 1
if now_sum > container:
break
return cluster_number
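# The heuristic above is spectral cluster-number estimation: it forms the
# symmetrically normalized affinity L = D^(-1/2) X D^(-1/2), sorts its
# eigenvalues in descending order, and returns the smallest k whose top-k
# eigenvalues account for tao = 55% of the total eigenvalue mass.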
if __name__ == '__main__':
start = time.clock()
data = np.array([[1, 2.1, 0, 3],\
[1, 1.9, 0.1, 3],\
[4, 7, 3.1, 9.2],\
[3.8, 7, 3.0, 9.1],\
[0.9, 2.0, 0.01, 2.9]])
w = np.zeros((len(data), len(data)))
for i in range(len(data)):
for j in range(len(data)):
w[i, j] = 1.0 / (np.sum(np.abs(data[i] - data[j])) + 1.0)
c = get_cluster_number(w)
print c
print 'Total time %f seconds' % (time.clock() - start)
print 'Complete !!!'
| [
"[email protected]"
]
| |
dfbdd01c48a73c2922ac083707b410cead8ef229 | 3bfa43cd86d1fb3780f594c181debc65708af2b8 | /algorithms/graph/dfs.py | f74044082f578bb19ff5cc62766b874fe92a2fb0 | []
| no_license | ninjaboynaru/my-python-demo | 2fdb6e75c88e07519d91ee8b0e650fed4a2f9a1d | d679a06a72e6dc18aed95c7e79e25de87e9c18c2 | refs/heads/master | 2022-11-06T14:05:14.848259 | 2020-06-21T20:10:05 | 2020-06-21T20:10:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | from graph import digraph_from_file
def dfs(g, order="post"):
marked = set()
def dfs_preorder(node):
marked.add(node)
print(node)
for child in g[node]:
if child not in marked:
dfs_preorder(child)
def dfs_postorder(node):
marked.add(node)
for child in g[node]:
if child not in marked:
dfs_postorder(child)
print(node)
for node in g.keys():
if node not in marked:
# dfs_preorder(node)
dfs_postorder(node)
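# Iterative post-order variant (a sketch, not part of the original module) that
# sidesteps Python's recursion limit on deep graphs:
def dfs_postorder_iterative(g):
    marked = set()
    for start in g:
        if start in marked:
            continue
        marked.add(start)
        stack = [(start, iter(g[start]))]
        while stack:
            node, children = stack[-1]
            for child in children:
                if child not in marked:
                    marked.add(child)
                    stack.append((child, iter(g[child])))
                    break
            else:  # every child visited: emit the node post-order
                print(stack.pop()[0])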
if __name__ == "__main__":
g = digraph_from_file("input/alpha1.txt")
dfs(g)
| [
"[email protected]"
]
| |
d3a316737b9c8b52b38e553c6ea66c0df60eb492 | 2ef35e0cd06653435fea8ab27d0b7db475bdb6d9 | /serial_publish_local.py | 18bcfc79c27bc3bc8e8f80cf7ba7925034526239 | []
| no_license | navalkishoreb/Project_Mqtt | a9d438a2a1c79f662cb6b751f9c2c593989c58f2 | 989a491505a972a54eaf1599f95f6de9562b7e4c | refs/heads/master | 2021-01-23T16:35:38.765479 | 2016-09-16T10:28:52 | 2016-09-16T10:28:52 | 68,371,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | #!/usr/bin/python
import time
import serial
import sys
import requests  # needed by sendTemperatureToServer below
import paho.mqtt.client as mqtt
"""usb = serial.Serial(
port='/dev/ttyACM0',\
baudrate=115200,\
parity=serial.PARITY_NONE,\
stopbits=serial.STOPBITS_ONE,\
bytesize=serial.EIGHTBITS,\
timeout=0)
"""
ser = serial.Serial(
port='/dev/ttyAMA0',
baudrate = 9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=0
)
if not ser.isOpen():
    ser.open()
newLine = None
def on_connect(client, userdata, flags,resultCode):
print("Connected with result code "+str(resultCode))
client.publish("room/temperature/status","online",2,True)
def on_message(client, userdata, message):
print(message.topic+" : "+str(message.payload))
client = mqtt.Client('',True,None,mqtt.MQTTv311,"tcp")
client.on_connect = on_connect
client.on_message = on_message
client.connect("172.29.4.96")
client.loop_start()
def printInHexFormat(binData):
for ch in binData:
print '0x%0*X'%(2,ord(ch)),
print("\n")
return;
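# e.g. printInHexFormat('\x01\xff') prints: 0x01 0xFF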
def sendTemperatureToServer(temp):
r=requests.get("http://api.thingspeak.com/update?key=9W55W474GLEBNBLC&field1="+str(ord(temp)))
print 'sending...'+ str(r.status_code) +" -- "+ str(r.json())
def publishTemperature(temp):
client.publish("room/temperature",str(ord(temp)))
while 1:
data= ser.readline()
if(data != '') and (data !="\n") and len(data)==8:
printInHexFormat(data)
printInHexFormat(data[-1:])
# sendTemperatureToServer(data[-1:])
publishTemperature(data[-1:])
newLine = True;
elif(data == '\n'):
print ('')
else:
if newLine:
newLine = False;
| [
"root@raspberrypi.(none)"
]
| root@raspberrypi.(none) |
970362856f0861f077e65c4cb8ec252123e9f29a | aa7ba8ca76bc012d5d7418b0961208be72af71d0 | /glasses/models/classification/resnetxt/__init__.py | 3ff55e57f37fe798fc4d1bd9252deaa651481a18 | [
"MIT"
]
| permissive | wiesmanyaroo/glasses | 3f8bc67021ad1e7abf84965847beebc6145b2fbb | 2e4bb979e3637d77f9dbec79462dc92f56d3bc27 | refs/heads/master | 2023-02-09T22:59:46.463233 | 2020-12-26T16:04:24 | 2020-12-26T16:04:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,607 | py | from __future__ import annotations
from torch import nn
from torch import Tensor
from glasses.nn.blocks.residuals import ResidualAdd
from glasses.nn.blocks import Conv2dPad
from collections import OrderedDict
from typing import List
from functools import partial
from ..resnet import ResNet, ResNetBottleneckBlock
from glasses.utils.PretrainedWeightsProvider import Config, pretrained
ReLUInPlace = partial(nn.ReLU, inplace=True)
class ResNetXtBottleNeckBlock(ResNetBottleneckBlock):
def __init__(self, in_features: int, out_features: int, groups: int = 32, base_width: int = 4, reduction: int = 4, **kwargs):
"""Basic ResNetXt block build on top of ResNetBottleneckBlock.
It uses `base_width` to compute the inner features of the 3x3 conv.
Args:
in_features (int): [description]
out_features (int): [description]
groups (int, optional): [description]. Defaults to 32.
base_width (int, optional): width factor uses to compute the inner features in the 3x3 conv. Defaults to 4.
"""
features = (int(out_features * (base_width / 64) / reduction) * groups)
super().__init__(in_features, out_features,
features=features, groups=groups, reduction=reduction, **kwargs)
class ResNetXt(ResNet):
"""Implementation of ResNetXt proposed in `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Create a default model
Examples:
>>> ResNetXt.resnext50_32x4d()
>>> ResNetXt.resnext101_32x8d()
>>> # create a resnetxt18_32x4d
>>> ResNetXt.resnet18(block=ResNetXtBottleNeckBlock, groups=32, base_width=4)
Customization
You can easily customize your model
Examples:
>>> # change activation
>>> ResNetXt.resnext50_32x4d(activation = nn.SELU)
>>> # change number of classes (default is 1000 )
>>> ResNetXt.resnext50_32x4d(n_classes=100)
>>> # pass a different block
>>> ResNetXt.resnext50_32x4d(block=SENetBasicBlock)
>>> # change the initial convolution
        >>> model = ResNetXt.resnext50_32x4d()
>>> model.encoder.gate.conv1 = nn.Conv2d(3, 64, kernel_size=3)
>>> # store each feature
>>> x = torch.rand((1, 3, 224, 224))
>>> model = ResNetXt.resnext50_32x4d()
>>> features = []
>>> x = model.encoder.gate(x)
>>> for block in model.encoder.layers:
>>> x = block(x)
>>> features.append(x)
>>> print([x.shape for x in features])
>>> # [torch.Size([1, 64, 56, 56]), torch.Size([1, 128, 28, 28]), torch.Size([1, 256, 14, 14]), torch.Size([1, 512, 7, 7])]
Args:
in_channels (int, optional): Number of channels in the input Image (3 for RGB and 1 for Gray). Defaults to 3.
n_classes (int, optional): Number of classes. Defaults to 1000.
"""
@classmethod
@pretrained('resnext50_32x4d')
def resnext50_32x4d(cls, *args, **kwargs) -> ResNetXt:
"""Creates a resnext50_32x4d model
Returns:
ResNet: A resnext50_32x4d model
"""
return cls.resnet50(*args, block=ResNetXtBottleNeckBlock, **kwargs)
@classmethod
@pretrained('resnext101_32x8d')
def resnext101_32x8d(cls, *args, **kwargs) -> ResNetXt:
"""Creates a resnext101_32x8d model
Returns:
ResNet: A resnext101_32x8d model
"""
return cls.resnet101(*args, **kwargs, block=ResNetXtBottleNeckBlock, groups=32, base_width=8)
@classmethod
# @pretrained('resnext101_32x16d')
def resnext101_32x16d(cls, *args, **kwargs) -> ResNetXt:
"""Creates a resnext101_32x16d model
Returns:
ResNet: A resnext101_32x16d model
"""
return cls.resnet101(*args, **kwargs, block=ResNetXtBottleNeckBlock, groups=32, base_width=16)
@classmethod
# @pretrained('resnext101_32x32d')
def resnext101_32x32d(cls, *args, **kwargs) -> ResNetXt:
"""Creates a resnext101_32x32d model
Returns:
ResNet: A resnext101_32x32d model
"""
return cls.resnet101(*args, **kwargs, block=ResNetXtBottleNeckBlock, groups=32, base_width=32)
@classmethod
# @pretrained('resnext101_32x48d')
def resnext101_32x48d(cls, *args, **kwargs) -> ResNetXt:
"""Creates a resnext101_32x48d model
Returns:
ResNet: A resnext101_32x48d model
"""
return cls.resnet101(*args, **kwargs, block=ResNetXtBottleNeckBlock, groups=32, base_width=48)
| [
"[email protected]"
]
| |
91be8127886dde63063fbf3920ba80ccc6c6a210 | c4b636a2fffbf8ef3096e4de9de61b30ea3df72a | /hackerrank/zeros_and_ones.py | f921a5eca2e28ee22afd295077ac7f7b55287364 | [
"MIT"
]
| permissive | FelixTheC/hackerrank_exercises | f63fbbc55a783ee4cecfa04302301a0fb66d45fe | 24eedbedebd122c53fd2cb6018cc3535d0d4c6a0 | refs/heads/master | 2021-01-04T22:10:47.538372 | 2020-11-01T15:57:20 | 2020-11-01T15:57:20 | 240,779,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@created: 01.12.19
@author: felix
"""
import numpy as np
if __name__ == '__main__':
    shape = tuple(map(int, input().split()))  # split() also handles multi-digit dimensions
    dtype = int  # np.int is deprecated; the builtin int maps to numpy's default integer
    print(np.zeros(shape, dtype=dtype))
    print(np.ones(shape, dtype=dtype))
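# Example (input format assumed): entering "3 3 3" prints a 3x3x3 array of
# zeros followed by a 3x3x3 array of ones.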
| [
"[email protected]"
]
| |
9573e87de4ee288a11c0c1bbe28099c89eed4022 | e2bf489830e55a57945b8e696f8e2d6acefeb560 | /04-系统编程-1/test/33-带有返回值的函数.py | dce5eb1489db919c3f703b6cd4598d2a3f9af0aa | []
| no_license | taizilinger123/pythonjichu | e713de06fb050943a8a1e0256ccba8dea40a411d | 5ee896e92edbac55d02aa63965d896200b8c2623 | refs/heads/master | 2023-04-01T02:00:37.557667 | 2023-03-31T05:08:40 | 2023-03-31T05:08:40 | 148,663,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
def get_wendu():
wendu = 22
print("当前的室温是:%d"%wendu)
return wendu
def get_wendu_huashi(wendu):
print("--------4-------")
wendu = wendu + 3
print("--------5-------")
print("当前的温度(华氏)是:%d"%wendu)
print("--------6-------")
print("--------1-------")
result = get_wendu()
print("--------2-------")
get_wendu_huashi(result)
print("--------3-------")
# E:\python35\python35.exe E:/pythonjichu/33-带有返回值的函数.py
# Traceback (most recent call last):
# File "E:/pythonjichu/33-带有返回值的函数.py", line 17, in <module>
# get_wendu_huashi()
# File "E:/pythonjichu/33-带有返回值的函数.py", line 9, in get_wendu_huashi #报错看最后一个行数字,其他不用看
# wendu = wendu + 3
# UnboundLocalError: local variable 'wendu' referenced before assignment
# --------1-------
# The current room temperature is: 22
# --------2-------   # bisecting with print markers like these locates the bug faster
# --------4------- | [
"[email protected]"
]
| |
4023d50a25750cc3033b54cf0a05967082d9293c | ea40d872e4d3122387f7a17400c1d2f31cf5bd6a | /剑指offer/剑指 Offer 45. 把数组排成最小的数.py | f2163fab2e7c8e53b7918b78573211bfb5d99bdc | []
| no_license | dongbo910220/leetcode_ | e4cf6c849986b105d4d5162c5cd2318ffc3fbb67 | e4c02084f26384cedbd87c4c60e9bdfbf77228cc | refs/heads/main | 2023-05-29T11:23:46.865259 | 2021-06-17T03:40:30 | 2021-06-17T03:40:30 | 344,785,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | str1 = "1223"
str2 = "asdf"
print(str1 + str2) | [
"[email protected]"
]
| |
8e7daff6d28883da7ddcc1f86aaf973d0b0bcb0a | 5e73d9b32657539a680bad7269594f32fd1940b1 | /Basic Data Structures/week4_binary_search_trees/2_is_bst/is_bst.py | 82698ce5d8a6d075f2a2f42cec6ff691028925ff | []
| no_license | harshdonga/Data_Structures_Algorithms | c9b9f721996366b903182f519dd421bfbe599d3b | f3a94910e4d50ea29c906029bd0081d37cf25652 | refs/heads/master | 2022-11-05T08:33:00.340043 | 2020-06-13T18:13:52 | 2020-06-13T18:13:52 | 262,765,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | #!/usr/bin/python3
import sys, threading
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**25) # new thread will get stack of such size
def inOrder(tree):
result = []
def inOrderRec(i,result):
if tree[i][1] != -1:
inOrderRec(tree[i][1], result)
result.append(tree[i][0])
if tree[i][2] != -1:
inOrderRec(tree[i][2], result)
inOrderRec(0, result)
return result
def IsBinarySearchTree(tree, stree):
    # a tree is a BST exactly when its in-order traversal equals the sorted key list
    return inOrder(tree) == stree
def main():
nodes = int(input().strip())
if nodes == 0:
print('CORRECT')
exit()
tree = []
for i in range(nodes):
tree.append(list(map(int,input().strip().split())))
stree = sorted([x[0] for x in tree])
if IsBinarySearchTree(tree, stree):
print("CORRECT")
else:
print("INCORRECT")
threading.Thread(target=main).start() | [
"[email protected]"
]
| |
00bc4046a8127e8a550dec3729cff87516823ed7 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /F64txHnfYj4e4MpAN_22.py | 990afa3ea17171f39d83618e1ee743fb5c150d57 | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py |
def schoty(frame):
  total = 0  # renamed from 'sum' to avoid shadowing the builtin
  index = 0
  conversion = [1000000, 100000, 10000, 1000, 100, 10, 1]
  for line in frame:
    total = total + conversion[index] * line.find("-")
    index = index + 1
  return total
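# Sketch of the assumed input: each of the (up to 7) frame lines encodes one
# decimal digit as the position of '-' among its beads, so line.find("-") == 4
# reads as the digit 4, with the first line being the most significant digit.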
| [
"[email protected]"
]
| |
1a7495f69555d27d90cb00ae511930e9f61dffd6 | 2fd627a9cfdf5c2190fa3018055cf1b643fc55a0 | /6. Union-Intersection/6. union-and-intersection.py | 54669d150694720e7d50cdbcc5ae184bc7947777 | []
| no_license | josancamon19/show-me-data-structures | d17981443abd12252555581909ff8bd904c582ea | 5e8b8135e113aec7424ab79040afc6ff1d71f3fc | refs/heads/master | 2020-08-29T02:38:36.731375 | 2019-11-04T19:37:32 | 2019-11-04T19:37:32 | 217,897,486 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,661 | py | class Node:
def __init__(self, value):
self.value = value
self.next = None
def __repr__(self):
return str(self.value)
class LinkedList:
def __init__(self):
self.head = None
def __str__(self):
cur_head = self.head
if cur_head is None:
return "-- Empty --"
out_string = ""
while cur_head:
out_string += str(cur_head.value) + " -> "
cur_head = cur_head.next
return out_string
def append(self, value):
if self.head is None:
self.head = Node(value)
return
node = self.head
while node.next:
node = node.next
node.next = Node(value)
def size(self):
size = 0
node = self.head
while node:
size += 1
node = node.next
return size
def union(list1, list2):
# Your Solution Here
if list1 is None or type(list1) is not LinkedList or list2 is None or type(list2) is not LinkedList:
print("Invalid Arguments, lists cannot be None")
return LinkedList()
if list1.head is None:
return list2
new_linked_list = LinkedList()
current_l1 = list1.head
while current_l1:
new_linked_list.append(current_l1.value)
current_l1 = current_l1.next
current_l2 = list2.head
while current_l2:
new_linked_list.append(current_l2.value)
current_l2 = current_l2.next
return new_linked_list
def intersection(list1, list2):
# Your Solution Here
if list1 is None or type(list1) is not LinkedList or list2 is None or type(list2) is not LinkedList:
print("Invalid Arguments, lists cannot be None")
return LinkedList()
intersected_linked_list = LinkedList()
s = set()
current_l1 = list1.head
while current_l1:
s.add(current_l1.value)
current_l1 = current_l1.next
current_l2 = list2.head
while current_l2:
if current_l2.value in s:
intersected_linked_list.append(current_l2.value)
s.remove(current_l2.value)
current_l2 = current_l2.next
return intersected_linked_list
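# Note: the set-based intersection above runs in O(n + m) time; the commented-out
# nested-loop variant kept below for reference is O(n * m).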
# def intersection2(list1, list2):
# # Your Solution Here
#
# if list1 is None or type(list1) is not LinkedList or list2 is None or type(list2) is not LinkedList:
# print("Invalid Arguments, lists cannot be None")
# return LinkedList()
#
# intersected_linked_list = LinkedList()
# s = set()
#
# current_l1 = list1.head
# while current_l1:
#
# current_l2 = list2.head
# while current_l2:
#
# if current_l2.value == current_l1.value:
# # print(current_l1, current_l2)
#
# already_intersected = False
# current_intersected = intersected_linked_list.head
#
# while current_intersected:
# if current_intersected.value == current_l1.value:
# already_intersected = True
# break
# current_intersected = current_intersected.next
#
# if not already_intersected:
# intersected_linked_list.append(current_l1.value)
#
# current_l2 = current_l2.next
#
# current_l1 = current_l1.next
#
# return intersected_linked_list
if __name__ == '__main__':
# Test case 1
linked_list_1 = LinkedList()
linked_list_2 = LinkedList()
element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 21]
element_2 = [6, 32, 4, 9, 6, 1, 11, 21, 1]
for i in element_1:
linked_list_1.append(i)
for i in element_2:
linked_list_2.append(i)
# Union operation returns element_1 + element_2
print('\nUnion operation: ', union(linked_list_1, linked_list_2))
# Intersection operation returns set(element_1).intersection(set(element_2))
print('Intersection operation: ', intersection(linked_list_1, linked_list_2))
# Test case 2
linked_list_3 = LinkedList()
linked_list_4 = LinkedList()
element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 23]
element_2 = [1, 7, 8, 9, 11, 21, 1]
for i in element_1:
linked_list_3.append(i)
for i in element_2:
linked_list_4.append(i)
# Union operation returns element_1 + element_2
print('\nUnion operation: ', union(linked_list_3, linked_list_4))
# element_1 and element_2 are all different --> 0 intersections
print('Intersection operation: ', intersection(linked_list_3, linked_list_4))
# Test case 3
linked_list_5 = LinkedList()
linked_list_6 = LinkedList()
element_1 = []
element_2 = [1, 7, 8, 9, 11, 21, 1]
for i in element_1:
linked_list_5.append(i)
for i in element_2:
linked_list_6.append(i)
# Union operation element_1 is empty so return element_2 [1, 7, 8, 9, 11, 21, 1]
print('\nUnion operation: ', union(linked_list_5, linked_list_6))
# Intersection operation element_1 is empty so 0 intersections
print('Intersection operation: ', intersection(linked_list_5, linked_list_6))
print('\n\n--- Invalid Operations ---')
# all will return empty LinkedList() and print a message Invalid Arguments, lists cannot be None
print('\nUnion operation: ', union(linked_list_5, None))
print('Intersection operation: ', intersection(linked_list_5, None))
print('\nUnion operation: ', union(None, linked_list_6))
print('Intersection operation: ', intersection(None, linked_list_6))
print('\nUnion operation: ', union(None, None))
print('Intersection operation: ', intersection(None, None))
| [
"[email protected]"
]
| |
6515a2df2bf4a4bd4429023d4103b0f228db4d78 | afba8aa70edb5cdfe3b38e451330deac72e4eee1 | /aldryn_installer/__init__.py | fbaee6c565f0968a47cd72a0c07b3cc7e54e188a | []
| no_license | jqb/aldryn-installer | b148c920866c86de9399b27a9d9c5e8118c31953 | e960a9f38a85886255f84379808c7495bd3e90b8 | refs/heads/master | 2021-01-15T09:37:44.006606 | 2013-11-04T12:52:37 | 2013-11-04T12:52:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Iacopo Spalletti'
__email__ = '[email protected]'
__version__ = '0.1.1'
| [
"[email protected]"
]
| |
5627eb5d6cb9e3ef58e936d11ea044945069c613 | 00f3f038313e4334ebab171e0133fce63fdba0f0 | /authentication/tests.py | 03bee37ed6b69c09333697c4b4ba788a8ccbf5c0 | []
| no_license | DarishkaAMS/Dj_Projects-Author_Blog | 57a94aaa16d87bfd19dc2ab99e37c5710abcfd0e | ae634c80107e96bba8e0ef6d8e56e83a588e9e0b | refs/heads/main | 2023-01-21T06:17:34.078609 | 2020-11-29T18:06:26 | 2020-11-29T18:06:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | from django.test import TestCase
from django.urls import reverse
# Create your tests here.
from authentication.models import UserManager, User
class TestUser(TestCase):
def setUp(self):
email = "[email protected]"
password = "abc"
username = "HELLO"
self.user = User.objects.create_user(email=email, password=password, username=username)
self.user.save()
    def test_user_view(self):
        email = "[email protected]"
        password = "abc"
        # the original payload repeated the 'username' key, so the email value was
        # silently overwritten; this custom User model logs in with the email address
        resp = self.client.post(reverse('login'), data={'username': email, 'password': password})
        self.assertEqual(resp.status_code, 200)
| [
"[email protected]"
]
| |
c1208a7a966e17598df2c6f7d583240b0aa57882 | ae6f2b9d4c0cfd43411eadc942292439367e8bbc | /PYTHON/Teach_Your_Kids_to_Code_program_files/ch02_turtle/Challenge1_ColorSpiral10.py | ad792179745996a9ccec72d9ecb776bfd32cc884 | []
| no_license | rao003/StartProgramming | 6e68786b55adfad94d0601e516576bcedac290dd | 341739f99bf3684a57b84c8942b85dcfc2e6bc4b | refs/heads/master | 2022-11-20T22:14:10.447618 | 2022-11-04T07:07:33 | 2022-11-04T07:07:33 | 215,080,140 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # ColorSpiral10.py
import turtle
t=turtle.Pen()
turtle.bgcolor('black')
# You can change sides between 2 and 10 for some cool shapes!
sides=10
colors=['red', 'yellow', 'blue', 'orange', 'green', 'purple',
'gray', 'white', 'pink', 'light blue']
for x in range(360):
t.pencolor(colors[x%sides])
t.forward(x * 3/sides + x)
t.left(360/sides + 1)
t.width(x*sides/200)
| [
"[email protected]"
]
| |
dca18d1def42d691fdda32b44a976d9f471808ef | 830b230a154fa21d46bb2d2e76e705528768adb3 | /rhythmbox_ttplyrics/dbus_pyosd.py | da077cb74f719dc8fe91bd41329c0fa85d2dcb72 | []
| no_license | jdtuiran/ideas_sueltas | d0e3f84d6ea629a4caf807c884ae85f25e0bde60 | 026ec493686be3a636e8c5d39d4484a67eadd94f | refs/heads/master | 2022-04-16T06:43:25.917985 | 2020-03-16T21:41:33 | 2020-03-16T21:41:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,418 | py | #!/bin/env python
# -*- coding: utf8 -*-
# 2008-12-27 22:16:45
import gobject
import dbus
from dbus.mainloop.glib import DBusGMainLoop
import os
import sys
import re
import time
import posd# python-osd
class LyricsOSD :
def __init__(self):
self.__RE_TIME=r"^\[[0-9][0-9][:.][0-5][0-9][:.][0-9][0-9]\]"
self.__RE_OFFSET=r"^\[OFFSET *[+-][0-9]*\]"
self.__lines = None
self.__pos = 0
self.__btime = time.time() # begin time
self.__OFFSET = 0
self.__OSD=None
self.__initOSD()
def __initOSD(self):
self.__OSD = posd.PangoOsd()
def Reset(self):
print "CALL Reset"
self.__OSD.hide()
self.__lines = None
self.__pos = 0
self.__btime = time.time()
self.__OFFSET = 0
def __countTime(self,stime):
try:
sec = "%d.%d" % (int(stime[0:2])*60+int(stime[3:5]),int(stime[6:8]))
except:
sec = "0.0"
return eval(sec)
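    # Worked example: for stime == "01:23.45" the expression above builds the
    # string "83.45" (1*60 + 23 seconds plus the hundredths), which eval()
    # turns into the float 83.45.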
def __filterSong(self,song):
if song == None : return "~~ ~~ ~~"
song = song.replace("\n","").replace("\r","")
if len(song) == 0 : song = "~~ ~~ ~~"
return song
def __GetLyrics(self,filename):
self.__lines = None
if filename == None : return
if not os.path.exists(filename) : return
lines=None
try:
f = open(filename,'r')
lines = f.readlines()
f.close()
except IOError,message:
print >> sys.stderr, message
if lines == None : return
self.__lines = []
for line in lines:
if line == "" : continue
if line[0] != '[': continue
ti=[]
while re.search(self.__RE_TIME,line):
ti.append(line[0:10])
line = line[10:]
if len(ti) == 0 :
self.__lines.append("[00:00.00]"+line)
else:
for t in ti:
self.__lines.append(t+line)
if len(self.__lines) == 0 :
            self.__lines = None  # assignment, not comparison
return
self.__lines.sort()
def __getHead(self,lines):
song=''
stime=''
for line in lines :
if line[1:9] == "00:00.00" :
song = song + " " + line[10:]
self.__pos=self.__pos+1
else:
stime = line[1:9]
break
song = song.replace('[','').replace(']','')
song = song.replace('ti:','').replace('ar:','')
song = song.replace('al:','').replace('by:','')
return [stime,song]
def __getSong(self,lines,idx):
line = lines[idx]
stime= line[1:9]
song = line[10:]
return [stime,song]
def LyricsShow(self,filename=None,elapsed=0):
offset = elapsed - self.__OFFSET
if offset < 0 : offset = 0
if self.__lines == None :
self.__GetLyrics(filename)
if self.__lines == None : return
if len(self.__lines) == 0 : return
if elapsed > 0 and abs(self.__btime + offset - time.time()) > 0.2 :
self.__btime = time.time() - offset
self.__pos = 1
n=-1
while n < 0 and self.__pos < len(self.__lines) :
stime,song = self.__getSong(self.__lines,self.__pos)
if re.search(self.__RE_OFFSET,song):
self.__OFFSET = eval(song.replace(']','')[8:])
ntime = self.__countTime(stime)
n = self.__btime + ntime - time.time()
self.__pos = self.__pos + 1
self.__pos = self.__pos - 2
if self.__pos < 0 : self.__pos = 0
print "%2d/%d SEED" % ( self.__pos, len(self.__lines) )
if self.__pos >= len(self.__lines) : return
if self.__pos == 0 :
stime,song = self.__getHead(self.__lines)
else:
stime,song = self.__getSong(self.__lines,self.__pos)
ntime=self.__countTime(stime)
n = self.__btime + ntime - time.time()
if n > 0 : return
song = self.__filterSong(song)
if re.search(self.__RE_OFFSET,song):
self.__OFFSET = eval(song.replace(']','')[8:])
self.__OSD.display(song)
i = self.__OSD.get_number_lines()
        while i > 1 :
            i = i - 1
            if self.__pos + i >= len(self.__lines) :
                continue  # skip lookahead lines past the end of the lyrics
            song = self.__getSong(self.__lines,self.__pos + i)[1]
            song = self.__filterSong(song)
            self.__OSD.display(song,line=i)
self.__pos = self.__pos + 1
print "%2d/%d %s %6.2f/%2.2f %6.2f %.2f %s" % ( self.__pos, len(self.__lines), stime, elapsed, offset, ntime, time.time(), song )
class LyricsDBus :
def __init__(self):
print "CALL __init__"
self.__handlers = []
self.__player = None
self.__shell = None
self.__OSD = None
self.elapsed = -1
self.__lyfilename = None
def __set_uri(self,uri):
print "CALL __set_uri (%s)" % uri
if uri is not None and uri != "" :
self.__uri = uri
self.__shell.getSongProperties(uri,
reply_handler=self.__set_song_properties,
error_handler=self._report_dbus_error)
else:
self._set_no_song()
def __set_song_properties(self,prop):
print "CALL __set_song_properties"
self.title = prop.get("title")
self.artist = prop.get("artist")
self.__lyfilename = "%s/.lyrics/%s/%s.lyric" % (os.getenv("HOME"),self.artist,self.title)
self.__OSD.Reset()
def __set_playing(self,playing):
print "CALL __set_playing (%s)" % playing
self.playing = playing
self.__OSD.Reset()
if not self.playing and self.elapsed < 0 :
self._set_no_song()
def __set_elapsed(self, elapsed):
#print "CALL __set_elapsed (%s) " % elapsed
self.elapsed = elapsed
self.__OSD.LyricsShow(self.__lyfilename,self.elapsed)
if not self.playing and self.elapsed < 0 :
self._set_no_song()
def __property_changed(self,uri,prop,old_val,new_val):
print "CALL __property_changed (%s|%s|%s|%s)" % ( uri,prop,old_val,new_val)
if prop == "title":
self.title = new_val
elif prop == "artist":
self.artist = new_val
self.__lyfilename = "%s/.lyrics/%s/%s.lyric" % (os.getenv("HOME"),self.artist,self.title)
self.__OSD.Reset()
def connect (self):
print "CALL connect"
if self.__player is not None:
return
bus = dbus.SessionBus ()
proxy = bus.get_object ("org.gnome.Rhythmbox", "/org/gnome/Rhythmbox/Player")
self.__player = dbus.Interface (proxy, "org.gnome.Rhythmbox.Player")
proxy = bus.get_object ("org.gnome.Rhythmbox", "/org/gnome/Rhythmbox/Shell")
self.__shell = dbus.Interface (proxy, "org.gnome.Rhythmbox.Shell")
self.__handlers = [
self.__player.connect_to_signal ("playingChanged", self.__set_playing),
self.__player.connect_to_signal ("elapsedChanged", self.__set_elapsed),
self.__player.connect_to_signal ("playingUriChanged", self.__set_uri),
self.__player.connect_to_signal ("playingSongPropertyChanged", self.__property_changed),
]
self.__player.getPlaying (reply_handler=self.__set_playing,
error_handler=self._report_dbus_error)
self.__player.getElapsed (reply_handler=self.__set_elapsed,
error_handler=self._report_dbus_error)
self.__player.getPlayingUri (reply_handler=self.__set_uri,
error_handler=self._report_dbus_error)
self.__OSD = LyricsOSD()
self.connected = True
def disconnect(self):
print "CALL disconnect"
for handler in self.__handlers:
handler.remove()
self.__handlers = []
self.__player = None
self.__shell = None
        self.__OSD = None
def _set_no_song(self):
print "CALL _set_no_song"
self.__OSD.Reset()
def _report_dbus_error(self,err):
print "CALL _report_dbus_error"
if __name__ == '__main__':
DBusGMainLoop(set_as_default=True)
dbus_loop = gobject.MainLoop()
lybus = LyricsDBus()
lybus.connect()
dbus_loop.run()
| [
"[email protected]"
]
| |
2c1a8a260380f4fbb5f4a6d579bbb14b54ab8431 | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/swig.py | cf9658b1333900ab306a92e5b5dd4a98914a2309 | [
"MIT",
"BSD-3-Clause"
]
| permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from .base import GnuRecipe
class SwigRecipe(GnuRecipe):
def __init__(self, *args, **kwargs):
super(SwigRecipe, self).__init__(*args, **kwargs)
self.sha256 = '7cf9f447ae7ed1c51722efc45e7f1441' \
'8d15d7a1e143ac9f09a668999f4fc94d'
self.name = 'swig'
self.depends = ['pcre']
self.version = '3.0.12'
self.version_url = 'http://www.swig.org/download.html'
self.url = 'http://prdownloads.sourceforge.net/swig/' \
'swig-$version.tar.gz'
| [
"[email protected]"
]
| |
9c880d5f6dd983881bf0c236a6ec98537306e6a0 | a4d98e9422993b4f2d977eeaf78fcf6bc8c86c10 | /dfvfs/compression/zlib_decompressor.py | 3269e02ffc9538aff3613266951bac81a773f71b | [
"Apache-2.0"
]
| permissive | dc3-plaso/dfvfs | c3fc80c28a5054f764979e024957c724f9b774e4 | 06b3625426dbf1cc2ac5a8ce09303d0822625937 | refs/heads/master | 2020-04-04T21:15:42.815618 | 2017-07-15T05:27:58 | 2017-07-15T05:27:58 | 39,035,966 | 0 | 0 | null | 2015-07-13T20:36:59 | 2015-07-13T20:36:59 | null | UTF-8 | Python | false | false | 2,038 | py | # -*- coding: utf-8 -*-
"""The zlib and DEFLATE decompressor object implementations."""
import zlib
from dfvfs.compression import decompressor
from dfvfs.compression import manager
from dfvfs.lib import definitions
from dfvfs.lib import errors
class ZlibDecompressor(decompressor.Decompressor):
"""Class that implements a "zlib DEFLATE" decompressor using zlib."""
COMPRESSION_METHOD = definitions.COMPRESSION_METHOD_ZLIB
def __init__(self, window_size=zlib.MAX_WBITS):
"""Initializes the decompressor object.
Args:
window_size (Optional[int]): base two logarithm of the size of
the compression history buffer (aka window size). When the value
is negative, the standard zlib data header is suppressed.
"""
super(ZlibDecompressor, self).__init__()
self._zlib_decompressor = zlib.decompressobj(window_size)
def Decompress(self, compressed_data):
"""Decompresses the compressed data.
Args:
compressed_data (bytes): compressed data.
Returns:
tuple(bytes, bytes): uncompressed data and remaining compressed data.
Raises:
BackEndError: if the zlib compressed stream cannot be decompressed.
"""
try:
uncompressed_data = self._zlib_decompressor.decompress(compressed_data)
remaining_compressed_data = getattr(
self._zlib_decompressor, u'unused_data', b'')
except zlib.error as exception:
raise errors.BackEndError((
u'Unable to decompress zlib compressed stream with error: '
u'{0!s}.').format(exception))
return uncompressed_data, remaining_compressed_data
class DeflateDecompressor(ZlibDecompressor):
"""Class that implements a "raw DEFLATE" decompressor using zlib."""
COMPRESSION_METHOD = definitions.COMPRESSION_METHOD_DEFLATE
def __init__(self):
"""Initializes the decompressor object."""
super(DeflateDecompressor, self).__init__(window_size=-zlib.MAX_WBITS)
manager.CompressionManager.RegisterDecompressors([
DeflateDecompressor, ZlibDecompressor])
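# Minimal usage sketch (direct instantiation; the manager lookup API is not
# shown here). A standard zlib stream round-trips through ZlibDecompressor:
#   decompressor = ZlibDecompressor()
#   data, remainder = decompressor.Decompress(zlib.compress(b'test data'))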
| [
"[email protected]"
]
| |
48a50b6f83025bcf4428123353dd477bdc0e98c6 | 3bcc247a2bc1e0720f0344c96f17aa50d4bcdf2d | /第四阶段/爬虫/day03pm/gouliang.py | 64e4b1d4ff3399235f1cf55b272bbbf4f6d7ae33 | []
| no_license | qianpeng-shen/Study_notes | 6f77f21a53266476c3c81c9cf4762b2efbf821fa | 28fb9a1434899efc2d817ae47e94c31e40723d9c | refs/heads/master | 2021-08-16T19:12:57.926127 | 2021-07-06T03:22:05 | 2021-07-06T03:22:05 | 181,856,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | import requests
import re
import json
from multiprocessing import Pool
from multiprocessing import Manager
import time
import functools  # for functools.partial, used below to bind the lock to CrawlPage
def get_one_page(url):
ua_header = {"User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)"}
response = requests.get(url, headers=ua_header)
    if response.status_code == 200:  # OK
return response.text
return None
#<em class="num"[\s\S]*?</b>([\s\S]*?)</em>[\s\S]*?</p>[\s\S]*?</style>[\s\S]*?</a>
def parse_one_page(html):
pattern = re.compile('<em class="num"[\s\S]*?</b>([\s\S]*?)</em>[\s\S]*?<a title="([\s\S]*?)"[\s\S]*?>')
items = re.findall(pattern, html)
print(items)
for item in items:
        yield {
            "name": item[1].strip(),
            "price": item[0].strip(),
        }
def write_to_file(item):
with open("DogFood.txt", 'a', encoding="utf-8") as f:
f.write(json.dumps(item, ensure_ascii=False)+'\n')
def CrawlPage(lock, offset):
url = "https://search.yhd.com/c0-0/k力狼狗粮?cu=true&utm_source=baidu-nks&utm_medium=cpc&utm_campaign=t_262767352_baidunks&utm_term=86895147209_0_20fdeec883c64f14b0df6ea2d4e2966a#page="+str(offset)+"&sort=1"
html = get_one_page(url)
for item in parse_one_page(html):
lock.acquire()
write_to_file(item)
lock.release()
time.sleep(1)
if __name__ == "__main__":
manager = Manager()
lock = manager.Lock()
newCrawlPage = functools.partial(CrawlPage, lock)
pool = Pool()
pool.map(newCrawlPage, [i for i in range(1,11)])
pool.close()
pool.join() | [
"[email protected]"
]
| |
040e7ecc3fdeb537a0cd06265eed1edd420344b0 | 85bad96f0c53edcda738f42b4afe742eed9865c3 | /TD/cliffwalking.py | 571983e0c03b35368ba3be22b2c4ce80e503f039 | []
| no_license | shirsho-12/RL | 9fc6e0d4de18cb68a15052d1aec28417e9215862 | ef94bffc80c0a5f83543cba170415d85c27ca0a9 | refs/heads/main | 2023-06-24T13:26:56.449898 | 2021-07-13T08:44:20 | 2021-07-13T08:44:20 | 368,197,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | # If imports do not work, add a TD. to each of the files
import gym
from td_algos import q_learning
from helper import gen_eps_greedy_policy, plot_rate
env = gym.make("CliffWalking-v0")
num_states, num_actions = env.observation_space.n, env.action_space.n
print(num_states, num_actions)
gamma = 1
num_episodes = 500
alpha = 0.4
epsilon = 0.1
behaviour_policy = gen_eps_greedy_policy(env.action_space.n, epsilon)
optimal_Q, optimal_policy, info = q_learning(env, behaviour_policy, gamma, num_episodes, alpha)
print("\nQ-Learning Optimal policy: \n", optimal_policy)
plot_rate(info["length"], info["rewards"], "Cliff Walking: Q-Learning")
| [
"[email protected]"
]
| |
a44d0e7c13bbd2a1c8970b880c482d88ea0b1c02 | 08d54785e1266b46912e64a51054fbef80d41629 | /Basic Python Programs/34.GetFileProperties.py | 0bc4d4ff2f529437d2abcc6b2072d21d2827b221 | []
| no_license | Wolvarun9295/BasicPython | 95231fa3b47a28129de69c56c3f7b02204487005 | 722d36692724d1e24022405269b3efb922630c0e | refs/heads/master | 2022-11-04T12:29:54.191632 | 2020-06-15T17:31:45 | 2020-06-15T17:31:45 | 272,414,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | # Imported path and time modules.
from os import path
import time
fileName = __file__
print(f'File: {fileName}')
print(f'Last access time: {time.ctime(path.getatime(fileName))}')
print(f'Last modified time: {time.ctime(path.getmtime(fileName))}')
print(f'Last changed time: {time.ctime(path.getctime(fileName))}')
print(f'Size: {path.getsize(fileName)} bytes')
| [
"[email protected]"
]
| |
e8ddc926d4457abb6650f3823d8b8df1fc9e24dd | 9a3803ba18a88a6a172ac3fb11411ee47adc2108 | /Object.py | 0db2728e02ac085aca36818b3f9257c59e4510da | []
| no_license | jeffreyzhang2012/Village_Simulator_Game | 6efe9197aef982da6008295e5a2b2acab51ebbdc | 9bffabfd8a84980612eab5d4dd18d13d018bc5ad | refs/heads/master | 2023-03-05T04:31:54.883003 | 2021-02-19T06:45:04 | 2021-02-19T06:45:04 | 340,277,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | class JK(object):
def __init__(self,x):
self.x = x
def fuck(self,d):
return d.y * 3
def calc(self,d):
return JK.fuck(self,d) * self.x | [
"--global"
]
| --global |
93ba818fa2d05a5cd078637f250e3a78c6085fc1 | 097b2c588b4695f3ab96c85fd4cda1ce271a7cda | /models/ssd/layers/modules/multibox_loss.py | 591df6efed02fd540a39af14223c826fe2ec9238 | [
"Apache-2.0"
]
| permissive | qinzhengmei/silco | 628422a05841287c2a5159772121e3e2b5c9b72d | 18872c4c31a79aa1bac489096fd8f5c99b4380cf | refs/heads/master | 2022-12-15T16:37:33.367354 | 2020-08-24T21:49:23 | 2020-08-24T21:49:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,225 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from data import v
from ..box_utils import match, log_sum_exp
class MultiBoxLoss(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
    Objective Loss:
        L(x,c,l,g) = (Lconf(x, c) + αLloc(x, l, g)) / N
        where Lconf is the cross-entropy loss, Lloc is the SmoothL1 loss, and
        α weights the localization term (set to 1 by cross validation).
        Args:
            c: class confidences,
            l: predicted boxes,
            g: ground truth boxes
            N: number of matched default boxes
        See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
def __init__(
self,
num_classes,
size,
overlap_thresh,
prior_for_matching,
bkg_label,
neg_mining,
neg_pos,
neg_overlap,
encode_target,
use_gpu=True,
):
super(MultiBoxLoss, self).__init__()
self.use_gpu = use_gpu
self.num_classes = num_classes
self.threshold = overlap_thresh
self.background_label = bkg_label
self.encode_target = encode_target
self.use_prior_for_matching = prior_for_matching
self.do_neg_mining = neg_mining
self.negpos_ratio = neg_pos
self.neg_overlap = neg_overlap
cfg = v[str(size)]
self.variance = cfg["variance"]
def forward(self, predictions, targets):
"""Multibox Loss
Args:
predictions (tuple): A tuple containing loc preds, conf preds,
and prior boxes from SSD net.
conf shape: torch.size(batch_size,num_priors,num_classes)
loc shape: torch.size(batch_size,num_priors,4)
priors shape: torch.size(num_priors,4)
ground_truth (tensor): Ground truth boxes and labels for a batch,
shape: [batch_size,num_objs,5] (last idx is the label).
"""
loc_data, conf_data, priors = predictions
# batch_size
num = loc_data.size(0) # image number
priors = priors[: loc_data.size(1), :]
num_priors = priors.size(0)
num_classes = self.num_classes
# match priors (default boxes) and ground truth boxes
loc_t = torch.Tensor(num, num_priors, 4)
conf_t = torch.LongTensor(num, num_priors)
for idx in range(num):
truths = targets[idx][:, :-1].data
labels = targets[idx][:, -1].data
defaults = priors.data
match(
self.threshold,
truths,
defaults,
self.variance,
labels,
loc_t,
conf_t,
idx,
)
if self.use_gpu:
loc_t = loc_t.cuda()
conf_t = conf_t.cuda()
# wrap targets
loc_t = Variable(loc_t, requires_grad=False) # [8,8732,4]
conf_t = Variable(conf_t, requires_grad=False) # [8,8732]
pos = conf_t > 0 # [8, 8732]
# num_pos = pos.sum(keepdim=True)
# Localization Loss (Smooth L1)
# Shape: [batch,num_priors,4]
pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data) # [8, 8732, 4]
loc_p = loc_data[pos_idx].view(-1, 4) # [85, 4]
loc_t = loc_t[pos_idx].view(-1, 4) # [85, 4]
loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction="sum") # [1,1]
# Compute max conf across batch for hard negative mining
batch_conf = conf_data.view(-1, self.num_classes) # (140256, 2)
        loss_c = log_sum_exp(batch_conf) - batch_conf.gather(
            1, conf_t.view(-1, 1)
        )  # per-prior classification loss, used only to rank hard negatives
# Hard Negative Mining
loss_c = loss_c.view(
pos.size()[0], pos.size()[1]
) # add line,https://github.com/amdegroot/ssd.pytorch/issues/173#issuecomment-424949873
loss_c[pos] = 0 # filter out pos boxes for now
loss_c = loss_c.view(num, -1)
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1)
neg = idx_rank < num_neg.expand_as(idx_rank)
# Confidence Loss Including Positive and Negative Examples
pos_idx = pos.unsqueeze(2).expand_as(conf_data)
neg_idx = neg.unsqueeze(2).expand_as(conf_data)
conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)
targets_weighted = conf_t[(pos + neg).gt(0)]
loss_c = F.cross_entropy(conf_p, targets_weighted, reduction="sum")
N = num_pos.data.sum().double()
loss_l = loss_l.double()
loss_c = loss_c.double()
loss_l = loss_l / N
loss_c = loss_c / N
return loss_l, loss_c
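# Hypothetical usage sketch -- the argument values below follow common SSD300/VOC
# defaults and are NOT taken from this repo's config:
#   criterion = MultiBoxLoss(num_classes=21, size=300, overlap_thresh=0.5,
#                            prior_for_matching=True, bkg_label=0, neg_mining=True,
#                            neg_pos=3, neg_overlap=0.5, encode_target=False)
#   loss_l, loss_c = criterion((loc_preds, conf_preds, priors), targets)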
| [
"[email protected]"
]
| |
c622453419c1ddf1477163422f79dfcf2d99cf19 | 18b977dccd70e9e5a1b553b28ab0413fb3f54f4b | /SoftUni/Python Developmen/Python-Advanced/Multidimensional-Lists/Exercises/example.py | 963a83307987d5f35cf913dde2136caaa9b7d16e | []
| no_license | stevalang/Coding-Lessons | 7203e3a18b20e33e8d596e3dfb58d26c50b74530 | 2d0060c2268ad966efdcae4e6e994ac15e57243a | refs/heads/master | 2023-06-05T08:28:33.290530 | 2021-06-16T19:37:29 | 2021-06-16T19:37:29 | 284,852,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | rows, cols = tuple(map(int, input().split()))
matrix = []
player_position = []
for i in range(rows):
    row = list(input())
    matrix.append(row)
    if 'P' in row:
        player_position = [i, row.index('P')]
commands = list(input())
# movement rules assumed from the partial original logic: the player steps
# only onto free '.' cells inside the matrix and leaves '.' behind
deltas = {'U': (-1, 0), 'D': (1, 0), 'L': (0, -1), 'R': (0, 1)}
for command in commands:
    if command not in deltas:
        continue
    dr, dc = deltas[command]
    next_row = player_position[0] + dr
    next_col = player_position[1] + dc
    if 0 <= next_row < rows and 0 <= next_col < cols and matrix[next_row][next_col] == '.':
        matrix[player_position[0]][player_position[1]] = '.'
        matrix[next_row][next_col] = 'P'
        player_position = [next_row, next_col]
print(matrix)
print(commands)
print(player_position)
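# Example run (input format assumed: "rows cols", the maze rows containing one
# 'P', then a command string): for a 2x3 grid "P..", "..." and commands "RRD"
# the player ends at player_position == [1, 2].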
| [
"[email protected]"
]
| |
316f6adc9ec6ba64bd822c35e138ec8b81898db6 | d79c152d072edd6631e22f886c8beaafe45aab04 | /nicolock/products/migrations/0003_auto_20161207_0642.py | 843f5b50c6fa3ce6ec3d0db7e5dfa6cc8f2289ee | []
| no_license | kabroncelli/Nicolock | 764364de8aa146721b2678c14be808a452d7a363 | 4c4343a9117b7eba8cf1daf7241de549b9a1be3b | refs/heads/master | 2020-03-11T11:02:43.074373 | 2018-04-18T17:38:33 | 2018-04-18T17:38:33 | 129,959,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,440 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-07 06:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0002_initialize_categories'),
]
operations = [
migrations.AlterModelOptions(
name='color',
options={'ordering': ['order', 'modified'], 'verbose_name': 'color', 'verbose_name_plural': 'colors'},
),
migrations.AlterModelOptions(
name='file',
options={'ordering': ['order', 'modified'], 'verbose_name': 'file', 'verbose_name_plural': 'files'},
),
migrations.AlterModelOptions(
name='image',
options={'ordering': ['order', 'modified'], 'verbose_name': 'image', 'verbose_name_plural': 'images'},
),
migrations.AlterModelOptions(
name='pattern',
options={'ordering': ['order', 'modified'], 'verbose_name': 'pattern', 'verbose_name_plural': 'patterns'},
),
migrations.AlterModelOptions(
name='product',
options={'ordering': ['order', 'modified'], 'verbose_name': 'product', 'verbose_name_plural': 'products'},
),
migrations.AlterModelOptions(
name='spec',
options={'ordering': ['order', 'modified'], 'verbose_name': 'spec', 'verbose_name_plural': 'specs'},
),
]
| [
"[email protected]"
]
| |
fb38d5f52a8030341505bcd5f75290e71fc7c05e | 2e935ca936976d2d2bd4e785e2f3f29c63771542 | /ExPy10301.py | 8da6aad45cc4ef8ef45343a9670f7847b3834cb8 | []
| no_license | zoro6908/PY_acamedy | 4a370e866fef19f6d2e7697eb809352b6ac703f5 | 460d26639f7bd8cf2486950dc70feae6a2959ca0 | refs/heads/master | 2023-04-26T18:10:44.691326 | 2021-05-25T00:11:02 | 2021-05-25T00:11:02 | 298,425,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | # 파이썬 기초부터 활용까지 (2020.09)
# [Lesson 3] Operators
# 1. Numeric operations
print(398**2) # exponentiation
a = 55.6; b = 7
c = a + b # adding a float and an int
print(c, type(c)) # the result is a float
s = int(c) # using the int() function
print(s)
k = s - 900
print(k)
k2 = abs(k) # absolute value function
print(k2)
p = (10, 20, 90, 70, 8) # tuple data type
print(p)
print(max(p)) # max function
print(min(p)) # min function
print(sum(p)) # sum function | [
"[email protected]"
]
| |
cd1765c1ef1d31f5386da711b80e8e6a91b4aa0d | 3edd9164d375538a975fe18a074ca721cebf67e0 | /GridPacks/mkPowhegV2Pack.py | 0a1d069d63ffdb653a378115020fb76e6de4ef86 | []
| no_license | d4space/GenGen | 65402c031fbd38ef1ebadb2dd29d76d6c19312ff | c7b1e6cad6b7da089a2ee6960d484edba3ce3d2a | refs/heads/master | 2021-04-30T10:43:59.008552 | 2018-12-08T05:51:21 | 2018-12-08T05:51:21 | 121,340,082 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,151 | py |
# Definition of the input parameters:
# (1) -p grid production stage [f] (one go)
# (2) -i      input card name [powheg.input]
# (3) -m process name (process defined in POWHEG)
# (4) -f working folder [my_ggH]
# (5) -q batch queue name (run locally if not specified)
# (6) -n the number of events to run
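# Example invocation assembled from the options above (the card path and folder
# name are illustrative, not a real production card):
#   python ./run_pwg.py -p f -i production/2017/13TeV/SOME_CARD.input -m VBF_H -f my_ggH -q 1nd -n 100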
import os
########### All
massGrid = {'VBFhWWlnuqq': [125, 200, 250, 300, 350, 400, 450, 500, 550,600, 650, 700, 750, 800, 900, 1000, 1500, 2000, 2500, 3000],
'VBfHWW2L2Nu': [140, 175, 350, 1000, 2000],
#'VBfHWW2L2Nu': [115, 120, 124, 125, 126, 130, 135, 140, 145, 150, 155, 160, 165, 170, 175, 180, 190,200,210,230,270, 300,350,450,500,550,600,650,700,800,900,1000,1500,2000,2500,3000],
'ggZHWW': [120, 125, 130],
'HZJ': [120, 125, 130],
'ggHToWW2L2Nu': [115, 120, 124, 125, 126, 130, 135, 140, 145, 150, 155, 160, 165, 170, 175, 180, 190, 300, 650],
}
##################
# 400 <batch> Exited
##################
#massGrid = {'VBFhWWlnuqq': [125, 200, 250, 300, 350, 450, 500, 550,600, 650, 700, 750, 800, 900, 1000, 1500, 2000, 2500, 3000]
# }
#massGrid = {'VBFhWWlnuqq': [400]
# }
#####################################################
# Gridpack production with multiple processors
#####################################################
######################################
# Step1: Compiling the POWHEG source
######################################
#for mass in massGrid['VBFhWWlnuqq']:
# print mass
# if mass < 300 :
# cmd = 'python ./run_pwg.py -p 0 -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WWlnuqq_withtaus.input -m VBF_H -f vbfhWWlnuqq'+str(mass)
# else :
# cmd = 'python ./run_pwg.py -p 0 -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WWlnuqq_withtaus_reweightdecay_CPS.input -m VBF_H -f vbfhWWlnuqq'+str(mass)
#
# print cmd
# os.system(cmd)
#
###############################################################
# Step2: Producing grids with 3 separate internal stages
################################################################
###########
# step 1-1 p1 x1,2,3,4,5 8nh
###########
#for mass in massGrid['VBFhWWlnuqq']:
# print mass
# if mass < 300 :
# cmd = 'python ./run_pwg.py -p 1 -x 5 -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WWlnuqq_withtaus.input -m VBF_H -f vbfhWWlnuqq'+str(mass) +' -q 8nh -j 10'
# else :
# cmd = 'python ./run_pwg.py -p 1 -x 5 -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WWlnuqq_withtaus_reweightdecay_CPS.input -m VBF_H -f vbfhWWlnuqq'+str(mass) + ' -q 8nh -j 10'
#
# print cmd
# os.system(cmd)
#
###########
# step 2 (2nd), 3 (8nh)
###########
#for mass in massGrid['VBFhWWlnuqq']:
# print mass
# if mass < 300 :
# cmd = 'python ./run_pwg.py -p 3 -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WWlnuqq_withtaus.input -m VBF_H -f vbfhWWlnuqq'+str(mass) +' -q 2nd -j 10'
# else :
# cmd = 'python ./run_pwg.py -p 3 -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WWlnuqq_withtaus_reweightdecay_CPS.input -m VBF_H -f vbfhWWlnuqq'+str(mass) + ' -q 2nd -j 10'
#
# print cmd
# os.system(cmd)
#
####################################################
# Step 3: Create the POWHEG gridpack tarball
####################################################
#for mass in massGrid['VBFhWWlnuqq']:
# print mass
# if mass < 300 :
# cmd = 'python ./run_pwg.py -p 9 -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WWlnuqq_withtaus.input -m VBF_H -f vbfhWWlnuqq'+str(mass) +' -k 1'
# else :
# cmd = 'python ./run_pwg.py -p 9 -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WWlnuqq_withtaus_reweightdecay_CPS.input -m VBF_H -f vbfhWWlnuqq'+str(mass) + ' -k 1'
#
# print cmd
# os.system(cmd)
#########################
# one go
#########################
#
#for mass in massGrid['VBfHWW2L2Nu']:
# print mass
# if mass < 300 :
# cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WW2l2nu_withtaus.input -m VBF_H -f VBfHWW2L2Nu'+str(mass)+' -q 1nd -n 100'
# else :
# cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WW2l2nu_withtaus_reweightdecay_CPS.input -m VBF_H -f VBfHWW2L2Nu'+str(mass)+' -q 1nd -n 100'
#
# print cmd
# os.system(cmd)
#
#
#for mass in massGrid['VBFhWWlnuqq']:
# print mass
# if mass < 300 :
# cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WWlnuqq_withtaus.input -m VBF_H -f vbfhWWlnuqq'+str(mass)+' -q 1nd -n 1000'
# else :
# cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/VBF_H_WW_NNPDF31_13TeV/VBF_H_WW_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WWlnuqq_withtaus_reweightdecay_CPS.input -m VBF_H -f vbfhWWlnuqq'+str(mass)+' -q 1nd -n 1000'
#
# print cmd
# os.system(cmd)
#
# option -d 1: no pdf check
#cmd = 'python ./run_pwg.py -d 1 -p f -i production/pre2017/14TeV/VBF_H_WW_NNPDF30_14TeV/VBF_H_WW_NNPDF30_14TeV_M125.input -g ../JHUGen/cards/decay/WW2l2nu_withtaus.input -m VBF_H -f vbfhWW2l2nu_NNPDF30_14TeV_125 -q 1nd -n 1000'
#
#print cmd
#os.system(cmd)
#for mass in massGrid['ggHToWW2L2Nu']:
# #print mass
# if mass < 300 :
# cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/gg_H_WW_quark-mass-effects_NNPDF31_13TeV/gg_H_WW_quark-mass-effects_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WW2l2nu_withtaus.input -m gg_H_quark-mass-effects -f gghWW2l2nu'+str(mass)+' -q 1nd -n 100'
# else :
# cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/gg_H_WW_quark-mass-effects_NNPDF31_13TeV/gg_H_WW_quark-mass-effects_NNPDF31_13TeV_M'+str(mass)+'.input -g ../JHUGen/cards/decay/WW2l2nu_withtaus_reweightdecay_CPS.input -m gg_H_quark-mass-effects -f gghWW2l2nu'+str(mass)+' -q 1nd -n 100'
#
# print cmd
# os.system(cmd)
#
#for mass in massGrid['ggZHWW']:
# print mass
# cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/ggHZ_HanythingJ_NNPDF31_13TeV/ggHZ_HanythingJ_NNPDF31_13TeV_M'+str(mass)+'_Vinclusive.input -m ggHZ -f ggHZ_Hanything_NNPDF31_13TeV_'+str(mass)+' -q 1nw -n 1000'
#
# print cmd
# os.system(cmd)
#for mass in massGrid['HZJ']:
# #if mass != 120: continue
# print mass
# # HToWW
# cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/HZJ_HanythingJ_NNPDF31_13TeV/HZJ_HanythingJ_NNPDF31_13TeV_M'+str(mass)+'_Vinc.input -g ../JHUGen/cards/decay/WWany.input -m HZJ -f HZJ_HWWany_NNPDF31_13TeV_'+str(mass)+'_Vinc_JHU714 '+' -q 1nw -n 10'
# print cmd
# os.system(cmd)
#
# # HToWWTo2L2Nu
# cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/HZJ_HanythingJ_NNPDF31_13TeV/HZJ_HanythingJ_NNPDF31_13TeV_M'+str(mass)+'_Vinc.input -g ../JHUGen/cards/decay/WW2l2nu_withtaus.input -m HZJ -f HZJ_HWWTo2L2Nu_NNPDF31_13TeV_'+str(mass)+'_Vinc_JHU714'+' -q 1nw -n 10'
# print cmd
# os.system(cmd)
#
# # HToWWTo2L2Nu_ZTo2L
# #cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/HZJ_HanythingJ_NNPDF31_13TeV/HZJ_HanythingJ_NNPDF31_13TeV_M'+str(mass)+'_Vleptonic.input -g ../JHUGen/cards/decay/WW2l2nu_withtaus.input -m HZJ -f HZJ_HWWTo2L2Nu_NNPDF31_13TeV_'+str(mass)+'_Vleptonic_JHU714 '+' -q 1nw -n 10'
# #print cmd
# #os.system(cmd)
####################################
# Anomalous coupling
####################################
#cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/gg_H_WW_quark-mass-effects_NNPDF31_13TeV/gg_H_WW_quark-mass-effects_NNPDF31_13TeV_M125.input -g ../JHUGen/cards/decay/WW2l2nu_withtaus.input -m gg_H_quark-mass-effects -f ggh_0PM_WW2l2n_M125_jhu710 -q 1nd -n 1000'
#print cmd
#os.system(cmd)
#
#cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/gg_H_WW_quark-mass-effects_NNPDF31_13TeV/gg_H_WW_quark-mass-effects_NNPDF31_13TeV_M125.input -g ../JHUGen/cards/decay/anomalouscouplings/WW2l2nu_withtaus_a3.input -m gg_H_quark-mass-effects -f ggh_0M_WW2l2n_M125_jhu710 -q 1nd -n 1000'
#print cmd
#os.system(cmd)
#
#cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/gg_H_WW_quark-mass-effects_NNPDF31_13TeV/gg_H_WW_quark-mass-effects_NNPDF31_13TeV_M125.input -g ../JHUGen/cards/decay/anomalouscouplings/WW2l2nu_withtaus_a3mix.input -m gg_H_quark-mass-effects -f ggh_0Mf05ph0_WW2l2n_M125_jhu710 -q 1nd -n 1000'
#print cmd
#os.system(cmd)
#
#
#cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/gg_H_WW_quark-mass-effects_NNPDF31_13TeV/gg_H_WW_quark-mass-effects_NNPDF31_13TeV_M125.input -g ../JHUGen/cards/decay/anomalouscouplings/WW2l2nu_withtaus_a2.input -m gg_H_quark-mass-effects -f ggh_0PH_WW2l2n_M125_jhu710 -q 1nd -n 1000'
#print cmd
#os.system(cmd)
#
#
#cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/gg_H_WW_quark-mass-effects_NNPDF31_13TeV/gg_H_WW_quark-mass-effects_NNPDF31_13TeV_M125.input -g ../JHUGen/cards/decay/anomalouscouplings/WW2l2nu_withtaus_a2mix.input -m gg_H_quark-mass-effects -f ggh_0PHf05ph0_WW2l2n_M125_jhu710 -q 1nd -n 1000'
#print cmd
#os.system(cmd)
#
#
#cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/gg_H_WW_quark-mass-effects_NNPDF31_13TeV/gg_H_WW_quark-mass-effects_NNPDF31_13TeV_M125.input -g ../JHUGen/cards/decay/anomalouscouplings/WW2l2nu_withtaus_L1.input -m gg_H_quark-mass-effects -f ggh_0L1_WW2l2n_M125_jhu710 -q 1nd -n 1000'
#print cmd
#os.system(cmd)
#
#
#cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/gg_H_WW_quark-mass-effects_NNPDF31_13TeV/gg_H_WW_quark-mass-effects_NNPDF31_13TeV_M125.input -g ../JHUGen/cards/decay/anomalouscouplings/WW2l2nu_withtaus_L1mix.input -m gg_H_quark-mass-effects -f ggh_0L1f05ph0_WW2l2n_M125_jhu710 -q 1nd -n 1000'
#print cmd
#os.system(cmd)
#cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/WZTo3lNu_NNPDF31nnlo_13TeV/WZ_lllnu_mllmin01_NNPDF31nnlo_13TeV.input -m WZ -f WZTo3LNu_mllmin01_NNPDF31_TuneCP5_13TeV_powheg_pythia8 -q 1nd -n 1000'
#print cmd
#os.system(cmd)
cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/WZTo3lNu_NNPDF31nnlo_13TeV/WZ_lllnu_mllmin0001max1_NNPDF31nnlo_13TeV.input -m WZ -f WZTo3LNu_mllmin0001max1_NNPDF31_TuneCP5_13TeV_powheg_pythia8 -q 1nd -n 100'
print cmd
os.system(cmd)
#cmd = 'python ./run_pwg.py -p f -i production/2017/13TeV/Higgs/ggHZ_HanythingJ_NNPDF31_13TeV/ggHZ_HanythingJ_NNPDF31_13TeV_M125_Vleptonic.input -m ggHZ -f ggHZ_HanythingJ_NNPDF31_13TeV_M125_Vleptonic -q 1nw -n 1000'
#print cmd
#os.system(cmd)
| [
"[email protected]"
]
| |
ef8f956403d0202e9fb20059973bea3abe472964 | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/beta/education_beta/azext_education_beta/vendored_sdks/education/aio/operations/_education_schools_users_operations.py | e8c69dfc0b8242740273af02b2c355cc271fe2cb | [
"MIT"
]
| permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 4,183 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class EducationSchoolsUsersOperations:
"""EducationSchoolsUsersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~education.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def delta(
self,
education_school_id: str,
**kwargs
) -> List["models.MicrosoftGraphEducationUser"]:
"""Invoke function delta.
Invoke function delta.
:param education_school_id: key: id of educationSchool.
:type education_school_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of MicrosoftGraphEducationUser, or the result of cls(response)
:rtype: list[~education.models.MicrosoftGraphEducationUser]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.MicrosoftGraphEducationUser"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delta.metadata['url'] # type: ignore
path_format_arguments = {
'educationSchool-id': self._serialize.url("education_school_id", education_school_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[MicrosoftGraphEducationUser]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delta.metadata = {'url': '/education/schools/{educationSchool-id}/users/microsoft.graph.delta()'} # type: ignore
| [
"[email protected]"
]
| |
b807d74a03dd5f1381c42a92a7cb6c9e30bac800 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03370/s840732194.py | 0eb0e9c767f212efcb5084f1fc3f3b2db6f627d9 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | [N,X] = list(map(int,input().split()))
m = []
for i in range(N):
m.append(int(input()))
# print('m:',m)
amari = X - sum(m)
# print('amari:',amari)
i=0
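# Greedy plan: the loop below finds how many extra doughnuts of the cheapest
# kind fit into the leftover budget. Worked example (input format assumed):
# N=3, X=1000, m=[120, 100, 140] -> leftover 640, min(m)=100 -> ans = 3 + 7 - 1 = 9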
while True:
if amari<min(m)*i:
ans = len(m) + i-1
break
i+=1
print(ans)
| [
"[email protected]"
]
| |
a3da8977726d890bfab1da564e6abf12c76cfc8f | 256e1c5c9c0a14370201aac4ebdd9d17049c005c | /batch_ihr_import_new.py | 8e95b5e18782420e8772af5c7a471f96696b4c5a | []
| no_license | rv816/clinical_survey_etl | e0393aabeb2562b6f9d134cdb35f8040238f6fad | 19d5d83f5c4d8f7aec5e0a6e634c113ee8b35388 | refs/heads/master | 2021-01-19T06:58:17.890722 | 2015-09-28T20:02:57 | 2015-09-28T20:02:57 | 26,990,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,022 | py | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import pandas as pd
import pg8000
import psycopg2 as pq
import sqlalchemy as sa
from sqlalchemy import create_engine, ForeignKey, MetaData
from sqlalchemy import Column, Date, Integer, String, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy.orm import relationship, backref
import peewee as pw
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
from fuzzywuzzy.process import extract, extractBests,fuzz, itertools, extractOne
import etl
from pprint import PrettyPrinter as Pretty
from collections import OrderedDict
import models
import postgresql
# <markdowncell>
# # Database Connection
# ____
#
#
# 1. Creates a connection to the database with SQLAlchemy
# 2. Instantiates meta as the extracted metadata from the schema **"public"**
# 3. Autoreflects the tables found in public and turns them into classes.
#     - The scope of which tables are reflected is defined in the **"working_tables"** variable
# 4. Instantiates the Base class as an automapped Base (instead of a Base where tables are declared, as in "declarative_base")
# 5. Activates the Base (with Base.prepare())
# 6. Creates global class names based on their respective counterparts, which have been autoreflected and turned into models that live in **Base.classes**
# 7. Sets the database to autocommit and autoflush. Since we are using the <code>"session.merge"</code> method, this is a convenience for now. Eventually we'll make it a bit more secure.
# <codecell>
engine = create_engine('postgresql+pg8000://vassr:bluedog@localhost:5432/vassr')
meta = MetaData(schema="public")
session = Session(engine)
working_tables = ['person', 'location', 'condition_occurrence', 'care_site', 'observation', 'drug_exposure']
meta.reflect(engine, only=None)
Base = automap_base(metadata=meta)
Base.prepare()
Person, Condition, CareSite, Observation, DrugExposure, Location = Base.classes.person, Base.classes.condition_occurrence, Base.classes.care_site, Base.classes.observation, Base.classes.drug_exposure, Base.classes.location
session.autocommit = True
session.autoflush = True
ins = sa.inspect(engine)
person = Person()
condition = Condition()
caresite = CareSite()
obs = Observation()
drugexp = DrugExposure()
location = Location()
'''
# Old Code:
Base = automap_base()
engine = create_engine('postgresql+pg8000://vassr:bluedog@localhost:5432/vassr')
Base.prepare(engine, reflect=True)
help(Base.prepare)
'''
'''
meta = MetaData(schema="public")
meta.reflect(bind=engine, schema='person', views=False, only=None, extend_existing=False, autoload_replace=True)
meta.create_all(bind=engine)
Base = automap_base(metadata=meta)
Session = sa.orm.sessionmaker()
Session.configure(bind=engine)
session = Session()
Base.prepare(engine, reflect=True)
'''
# <codecell>
# <codecell>
d = {'personed': Person()}
x = d['personed']
x.attr_person_id = 11
x.attr_person_source_value = 'andrea'
session.merge(x)
#session.commit()
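# Note: session.merge() looks the instance up by primary key and copies state
# onto the loaded (or newly pending) object, so the cell above behaves like an
# upsert of person_id 11 -- see step 7 of the markdown cell above.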
# <codecell>
pretty.pprint(tablecheck)  # 'printer' was undefined; 'pretty' is the PrettyPrinter instance created in a later cell
# <codecell>
ss = session.query(Person)
# <codecell>
@sa.event.listens_for(Table, "column_reflect")
def column_reflect(inspector, table, column_info):
# set column.key = "attr_<lower_case_name>"
column_info['key'] = "attr_%s" % column_info['name'].lower()
class MyClass(Base):
__table__ = Table("person", Base.metadata, autoload=True, autoload_with=engine)
# <codecell>
p.attr_person_id
# <codecell>
# Configure Environment
pretty = Pretty(indent=1)
pd.options.display.max_columns = 999
pd.options.display.max_rows = 999
# <codecell>
ihr.race.mapdict
# <markdowncell>
# # Convenience Lists
# <codecell>
ins = sa.inspect(engine)
tablecheck = {}
for x in list(working_tables):
    for y in ins.get_columns(x):
if y['name'] in list(tablecheck.keys()):
print(y['name'] + " has duplicates")
tablecheck[y['name']].append(str(x))
else:
tablecheck.update({y['name']:[str(x)]})
# <headingcell level=1>
# Classes
# <codecell>
# 11/19/14 - TODO
# FIX POSTMAP
# Figure out the object-oriented alignment so each Element can update its field into OMOP. It could just be raw SQL.
class Registry:
    '''Instantiate a registry for mapping. Only initialize with ready-to-go data; otherwise, work with an IO object until properly formatted (headers in row 1, properly labeled index in the first column, etc.).'''
reglist = []
def __init__(self, regname, data, postmap = pd.DataFrame(), *args, **kwargs):
self.regname = regname
#exec("M.%s = self" % regname)
self.data = pd.DataFrame(data)
try:
self.data.to_sql(self.regname, engine, if_exists='replace', index=True, index_label='source_id')
except: print("Regsitry instance %s created, but could not add it to database." % self.regname)
self.postmap = pd.DataFrame()
self.elements = []
def init_elements(self, elementnames, **kwargs):
''' Warning: **kwargs will apply to all element names in elementnames list, if you choose to run init_elements on a list of elementnames. It is best for initiating several elements that have similar characteristics.'''
self.create_ids()
for x in list(elementnames):
self.elements.append(x)
print("Parsing " + x + "....")
valueset = list(self.data[x].drop_duplicates().values)
setattr(self, x, Element(x, valueset, self.data[x], self.regname))
# exec("self.%s = Element('%s', valueset, self.data['%s'], self.regname)" % (x,x,x))
#for key, value in kwargs.items():
# exec("self.%s.%s = '%s'" % (x, key, value))
# print("%s, %s" % (key,value))
# exec("if self.%s.mappingtype == 'direct':" % x)
# if self.Year_of_Birth.mappingtype == 'direct':
# exec("for value in self.%s.valueset:" % x)
setattr(getattr(self, x), 'mapdict', {})
for value in getattr(getattr(self, x), 'valueset'):
print("Element = " + str(x) + ", Value = " + str(value))
getattr(getattr(self, x), 'mapdict')[value] = 'null'
# exec("M.masterplan[%s] = self.elements" % self.regname)
def create_ids(self):
'''creates a column with IDs conforming to GRDR standards'''
source_tmp = input('Name of patient ID column in source data: ')
source_id = closest_match(source_tmp, self.data.columns)
guid = closest_match('GUID', self.data.columns)
idcol = closest_match(source_id, self.data.columns)
self.data['source_id_etl'] = self.data[idcol].apply(lambda x: str(str(self.regname) + '_' + str(x)))
self.data['person_source_value'] = self.data[guid].fillna(self.data['source_id_etl'])
self.person_source_value = self.data['person_source_value'].values
self.data = self.data.set_index('person_source_value')
        self.datadict = {}
        for x in self.elements:
            self.datadict[x] = list(self.data[x].drop_duplicates().values)
        Registry.reglist.append(self.regname)
        return self.data
def __repr__(self):
return "< Registry '%s' >" % self.regname
# In[4]:
class Element:
def __init__(self, elementname, valueset, data, regname, mappingtype='', mappingstatus='Premap', mapdict={}, postmap = pd.DataFrame(), target_table='Not yet specified', target_field='Not yet specified', *args, **kwargs):
self.elementname = elementname
self.regname = regname # source registry name
self.sourcefield = elementname # source column
self.valueset = list(valueset) # source value
self.postmap = postmap
self.data = data
self.mappingtype = mappingtype
self.mappingstatus = mappingstatus
self.mapdict = mapdict
self.target_table = target_table
self.target_field = target_field
def mapper(self, x):
try:
return self.mapdict[x]
except: return 'null'
def transform(self):
# Where x is the each value of the original data, look up the value for key x in Element.mapdict
self.target_table = input("What is the target table name?")
self.target_field = input("What is the target field?")
self.source_field = input("Name of omop source (eg condition_source_value)?")
# type_concept_field = input("Name of omop type concept (eg condition_type_concept_id)?")
self.postmap[self.target_field] = self.data.apply(self.mapper)
self.postmap = self.postmap.rename(columns = {self.elementname: self.target_field})
self.postmap[self.source_field] = self.data
setattr(self,'mappingstatus', 'Postmap')
# df_new = pd.DataFrame(self.postmap).rename(columns={self.elementname: self.target_field})
# self.postmap = df_new
#getattr(getattr(self, regname), 'postmap')[self.target_field] = getattr(self, 'postmap')
#exec("%s.postmap = pd.concat([%s.postmap, self.postmap], axis=1)" % (self.regname, self.regname))
print('Mapped %s: type = %s, target_table = %s, target_field = %s' % (self.elementname, self.mappingtype, self.target_table, self.target_field))
return self.postmap.fillna('null')
    def direct_transform(self):
        # If the values look like 4-digit years, coerce the column to datetimes.
        if self.data.apply(is_year).any():
            self.data = self.data.apply(pd.to_datetime, infer_datetime_format=True, format='%Y')
        self.postmap[self.target_field] = self.data
        return self.data.fillna('null')
def __repr__(self):
classrep = " %s.%s <class Element>" % (self.regname, self.sourcefield)
print(" regname = '%s',\n elementname = '%s',\n sourcefield = '%s'" % (self.regname, self.elementname, self.sourcefield))
if self.regname not in Registry.reglist:
return "%s \n Warning: Origin name %s not an instantiated Registry" % (classrep, self.regname)
else: return classrep
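# <codecell>
# Minimal illustration of Element.mapper's fallback behaviour (hypothetical
# element and concept code, not a real OMOP mapping): source values missing
# from mapdict come back as the string 'null'.
_demo_elem = Element('race', ['White', 'Asian'], pd.Series(['White', 'Asian']), 'demo',
                     mapdict={'White': 8527})
assert _demo_elem.mapper('White') == 8527
assert _demo_elem.mapper('Unknown') == 'null'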
# This amazing piece of programming will automatically map freetext into an omop concept id. Use if there is a lot of freetext.
default_target = 'GRDR_Mapping_11_13_14.xlsx'
class Mapper:
def __init__(self, regobject, sheetname, target_file=default_target):
self.regobject = regobject
self.sheetname = sheetname
self.target_file = target_file
self.mapdf = pd.read_excel(target_file, sheetname=self.sheetname).fillna('null')
self.regobject.mapdf = self.mapdf
for x in range(0, len(self.mapdf)):
            value = self.mapdf.loc[x, 'source_code']
if type(value)==int and value in range(0,10):
self.mapdf.loc[x, 'source_code'] = "%s %s" % (value, self.mapdf.loc[x, 'source_value'])
#mapping['source_code'] + ' ' + mapping['source_value']
self.mapmaster = self.mapdf.to_dict(orient="records")
self.mapkeys = list(self.mapmaster[0].keys())
def check_fields(self):
self.goodfields = []
count = 0
for x in self.mapdf['field name'].dropna():
if x in self.regobject.data.columns:
print(x)
self.goodfields.append(x)
count +=1
print(str(count) + " fields extracted from the mapping table.")
return self.goodfields
def map_all(self):
        '''
        map_all has a confusing array of variables:
        - "mapkeys" are the columns of a mapping file. These are boilerplate -- 'field name', 'source code', 'source value', etc.
        - "mapdict_of_element" is the "mapdict" attribute of each Element object. So for ihr.race.mapdict, ihr is the registry, race is the element, and mapdict is the dictionary mapping valueset values to their targets.
        - "mapdict_of_element_keys" is a convenience list that contains the keys of mapdict_of_element sans all nan values. nan values trip up the fuzzy matching algorithm (extractOne), and it is definitely more valuable to have that algorithm.
        '''
for x in self.mapmaster:
if x[closest_match('field_name', self.mapkeys)] in self.regobject.elements:
mapdict_of_element= getattr(getattr(self.regobject, x['field name']), 'mapdict')
mapdict_of_element_keys = [x for x in mapdict_of_element.keys() if str(x) != 'nan']
print(mapdict_of_element)
code = x[closest_match('source_code', self.mapkeys)]
value = x[closest_match('source_value', self.mapkeys)]
try:
if process.extractOne(str(code), mapdict_of_element_keys)[1] > 50:
try:
mapdict_of_element[code] = x[closest_match('omop_concept_id', self.mapkeys)]
except: handle_it()
else:
if process.extractOne(str(value), mapdict_of_element_keys)[1] > 50:
try:
mapdict_of_element[value] = x[closest_match('omop_concept_id', self.mapkeys)]
except: handle_it()
                        if value not in mapdict_of_element:
                            print(str(x['field name']) + ", " + str(code) + " cannot be mapped")
except:
handle_it()
class AutoMapper:
def __init__(self, regobject):
from algoliasearch import algoliasearch as ag
self.client = ag.Client("31K5UZ93QX", "3fad605ef37e555d88993865e467f2a2")
        self.index = self.client.init_index('omop_concepts')
self.regobject = regobject
dic = {}
# Alter this line right here to add the appropriate "unmappable" to be mapped. Also switch snomed to RxNorm, etc as appropriate
def automap(self, element):
for x in getattr(getattr(self.regobject, element), 'valueset'):
try:
length = len(x)
except:
                continue
if len(x) > 1 and type(x) != int:
res = self.index.search("\SNOMED\\ '%s'" % x, {"removeWordsIfNoResults":"firstWords", "facets":"CONCEPT_CLASS_ID", "facetFilters": "CONCEPT_CLASS_ID:Clinical Finding"})
                try:
                    result = int(res['hits'][0]['objectID'])
                    getattr(self.regobject, element).mapdict[x] = result
                except: pass
class DBLoader:
def __init__(self, db, registry, target_table, target_field='NO_FIELD'):
self.db = db
self.registry = registry
self.target_table = target_table
self.target_field = target_field
self.data = registry.data
        self.insert_id = db.prepare("insert into person (person_source_value) select $1 AS varchar WHERE NOT EXISTS (SELECT 1 FROM person WHERE person_source_value like $1)")
self.clean_duplicates = db.prepare("""
DELETE FROM person USING person p2
WHERE person.person_source_value = p2.person_source_value AND person.person_id < p2.person_id;
""")
def insert_all_ids(self):
for x in self.data.index:
try:
self.insert_id(str(x))
self.clean_duplicates()
print("Added " + str(x) + " to vassr.public.person.person_source_value")
except Exception:
print(handle_it())
def update_all(self):
cols = list(self.registry.postmap.columns)
for x in self.registry.postmap.index:
print(x)
row = self.registry.postmap.ix[x]
rowdict = row.to_dict()
self.target_field = cols[0]
for col in cols:
self.target_field = col
                typer = self.db.prepare("select data_type from information_schema.columns where table_name = '%s' and column_name = '%s'" % (self.target_table, self.target_field))
                v1 = typer()[0][0]
                if rowdict[col] != '':
                    print("col = " + str(col) + ", rowdict[col] = " + str(rowdict[col]))
                    ins = self.db.prepare("update %s set %s = (select '%s'::%s) where person_source_value::text like '%s';" % (self.target_table, self.target_field, str(rowdict[col]), v1, x))
                    ins()
return pd.read_sql_table(self.target_table, engine)
# <headingcell level=1>
# Convenience Functions
# <codecell>
# Convenience functions
def handle_it():
import sys
e = sys.exc_info()[1]
return e.args[0]
def is_year(x):
    if len(str(x)) == 4 and str(x)[:2] in ('19', '20'):
        try:
            int(x)
        except (TypeError, ValueError):
            print("Looks like a year, but doesn't act like one. Not budging to integer.")
            return False
        return True
    return False
def to_year(x):
    '''x needs to be an Element'''
    data = getattr(x, 'data')
    return data.apply(pd.to_datetime, infer_datetime_format=True, format='%Y')
def reset(name_of_reg, target_file):
    ''' name_of_reg is the three-letter registry name. Be SURE to also set the object variable to this same three-letter name, as a convention. '''
    temp_reg = etl.Registry(name_of_reg, pd.read_sql_table(name_of_reg, engine, schema='sourcedata'))
    temp_reg.init_elements(checked)
    return temp_reg
def clean_mapper(mapping):
lis = []
for x in range(0, len(mapping)):
        value = mapping.loc[x, 'source_code']
if type(value)==int and value in range(0,10):
mapping.loc[x, 'source_code'] = "%s %s" % (value, mapping.loc[x, 'source_value'])
#mapping['source_code'] + ' ' + mapping['source_value']
mapping = mapping.fillna('null')
return mapping
def map_it():
for x in ihr.mapmaster:
if x['field name'] in ihr.elements:
mapitem = getattr(getattr(ihr, x['field name']), 'mapdict')
print(mapitem)
code = x['source_code']
value = x['source_value']
if process.extractOne(code, list(mapitem.keys()))[1] > 50:
try:
mapitem[code] = x['OMOP_ Concept_ID']
except: pass
else:
if process.extractOne(value, list(mapitem.keys()))[1] > 50:
try:
mapitem[value] = x['OMOP_ Concept_ID']
except: pass
if code not in mapitem.keys() and value not in mapitem.keys():
print(str(x['field name']) + ", " + str(code) + " cannot be mapped")
def closest_match(x, choices):
match = extractOne(x, choices)
return match[0]
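# <codecell>
# Quick illustration of closest_match (hypothetical column names): fuzzywuzzy's
# extractOne returns the highest-scoring candidate, so a misspelled field name
# still resolves to the intended column.
assert closest_match('person_sourc_value', ['person_id', 'person_source_value']) == 'person_source_value'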
# <codecell>
pd.read_sql_table('person', engine)
# <codecell>
ihr = Registry('ihr', pd.read_csv('ihr/current_data/csv/ihr_complete.csv'))
ihr_map = Mapper(ihr, 'IHR Simplified')
checkfields = ihr_map.check_fields()
checked = ['race', 'ethnic', 'education', 'employment', 'dx_date', 'diamox', 'lasix', 'topamax', 'neptazane', 'dxc_confirm','dxc_diagnosis','dxc_secondaryto', 'dxc_reviewdate']
#ihr = reset('ihr', 'ihr/current_data/csv/ihr_complete.csv')
ihr.init_elements(checked)
ihr_map.map_all()
count = 1
def clean_up_array(array):
    global count
    for x in array:
        count += 1
        person = Person()
        # person.attr_person_id = count
        person.attr_person_source_value = str(x)
        session.add(person)
# <codecell>
for x in list(ihr.person_source_value):
    person = Person()
    person.attr_person_source_value = str(x)
    session.add(person)
session.commit()
# <codecell>
x = pd.DataFrame(ihr.person_source_value).applymap(lambda s: str(s).replace(" ", ""))
x.values
# <codecell>
# <codecell>
mapping.mapmaster[500][closest_match('asdfad', mapping.mapkeys)]
# <codecell>
ihr.data.set_index(['person_source_value', 'Atitle_x'])
# <codecell>
mapping.mapmaster[0][closest_match('yes', mapping.mapkeys)]
# <codecell>
namhrload = etl.IO('mhr', 'namhr/current_data/20141029MHRegsitry.xlsx')
mhrdata = namhrload.xls_to_df()
mhr = etl.Registry('mhr', mhrdata)
mhr.mapmaster = open_mapping_sheet('namhr')
with open('namhr/current_data/csv/20141029MHRegsitry.csv', 'r') as f:
wha = f.readlines()
mapmaster = mapping.to_dict(orient="records")
auto = AutoMapper(ihr)
auto.automap(ihr.dxc_secondaryto.elementname)
pd.read_sql_query("select * from person;", engine)
# <codecell>
ihr.mapmaster = mapping.to_dict(orient="records")
ihr.race.transform()
# <codecell>
# def update(self):
# cols = list(self.registry.postmap.columns)
# comma = ","
# for x in self.registry.postmap.index:
# row = self.registry.postmap.ix[x]
# rowdict = row.to_dict()
# for col in cols:
# setter = "set %s = %s" % (col, rowdict[col])
# db.execute("update public.person %s where public.%s like '%s';" % (setter, 'person_source_value', str(x)))
# <codecell>
loader = DBLoader(db, ihr.race, 'person')
update = db.prepare("update %s set %s = (select $1::varchar) where person_source_value::text like $2::text;" % (loader.target_table, loader.target_field))
loader.update_all()
db.bind('postgres', user='', password='', host='', database='')
# <codecell>
from pony.orm import *
db = Database()
db.bind('postgres', user='vassr', password='bluedog', host='localhost', database='vassr')
# <codecell>
Base = declarative_base(metadata=MetaData(bind=engine))
class MyTable(Base):
__table__ = sa.Table('person', Base.metadata, autoload=True)
ry = MyTable()
# <codecell>
tablename = 'person'
fieldname = 'race_source_value'
x = 123
typer = db.prepare("select data_type from information_schema.columns where table_name = '%s' and column_name = '%s'" % (tablename, fieldname))
v1 = typer()[0][0]
ins = db.prepare("update %s set %s = (select %s::%s) where person_source_value::text like $1::text;" % (tablename, fieldname,x, v1))
# <codecell>
typedict = {'character':'',
'time with time zone':'',
'date':'',
'timestamp with time zone':'',
'smallint':'',
'character varying':'',
'boolean':'',
'double precision':'',
'integer':'',
'numeric':'',
'text':'',
'bigint':''}
# <codecell>
dd = pd.DataFrame(ihr.dx_date.data * ihr.dx_date.data.apply(is_year)).apply(pd.to_datetime, infer_datetime_format=True, format='%Y')
ihr.dx_date.data = ihr.dx_date.data.apply(pd.to_datetime, infer_datetime_format=True, format='%Y')
# <codecell>
for x in ihr.dx_date.data.iteritems():
if not is_year(x):
print(x)
# <headingcell level=3>
# is_year('1923')
# <codecell>
if is_year(11):
print('no one sees')
# <codecell>
ihr.dx_date.data
# <codecell>
pd.set_option('display.max_rows', None)
# <codecell>
from random import randint
df = pd.DataFrame([[randint(3, 8) for _ in range(3)] for _ in range(3)], index=['A', 'B', 'C'])
# <codecell>
| [
"[email protected]"
]
| |
30c14a07c174d1205e1cfe1466811eedb9f4a091 | e7df6d41d7e04dc1c4f4ed169bf530a8a89ff17c | /Bindings/Python/tests/test_simulation_utilities.py | cfc1e5913bbeb73b03ab8a056978b8cf24909949 | [
"Apache-2.0"
]
| permissive | opensim-org/opensim-core | 2ba11c815df3072166644af2f34770162d8fc467 | aeaaf93b052d598247dd7d7922fdf8f2f2f4c0bb | refs/heads/main | 2023-09-04T05:50:54.783630 | 2023-09-01T22:44:04 | 2023-09-01T22:44:04 | 20,775,600 | 701 | 328 | Apache-2.0 | 2023-09-14T17:45:19 | 2014-06-12T16:57:56 | C++ | UTF-8 | Python | false | false | 598 | py |
import os
import unittest
import opensim as osim
test_dir = os.path.join(os.path.dirname(os.path.abspath(osim.__file__)),
'tests')
class TestSimulationUtilities(unittest.TestCase):
def test_update_kinematics(self):
model = osim.Model(
os.path.join(test_dir, 'gait10dof18musc_subject01.osim'))
kinematics_file = os.path.join(test_dir, 'std_subject01_walk1_ik.mot')
# updatePre40KinematicsStorageFor40MotionType() is not wrapped.
osim.updatePre40KinematicsFilesFor40MotionType(model,
[kinematics_file])
| [
"[email protected]"
]
| |
79a154b4c6dc4f41760af80f2a7247ce20a26688 | b2bdd5997ac84b0e19071c1ddc1c1a4d2f4fab58 | /catkin_ws/build/p5/cmake/p5-genmsg-context.py | 0205608693a493b861da3d3774a3a196b62f043a | []
| no_license | hbtslys01/RosCodingProject | 860d18531dabe4a969278deff5dbad8a8703ea83 | 226feda08724e92fd94191e123b9442c028283dd | refs/heads/master | 2020-04-11T09:16:17.808626 | 2018-12-13T17:30:08 | 2018-12-13T17:30:08 | 161,671,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/cs4750/catkin_ws/src/p5/msg/Action.msg;/home/cs4750/catkin_ws/src/p5/msg/ActionEffect.msg;/home/cs4750/catkin_ws/src/p5/msg/ActionOption.msg;/home/cs4750/catkin_ws/src/p5/msg/State.msg"
services_str = "/home/cs4750/catkin_ws/src/p5/srv/ChooseAction.srv;/home/cs4750/catkin_ws/src/p5/srv/ClosestPointPath.srv;/home/cs4750/catkin_ws/src/p5/srv/CommandRobot.srv;/home/cs4750/catkin_ws/src/p5/srv/ComputeEntropy.srv;/home/cs4750/catkin_ws/src/p5/srv/DetectMetal.srv;/home/cs4750/catkin_ws/src/p5/srv/FollowPath.srv;/home/cs4750/catkin_ws/src/p5/srv/GetPosition.srv;/home/cs4750/catkin_ws/src/p5/srv/GetTransitions.srv;/home/cs4750/catkin_ws/src/p5/srv/InterpolatePath.srv;/home/cs4750/catkin_ws/src/p5/srv/LocateBall.srv;/home/cs4750/catkin_ws/src/p5/srv/ModelSenseAction.srv;/home/cs4750/catkin_ws/src/p5/srv/SenseBall.srv;/home/cs4750/catkin_ws/src/p5/srv/SetPosition.srv"
pkg_name = "p5"
dependencies_str = "std_msgs;geometry_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "p5;/home/cs4750/catkin_ws/src/p5/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = 'TRUE' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"[email protected]"
]
| |
a0ac77d6f01d6b6ed6363565b4d311164ca4e6a6 | 5d178ff8ae636123147b15fa530dba3aff0ff786 | /fsb/tariff/urls.py | 0154607fd51a330b569b2943ff03d0f91b991d3f | []
| no_license | grengojbo/fsb | 70d054388e75e2e3d62aa4dbf80679ccd7213c50 | 75a222dda323edb5a5407ffc89071a48ed0628aa | refs/heads/master | 2021-01-10T20:29:22.965848 | 2011-04-07T16:27:38 | 2011-04-07T16:27:38 | 561,853 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | # -*- mode: python; coding: utf-8; -*-
from django.conf.urls.defaults import *
from django.conf import settings
#from django.utils.translation import ugettext_lazy as _
__author__ = '$Author:$'
__revision__ = '$Revision:$'
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^{{ project_name }}/', include('{{ project_name }}.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/(.*)', admin.site.root),
)
| [
"[email protected]"
]
| |
aeed7aa47de82741ebdc77756df68ce3e9a6f0d3 | 8993f017079fd4c8329b37ddb663b28b54be1586 | /LatitudeProfile.py | f013f2db4c433bcb565eabb110ec36f87452c5e6 | [
"MIT"
]
| permissive | nithinsivadas/IRI2016 | 74f7ec3bb39b4b55c6368085dd1fcbfee8cab20c | 16f383e6666dfff2938019d49b411ec23b8f18c0 | refs/heads/master | 2020-07-15T01:13:34.167868 | 2019-08-29T19:53:59 | 2019-08-29T19:53:59 | 205,445,491 | 0 | 0 | MIT | 2019-08-30T19:33:21 | 2019-08-30T19:33:21 | null | UTF-8 | Python | false | false | 917 | py | #!/usr/bin/env python
import iri2016 as iri
from argparse import ArgumentParser
from matplotlib.pyplot import show
from pathlib import Path
import iri2016.plots as piri
def main():
p = ArgumentParser()
p.add_argument("glon", help="geodetic longitude (degrees)", type=float)
p.add_argument(
"-glat",
help="geodetic latitude START STOP STEP (degrees)",
type=float,
nargs=3,
default=(-60, 60, 2.0),
)
p.add_argument("-alt_km", help="altitude (km)", type=float, default=300.0)
p.add_argument("-o", "--outfn", help="write data to file")
P = p.parse_args()
iono = iri.geoprofile(latrange=P.glat, glon=P.glon, altkm=P.alt_km, time="2004-01-01T17")
if P.outfn:
outfn = Path(P.outfn).expanduser()
print("writing", outfn)
iono.to_netcdf(outfn)
piri.latprofile(iono)
show()
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
c49e7df813833928b68f4bea703e83c3224cb8ce | a5cc3ce2bcc8a482b8a5ccd2ef767327bab56f93 | /task3/venv/Scripts/pip-script.py | d9e1ebb90e8185cc7ead1897e8d48e8d28bdcc13 | []
| no_license | Vetarium/ICT | e30db16460a4ba0bad8ddbab0584843714b6c7ee | f7277c131a65dff3b0cde4b7edb2caa14e2b346d | refs/heads/master | 2023-02-05T21:04:51.956948 | 2020-12-24T05:47:43 | 2020-12-24T05:47:43 | 297,998,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | #!C:\Users\Aron\Documents\ICT\task3\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"[email protected]"
]
| |
7c66b48eb66db52f012546f32ea940d5909ce431 | 4262dcafe190db05852c7e1cfafc687031d23367 | /src/Employee/migrations/0007_employee_emp_password.py | 6e60f91162e7e4865e7ee9cc97e6bc670c43ef68 | []
| no_license | ShunnoSaiful/JobPortal | b39930fcdb1bc30567f8a2c91d80786ab497afd5 | c8f3064b87c5d967b8f415fc5f080e167fc0c77d | refs/heads/main | 2023-01-07T02:44:33.831589 | 2020-11-11T11:47:46 | 2020-11-11T11:47:46 | 308,109,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | # Generated by Django 2.2 on 2020-11-03 16:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Employee', '0006_employee_is_employee'),
]
operations = [
migrations.AddField(
model_name='employee',
name='emp_password',
field=models.CharField(default=1, max_length=100),
preserve_default=False,
),
]
| [
"[email protected]"
]
| |
ec5b256968bdfc9555035c60dbbede31f0403251 | bdc10ba57424040129cc72ad018ff26bc8bca66a | /ConfigDefinitions/BranchAdditions/UserDefinedCollections/EScaleCollection_Embedded_2017.py | 1d12d35cc1372cf62acdb235a5cb627116ff5069 | []
| no_license | aloeliger/Jesterworks | 61e0ac38ca325fefbbd8ccedaa8eb02d8a76ebbe | 96a22bac4ce20b91aba5884eb0e5667fcea3bc9a | refs/heads/master | 2021-06-09T15:39:06.976110 | 2021-04-23T11:25:06 | 2021-04-23T11:25:06 | 157,698,363 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | import ConfigDefinitions.BranchAdditions.BranchDef as BranchDef
import ConfigDefinitions.BranchAdditions.UserDefinedBranches.EmbeddedTES as TES
import ConfigDefinitions.BranchAdditions.UserDefinedBranches.MES as MES
import ConfigDefinitions.BranchAdditions.UserDefinedBranches.LLFakeES as LLFakeES
EScaleCollection = BranchDef.UserBranchCollection()
EScaleCollection.UserBranches = [
TES.TES_E_UP_2017Branch,
TES.TES_E_DOWN_2017Branch,
TES.TES_PT_UP_2017Branch,
TES.TES_PT_DOWN_2017Branch,
TES.TES_MET_UP_2017Branch,
TES.TES_MET_DOWN_2017Branch,
TES.TES_METPhi_UP_2017Branch,
TES.TES_METPhi_DOWN_2017Branch,
MES.muonES_E_UP_Branch,
MES.muonES_E_DOWN_Branch,
MES.muonES_Pt_UP_Branch,
MES.muonES_Pt_DOWN_Branch,
MES.muonES_MET_UP_Branch,
MES.muonES_MET_DOWN_Branch,
MES.muonES_METPhi_UP_Branch,
MES.muonES_METPhi_DOWN_Branch,
LLFakeES.EES_E_UP_Branch,
LLFakeES.EES_E_DOWN_Branch,
LLFakeES.EES_Pt_UP_Branch,
LLFakeES.EES_Pt_DOWN_Branch,
LLFakeES.EES_MET_UP_Branch,
LLFakeES.EES_MET_DOWN_Branch,
LLFakeES.EES_METPhi_UP_Branch,
LLFakeES.EES_METPhi_DOWN_Branch,
LLFakeES.MES_E_UP_Branch,
LLFakeES.MES_E_DOWN_Branch,
LLFakeES.MES_Pt_UP_Branch,
LLFakeES.MES_Pt_DOWN_Branch,
LLFakeES.MES_MET_UP_Branch,
LLFakeES.MES_MET_DOWN_Branch,
LLFakeES.MES_METPhi_UP_Branch,
LLFakeES.MES_METPhi_DOWN_Branch,
]
| [
"[email protected]"
]
| |
812bd562a8a088516188cbf8c3f613c20f3288ef | df29840e4adbc35f40d8a05d3d887359fc7a784b | /Git Result24bd/Result24bd/Result24bd/settings.py | 5b37549c35f0232f036b9cf8daca59111dcff05c | []
| no_license | mdarifulislamroni21/backupdtatahuifehi | ab796ff2b70a4614f586af29e786b085cb1ee6c1 | a26ab7373ad50cb0b563a2511a7788748002884c | refs/heads/main | 2023-08-17T04:53:50.262280 | 2021-10-06T18:10:46 | 2021-10-06T18:10:46 | 414,320,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,241 | py | from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
HTMLCODE_DIRS=os.path.join(BASE_DIR,'html_code')
STATIC_DIRS=os.path.join(BASE_DIR,'static')
MEDIA_DIRS=os.path.join(BASE_DIR,'media')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-v9!n=kpn3$a*4$)3-vyz@=!%!fpb$6r@9duwg371sb1*imlv*p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['result24bd.com',]
CRISPY_TEMPLATE_PACK='bootstrap4'
# Application definition
AUTH_USER_MODEL='User.User'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms','django_cleanup.apps.CleanupConfig',
'Post','User','ckeditor','ckeditor_uploader',
]
SITE_ID=1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Result24bd.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [HTMLCODE_DIRS,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Result24bd.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'resultbd',
# 'USER': 'root',
# 'PASSWORD': 'Roni@gmail1',
# 'HOST': 'localhost',
# 'PORT': '3306',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL='/media/'
# STATICFILES_DIRS=[STATIC_DIRS,]
CKEDITOR_UPLOAD_PATH = 'uploads/'
# MEDIA_ROOT=MEDIA_DIRS
CKEDITOR_BASEPATH = "/static/ckeditor/ckeditor/"
if DEBUG:
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
else:
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
CKEDITOR_JQUERY_URL = '/static/js/jquery-2.1.1.min.js'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Full',
'height': 600,
'width': 835,
'removePlugins': 'stylesheetparser',
'extraPlugins': 'codesnippet',
},
}
LOGIN_URL='/account/login/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"[email protected]"
]
| |
08c04a042b444070a06f2231f2b50b78fea9b755 | e92e4b67cb38686bce5aec798015451ca8b618ff | /DarkScripts/R1/plot_TOEOfModels_Paper_R1.py | bf9a7dbde4ae7dc5c93213694fd4d1d43d3c0ecb | [
"MIT"
]
| permissive | weilin2018/InternalSignal | ad76f863e1d2f75abc285c88fedf928dcfd938bb | 8d2685a7e3cc84086ea771e68c1a5bda08ad7a9b | refs/heads/master | 2023-04-03T11:46:46.965678 | 2021-03-26T18:57:05 | 2021-03-26T18:57:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,219 | py | """
Plot signal-to-noise ratios for XLENS simulations for paper
[1] Method: 10-yr running mean exceeds 1920-1949 baseline by 2sigma
Reference : Deser et al. [2020, JCLI] and Lehner et al. [2017, JCLI]
Author : Zachary M. Labe
Date : 4 March 2021
"""
### Import packages
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sts
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import palettable.scientific.sequential as scm
import cmocean as cmocean
import calc_Utilities as UT
import calc_dataFunctions as df
import pandas as pd
##############################################################################
##############################################################################
##############################################################################
## Data preliminaries
directorydataLLS = '/Users/zlabe/Data/LENS/SINGLE/'
directorydataLLL = '/Users/zlabe/Data/LENS/monthly'
directoryfigure = '/Users/zlabe/Documents/Projects/InternalSignal/DarkFigures/'
directoriesall = [directorydataLLS,directorydataLLS,directorydataLLL]
##############################################################################
##############################################################################
##############################################################################
datasetsingleq = ['AER+','GHG+','ALL']
datasetsingle = ['XGHG','XAER','lens']
##############################################################################
##############################################################################
##############################################################################
timeq = ['1920-1959','1960-1999','2000-2039','2040-2079']
seasons = ['annual','JFM','AMJ','JAS','OND']
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m"]
years = np.arange(1920,2079+1,1)
##############################################################################
##############################################################################
##############################################################################
variq = 'T2M'
monthlychoice = seasons[0]
reg_name = 'Globe'
##############################################################################
##############################################################################
##############################################################################
def read_primary_dataset(variq,dataset,lat_bounds,lon_bounds,monthlychoice):
data,lats,lons = df.readFiles(variq,dataset,monthlychoice)
datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)
print('\nOur dataset: ',dataset,' is shaped',data.shape)
return datar,lats,lons
### Read in data
lat_bounds,lon_bounds = UT.regions(reg_name)
ghg,lat1,lon1 = read_primary_dataset(variq,datasetsingle[0],lat_bounds,lon_bounds,
monthlychoice)
aer,lat1,lon1 = read_primary_dataset(variq,datasetsingle[1],lat_bounds,lon_bounds,
monthlychoice)
lens,lat1,lon1 = read_primary_dataset(variq,datasetsingle[2],lat_bounds,lon_bounds,
monthlychoice)
### Calculate ensemble mean
meanghg = np.nanmean(ghg,axis=0)[:-1,:,:] #to 2079
meanaer = np.nanmean(aer,axis=0)[:-1,:,:] #to 2079
meanlens = np.nanmean(lens,axis=0)[:-1,:,:] #to 2079
### Functions for calculating moving averages
def moving_average(data,window):
"""
Calculating rolling mean over set window
"""
### Import functions
import numpy as np
movemean = np.convolve(data,np.ones(window),'valid') / window
return movemean
def rollingMean(data,w,mp):
"""
Calculating rolling mean over set window
"""
### Import functions
import numpy as np
import pandas as pd
datadf = pd.Series(data)
movemean = datadf.rolling(window=w,min_periods=mp).mean().to_numpy()
return movemean
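# A quick sanity check of rollingMean (hypothetical values, independent of the
# model data): with window=2 and min_periods=1 the first point is returned
# unsmoothed and later points are two-point averages.
assert np.allclose(rollingMean(np.arange(4.0), 2, 1), [0.0, 0.5, 1.5, 2.5])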
### 10-year running mean
window = 10
min_periods = 1
smooth_ghg = np.empty((ghg.shape[0],ghg.shape[1]-1,ghg.shape[2],ghg.shape[3]))
smooth_aer = np.empty((aer.shape[0],aer.shape[1]-1,aer.shape[2],aer.shape[3]))
smooth_lens = np.empty((lens.shape[0],lens.shape[1]-1,lens.shape[2],lens.shape[3]))
for ens in range(ghg.shape[0]):
for i in range(ghg.shape[2]):
for j in range(ghg.shape[3]):
smooth_ghg[ens,:,i,j] = rollingMean(ghg[ens,:-1,i,j],window,min_periods)
smooth_aer[ens,:,i,j] = rollingMean(aer[ens,:-1,i,j],window,min_periods)
smooth_lens[ens,:,i,j] = rollingMean(lens[ens,:-1,i,j],window,min_periods)
print('Completed: Ensemble #%s running mean!' % (ens+1))
### Slice baseline of 1920-1949
minyr = 1920
maxyr = 1949
yearq = np.where((years >= minyr) & ((years <= maxyr)))[0]
ghgbase = smooth_ghg[:,yearq,:,:]
aerbase = smooth_aer[:,yearq,:,:]
lensbase = smooth_lens[:,yearq,:,:]
### 2 Sigma of 1920-1949
ghg2 = np.nanstd(ghgbase[:,:,:,:],axis=1) * 2.
aer2 = np.nanstd(aerbase[:,:,:,:],axis=1) * 2.
lens2 = np.nanstd(lensbase[:,:,:,:],axis=1) * 2.
### Limit of baseline
ghgbasemean = np.nanmean(ghgbase[:,:,:,:],axis=1)
aerbasemean = np.nanmean(aerbase[:,:,:,:],axis=1)
lensbasemean = np.nanmean(lensbase[:,:,:,:],axis=1)
ghglimit = ghgbasemean + ghg2
aerlimit = aerbasemean + aer2
lenslimit = lensbasemean + lens2
### Calculate ToE
def calcToE(database,datalimit,years):
"""
Calculate ToE from Lehner et al. 2017
"""
toe= np.empty((database.shape[0],database.shape[2],database.shape[3]))
toe[:,:,:] = np.nan
for ens in range(database.shape[0]):
for i in range(database.shape[2]):
for j in range(database.shape[3]):
limit = datalimit[ens,i,j]
for yr in range(database.shape[1]):
smooth = database[ens,yr,i,j]
if smooth > limit:
if np.nanmax(database[ens,yr:,i,j]) > limit:
if np.isnan(toe[ens,i,j]):
toe[ens,i,j] = years[yr]
print('Completed: Ensemble #%s ToE!' % (ens+1))
return toe
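# Sanity check of calcToE on synthetic data (hypothetical values, not part of
# the CESM analysis): one ensemble member on a 1x1 grid whose series exceeds
# its limit permanently from index 40, i.e. the year 1960.
_demo = np.zeros((1, years.shape[0], 1, 1))
_demo[0, 40:, 0, 0] = 1.0
_demo_limit = np.full((1, 1, 1), 0.5)
assert calcToE(_demo, _demo_limit, years)[0, 0, 0] == years[40]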
toe_ghg = calcToE(smooth_ghg,ghglimit,years)
toe_aer = calcToE(smooth_aer,aerlimit,years)
toe_lens = calcToE(smooth_lens,lenslimit,years)
### Calculate ensemble mean ToE
mtoe_ghg = np.nanmean(toe_ghg,axis=0)
mtoe_aer = np.nanmean(toe_aer,axis=0)
mtoe_lens = np.nanmean(toe_lens,axis=0)
alltoe = [mtoe_ghg,mtoe_aer,mtoe_lens]
#######################################################################
#######################################################################
#######################################################################
### Plot subplot of ToE
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='darkgrey')
plt.rc('xtick',color='darkgrey')
plt.rc('ytick',color='darkgrey')
plt.rc('axes',labelcolor='darkgrey')
plt.rc('axes',facecolor='black')
fig = plt.figure(figsize=(10,2.5))
for i in range(len(alltoe)):
ax1 = plt.subplot(1,3,i+1)
m = Basemap(projection='moll',lon_0=0,resolution='l',area_thresh=10000)
circle = m.drawmapboundary(fill_color='dimgrey')
circle.set_clip_on(False)
m.drawcoastlines(color='w',linewidth=0.35)
### Colorbar limits
barlim = np.round(np.arange(1920,2040+1,10),2)
barlim2 = np.round(np.arange(1925,2060+1,10),2)
barlim3 = [r'1920s',r'1930s',r'1940s',r'1950s',r'1960s',r'1970s',r'1980s',r'1990s',
r'2000s',r'2010s',r'2020s',r'2030s',r'2040s']
### Take toe mean over all years
toemodel = alltoe[i]
var, lons_cyclic = addcyclic(toemodel , lon1)
var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lat1)
x, y = m(lon2d, lat2d)
### Make the plot continuous
cs = m.contourf(x,y,var,np.arange(1920,2040+1,10),
extend='max')
# cmap = cm.cubehelix2_16_r.mpl_colormap
cmap = scm.Batlow_17.mpl_colormap
cs.set_cmap(cmap)
ax1.annotate(r'\textbf{%s}' % (datasetsingleq[i]),xy=(0,0),xytext=(0.865,0.93),
textcoords='axes fraction',color='w',fontsize=19,
rotation=334,ha='center',va='center')
# ax1.annotate(r'\textbf{[%s]}' % letters[i],xy=(0,0),xytext=(0.085,0.93),
# textcoords='axes fraction',color='dimgrey',fontsize=8,
# rotation=0,ha='center',va='center')
cbar_ax = fig.add_axes([0.293,0.145,0.4,0.03])
cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',
extend='max',extendfrac=0.07,drawedges=True)
cbar.set_label(r'\textbf{TIMING OF EMERGENCE [Years]}',fontsize=11,color='w',labelpad=5)
cbar.set_ticks(barlim2)
cbar.set_ticklabels(barlim3)
cbar.ax.tick_params(axis='x', size=.01,labelsize=5,labelcolor='w')
cbar.outline.set_edgecolor('w')
cbar.outline.set_linewidth(1)
cbar.dividers.set_color('w')
cbar.dividers.set_linewidth(1)
plt.tight_layout()
plt.subplots_adjust(bottom=0.17)
plt.savefig(directoryfigure + 'TOEPeriods_T2M_DARK.png',dpi=600) | [
"[email protected]"
]
| |
e001347036d4b1d0a8121284db7bbd21f629efb5 | 12a42054b156383ebbe3ccc5de4150633c66da5d | /problems/expression-add-operators/solution.py | 04767fe89317c99a372a898317a522b11ece6202 | []
| no_license | cfoust/leetcode-problems | 93c33029f74f32c64caf8294292226d199d6e272 | f5ad7866906d0a2cf2250e5972ce910bf35ce526 | refs/heads/master | 2020-03-16T23:05:45.123781 | 2018-05-11T16:41:09 | 2018-05-11T16:41:09 | 133,064,772 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | class Solution:
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
"""
| [
"[email protected]"
]
| |
e2aad4942a35809a1db0477468f1f751c651f9cd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03109/s657549082.py | 5a824c61d03ae11eb19a3ddf4dc00f1fe5100b71 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | a = input()
if a[5:7] in ["01", "02", "03", "04"]:
print("Heisei")
else:
print("TBD") | [
"[email protected]"
]
| |
0fa671abe98808969076ce0fc835334e524494f5 | ed90fcbfd1112545fa742e07131159bb3a68246a | /smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/forwarding_rules/list.py | 420747d1e4cd001700ebe47795d47ba909b0cde5 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | wemanuel/smry | 2588f2a2a7b7639ebb6f60b9dc2833f1b4dee563 | b7f676ab7bd494d71dbb5bda1d6a9094dfaedc0a | refs/heads/master | 2021-01-10T21:56:55.226753 | 2015-08-01T13:37:06 | 2015-08-01T13:37:06 | 40,047,329 | 0 | 1 | Apache-2.0 | 2020-07-24T18:32:40 | 2015-08-01T13:26:17 | Python | UTF-8 | Python | false | false | 662 | py | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for listing forwarding rules."""
from googlecloudsdk.compute.lib import base_classes
class List(base_classes.GlobalRegionalLister):
"""List forwarding rules."""
@property
def global_service(self):
return self.compute.globalForwardingRules
@property
def regional_service(self):
return self.compute.forwardingRules
@property
def resource_type(self):
return 'forwardingRules'
@property
def allowed_filtering_types(self):
return ['globalForwardingRules', 'forwardingRules']
List.detailed_help = (
base_classes.GetGlobalRegionalListerHelp('forwarding rules'))
| [
"[email protected]"
]
| |
37ce072cd49cfa75a020424ba47b3567ee618bf6 | 688c226e30e9d1a9ad7ddaaec75ad456d7b4981b | /other/mokuai/zidingyimodel3.py | bc149f83381545faf60dff8bcc137cc94508e8cc | []
| no_license | imklever/pay | a465e926330f5a804d3ef1deeba4353af00fd212 | 26bc73d40af33d8a47993ff37e0f0daec4d15e38 | refs/heads/master | 2021-10-08T21:36:15.398969 | 2018-12-18T02:41:05 | 2018-12-18T02:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | #coding:utf-8
# Importing modules
# The "from ...... import *" statement
# Effect: imports everything from a module into the current namespace
from sunck import *
# Best not to overuse this
'''
A function defined in this program can override a same-named function from the module:
def sayGood():
print("*********")
'''
sayGood()
sayNice()
print(TT) | [
"[email protected]"
]
| |
9ef7aad2085be69c81499e3e2221ebfb956da801 | 09c87fe780df6d1f9eb33799ed516a0bbd7ab1e3 | /Research/wx doco/somelongthread1_main.py | cbb1357ac048bde246e5fe80e2ae4985131c26f3 | []
| no_license | abulka/pynsource | 8ad412b85dc1acaeb83d7d34af8cc033c6baba91 | 979436525c57fdaeaa832e960985e0406e123587 | refs/heads/master | 2023-04-13T12:58:02.911318 | 2023-04-11T09:56:32 | 2023-04-11T09:56:32 | 32,249,425 | 271 | 46 | null | 2022-10-10T04:36:57 | 2015-03-15T07:21:43 | Python | UTF-8 | Python | false | false | 2,698 | py | # simple front end to long running thread - so can have two frames at once
# which is what is problematic under linux and mac os x
import wx
import wx.lib.ogl as ogl
from somelongthread1 import MainFrame
class MainHost(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title, size=(250, 150))
self.add_ogl_canvas()
self.Centre()
self.Show(True)
wx.FutureCall(500, self.DrawLine)
wx.FutureCall(1000, self.OpenSecondFrameWithThreadInside)
def add_ogl_canvas(self):
# Add OGL
sizer = wx.BoxSizer(wx.VERTICAL)
# put stuff into sizer
self.canvas = canvas = ogl.ShapeCanvas(self)
sizer.Add(canvas, 1, wx.GROW)
canvas.SetBackgroundColour("LIGHT BLUE") #
diagram = ogl.Diagram()
canvas.SetDiagram(diagram)
diagram.SetCanvas(canvas)
def setpos(shape, x, y):
width, height = shape.GetBoundingBoxMax()
shape.SetX(x + width / 2)
shape.SetY(y + height / 2)
def getpos(shape):
width, height = shape.GetBoundingBoxMax()
x = shape.GetX()
y = shape.GetY()
return (x - width / 2, y - height / 2)
shape = ogl.RectangleShape(60, 60)
setpos(shape, 0, 0)
canvas.AddShape(shape)
shape = ogl.RectangleShape(60, 60)
setpos(shape, 60, 0)
canvas.AddShape(shape)
shape = ogl.RectangleShape(60, 60)
setpos(shape, 120, 0)
canvas.AddShape(shape)
# Next row
shape = ogl.RectangleShape(100, 100)
setpos(shape, 0, 60)
canvas.AddShape(shape)
shape = ogl.RectangleShape(100, 100)
setpos(shape, 100, 60)
canvas.AddShape(shape)
print([getpos(shape) for shape in canvas.GetDiagram().GetShapeList()])
diagram.ShowAll(1)
# apply sizer
self.SetSizer(sizer)
self.SetAutoLayout(1)
# self.Show(1)
def DrawLine(self):
dc = wx.ClientDC(self.canvas)
dc.DrawLine(50, 60, 190, 60)
dc.DrawArc(50, 50, 50, 10, 20, 20)
dc.DrawEllipticArc(10, 10, 50, 20, 0, 180)
points = [wx.Point(10, 10), wx.Point(15, 55), wx.Point(40, 30)]
dc.DrawSpline(points)
def OpenSecondFrameWithThreadInside(self):
# Add opening of second frame here.
#
f = MainFrame(self, -1)
f.Show(True)
# b = LayoutBlackboard(graph=self.context.model.graph, umlwin=self.context.umlwin)
# f.SetBlackboardObject(b)
f.OnStart(None)
app = wx.App()
ogl.OGLInitialize()
MainHost(None, -1, "Line")
app.MainLoop()
| [
"[email protected]"
]
| |
8f5ab9c2968ebebea5d7bee44efc0cd8b2654b26 | 41a20700b5bb351d20562ac23ec4db06bc96f0d7 | /src/plum/types/property/submodule.py | c7fde187b7be2313d64acbade9f0a3bc5e711ba6 | []
| no_license | kedz/noiseylg | ee0c54634767e8d3789b4ffb93727988c29c6979 | 17266e1a41e33aecb95dc1c3aca68f6bccee86d5 | refs/heads/master | 2020-07-30T11:22:08.351759 | 2019-10-30T21:33:11 | 2019-10-30T21:33:11 | 210,212,253 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,702 | py | from .plum_property import PlumProperty
import torch.nn as nn
class Submodule(PlumProperty):
def __init__(self, default=None, required=True, type=None, tags=None):
self._default = default
self._required = required
if type is None:
def any_type(x):
return True
self._type = any_type
elif hasattr(type, "__call__"):
self._type = type
else:
raise ValueError("type must be None or implement __call__")
if isinstance(tags, str):
self._tags = tuple([tags])
elif isinstance(tags, (list, tuple)):
self._tags = tuple(tags)
elif tags is None:
self._tags = tuple()
else:
raise ValueError(
"tags must be None, a str, or a list/tuple of str")
@property
def default(self):
return self._default
@property
def required(self):
return self._required
@property
def type(self):
return self._type
@property
def tags(self):
return self._tags
def new(self, owner_module, submodule):
if submodule is None:
submodule = self.default
if submodule is None:
if not self.required:
return None
else:
raise Exception("Missing submodule for {}".format(
str(owner_module.__class__)))
elif isinstance(submodule, (list, tuple)):
for subsubmod in submodule:
if not self.type(subsubmod) or \
not issubclass(subsubmod.__class__, nn.Module):
raise ValueError("Bad type: {}".format(type(subsubmod)))
return nn.ModuleList(submodule)
elif isinstance(submodule, dict):
for subsubmod in submodule.values():
if not self.type(subsubmod) or \
not issubclass(subsubmod.__class__, nn.Module):
raise ValueError("Bad type: {}".format(type(subsubmod)))
return nn.ModuleDict(submodule)
else:
if not issubclass(submodule.__class__, nn.Module):
raise ValueError("Bad type: {}".format(type(submodule)))
return submodule
def __get__(self, owner_module, owner_type=None):
return owner_module._modules[owner_module._submodule_names[self]]
@classmethod
def iter_named_submodules(cls, plum_module):
return cls.iter_named_plum_property(plum_module, prop_type=cls)
@classmethod
def iter_submodules(cls, plum_module):
return cls.iter_plum_property(plum_module, prop_type=cls)
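# Minimal illustration of Submodule.new's container coercion (hypothetical
# owner module; plum's registration machinery is not exercised here): lists
# of nn.Modules are wrapped as nn.ModuleList, dicts as nn.ModuleDict.
if __name__ == "__main__":
    _slot = Submodule()
    assert isinstance(_slot.new(None, [nn.Linear(2, 2), nn.ReLU()]), nn.ModuleList)
    assert isinstance(_slot.new(None, {"proj": nn.Linear(2, 2)}), nn.ModuleDict)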
| [
"[email protected]"
]
| |
9197ed24cac4246e593e240d233fa8bef16cedc0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_fatherlands.py | 8ba1a35575d89333db2a00481b5443ff629ef5a3 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
#calss header
class _FATHERLANDS():
def __init__(self,):
self.name = "FATHERLANDS"
		self.definitions = ['fatherland']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['fatherland']
| [
"[email protected]"
]
| |
84303490be2e5499c283ee2bf84c5bd033563ef2 | 2573dd49b18f8c5fe7e600268019a158a46bbc2a | /SIMS/SOMD/absolute/2-cyclopentanylindole/free/discharge/FUNC.py | fae3db2d8d06bb5c0fc099872bc31821f41827df | []
| no_license | xuebai1990/relative-solvation-inputs | 70454e218855c14ac89424cc50aa89ac605f38dc | ec698ac2a80b79448f2b0f1d0c9aa811c6f1fe78 | refs/heads/master | 2021-09-10T13:25:50.286441 | 2018-03-26T22:04:14 | 2018-03-26T22:04:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,585 | py | #
# Evaluates electrostatics corrections to free energy changes
#
import os,sys, random
import math
import mdtraj
from Sire.Tools.OpenMMMD import *
from Sire.Tools import Parameter, resolveParameters
# from Sire.Tools.LJcutoff import getFreeEnergy, resample
solvent_residues = ["WAT","ZBK","ZBT","CYC"]
ion_residues = ["Cl-","Na+"]
DIME = 97
model_eps = Parameter("model_eps", 78.4,
"""The dielectric constant of the modelled solvent.""")
trajfile = Parameter("trajfile", "traj000000001.dcd",
"""File name of the trajectory to process.""")
stepframe = Parameter("step_frame",1,"""The number of frames to step to between two succcessive evaluations.""")
simfile = Parameter("simfile", "sim.cfg", """ Configuration file with distance restraints dictionary""")
topfile = Parameter("topfile", "SYSTEM.top",
"""File name of the topology file containing the system to be simulated.""")
crdfile = Parameter("crdfile", "SYSTEM.crd",
"""File name of the coordiante file containing the system to be simulated.""")
morphfile = Parameter("morphfile", "MORPH.pert",
"""MORPH file.""")
verbose = Parameter("verbose", False, """Print debug output""")
def createSystemFreeEnergy(molecules):
r"""creates the system for free energy calculation
Parameters
----------
molecules : Sire.molecules
Sire object that contains a lot of information about molecules
Returns
-------
system : Sire.system
"""
print ("Create the System...")
moleculeNumbers = molecules.molNums()
moleculeList = []
for moleculeNumber in moleculeNumbers:
molecule = molecules.molecule(moleculeNumber).molecule()
moleculeList.append(molecule)
#
# The code below assumes that the solute to be perturbed is
# the first molecule in the top file.
# The residue name of the first residue in this molecule is
# used to name the solute. This is used later to match
# templates in the flex/pert files.
solute = moleculeList[0]
lig_name = solute.residue(ResIdx(0)).name().value()
solute = solute.edit().rename(lig_name).commit()
perturbations_lib = PerturbationsLibrary(morphfile.val)
solute = perturbations_lib.applyTemplate(solute)
perturbations = solute.property("perturbations")
lam = Symbol("lambda")
initial = Perturbation.symbols().initial()
final = Perturbation.symbols().final()
solute = solute.edit().setProperty("perturbations",
perturbations.recreate((1 - lam) * initial + lam * final)).commit()
# We put atoms in three groups depending on what happens in the perturbation
# non dummy to non dummy --> the hard group, uses a normal intermolecular FF
# non dummy to dummy --> the todummy group, uses SoftFF with alpha = Lambda
# dummy to non dummy --> the fromdummy group, uses SoftFF with alpha = 1 - Lambda
# We start assuming all atoms are hard atoms. Then we call getDummies to find which atoms
# start/end as dummies and update the hard, todummy and fromdummy groups accordingly
solute_grp_ref = MoleculeGroup("solute_ref", solute)
solute_grp_ref_hard = MoleculeGroup("solute_ref_hard")
solute_grp_ref_todummy = MoleculeGroup("solute_ref_todummy")
solute_grp_ref_fromdummy = MoleculeGroup("solute_ref_fromdummy")
solute_ref_hard = solute.selectAllAtoms()
solute_ref_todummy = solute_ref_hard.invert()
solute_ref_fromdummy = solute_ref_hard.invert()
to_dummies, from_dummies = getDummies(solute)
if to_dummies is not None:
ndummies = to_dummies.count()
dummies = to_dummies.atoms()
for x in range(0, ndummies):
dummy_index = dummies[x].index()
solute_ref_hard = solute_ref_hard.subtract(solute.select(dummy_index))
solute_ref_todummy = solute_ref_todummy.add(solute.select(dummy_index))
if from_dummies is not None:
ndummies = from_dummies.count()
dummies = from_dummies.atoms()
for x in range(0, ndummies):
dummy_index = dummies[x].index()
solute_ref_hard = solute_ref_hard.subtract(solute.select(dummy_index))
solute_ref_fromdummy = solute_ref_fromdummy.add(solute.select(dummy_index))
solute_grp_ref_hard.add(solute_ref_hard)
solute_grp_ref_todummy.add(solute_ref_todummy)
solute_grp_ref_fromdummy.add(solute_ref_fromdummy)
solutes = MoleculeGroup("solutes")
solutes.add(solute)
molecules = MoleculeGroup("molecules")
molecules.add(solute)
solvent = MoleculeGroup("solvent")
for molecule in moleculeList[1:]:
molecules.add(molecule)
solvent.add(molecule)
all = MoleculeGroup("all")
all.add(molecules)
all.add(solvent)
all.add(solutes)
all.add(solute_grp_ref)
all.add(solute_grp_ref_hard)
all.add(solute_grp_ref_todummy)
all.add(solute_grp_ref_fromdummy)
# Add these groups to the System
system = System()
system.add(solutes)
system.add(solute_grp_ref)
system.add(solute_grp_ref_hard)
system.add(solute_grp_ref_todummy)
system.add(solute_grp_ref_fromdummy)
system.add(molecules)
system.add(solvent)
system.add(all)
return system
def setupIntraCoulFF(system, space, cut_type="nocutoff", cutoff=999 * angstrom, dielectric=1.0):
print ("Creating force fields... ")
solutes = system[MGName("solutes")]
solute = system[MGName("solute_ref")]
solute_hard = system[MGName("solute_ref_hard")]
solute_todummy = system[MGName("solute_ref_todummy")]
solute_fromdummy = system[MGName("solute_ref_fromdummy")]
# Solute intramolecular LJ energy
solute_hard_intracoul = IntraCLJFF("solute_hard_intracoul")
solute_hard_intracoul.add(solute_hard)
if (cut_type != "nocutoff"):
solute_hard_intracoul.setUseReactionField(True)
solute_hard_intracoul.setReactionFieldDielectric(dielectric)
solute_todummy_intracoul = IntraSoftCLJFF("solute_todummy_intracoul")
solute_todummy_intracoul.setShiftDelta(shift_delta.val)
solute_todummy_intracoul.setCoulombPower(coulomb_power.val)
solute_todummy_intracoul.add(solute_todummy)
if (cut_type != "nocutoff"):
solute_todummy_intracoul.setUseReactionField(True)
solute_todummy_intracoul.setReactionFieldDielectric(dielectric)
solute_fromdummy_intracoul = IntraSoftCLJFF("solute_fromdummy_intracoul")
solute_fromdummy_intracoul.setShiftDelta(shift_delta.val)
solute_fromdummy_intracoul.setCoulombPower(coulomb_power.val)
solute_fromdummy_intracoul.add(solute_fromdummy)
if (cut_type != "nocutoff"):
solute_fromdummy_intracoul.setUseReactionField(True)
solute_fromdummy_intracoul.setReactionFieldDielectric(dielectric)
solute_hard_todummy_intracoul = IntraGroupSoftCLJFF("solute_hard:todummy_intracoul")
solute_hard_todummy_intracoul.setShiftDelta(shift_delta.val)
solute_hard_todummy_intracoul.setCoulombPower(coulomb_power.val)
solute_hard_todummy_intracoul.add(solute_hard, MGIdx(0))
solute_hard_todummy_intracoul.add(solute_todummy, MGIdx(1))
if (cut_type != "nocutoff"):
solute_hard_todummy_intracoul.setUseReactionField(True)
solute_hard_todummy_intracoul.setReactionFieldDielectric(dielectric)
solute_hard_fromdummy_intracoul = IntraGroupSoftCLJFF("solute_hard:fromdummy_intracoul")
solute_hard_fromdummy_intracoul.setShiftDelta(shift_delta.val)
solute_hard_fromdummy_intracoul.setCoulombPower(coulomb_power.val)
solute_hard_fromdummy_intracoul.add(solute_hard, MGIdx(0))
solute_hard_fromdummy_intracoul.add(solute_fromdummy, MGIdx(1))
if (cut_type != "nocutoff"):
solute_hard_fromdummy_intracoul.setUseReactionField(True)
solute_hard_fromdummy_intracoul.setReactionFieldDielectric(dielectric)
solute_todummy_fromdummy_intracoul = IntraGroupSoftCLJFF("solute_todummy:fromdummy_intracoul")
solute_todummy_fromdummy_intracoul.setShiftDelta(shift_delta.val)
solute_todummy_fromdummy_intracoul.setCoulombPower(coulomb_power.val)
solute_todummy_fromdummy_intracoul.add(solute_todummy, MGIdx(0))
solute_todummy_fromdummy_intracoul.add(solute_fromdummy, MGIdx(1))
if (cut_type != "nocutoff"):
solute_todummy_fromdummy_intracoul.setUseReactionField(True)
solute_todummy_fromdummy_intracoul.setReactionFieldDielectric(dielectric)
# TOTAL
forcefields = [solute_hard_intracoul, solute_todummy_intracoul, solute_fromdummy_intracoul,
solute_hard_todummy_intracoul, solute_hard_fromdummy_intracoul,
solute_todummy_fromdummy_intracoul]
for forcefield in forcefields:
system.add(forcefield)
system.setProperty("space", space)
system.setProperty("switchingFunction", CHARMMSwitchingFunction(cutoff))
system.setProperty("combiningRules", VariantProperty(combining_rules.val))
system.setProperty("coulombPower", VariantProperty(coulomb_power.val))
system.setProperty("shiftDelta", VariantProperty(shift_delta.val))
total_nrg = solute_hard_intracoul.components().coulomb() + \
solute_todummy_intracoul.components().coulomb(0) + solute_fromdummy_intracoul.components().coulomb(0) + \
solute_hard_todummy_intracoul.components().coulomb(0) + solute_hard_fromdummy_intracoul.components().coulomb(0) + \
solute_todummy_fromdummy_intracoul.components().coulomb(0)
e_total = system.totalComponent()
lam = Symbol("lambda")
system.setComponent(e_total, total_nrg)
system.setConstant(lam, 0.0)
system.add(PerturbationConstraint(solutes))
# NON BONDED Alpha constraints for the soft force fields
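    # Schedule inferred from the constraints below: the soft-core alpha of the
    # disappearing (todummy) terms tracks lambda, the appearing (fromdummy) terms
    # track 1 - lambda, and the todummy:fromdummy cross term takes
    # Max(lambda, 1 - lambda) so it remains softened along the whole path.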
system.add(PropertyConstraint("alpha0", FFName("solute_todummy_intracoul"), lam))
system.add(PropertyConstraint("alpha0", FFName("solute_fromdummy_intracoul"), 1 - lam))
system.add(PropertyConstraint("alpha0", FFName("solute_hard:todummy_intracoul"), lam))
system.add(PropertyConstraint("alpha0", FFName("solute_hard:fromdummy_intracoul"), 1 - lam))
system.add(PropertyConstraint("alpha0", FFName("solute_todummy:fromdummy_intracoul"), Max(lam, 1 - lam)))
system.setComponent(lam, lambda_val.val)
return system
def updateSystemfromTraj(system, frame_xyz, cell_lengths, cell_angles):
traj_coordinates = frame_xyz[0]
traj_box_x = cell_lengths[0][0].tolist()
traj_box_y = cell_lengths[0][1].tolist()
traj_box_z = cell_lengths[0][2].tolist()
traj_natoms = len(traj_coordinates)
    # Sire does not support non-rectangular boxes, so cell_angles is ignored and
    # an orthorhombic (rectangular) cell is assumed
newmols_coords = {}
traj_index = 0
mol_index = 0
molnums = system.molNums()
molnums.sort()
for molnum in molnums:
mol = system.molecule(molnum).molecule()
molatoms = mol.atoms()
molnatoms = mol.nAtoms()
# Create an empty coord group using molecule so we get the correct layout
newmol_coords = AtomCoords( mol.property("coordinates") )
for x in range(0,molnatoms):
tmparray = traj_coordinates[traj_index]
atom_coord = Vector( tmparray[0].tolist() , tmparray[1].tolist() , tmparray[2].tolist() )
atom = molatoms[x]
cgatomidx = atom.cgAtomIdx()
newmol_coords.set( cgatomidx, atom_coord)
traj_index += 1
newmols_coords[molnum] = newmol_coords
mol_index += 1
if traj_natoms != traj_index:
print ("The number of atoms in the system is not equal to the number of atoms in the trajectory file ! Aborting.")
sys.exit(-1)
changedmols = MoleculeGroup("changedmols")
mol_index = 0
for molnum in molnums:
mol = system.molecule(molnum).molecule()
newmol_coords = newmols_coords[molnum]
mol = mol.edit().setProperty("coordinates", newmol_coords).commit()
changedmols.add(mol)
system.update(changedmols)
space = PeriodicBox(Vector( traj_box_x, traj_box_y, traj_box_z ) )
system.setProperty("space",space)
return system
def SplitSoluteSolvent(system):
molecules = system.molecules()
mol_numbers = molecules.molNums()
solutes = MoleculeGroup("solutes")
solvent = MoleculeGroup("solvent")
ions = MoleculeGroup("ions")
for molnum in mol_numbers:
mol = molecules.molecule(molnum).molecule()
res0 = mol.residues()[0]
if res0.name().value() in solvent_residues:
solvent.add(mol)
elif res0.name().value() in ion_residues:
ions.add(mol)
else:
solutes.add(mol)
return solutes, solvent, ions
def centerAll(solutes, solvent, space):
if space.isPeriodic():
box_center = space.dimensions()/2
else:
box_center = Vector(0.0, 0.0, 0.0)
solutes_mols = solutes.molecules()
solutes_cog = CenterOfGeometry(solutes_mols).point()
delta = box_center - solutes_cog
molNums = solutes_mols.molNums()
for molnum in molNums:
mol = solutes.molecule(molnum).molecule()
molcoords = mol.property("coordinates")
molcoords.translate(delta)
mol = mol.edit().setProperty("coordinates", molcoords).commit()
solutes.update(mol)
solvent_mols = solvent.molecules()
solvmolNums = solvent_mols.molNums()
for molnum in solvmolNums:
mol = solvent.molecule(molnum).molecule()
molcoords = mol.property("coordinates")
molcoords.translate(delta)
mol = mol.edit().setProperty("coordinates",molcoords).commit()
solvent.update(mol)
return solutes, solvent
def getFreeEnergy(delta_nrgs):
free_nrg = FreeEnergyAverage(temperature.val)
for nrg in delta_nrgs:
free_nrg.accumulate(nrg.value())
deltaG = free_nrg.average() * kcal_per_mol
return deltaG
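# Hedged usage sketch for getFreeEnergy: delta_nrgs is assumed to be a list of
# Sire energy objects exposing .value() in kcal/mol, collected frame by frame as
# in the main loop below.
#
#   delta_func_nrgs = []            # filled with per-frame energy differences
#   ...
#   DG = getFreeEnergy(delta_func_nrgs)
#   print("DG = %8.5f kcal/mol" % DG.value())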
if __name__ == "__main__":
try:
host = os.environ['HOSTNAME']
except KeyError:
host = "unknown"
print("### Running electrostatics correction calculation on %s ###" % host)
    if True: # verbose.val is bypassed for now; restore the gate to honour the parameter
print("###================= Simulation Parameters=====================###")
Parameter.printAll()
print ("###===========================================================###\n")
print("lambda is %s" % lambda_val.val)
if os.path.exists(s3file.val):
(molecules, space) = Sire.Stream.load(s3file.val)
else:
amber = Amber()
(molecules, space) = amber.readCrdTop(crdfile.val, topfile.val)
Sire.Stream.save((molecules, space), s3file.val)
# What to do with this...
system = createSystemFreeEnergy(molecules)
lam = Symbol("lambda")
solutes = system[MGName("solutes")]
solute_ref = system[MGName("solute_ref")]
system.setConstant(lam, lambda_val.val)
system.add(PerturbationConstraint(solutes))
system.setComponent(lam, lambda_val.val)
# Now loop over snapshots in dcd and accumulate energies
start_frame = 1
end_frame = 1000000000
step_frame = stepframe.val
mdtraj_trajfile = mdtraj.open(trajfile.val,'r')
nframes = len(mdtraj_trajfile)
if end_frame > (nframes - 1):
end_frame = nframes - 1
mdtraj_trajfile.seek(start_frame)
current_frame = start_frame
#system = createSystemFreeEnergy(molecules)
system_solute_rf = System()
system_solute_rf.add(solutes)
system_solute_rf.add(system[MGName("solute_ref")])
system_solute_rf.add(system[MGName("solute_ref_hard")])
system_solute_rf.add(system[MGName("solute_ref_todummy")])
system_solute_rf.add(system[MGName("solute_ref_fromdummy")])
system_solute_rf = setupIntraCoulFF(system_solute_rf, space, \
cut_type=cutoff_type.val,
cutoff=cutoff_dist.val,
dielectric=model_eps.val)
#import pdb; pdb.set_trace()
system_solute_cb = System()
system_solute_cb.add(solutes)
system_solute_cb.add(system[MGName("solute_ref")])
system_solute_cb.add(system[MGName("solute_ref_hard")])
system_solute_cb.add(system[MGName("solute_ref_todummy")])
system_solute_cb.add(system[MGName("solute_ref_fromdummy")])
system_solute_cb = setupIntraCoulFF(system_solute_cb, Cartesian(), cut_type="nocutoff")
delta_func_nrgs = []
while (current_frame <= end_frame):
print ("Processing frame %s " % current_frame)
print ("CURRENT POSITION %s " % mdtraj_trajfile.tell() )
frames_xyz, cell_lengths, cell_angles = mdtraj_trajfile.read(n_frames=1)
system = updateSystemfromTraj(system, frames_xyz, cell_lengths, cell_angles)
#import pdb; pdb.set_trace()
# Now filter out solvent molecules
solutes, solvent, ions = SplitSoluteSolvent(system)
solutes, solvent = centerAll(solutes, solvent, system.property("space"))
# Compute DG_func
# Free energy change for changing from a reaction field cutoff to coulombic nocutoff
# Update system_solute_rf
system_solute_rf.update(solutes)
system_solute_cb.update(solutes)
delta_func_nrg = (system_solute_cb.energy() - system_solute_rf.energy())
delta_func_nrgs.append(delta_func_nrg)
#import pdb; pdb.set_trace()
current_frame += step_frame
        mdtraj_trajfile.seek(current_frame)  # absolute seek to the next frame to be processed
DG_FUNC = getFreeEnergy(delta_func_nrgs)
print ("DG_FUNC = %8.5f kcal/mol (1 sigma) " % (DG_FUNC.value()))
| [
"[email protected]"
]
| |
db9fe5e9cb4bda725fe01b0c4fe09c6fa1902e6a | 1e2b69476b2b174ac210ba525b197c621280a390 | /Configuration/Geometry/python/GeometryExtended2017Plan1_cff.py | 6ed28868e6981ebbfff879df8fd786e61a845e77 | [
"Apache-2.0"
]
| permissive | skinnari/cmssw | 640e5fe2f23a423ccb7afe82d43ea1b80a2603f0 | 62b49319e475fbcf14484d77814d47a552c61f63 | refs/heads/L1TK_CMSSW_11-1-0-pre4 | 2022-10-27T03:55:33.402157 | 2020-03-24T14:18:04 | 2020-03-24T14:18:04 | 11,660,178 | 2 | 3 | Apache-2.0 | 2020-03-24T14:16:54 | 2013-07-25T12:44:13 | C++ | UTF-8 | Python | false | false | 397 | py | import FWCore.ParameterSet.Config as cms
#
# Geometry master configuration
#
# Ideal geometry, needed for simulation
from Geometry.CMSCommonData.cmsExtendedGeometry2017Plan1XML_cfi import *
from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *
from Geometry.EcalCommonData.ecalSimulationParameters_cff import *
from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *
| [
"[email protected]"
]
| |
392a88d06ae3d8ebc39cc6872208fe79a63c7d0d | 0ed9a8eef1d12587d596ec53842540063b58a7ec | /cloudrail/knowledge/context/aws/resources_builders/scanner/resources_tagging_list_builder.py | 5cb9815c2ef85ff46f3f0573716faf4abef4a865 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | cbc506/cloudrail-knowledge | 8611faa10a3bf195f277b81622e2590dbcc60da4 | 7b5c9030575f512b9c230eed1a93f568d8663708 | refs/heads/main | 2023-08-02T08:36:22.051695 | 2021-09-13T15:23:33 | 2021-09-13T15:24:26 | 390,127,361 | 0 | 0 | MIT | 2021-07-27T21:08:06 | 2021-07-27T21:08:06 | null | UTF-8 | Python | false | false | 588 | py | from cloudrail.knowledge.context.aws.resources_builders.scanner.base_aws_scanner_builder import BaseAwsScannerBuilder
from cloudrail.knowledge.context.aws.resources_builders.scanner.cloud_mapper_component_builder import build_resources_tagging_list
class ResourceTagMappingListBuilder(BaseAwsScannerBuilder):
def get_file_name(self) -> str:
return 'resourcegroupstaggingapi-get-resources.json'
def get_section_name(self) -> str:
return 'ResourceTagMappingList'
def do_build(self, attributes: dict):
return build_resources_tagging_list(attributes)
| [
"[email protected]"
]
| |
9695f7ae9065259aa8878bac3d67707a89736479 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-enterpriseknowledgegraph/google/cloud/enterpriseknowledgegraph_v1/types/job_state.py | 9980364dd9433a2eb7720c4ca26313554eb86dfa | [
"Apache-2.0"
]
| permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 2,203 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import MutableMapping, MutableSequence
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.enterpriseknowledgegraph.v1",
manifest={
"JobState",
},
)
class JobState(proto.Enum):
r"""Describes the state of a job.
Values:
JOB_STATE_UNSPECIFIED (0):
The job state is unspecified.
JOB_STATE_PENDING (9):
The service is preparing to run the job.
JOB_STATE_RUNNING (1):
The job is in progress.
JOB_STATE_SUCCEEDED (2):
The job completed successfully.
JOB_STATE_FAILED (3):
The job failed.
JOB_STATE_CANCELLED (4):
The job has been cancelled.
JOB_STATE_KNOWLEDGE_EXTRACTION (5):
Entity Recon API: The knowledge extraction
job is running.
JOB_STATE_RECON_PREPROCESSING (6):
Entity Recon API: The preprocessing job is
running.
JOB_STATE_CLUSTERING (7):
Entity Recon API: The clustering job is
running.
JOB_STATE_EXPORTING_CLUSTERS (8):
Entity Recon API: The exporting clusters job
is running.
"""
JOB_STATE_UNSPECIFIED = 0
JOB_STATE_PENDING = 9
JOB_STATE_RUNNING = 1
JOB_STATE_SUCCEEDED = 2
JOB_STATE_FAILED = 3
JOB_STATE_CANCELLED = 4
JOB_STATE_KNOWLEDGE_EXTRACTION = 5
JOB_STATE_RECON_PREPROCESSING = 6
JOB_STATE_CLUSTERING = 7
JOB_STATE_EXPORTING_CLUSTERS = 8
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
]
| |
c5e2136987da5a4552d8fc9f3ed4d5086a5278fe | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/6/ol7.py | 555d67a46d6546a905a270c29c30faca7fafa482 | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'oL7':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
9ec0e0a5d45e3039214a48188142d86b1534571a | f2658c4bd7f833ace25ac2b63e88317b05f4602d | /2017 July/2017-July-11/st_rdf_test/model/WaysNavigableLinkTimezone.py | ec567c431d1b3d80de464f2214bce132f5730431 | []
| no_license | xiaochao00/telanav_diary | e4c34ac0a14b65e4930e32012cc2202ff4ed91e2 | 3c583695e2880322483f526c98217c04286af9b2 | refs/heads/master | 2022-01-06T19:42:55.504845 | 2019-05-17T03:11:46 | 2019-05-17T03:11:46 | 108,958,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,753 | py | #-------------------------------------------------------------------------------
# Name: WaysNavigableLinkTimezone model
# Purpose: this model is used to map the rdf_nav_link, rdf_link and rdf_access
# columns: [ ]
#
# Author: rex
#
# Created: 2016-01-29
# Copyright: (c) rex 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
from record import Record
from constants import *
import os
import sys
import datetime
import json
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..")
GLOBAL_KEY_PREFIX = "ways_navlink_"
CSV_SEP = '`'
LF = '\n'
#(key, category, function)
STATISTIC_KEYS = (
("timezone:left",False,"timezone_left"),
("timezone:right", False, "timezone_right")
)
class WaysNavigableLinkTimezone(Record):
def __init__(self, region):
Record.__init__(self)
self.dump_file = os.path.join(ROOT_DIR, "temporary", self.__class__.__name__)
self.stat = {}
self.region = region
def dump2file(self):
cmd = "SELECT \
rnl.link_id, \
rl.left_admin_place_id, \
rl.right_admin_place_id \
FROM \
public.rdf_nav_link as rnl left join public.rdf_link as rl on rnl.link_id=rl.link_id \
WHERE rnl.iso_country_code in (%s)"%(REGION_COUNTRY_CODES(self.region, GLOBAL_KEY_PREFIX))
print cmd
self.cursor.copy_expert("COPY (%s) TO STDOUT DELIMITER '`'"%(cmd),open(self.dump_file,"w"))
def get_statistic(self):
try:
self.dump2file()
self.__build_admins()
except:
print "Some table or schema don't exist! Please check the upper sql"
print "Unexpected error:[ %s.py->%s] %s"%(self.__class__.__name__, 'get_statistic', str(sys.exc_info()))
return {}
processcount = 0
with open(self.dump_file, "r",1024*1024*1024) as csv_f:
for line in csv_f:
line = line.rstrip()
line_p = line.split(CSV_SEP)
if len(line_p) < 1:
continue
self.__statistic(line_p)
processcount += 1
if processcount%5000 == 0:
print "\rProcess index [ "+str(processcount)+" ]",
print "\rProcess index [ "+str(processcount)+" ]",
# write to file
with open(os.path.join(ROOT_DIR, "output", "stat", self.__class__.__name__), 'w') as stf:
stf.write(json.dumps(self.stat))
return self.stat
def __build_admins(self):
processcount = 0
admins = {}
with open(self.__dump_adminplaceid(), "r",1024*1024*1024) as csv_f:
for line in csv_f:
line = line.rstrip()
line_p = line.split(CSV_SEP)
if len(line_p) < 1:
continue
if line_p[0] in admins:
continue
admins[line_p[0]] = line_p[1:]
processcount += 1
if processcount%5000 == 0:
print "\rProcess index [ "+str(processcount)+" ]",
print "\rProcess index [ "+str(processcount)+" ]",
print "build admin time zone hierarchy"
for api in admins:
#check order8
admins[api][1] = (admins.get(api)[1] in admins and '\N' != admins.get(admins.get(api)[1])[0]) and 1 or 0
#check order2
admins[api][2] = (admins.get(api)[2] in admins and '\N' != admins.get(admins.get(api)[2])[0]) and 1 or 0
#check order1
admins[api][3] = (admins.get(api)[3] in admins and '\N' != admins.get(admins.get(api)[3])[0]) and 1 or 0
#check country
admins[api][4] = (admins.get(api)[4] in admins and '\N' != admins.get(admins.get(api)[4])[0]) and 1 or 0
self.admins = admins
def __dump_adminplaceid(self):
cmd = "SELECT \
rap.admin_place_id, \
rap.time_zone, \
rah.order8_id, \
rah.order2_id, \
rah.order1_id, \
rah.country_id \
FROM \
public.rdf_admin_place AS rap, public.rdf_admin_hierarchy AS rah \
WHERE rap.admin_place_id=rah.admin_place_id and rah.iso_country_code IN (%s)"%(REGION_COUNTRY_CODES(self.region, GLOBAL_KEY_PREFIX))
print cmd
f = "%s_admins"%(self.dump_file)
self.cursor.copy_expert("COPY (%s) TO STDOUT DELIMITER '`'"%(cmd),open(f,"w"))
return f
def __statistic(self,line):
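        # Dynamic dispatch via name mangling: for an entry like
        # ("timezone:left", False, "timezone_left") the getattr below resolves
        # self.__get_timezone_left, whose mangled attribute name is
        # '_WaysNavigableLinkTimezone__get_timezone_left'.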
for keys in STATISTIC_KEYS:
try:
getattr(self,'_%s__get_%s'%(self.__class__.__name__,keys[2]))(keys,line)
except:
print "The statistic [ %s ] didn't exist"%(keys[2])
print "Unexpected error:[ %s.py->%s] %s"%(self.__class__.__name__, '__statistic', str(sys.exc_info()))
def __count(self,key):
if self.stat.has_key(key):
self.stat[key] += 1
else:
self.stat[key] = 1
# all statistic method
def __get_timezone_left(self,keys,line):
if '\N' != line[1] and reduce(lambda px,py:px+py,self.admins.get(line[1])[1:]) > 0:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_timezone_right(self,keys,line):
if '\N' != line[2] and reduce(lambda px,py:px+py,self.admins.get(line[2])[1:]) > 0:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
if __name__ == "__main__":
# use to test this model
bg = datetime.datetime.now()
navlink_stat = WaysNavigableLinkTimezone('na').get_statistic()
keys = navlink_stat.keys()
print "==>"
print "{%s}"%(",".join(map(lambda px: "\"%s\":%s"%(px,navlink_stat[px]) ,keys)))
print "<=="
ed = datetime.datetime.now()
print "Cost time:"+str(ed - bg)
| [
"[email protected]"
]
| |
1876baded258b0f238e61b62902741d2f5805194 | 8b59108f621e94935b3b72aae3c441e10cb64a1c | /toggle_lods.py | a076d80ac06be061fdc6e2b1799d90097d95d484 | []
| no_license | CyberSys/CE_Python | 97a373b1fe2d214ae854d454dc5e7d79bc150d8e | 721ac005e215f1225fb3c99491b55dc48b19ab30 | refs/heads/master | 2022-01-13T08:04:08.558594 | 2019-07-22T17:05:46 | 2019-07-22T17:05:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | cvar = general.get_cvar('e_DebugDraw')
# Toggle LOD debug drawing: e_DebugDraw 22 shows LOD information, 0 turns it off.
# get_cvar may return the value as a string, so both representations are checked
# before flipping the state.
if cvar in (22, '22'):
    general.set_cvar('e_DebugDraw', 0)
else:
    general.set_cvar('e_DebugDraw', 22)
| [
"[email protected]"
]
| |
93f1260fdf0cb8d633f4967c78c99ea23c315318 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /good_point_and_great_person/place.py | b3c68823b8fc17e94d73df2acd0901fd60e6be4e | []
| no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py |
#! /usr/bin/env python
def person(str_arg):
get_great_case(str_arg)
print('young_hand_or_first_life')
def get_great_case(str_arg):
print(str_arg)
if __name__ == '__main__':
person('good_week')
| [
"[email protected]"
]
| |
587cd786cb7e5627440b322af69c2291af3c666d | eb54a89743222bc7d72cf7530e745b8986cad441 | /leetcode/canFormArray.py | 39cc9feeb2c341db226cbd358f4dd9e799cad443 | []
| no_license | ugaemi/algorithm | db341809e2497b36e82fc09939ae8e3f1ca7d880 | e4f57f01d21f822eb8da5ba5bfc04c29b9ddce78 | refs/heads/master | 2023-01-10T05:16:55.167675 | 2023-01-04T13:12:20 | 2023-01-04T13:12:20 | 200,671,606 | 3 | 10 | null | 2022-02-27T12:52:31 | 2019-08-05T14:28:56 | Python | UTF-8 | Python | false | false | 954 | py | import unittest
class Solution:
def canFormArray(self, arr, pieces):
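        # Relies on the problem's constraints (LeetCode 1640): the integers in
        # arr and in the flattened pieces are distinct, so it suffices to check
        # that every multi-element piece occurs contiguously in arr and that
        # every single-element piece occurs at all.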
for piece in pieces:
if len(piece) > 1:
result = any(piece == arr[i:i+len(piece)] for i in range(len(arr) - 1))
if not result:
return result
else:
if piece[0] not in arr:
return False
return True
class Test(unittest.TestCase):
def test_canFormArray(self):
solution = Solution()
self.assertEqual(solution.canFormArray([85], [[85]]), True)
self.assertEqual(solution.canFormArray([15, 88], [[88], [15]]), True)
self.assertEqual(solution.canFormArray([49, 18, 16], [[16, 18, 49]]), False)
self.assertEqual(solution.canFormArray([91, 4, 64, 78], [[78], [4, 64], [91]]), True)
self.assertEqual(solution.canFormArray([1, 3, 5, 7], [[2, 4, 6, 8]]), False)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
bb9ebaaa51633a24e64e44e7dfb51ff0f21d2fdc | 5f29a9f8b218f7b02a76af02b49f8cf5aaa8ec97 | /ecommerce/ecommerce/settings.py | 1f96bed14501297d0c6f25199abbbb08ba9df6f9 | []
| no_license | yemiemy/Footwear-Stores | 58c026b38ffe828cef17c58365b77169ad21ce80 | 92f719824c019044aef78b2b481597b9f94405e0 | refs/heads/master | 2022-12-18T13:34:05.283709 | 2019-09-01T22:32:31 | 2019-09-01T22:32:31 | 205,739,701 | 2 | 0 | null | 2022-12-08T06:06:26 | 2019-09-01T22:31:27 | HTML | UTF-8 | Python | false | false | 4,190 | py | """
Django settings for ecommerce project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0#-622+)iekpf$4w_vn19e0_itwsgdt9v)g63(22h0ygjzvs2y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
DEFAULT_FROM_EMAIL = 'FOOTWEAR <[email protected]>'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'oladimeji'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
if DEBUG:
SITE_URL = 'http://127.0.0.1:8000'
if not DEBUG:
SITE_URL = 'http://footwear.com'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'products',
'carts',
'orders',
'accounts',
'crispy_forms',
'django_countries',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Lagos'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_DIR = os.path.join(os.path.dirname(BASE_DIR), 'static', 'static_file')
STATICFILES_DIRS = [STATIC_DIR,]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static', 'static_root')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static', 'media')
#MEDIA_ROOT = '/Users/Yemi/Desktop/works/static/media'
STRIPE_SECRET_KEY = 'sk_test_zYI5XTc4XMwwHKiJ5HhNUbEW008WFLfwDb'
STRIPE_PUBLISHABLE_KEY = 'pk_test_N3wRcVV5GmnPBaSJiwbwqVHb00omMk74e1'
#send_mail('hello there', 'this is a test message', '[email protected]', ['[email protected]'], fail_silently = True) | [
"[email protected]"
]
| |
d9da6299c37925b684c7e641e6e4b21e82cdd5c1 | 7a02c39b1bd97576991581379fed7e4209c199f2 | /Learning/day20/s12bbs-code/s12bbs-code/aa.py | a44becfde85a3b36497596a8daf7ff50434c0b70 | []
| no_license | chenjinpeng1/python | 0dc9e980ea98aac7145dd6ef966dd3903d488810 | 342d3d5079d158764a5f27838dcaf877e7c430ab | refs/heads/master | 2020-04-06T13:28:11.158318 | 2016-10-16T16:11:09 | 2016-10-16T16:11:09 | 48,805,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # Python 3.5 environment; the interpreter needs to be changed on Linux
# -*- encoding:utf-8 -*-
#Auth ChenJinPeng
if __name__ == '__main__':
s = set()
s.add("aaaaa")
s.add("bbbbbb")
# print(s)
with open("tzc.txt", 'w', encoding="utf-8") as f:
for i in s:
print(i)
write_value = "%scccccc\n" % i
print(write_value)
f.write(write_value)
f.write("eeee")
f.write("dddd") | [
"[email protected]"
]
| |
ddf0d322979e965f66a2702d1b4fd867fa63cc0e | 8c016302f173a8a2c4cdbc8c3398e1be61808cc9 | /itkwidgets/widget_line_profiler.py | b8db546a3b430a5f5617574ff54595bdbf2e5b2f | [
"Apache-2.0"
]
| permissive | jpambrun/itk-jupyter-widgets | cba7b760912446f6ec801bbf4e3d83dafa70b8b5 | 3e00592e2f7285deea1b4eed0257c96179e5dc96 | refs/heads/master | 2020-04-09T20:10:21.686169 | 2018-11-26T19:25:32 | 2018-11-26T19:25:32 | 160,565,931 | 0 | 1 | Apache-2.0 | 2018-12-05T19:10:03 | 2018-12-05T19:10:03 | null | UTF-8 | Python | false | false | 8,145 | py | """LineProfiler class
Image visualization with a line profile.
"""
from traitlets import Unicode
import numpy as np
import scipy.ndimage
import ipywidgets as widgets
from .widget_viewer import Viewer
from ipydatawidgets import NDArray, array_serialization, shape_constraints
from traitlets import CBool
import matplotlib.pyplot as plt
import matplotlib
import IPython
import itk
from ._to_itk import to_itk_image
@widgets.register
class LineProfiler(Viewer):
"""LineProfiler widget class."""
_view_name = Unicode('LineProfilerView').tag(sync=True)
_model_name = Unicode('LineProfilerModel').tag(sync=True)
_view_module = Unicode('itk-jupyter-widgets').tag(sync=True)
_model_module = Unicode('itk-jupyter-widgets').tag(sync=True)
_view_module_version = Unicode('^0.12.2').tag(sync=True)
_model_module_version = Unicode('^0.12.2').tag(sync=True)
point1 = NDArray(dtype=np.float64, default_value=np.zeros((3,), dtype=np.float64),
help="First point in physical space that defines the line profile")\
.tag(sync=True, **array_serialization)\
.valid(shape_constraints(3,))
point2 = NDArray(dtype=np.float64, default_value=np.ones((3,), dtype=np.float64),
help="First point in physical space that defines the line profile")\
.tag(sync=True, **array_serialization)\
.valid(shape_constraints(3,))
_select_initial_points = CBool(default_value=False, help="We will select the initial points for the line profile.").tag(sync=True)
def __init__(self, **kwargs):
if 'point1' not in kwargs or 'point2' not in kwargs:
self._select_initial_points = True
# Default to z-plane mode instead of the 3D volume if we need to
# select points
if 'mode' not in kwargs:
kwargs['mode'] = 'z'
super(LineProfiler, self).__init__(**kwargs)
def line_profile(image, order=2, plotter=None, comparisons=None, **viewer_kwargs):
"""View the image with a line profile.
Creates and returns an ipywidget to visualize the image along with a line
profile.
The image can be 2D or 3D.
Parameters
----------
image : array_like, itk.Image, or vtk.vtkImageData
The 2D or 3D image to visualize.
order : int, optional
Spline order for line profile interpolation. The order has to be in the
range 0-5.
plotter : 'plotly', 'bqplot', or 'ipympl', optional
Plotting library to use. If not defined, use plotly if available,
otherwise bqplot if available, otherwise ipympl.
comparisons: dict, optional
A dictionary whose keys are legend labels and whose values are other
images whose intensities to plot over the same line.
viewer_kwargs : optional
Keyword arguments for the viewer. See help(itkwidgets.view).
"""
profiler = LineProfiler(image=image, **viewer_kwargs)
if not plotter:
try:
import plotly.graph_objs as go
plotter = 'plotly'
except ImportError:
pass
if not plotter:
try:
import bqplot
plotter = 'bqplot'
except ImportError:
pass
if not plotter:
plotter = 'ipympl'
def get_profile(image_or_array):
image_from_array = to_itk_image(image_or_array)
if image_from_array:
image_ = image_from_array
else:
image_ = image_or_array
image_array = itk.GetArrayViewFromImage(image_)
dimension = image_.GetImageDimension()
distance = np.sqrt(sum([(profiler.point1[ii] - profiler.point2[ii])**2 for ii in range(dimension)]))
index1 = tuple(image_.TransformPhysicalPointToIndex(tuple(profiler.point1[:dimension])))
index2 = tuple(image_.TransformPhysicalPointToIndex(tuple(profiler.point2[:dimension])))
num_points = int(np.round(np.sqrt(sum([(index1[ii] - index2[ii])**2 for ii in range(dimension)])) * 2.1))
coords = [np.linspace(index1[ii], index2[ii], num_points) for ii in range(dimension)]
mapped = scipy.ndimage.map_coordinates(image_array, np.vstack(coords[::-1]),
order=order, mode='nearest')
return np.linspace(0.0, distance, num_points), mapped
if plotter == 'plotly':
import plotly.graph_objs as go
layout = go.Layout(
xaxis=dict(title='Distance'),
yaxis=dict(title='Intensity')
)
fig = go.FigureWidget(layout=layout)
elif plotter == 'bqplot':
import bqplot
x_scale = bqplot.LinearScale()
y_scale = bqplot.LinearScale()
x_axis = bqplot.Axis(scale=x_scale, grid_lines='solid', label='Distance')
y_axis = bqplot.Axis(scale=y_scale, orientation='vertical', grid_lines='solid', label='Intensity')
labels = ['Reference']
display_legend = False
if comparisons:
display_legend=True
labels += [label for label in comparisons.keys()]
lines = [bqplot.Lines(scales={'x': x_scale, 'y': y_scale},
labels=labels, display_legend=display_legend, enable_hover=True)]
fig = bqplot.Figure(marks=lines, axes=[x_axis, y_axis])
elif plotter == 'ipympl':
ipython = IPython.get_ipython()
ipython.enable_matplotlib('widget')
is_interactive = matplotlib.is_interactive()
matplotlib.interactive(False)
fig, ax = plt.subplots()
else:
raise ValueError('Invalid plotter: ' + plotter)
def update_plot():
if plotter == 'plotly':
distance, intensity = get_profile(image)
fig.data[0]['x'] = distance
fig.data[0]['y'] = intensity
if comparisons:
for ii, image_ in enumerate(comparisons.values()):
distance, intensity = get_profile(image_)
fig.data[ii+1]['x'] = distance
fig.data[ii+1]['y'] = intensity
elif plotter == 'bqplot':
distance, intensity = get_profile(image)
if comparisons:
for image_ in comparisons.values():
distance_, intensity_ = get_profile(image_)
distance = np.vstack((distance, distance_))
intensity = np.vstack((intensity, intensity_))
fig.marks[0].x = distance
fig.marks[0].y = intensity
elif plotter == 'ipympl':
ax.plot(*get_profile(image))
if comparisons:
ax.plot(*get_profile(image), label='Reference')
for label, image_ in comparisons.items():
ax.plot(*get_profile(image_), label=label)
ax.legend()
else:
ax.plot(*get_profile(image))
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
fig.canvas.draw()
fig.canvas.flush_events()
def update_profile(change):
if plotter == 'plotly':
update_plot()
elif plotter == 'bqplot':
update_plot()
elif plotter == 'ipympl':
is_interactive = matplotlib.is_interactive()
matplotlib.interactive(False)
ax.clear()
update_plot()
matplotlib.interactive(is_interactive)
if plotter == 'plotly':
distance, intensity = get_profile(image)
trace = go.Scattergl(x=distance, y=intensity, name='Reference')
fig.add_trace(trace)
if comparisons:
for label, image_ in comparisons.items():
distance, intensity = get_profile(image_)
trace = go.Scattergl(x=distance, y=intensity, name=label)
fig.add_trace(trace)
widget = widgets.VBox([profiler, fig])
elif plotter == 'bqplot':
update_plot()
widget = widgets.VBox([profiler, fig])
elif plotter == 'ipympl':
update_plot()
widget = widgets.VBox([profiler, fig.canvas])
profiler.observe(update_profile, names=['point1', 'point2'])
return widget
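# Hedged usage sketch (names and file are placeholders; the 'comparisons' images
# are assumed to share the reference image's physical extent):
#
#   import itk
#   image = itk.imread('data.nrrd')
#   widget = line_profile(image, order=1, comparisons={'smoothed': smoothed})
#   widget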
| [
"[email protected]"
]
| |
f84ba0dc45e75e82e0edb109d4c3ce7da79af4d9 | 4eea6c0940439d4e78cd125c311d0e1a12a826ed | /messiah_ast_optimizer/common/classutils.py | be8047fdbe9e8e3284be1bd1b0aac878653c08a4 | []
| no_license | chenyfsysu/PythonNote | 5c77578423f95399b1eda7651dc73f1642ee3cb7 | 9b49eced10bab794ddf5452f18bba55d8ba24c18 | refs/heads/master | 2021-05-07T06:14:56.825737 | 2018-10-18T06:49:41 | 2018-10-18T06:49:41 | 111,645,501 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,985 | py | # -*- coding:utf-8 -*-
import ast
def ismethod(node):
return isinstance(node, ast.FunctionDef)
def getmembers(mro, predicate=None):
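    # Walks the MRO front to back; since existing keys are never overwritten,
    # the most-derived definition of each member wins.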
result = {}
finder = AttributeFinder(findall=True, predicate=predicate)
for cls in mro:
for (key, value) in finder.find(cls).iteritems():
if (key not in result):
result[key] = value.node
return result
def getclassbodies(mro, predicate=None):
return [node for cls in mro for node in cls.body if ((not predicate) or predicate(node))]
def predicate_entity_property(node):
return (isinstance(node, ast.Expr) and isinstance(node.value, ast.Call) and (node.value.func.id == 'Property'))
def predicate_class_attr(node):
return isinstance(node, ast.Assign)
def _fold_property(prop):
    # Stub kept from the original: a full implementation would extract the
    # (name, all, flag, delay) tuple from a Property() call node.
    return ('coco', 0, 0, 0)
def fold_properties(props):
    # Aggregate helper (the name is an assumption): collects the folded
    # property tuples into per-name dicts.
    property_all, property_flag, property_delay = {}, {}, {}
    for prop in props:
        (name, all, flag, delay) = _fold_property(prop)
        property_all[name] = all
        property_flag[name] = flag
        property_delay[name] = delay
    return (property_all, property_flag, property_delay)
def split_cls_body(cls):
prop = getclassbodies(cls, predicate_entity_property)
attr = getclassbodies(cls, predicate_class_attr)
method = getmembers(cls, ismethod)
return (prop, attr, method)
def merge_component(host, component):
    (attrs, methods, properties, decorator_list) = ([], {}, [], [])
    component.insert(0, host)
    for comp in component:
        (prop, attr, method) = split_cls_body(comp)
        # split_cls_body returns lists of nodes, so extend (not append) keeps
        # the collected class body flat.
        properties.extend(prop)
        attrs.extend(attr)
        methods.update(method)
        decorator_list.extend([deco for cls in comp for deco in cls.decorator_list])
    body = ((properties + attrs) + methods.values())
    host[0].body = body
    host[0].decorator_list = decorator_list
    return host
if (__name__ == '__main__'):
src = '\nif True:\n\tA = 100\nelse:\n\tA = 10\n'
import astunparse
node = ast.parse(src)
for body in node.body:
print body
| [
"[email protected]"
]
| |
6695b4cc772773778236433871367a45cddbb7d3 | 521efcd158f4c69a686ed1c63dd8e4b0b68cc011 | /airflow/providers/alibaba/cloud/example_dags/example_oss_bucket.py | 83d0255349f6eb143226b6fdc12c5eab8c85d2d5 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
]
| permissive | coutureai/RaWorkflowOrchestrator | 33fd8e253bfea2f9a82bb122ca79e8cf9dffb003 | cd3ea2579dff7bbab0d6235fcdeba2bb9edfc01f | refs/heads/main | 2022-10-01T06:24:18.560652 | 2021-12-29T04:52:56 | 2021-12-29T04:52:56 | 184,547,783 | 5 | 12 | Apache-2.0 | 2022-11-04T00:02:55 | 2019-05-02T08:38:38 | Python | UTF-8 | Python | false | false | 1,413 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from airflow.models.dag import DAG
from airflow.providers.alibaba.cloud.operators.oss import OSSCreateBucketOperator, OSSDeleteBucketOperator
# [START howto_operator_oss_bucket]
with DAG(
dag_id='oss_bucket_dag',
start_date=datetime(2021, 1, 1),
default_args={'bucket_name': 'your bucket', 'region': 'your region'},
max_active_runs=1,
tags=['example'],
catchup=False,
) as dag:
create_bucket = OSSCreateBucketOperator(task_id='task1')
delete_bucket = OSSDeleteBucketOperator(task_id='task2')
create_bucket >> delete_bucket
# [END howto_operator_oss_bucket]
| [
"[email protected]"
]
| |
017d97be74d253ad06c54ee180f0c4270f0ebb4a | 23d1d8e2f2cb54ce1227765f949af3324063357f | /shopify/webhook/tests/factories.py | fc7cbd2900f88739c76bf00a9f4daccbb7f83e4b | [
"BSD-3-Clause"
]
| permissive | CorbanU/corban-shopify | 4586b6625511c594b9f37b72d0adf57b71c7677c | 5af1d9e5b4828c375fe8c3329e13f7dcad5e5cfc | refs/heads/master | 2021-01-17T13:50:39.845955 | 2017-05-17T18:36:24 | 2017-05-17T18:36:24 | 29,499,681 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | import factory
from factory import fuzzy
from mock import patch
from django.utils.timezone import now
from shopify.product.models import Product
from shopify.product.models import Transaction
from shopify.webhook.models import Webhook
class ProductFactory(factory.django.DjangoModelFactory):
class Meta:
model = Product
product_id = fuzzy.FuzzyInteger(100000, 999999)
product_type = fuzzy.FuzzyChoice(['Deposit', 'Fee', 'Purchase'])
description = fuzzy.FuzzyText(length=64)
account_number = fuzzy.FuzzyInteger(1000000, 9999999)
class TransactionFactory(factory.django.DjangoModelFactory):
class Meta:
model = Transaction
product = factory.SubFactory(ProductFactory)
amount = fuzzy.FuzzyFloat(1.00, 100.00)
is_credit = True
order_id = fuzzy.FuzzyInteger(1000000, 9999999)
order_name = fuzzy.FuzzyText(length=8)
item_id = fuzzy.FuzzyInteger(100000, 999999)
created_at = now()
class WebhookFactory(factory.django.DjangoModelFactory):
class Meta:
model = Webhook
@classmethod
def _create(cls, target_class, *args, **kwargs):
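        # requests.post is patched so that creating a Webhook in tests never
        # hits the real Shopify API; the mock fakes a 200 response whose JSON
        # carries the id that the model's save logic presumably records.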
with patch('requests.post') as mock:
mock.return_value.status_code = 200
mock.return_value.raise_for_status.return_value = None
mock.return_value.raise_for_status()
mock.return_value.json.return_value = {'webhook': {'id': 12345}}
mock.return_value.json()
return super(WebhookFactory, cls)._create(target_class, *args, **kwargs)
| [
"[email protected]"
]
| |
87762da3e9e92ee74337ce102ce0e7fa74365ffc | 2d311d74071ea2d5e0c756186e41cfc567f56b6c | /app/core/tests/test_models.py | 4c43911407bd1cbce4dc7cd67ba5ffdf7ca942c2 | [
"MIT"
]
| permissive | frankRose1/recipe-app-api | ab128d4b97f76f55f61a5a6eb17e4acdf8348981 | 0fff174ecb59bb06e6b631a33e34984e2f12f68a | refs/heads/master | 2022-02-05T03:55:00.043139 | 2019-08-01T03:43:14 | 2019-08-01T03:43:14 | 197,990,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,909 | py | from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='[email protected]', password='testPass'):
"""
Create and return a sample user
:param email: Email address
:type email: str
:param password: Password
:type password: str
:return: User model instance
"""
return get_user_model().objects.create(email=email, password=password)
class ModelTests(TestCase):
    def test_create_user_with_email_successful(self):
"""Test creating a new user with an email successfully"""
params = {
'email': '[email protected]',
'password': 'testPass123'
}
user = get_user_model().objects.create_user(**params)
self.assertEqual(user.email, params['email'])
self.assertTrue(user.check_password(params['password']))
def test_new_user_email_normalized(self):
"""Test creating a new user will normalize the email"""
email = '[email protected]'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
"""Test creating a user with no email raises an error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_super_user(self):
"""Test cresting a new super user"""
user = get_user_model().objects.create_superuser(
'[email protected]',
'test123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
"""Test the tag string representation"""
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
"""Test the ingredient string representation"""
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
"""Test the ingredient string representation"""
recipe = models.Recipe.objects.create(
user=sample_user(),
title='Steak and Mushroom Sauce',
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
@patch('uuid.uuid4')
def test_recipe_filename_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'myimage.jpg')
expected_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, expected_path)
| [
"[email protected]"
]
| |
f5860c1f45c341db2d7604d0d5cc917e3092b271 | 35b6013c1943f37d1428afd2663c8aba0a02628d | /healthcare/api-client/v1/dicom/dicom_stores.py | e0d2615cec39ac2db63c54a7ffe1fdb5bb4620da | [
"Apache-2.0"
]
| permissive | GoogleCloudPlatform/python-docs-samples | d2a251805fbeab15d76ed995cf200727f63f887d | 44e819e713c3885e38c99c16dc73b7d7478acfe8 | refs/heads/main | 2023-08-28T12:52:01.712293 | 2023-08-28T11:18:28 | 2023-08-28T11:18:28 | 35,065,876 | 7,035 | 7,593 | Apache-2.0 | 2023-09-14T20:20:56 | 2015-05-04T23:26:13 | Jupyter Notebook | UTF-8 | Python | false | false | 20,810 | py | # Copyright 2018 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
# [START healthcare_create_dicom_store]
def create_dicom_store(project_id, location, dataset_id, dicom_store_id):
"""Creates a new DICOM store within the parent dataset.
See https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/healthcare/api-client/v1/dicom
before running the sample."""
# Imports the Google API Discovery Service.
from googleapiclient import discovery
api_version = "v1"
service_name = "healthcare"
# Returns an authorized API client by discovering the Healthcare API
# and using GOOGLE_APPLICATION_CREDENTIALS environment variable.
client = discovery.build(service_name, api_version)
# TODO(developer): Uncomment these lines and replace with your values.
# project_id = 'my-project' # replace with your GCP project ID
# location = 'us-central1' # replace with the parent dataset's location
# dataset_id = 'my-dataset' # replace with the DICOM store's parent dataset ID
# dicom_store_id = 'my-dicom-store' # replace with the DICOM store's ID
dicom_store_parent = "projects/{}/locations/{}/datasets/{}".format(
project_id, location, dataset_id
)
request = (
client.projects()
.locations()
.datasets()
.dicomStores()
.create(parent=dicom_store_parent, body={}, dicomStoreId=dicom_store_id)
)
response = request.execute()
print(f"Created DICOM store: {dicom_store_id}")
return response
# [END healthcare_create_dicom_store]
# [START healthcare_delete_dicom_store]
def delete_dicom_store(project_id, location, dataset_id, dicom_store_id):
"""Deletes the specified DICOM store.
See https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/healthcare/api-client/v1/dicom
before running the sample."""
# Imports the Google API Discovery Service.
from googleapiclient import discovery
api_version = "v1"
service_name = "healthcare"
# Returns an authorized API client by discovering the Healthcare API
# and using GOOGLE_APPLICATION_CREDENTIALS environment variable.
client = discovery.build(service_name, api_version)
# TODO(developer): Uncomment these lines and replace with your values.
# project_id = 'my-project' # replace with your GCP project ID
# location = 'us-central1' # replace with the parent dataset's location
# dataset_id = 'my-dataset' # replace with the DICOM store's parent dataset ID
# dicom_store_id = 'my-dicom-store' # replace with the DICOM store's ID
dicom_store_parent = "projects/{}/locations/{}/datasets/{}".format(
project_id, location, dataset_id
)
dicom_store_name = f"{dicom_store_parent}/dicomStores/{dicom_store_id}"
request = (
client.projects()
.locations()
.datasets()
.dicomStores()
.delete(name=dicom_store_name)
)
response = request.execute()
print(f"Deleted DICOM store: {dicom_store_id}")
return response
# [END healthcare_delete_dicom_store]
# [START healthcare_get_dicom_store]
def get_dicom_store(project_id, location, dataset_id, dicom_store_id):
"""Gets the specified DICOM store.
See https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/healthcare/api-client/v1/dicom
before running the sample."""
# Imports the Google API Discovery Service.
from googleapiclient import discovery
# Imports Python's built-in "json" module
import json
api_version = "v1"
service_name = "healthcare"
# Returns an authorized API client by discovering the Healthcare API
# and using GOOGLE_APPLICATION_CREDENTIALS environment variable.
client = discovery.build(service_name, api_version)
# TODO(developer): Uncomment these lines and replace with your values.
# project_id = 'my-project' # replace with your GCP project ID
# location = 'us-central1' # replace with the parent dataset's location
# dataset_id = 'my-dataset' # replace with the DICOM store's parent dataset ID
# dicom_store_id = 'my-dicom-store' # replace with the DICOM store's ID
dicom_store_parent = "projects/{}/locations/{}/datasets/{}".format(
project_id, location, dataset_id
)
dicom_store_name = f"{dicom_store_parent}/dicomStores/{dicom_store_id}"
dicom_stores = client.projects().locations().datasets().dicomStores()
dicom_store = dicom_stores.get(name=dicom_store_name).execute()
print(json.dumps(dicom_store, indent=2))
return dicom_store
# [END healthcare_get_dicom_store]
# [START healthcare_list_dicom_stores]
def list_dicom_stores(project_id, location, dataset_id):
"""Lists the DICOM stores in the given dataset.
See https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/healthcare/api-client/v1/dicom
before running the sample."""
# Imports the Google API Discovery Service.
from googleapiclient import discovery
api_version = "v1"
service_name = "healthcare"
# Returns an authorized API client by discovering the Healthcare API
# and using GOOGLE_APPLICATION_CREDENTIALS environment variable.
client = discovery.build(service_name, api_version)
# TODO(developer): Uncomment these lines and replace with your values.
# project_id = 'my-project' # replace with your GCP project ID
# location = 'us-central1' # replace with the parent dataset's location
# dataset_id = 'my-dataset' # replace with the DICOM store's parent dataset ID
dicom_store_parent = "projects/{}/locations/{}/datasets/{}".format(
project_id, location, dataset_id
)
dicom_stores = (
client.projects()
.locations()
.datasets()
.dicomStores()
.list(parent=dicom_store_parent)
.execute()
.get("dicomStores", [])
)
for dicom_store in dicom_stores:
print(dicom_store)
return dicom_stores
# [END healthcare_list_dicom_stores]
# [START healthcare_patch_dicom_store]
def patch_dicom_store(project_id, location, dataset_id, dicom_store_id, pubsub_topic):
"""Updates the DICOM store.
See https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/healthcare/api-client/v1/dicom
before running the sample."""
# Imports the Google API Discovery Service.
from googleapiclient import discovery
api_version = "v1"
service_name = "healthcare"
# Returns an authorized API client by discovering the Healthcare API
# and using GOOGLE_APPLICATION_CREDENTIALS environment variable.
client = discovery.build(service_name, api_version)
# TODO(developer): Uncomment these lines and replace with your values.
# project_id = 'my-project' # replace with your GCP project ID
# location = 'us-central1' # replace with the parent dataset's location
# dataset_id = 'my-dataset' # replace with the DICOM store's parent dataset ID
# dicom_store_id = 'my-dicom-store' # replace with the DICOM store's ID
# pubsub_topic = 'my-topic' # replace with an existing Pub/Sub topic
dicom_store_parent = "projects/{}/locations/{}/datasets/{}".format(
project_id, location, dataset_id
)
dicom_store_name = f"{dicom_store_parent}/dicomStores/{dicom_store_id}"
patch = {
"notificationConfig": {
"pubsubTopic": f"projects/{project_id}/topics/{pubsub_topic}"
}
}
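    # updateMask="notificationConfig" below limits the patch to that single
    # field; every other DICOM store setting is left untouched.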
request = (
client.projects()
.locations()
.datasets()
.dicomStores()
.patch(name=dicom_store_name, updateMask="notificationConfig", body=patch)
)
response = request.execute()
print(
"Patched DICOM store {} with Cloud Pub/Sub topic: {}".format(
dicom_store_id, pubsub_topic
)
)
return response
# [END healthcare_patch_dicom_store]
# [START healthcare_export_dicom_instance_gcs]
def export_dicom_instance(project_id, location, dataset_id, dicom_store_id, uri_prefix):
"""Export data to a Google Cloud Storage bucket by copying
it from the DICOM store.
See https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/healthcare/api-client/v1/dicom
before running the sample."""
# Imports the Google API Discovery Service.
from googleapiclient import discovery
api_version = "v1"
service_name = "healthcare"
# Returns an authorized API client by discovering the Healthcare API
# and using GOOGLE_APPLICATION_CREDENTIALS environment variable.
client = discovery.build(service_name, api_version)
# TODO(developer): Uncomment these lines and replace with your values.
# project_id = 'my-project' # replace with your GCP project ID
# location = 'us-central1' # replace with the parent dataset's location
# dataset_id = 'my-dataset' # replace with the DICOM store's parent dataset ID
# dicom_store_id = 'my-dicom-store' # replace with the DICOM store's ID
# uri_prefix = 'my-bucket' # replace with a Cloud Storage bucket
dicom_store_parent = "projects/{}/locations/{}/datasets/{}".format(
project_id, location, dataset_id
)
dicom_store_name = f"{dicom_store_parent}/dicomStores/{dicom_store_id}"
body = {"gcsDestination": {"uriPrefix": f"gs://{uri_prefix}"}}
request = (
client.projects()
.locations()
.datasets()
.dicomStores()
.export(name=dicom_store_name, body=body)
)
response = request.execute()
print(f"Exported DICOM instances to bucket: gs://{uri_prefix}")
return response
# [END healthcare_export_dicom_instance_gcs]
# [START healthcare_import_dicom_instance]
def import_dicom_instance(
project_id, location, dataset_id, dicom_store_id, content_uri
):
"""Imports data into the DICOM store by copying it from the specified
source.
See https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/healthcare/api-client/v1/dicom
before running the sample."""
# Imports the Google API Discovery Service.
from googleapiclient import discovery
api_version = "v1"
service_name = "healthcare"
# Returns an authorized API client by discovering the Healthcare API
# and using GOOGLE_APPLICATION_CREDENTIALS environment variable.
client = discovery.build(service_name, api_version)
# TODO(developer): Uncomment these lines and replace with your values.
# project_id = 'my-project' # replace with your GCP project ID
# location = 'us-central1' # replace with the parent dataset's location
# dataset_id = 'my-dataset' # replace with the DICOM store's parent dataset ID
# dicom_store_id = 'my-dicom-store' # replace with the DICOM store's ID
# content_uri = 'my-bucket/*.dcm' # replace with a Cloud Storage bucket and DCM files
dicom_store_parent = "projects/{}/locations/{}/datasets/{}".format(
project_id, location, dataset_id
)
dicom_store_name = f"{dicom_store_parent}/dicomStores/{dicom_store_id}"
body = {"gcsSource": {"uri": f"gs://{content_uri}"}}
# Escape "import()" method keyword because "import"
# is a reserved keyword in Python
request = (
client.projects()
.locations()
.datasets()
.dicomStores()
.import_(name=dicom_store_name, body=body)
)
response = request.execute()
print(f"Imported DICOM instance: {content_uri}")
return response
# [END healthcare_import_dicom_instance]
# [START healthcare_dicom_store_get_iam_policy]
def get_dicom_store_iam_policy(project_id, location, dataset_id, dicom_store_id):
"""Gets the IAM policy for the specified DICOM store.
See https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/healthcare/api-client/v1/dicom
before running the sample."""
# Imports the Google API Discovery Service.
from googleapiclient import discovery
api_version = "v1"
service_name = "healthcare"
# Returns an authorized API client by discovering the Healthcare API
# and using GOOGLE_APPLICATION_CREDENTIALS environment variable.
client = discovery.build(service_name, api_version)
# TODO(developer): Uncomment these lines and replace with your values.
# project_id = 'my-project' # replace with your GCP project ID
# location = 'us-central1' # replace with the parent dataset's location
# dataset_id = 'my-dataset' # replace with the DICOM store's parent dataset ID
# dicom_store_id = 'my-dicom-store' # replace with the DICOM store's ID
dicom_store_parent = "projects/{}/locations/{}/datasets/{}".format(
project_id, location, dataset_id
)
dicom_store_name = f"{dicom_store_parent}/dicomStores/{dicom_store_id}"
request = (
client.projects()
.locations()
.datasets()
.dicomStores()
.getIamPolicy(resource=dicom_store_name)
)
response = request.execute()
print("etag: {}".format(response.get("name")))
return response
# [END healthcare_dicom_store_get_iam_policy]
# [START healthcare_dicom_store_set_iam_policy]
def set_dicom_store_iam_policy(
project_id, location, dataset_id, dicom_store_id, member, role, etag=None
):
"""Sets the IAM policy for the specified DICOM store.
A single member will be assigned a single role. A member can be any of:
- allUsers, that is, anyone
- allAuthenticatedUsers, anyone authenticated with a Google account
- user:email, as in 'user:[email protected]'
- group:email, as in 'group:[email protected]'
- domain:domainname, as in 'domain:example.com'
- serviceAccount:email,
as in 'serviceAccount:[email protected]'
A role can be any IAM role, such as 'roles/viewer', 'roles/owner',
or 'roles/editor'
See https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/healthcare/api-client/v1/dicom
before running the sample."""
# Imports the Google API Discovery Service.
from googleapiclient import discovery
api_version = "v1"
service_name = "healthcare"
# Returns an authorized API client by discovering the Healthcare API
# and using GOOGLE_APPLICATION_CREDENTIALS environment variable.
client = discovery.build(service_name, api_version)
# TODO(developer): Uncomment these lines and replace with your values.
# project_id = 'my-project' # replace with your GCP project ID
# location = 'us-central1' # replace with the parent dataset's location
# dataset_id = 'my-dataset' # replace with the DICOM store's parent dataset ID
# dicom_store_id = 'my-dicom-store' # replace with the DICOM store's ID
# member = '[email protected]' # replace with an authorized member
# role = 'roles/viewer' # replace with a Healthcare API IAM role
dicom_store_parent = "projects/{}/locations/{}/datasets/{}".format(
project_id, location, dataset_id
)
dicom_store_name = f"{dicom_store_parent}/dicomStores/{dicom_store_id}"
policy = {"bindings": [{"role": role, "members": [member]}]}
if etag is not None:
policy["etag"] = etag
request = (
client.projects()
.locations()
.datasets()
.dicomStores()
.setIamPolicy(resource=dicom_store_name, body={"policy": policy})
)
response = request.execute()
print("etag: {}".format(response.get("name")))
print("bindings: {}".format(response.get("bindings")))
return response
# [END healthcare_dicom_store_set_iam_policy]
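# A hypothetical invocation of the sample above (all values illustrative;
# 'roles/viewer' is one of the roles the docstring lists as acceptable):
#
#   set_dicom_store_iam_policy(
#       "my-project", "us-central1", "my-dataset", "my-dicom-store",
#       "user:[email protected]", "roles/viewer")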
def parse_command_line_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--project_id",
default=os.environ.get("GOOGLE_CLOUD_PROJECT"),
help="GCP project name",
)
parser.add_argument("--location", default="us-central1", help="GCP location")
parser.add_argument("--dataset_id", default=None, help="Name of dataset")
parser.add_argument("--dicom_store_id", default=None, help="Name of DICOM store")
parser.add_argument(
"--pubsub_topic",
default=None,
help="The Cloud Pub/Sub topic that notifications of "
"changes are published on",
)
parser.add_argument(
"--uri_prefix",
default=None,
help="URI for a Google Cloud Storage directory to which result files"
'should be written (e.g., "bucket-id/path/to/destination/dir").',
)
parser.add_argument(
"--content_uri",
default=None,
help="URI for a Google Cloud Storage directory from which files"
'should be imported (e.g., "bucket-id/path/to/destination/dir").',
)
parser.add_argument(
"--export_format",
choices=["FORMAT_UNSPECIFIED", "DICOM", "JSON_BIGQUERY_IMPORT"],
default="DICOM",
help="Specifies the output format. If the format is unspecified, the"
"default functionality is to export to DICOM.",
)
parser.add_argument(
"--member",
default=None,
help='Member to add to IAM policy (e.g. "domain:example.com")',
)
parser.add_argument(
"--role", default=None, help='IAM Role to give to member (e.g. "roles/viewer")'
)
command = parser.add_subparsers(dest="command")
command.add_parser("create-dicom-store", help=create_dicom_store.__doc__)
command.add_parser("delete-dicom-store", help=delete_dicom_store.__doc__)
command.add_parser("get-dicom-store", help=get_dicom_store.__doc__)
command.add_parser("list-dicom-stores", help=list_dicom_stores.__doc__)
command.add_parser("patch-dicom-store", help=patch_dicom_store.__doc__)
command.add_parser("get_iam_policy", help=get_dicom_store_iam_policy.__doc__)
command.add_parser("set_iam_policy", help=set_dicom_store_iam_policy.__doc__)
command.add_parser("export-dicom-store", help=export_dicom_instance.__doc__)
command.add_parser("import-dicom-store", help=import_dicom_instance.__doc__)
return parser.parse_args()
def run_command(args):
"""Calls the program using the specified command."""
if args.project_id is None:
print(
"You must specify a project ID or set the "
'"GOOGLE_CLOUD_PROJECT" environment variable.'
)
return
elif args.command == "create-dicom-store":
create_dicom_store(
args.project_id, args.location, args.dataset_id, args.dicom_store_id
)
elif args.command == "delete-dicom-store":
delete_dicom_store(
args.project_id, args.location, args.dataset_id, args.dicom_store_id
)
elif args.command == "get-dicom-store":
get_dicom_store(
args.project_id, args.location, args.dataset_id, args.dicom_store_id
)
elif args.command == "list-dicom-stores":
list_dicom_stores(args.project_id, args.location, args.dataset_id)
elif args.command == "patch-dicom-store":
patch_dicom_store(
args.project_id,
args.location,
args.dataset_id,
args.dicom_store_id,
args.pubsub_topic,
)
elif args.command == "export-dicom-store":
export_dicom_instance(
args.project_id,
args.location,
args.dataset_id,
args.dicom_store_id,
args.uri_prefix,
)
elif args.command == "import-dicom-store":
import_dicom_instance(
args.project_id,
args.location,
args.dataset_id,
args.dicom_store_id,
args.content_uri,
)
elif args.command == "get_iam_policy":
get_dicom_store_iam_policy(
args.project_id, args.location, args.dataset_id, args.dicom_store_id
)
elif args.command == "set_iam_policy":
set_dicom_store_iam_policy(
args.project_id,
args.location,
args.dataset_id,
args.dicom_store_id,
args.member,
args.role,
)
def main():
args = parse_command_line_args()
run_command(args)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
1fa0fb7eeeffdef62a38f01de113f43e004559c7 | 76e9b6cd86803cfd619c32bea338fbf64bf29221 | /gui.py | c53e9809dad7296d77a12ed04aa958293c411b36 | []
| no_license | remton/Python_Chess | 3f004d3d6be4321f75e4176a36f7d728a4c2fc8e | 977ee7d6d154037f9588f826c03a3943a2122d94 | refs/heads/master | 2022-12-03T11:14:15.552912 | 2020-08-13T05:17:32 | 2020-08-13T05:17:32 | 285,480,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,936 | py | # gui.py
# Handles all the GUI
from tkinter import *
from util import col_to_board, row_to_board
import util
# First thing when working with tkinter
root = Tk()
root.title("Remi's Chess Game")
def on_close():
raise SystemExit
root.protocol("WM_DELETE_WINDOW", on_close)
# PhotoImage must keep the original variable so this dict allows access to images whenever we want
piece_images = {
'Empty': PhotoImage(file='Images/Empty.png'),
'Pawn_w': PhotoImage(file='Images/Pawn_w.png'),
'Pawn_b': PhotoImage(file='Images/Pawn_b.png'),
'Knight_w': PhotoImage(file='Images/Knight_w.png'),
'Knight_b': PhotoImage(file='Images/Knight_b.png'),
'Bishop_w': PhotoImage(file='Images/Bishop_w.png'),
'Bishop_b': PhotoImage(file='Images/Bishop_b.png'),
'Rook_w': PhotoImage(file='Images/Rook_w.png'),
'Rook_b': PhotoImage(file='Images/Rook_b.png'),
'Queen_w': PhotoImage(file='Images/Queen_w.png'),
'Queen_b': PhotoImage(file='Images/Queen_b.png'),
'King_w': PhotoImage(file='Images/King_w.png'),
'King_b': PhotoImage(file='Images/King_b.png'),
}
last_move = ''
is_first_button = True
first_space = ''
def on_button_press(space_str):
global last_move
global first_space
global is_first_button
if is_first_button:
# This is a weird way to check if the space is empty but it works
# board is updated in update_grid
global board
x = util.board_to_space[space_str[0]]
y = util.board_to_space[space_str[1]]
if board[y][x].piece.name == 'Empty':
return
first_space = space_str
is_first_button = False
else:
last_move = f'{first_space},{space_str}'
is_first_button = True
first_space = ''
root.quit()
# Unlike with the ChessBoard grid is indexed [x][y]
grid = [[]]
def create_grid():
back_color = 'gray'
front_color = 'white'
grid.clear()
row = []
for x in range(8):
row.clear()
for y in range(8):
space = row_to_board[x] + col_to_board[y]
new_button = Button(root, padx=20, pady=20, bg=back_color, fg=front_color, image=piece_images['Empty'],
text=space, command=util.create_lambda(on_button_press, space))
row.append(new_button)
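            # Swap the two colors so squares alternate within this column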
temp = back_color
back_color = front_color
front_color = temp
grid.append(row.copy())
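        # Swap once more so each new column starts on the opposite color (checkerboard)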
temp = back_color
back_color = front_color
front_color = temp
for x in range(0, 8):
for y in range(0, 8):
grid[x][7 - y].grid(column=x+1, row=y)
# Create the labels for space names
for x in range(8):
new_label = Label(root, height=2, width=8, text=row_to_board[x])
new_label.grid(column=x+1, row=8)
for y in range(8):
new_label = Label(root, height=4, width=4, text=col_to_board[y])
new_label.grid(column=0, row=7-y)
board = None
def update_grid(chess_board):
global grid
global board
board = chess_board.board
for x in range(8):
for y in range(8):
piece = board[y][x].piece
grid[x][y]['image'] = piece_images[piece.img_name]
def open_window(chess_board, run_loop=True):
create_grid()
update_grid(chess_board)
if run_loop:
root.mainloop()
def endgame_window_close(end_root):
end_root.quit()
def open_endgame_window(is_checkmate=False, is_draw=False):
end_root = Tk()
if is_checkmate:
message = "Checkmate!"
elif is_draw:
message = "Draw."
else:
message = "Error: open_endgame_window needs checkmate or is_draw to be true"
message_label = Label(end_root, height=5, width=10, text=message)
message_label.pack()
continue_button = Button(end_root, padx=20, pady=20, text='continue', command=lambda: endgame_window_close(end_root))
continue_button.pack()
end_root.mainloop()
| [
"[email protected]"
]
| |
f01ab1ef46b15e7a937c29707ebb6e41472dc806 | 5ee8b160afbb8e80446ecb94b436f172cd94495e | /list_operations.py | 4cddf34d6e866c322bad11f7086e4b09facdecaf | []
| no_license | kgermeroth/HBLabListSlicesNew | f678ddd51eeb227b08c1132f375c2a5be84bd411 | ae4c4229e95e876c2ef44e9394d4ce0b729597cf | refs/heads/master | 2020-06-18T22:37:54.560161 | 2019-07-12T00:39:04 | 2019-07-12T00:39:04 | 196,478,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,262 | py | """Functions that manipulate lists without using Python's built-in list methods.
The fundamental operations on lists in Python are those that are part of the
language syntax and/or cannot be implemented in terms of other list operations.
They include:
* List indexing (some_list[index])
* List indexing assignment (some_list[index] = value)
* List slicing (some_list[start:end])
* List slicing assignment (some_list[start:end] = another_list)
* List index deletion (del some_list[index])
* List slicing deletion (del some_list[start:end])
Implement functions that each use just one of the above operations.
The docstring of each function describes what it should do.
DO NOT USE ANY OF THE BUILT IN LIST METHODS, OR len()!
"""
def head(input_list):
"""Return the first element of the input list.
For example:
>>> head(['Jan', 'Feb', 'Mar'])
'Jan'
"""
return input_list[0]
def tail(input_list):
"""Return all elements of the input list except the first.
For example:
>>> tail(['Jan', 'Feb', 'Mar'])
['Feb', 'Mar']
"""
return input_list[1:]
def last(input_list):
"""Return the last element of the input list.
For example:
>>> last(['Jan', 'Feb', 'Mar'])
'Mar'
"""
return input_list[-1]
def init(input_list):
"""Return all elements of the input list except the last.
For example:
>>> init(['Jan', 'Feb', 'Mar'])
['Jan', 'Feb']
"""
return input_list[0:-1]
##############################################################################
# Do yourself a favor and get a short code review here.
def first_three(input_list):
"""Return the first three elements of the input list.
For example:
>>> first_three(['Jan', 'Feb', 'Mar', 'Apr', 'May'])
['Jan', 'Feb', 'Mar']
"""
return input_list[0:3]
def last_five(input_list):
"""Return the last five elements of the input list.
For example:
>>> last_five([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[15, 18, 21, 24, 27]
"""
return input_list[-5: ]
def middle(input_list):
"""Return all elements of input_list except the first two and the last two.
For example:
>>> middle([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[6, 9, 12, 15, 18, 21]
"""
return input_list[2:-2]
def inner_four(input_list):
"""Return the third, fourth, fifth, and sixth elements of input_list.
For example:
>>> inner_four([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[6, 9, 12, 15]
"""
return input_list[2:6]
def inner_four_end(input_list):
"""Return the elements that are 6th, 5th, 4th, and 3rd from the end of input_list.
This function should return those elements in a list, in the exact order
described above.
For example:
>>> inner_four_end([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[12, 15, 18, 21]
"""
return input_list[-6:-2]
def replace_head(input_list):
"""Replace the head of input_list with the value 42 and return nothing.
For example:
>>> multiples = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
>>> replace_head(multiples)
>>> multiples == [42, 3, 6, 9, 12, 15, 18, 21, 24, 27]
True
"""
input_list[0] = 42
def replace_third_and_last(input_list):
"""Replace third and last elements of input_list with 37 and return nothing.
For example:
>>> multiples = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
>>> replace_third_and_last(multiples)
>>> multiples == [0, 3, 37, 9, 12, 15, 18, 21, 24, 37]
True
"""
input_list[2] = 37
input_list[-1] = 37
return
def replace_middle(input_list):
"""Replace all elements of a list but the first and last two with 42 and 37.
After the replacement, 42 and 37 should appear in that order in input_list.
Return nothing.
For example:
>>> multiples = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
>>> replace_middle(multiples)
>>> multiples == [0, 3, 42, 37, 24, 27]
True
"""
input_list[2:-2] = [42,37]
return
def delete_third_and_seventh(input_list):
"""Remove third and seventh elements of input_list and return nothing.
For example:
>>> notes = ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
>>> delete_third_and_seventh(notes)
>>> notes == ['Do', 'Re', 'Fa', 'So', 'La', 'Do']
True
"""
    del input_list[2]
    del input_list[5]
return
def delete_middle(input_list):
"""Remove all elements from input_list except the first two and last two.
Return nothing.
For example:
>>> notes = ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
>>> delete_middle(notes)
>>> notes == ['Do', 'Re', 'Ti', 'Do']
True
"""
input_list[2:-2] = []
return
##############################################################################
# END OF MAIN EXERCISE. Yay! You did it! You Rock!
#
# Please ask for a code review from an Education team member before proceeding.
##############################################################################
# This is the part were we actually run the doctests.
if __name__ == "__main__":
import doctest
result = doctest.testmod()
if result.failed == 0:
print("ALL TESTS PASSED")
| [
"[email protected]"
]
| |
e8ab10ad4da9ab0533ae01589b7ed1aee5b30997 | 32166eebe7767379259192d322939d3cf83fd403 | /Token_Management/windows_token_demo.py | ff475a3862172025ec0b690048044e4004b7bf58 | []
| no_license | wifinigel/semfio-mist | 1294e52d42f0c373eb24724beaf754f9332d742e | f501b6488de621b30c5f3a99b3e53bb129970915 | refs/heads/master | 2022-09-30T04:53:55.968239 | 2020-05-22T11:22:47 | 2020-05-22T11:22:47 | 266,056,873 | 0 | 0 | null | 2020-05-22T08:18:51 | 2020-05-22T08:18:51 | null | UTF-8 | Python | false | false | 2,002 | py | """
This script demonstrates how to use token_class.py on Windows.
Before running the script, you must define a user-level environmental
variable that contains your Mist API token. This will be used to access
the Mist cloud and create the required temporary key(s) to perform
scripted actions.
The token_class module relies on the presence of an environmental
variable called MIST_TOKEN to authorize access to the Mist API. This
env var must contain the value of a valid token that has been
previously created via the Mist API, e.g. using POSTMAN or some other
API access tool - see the following URL for an example:
https://www.mist.com/documentation/using-postman/
To create the user env var, use the following command from a
command window on a Windows 10 machine:
setx MIST_TOKEN "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
To verify the env var has been correctly created, open a new command
window and type the following command to check that the env var
is now globally available to the user account being used:
echo %MIST_TOKEN%
Note that the env var is only available in a NEW command window. The env
var is now permanently available to all processes running under the current
user account. The env var will not be available to other users on the same
machine.
To remove the env var value, set the env var with a blank value in a command
window (the env var will still exist, but will have no value):
setx MIST_TOKEN ""
Alternatively, it may be deleted via the Windows 10 GUI environmental
variable editing tool: Start > Control Panel > System & Security >
System > Advanced System Settings > Environmental Variables (User section)
"""
from token_class import Token
# create Token obj
master_token_obj = Token()
# get a temporary token so that we can do some stuff
temp_mist_token = master_token_obj.get_tmp_token()
# do some stuff here (e.g. list WLANs)
# TBA
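# A minimal sketch of what that might look like (the base URL, endpoint and
# header format below follow the public Mist REST conventions and are
# assumptions here, not part of token_class):
#
#   import requests
#   headers = {"Authorization": "Token {}".format(temp_mist_token)}
#   resp = requests.get("https://api.mist.com/api/v1/self", headers=headers)
#   print(resp.json())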
# clean up by removing our temporary token
master_token_obj.delete_tmp_token()
| [
"[email protected]"
]
| |
131f171e663006b3246eeba41b11a128b8d050df | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03626/s622123482.py | 66a5287c093edaa7fe69b4f24155002213a9e8f0 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | MOD = 10**9 + 7
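# Count 3-colorings of a 2xN board tiled with dominoes; orientation is
# inferred from the top row S alone (S[i] == S[i+1] marks a horizontal pair).
# check[] holds the blocks left to right: 1 = vertical, 2 = horizontal.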
N = int(input())
S = input()
S2 = input()
check = []
flg = False
for i in range(N-1):
if flg:
check.append(2)
flg = False
continue
if S[i] == S[i+1]:
flg = True
else:
check.append(1)
if flg:
check.append(2)
else:
check.append(1)
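# A leading vertical block has 3 colorings, a leading horizontal pair has 6.
# Transitions: after a vertical block there are always 2 choices; horizontal
# -> horizontal gives 3; horizontal -> vertical gives 1 (no extra factor).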
ans = 3 if check[0] == 1 else 6
for i in range(1,len(check)):
if check[i-1] == 1:
ans *= 2
elif check[i] == 2 and check[i-1] == 2:
ans *= 3
print(ans%MOD) | [
"[email protected]"
]
| |
5e4e68907abd04d355a44780c5bfe0fa5ebfdc8d | d86c52f4098fd9c1a102c2d3f5630556e0610fa2 | /fitle/myenv/Lib/site-packages/django/db/models/fields/related_descriptors.py | ebcd53f1822ef4847790158e594127aaabf8fc0d | []
| no_license | makadama/bitbucket | 24f05c4946168ed15d4f56bfdc45fd6c0774e0f2 | cabfd551b92fe1af6d9d14ab9eb3d9974b64aa79 | refs/heads/master | 2023-06-19T19:04:03.894599 | 2021-07-15T12:10:39 | 2021-07-15T12:10:39 | 385,203,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:c80ba94b54aa0987fdd1aca4451b1c1266148d2b48d83ceb5c33ec8048d478d7
size 54061
| [
"[email protected]"
]
| |
5000e704ec583c0179a3f2695f926eb0e8621667 | caa05194b8f11f29a19767c94fdc93628be694d5 | /examples/asr/quantization/speech_to_text_calibrate.py | 165623f283c29b64d870e226782f5cc7f6844a2a | [
"Apache-2.0"
]
| permissive | Jimmy-INL/NeMo | a589ab0ab97b9ccb8921579670e80c470ce7077b | 6a3753b3013dc92a3587853d60c5086e2e64d98f | refs/heads/main | 2023-04-02T22:28:29.891050 | 2021-04-13T18:22:24 | 2021-04-13T18:22:24 | 357,681,603 | 1 | 0 | Apache-2.0 | 2021-04-13T20:34:12 | 2021-04-13T20:34:12 | null | UTF-8 | Python | false | false | 6,150 | py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for calibrating a pretrained ASR model for quantization
"""
from argparse import ArgumentParser
import torch
from omegaconf import open_dict
from nemo.collections.asr.models import EncDecCTCModel
from nemo.utils import logging
try:
from pytorch_quantization import calib
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import quant_modules
from pytorch_quantization.tensor_quant import QuantDescriptor
except ImportError:
raise ImportError(
"pytorch-quantization is not installed. Install from "
"https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
)
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
can_gpu = torch.cuda.is_available()
def main():
parser = ArgumentParser()
parser.add_argument(
"--asr_model", type=str, default="QuartzNet15x5Base-En", required=True, help="Pass: 'QuartzNet15x5Base-En'",
)
parser.add_argument("--dataset", type=str, required=True, help="path to evaluation data")
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument(
"--normalize_text", default=True, type=bool, help="Normalize transcripts or not. Set to False for non-English."
)
parser.add_argument('--num_calib_batch', default=1, type=int, help="Number of batches for calibration.")
parser.add_argument('--calibrator', type=str, choices=["max", "histogram"], default="max")
parser.add_argument('--percentile', nargs='+', type=float, default=[99.9, 99.99, 99.999, 99.9999])
parser.add_argument("--amp", action="store_true", help="Use AMP in calibration.")
parser.set_defaults(amp=False)
args = parser.parse_args()
torch.set_grad_enabled(False)
# Initialize quantization
quant_desc_input = QuantDescriptor(calib_method=args.calibrator)
quant_nn.QuantConv2d.set_default_quant_desc_input(quant_desc_input)
quant_nn.QuantConvTranspose2d.set_default_quant_desc_input(quant_desc_input)
quant_nn.QuantLinear.set_default_quant_desc_input(quant_desc_input)
if args.asr_model.endswith('.nemo'):
logging.info(f"Using local ASR model from {args.asr_model}")
asr_model_cfg = EncDecCTCModel.restore_from(restore_path=args.asr_model, return_config=True)
with open_dict(asr_model_cfg):
asr_model_cfg.encoder.quantize = True
asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model, override_config_path=asr_model_cfg)
else:
logging.info(f"Using NGC cloud ASR model {args.asr_model}")
asr_model_cfg = EncDecCTCModel.from_pretrained(model_name=args.asr_model, return_config=True)
with open_dict(asr_model_cfg):
asr_model_cfg.encoder.quantize = True
asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model, override_config_path=asr_model_cfg)
asr_model.setup_test_data(
test_data_config={
'sample_rate': 16000,
'manifest_filepath': args.dataset,
'labels': asr_model.decoder.vocabulary,
'batch_size': args.batch_size,
'normalize_transcripts': args.normalize_text,
'shuffle': True,
}
)
if can_gpu:
asr_model = asr_model.cuda()
asr_model.eval()
# Enable calibrators
for name, module in asr_model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
for i, test_batch in enumerate(asr_model.test_dataloader()):
if can_gpu:
test_batch = [x.cuda() for x in test_batch]
if args.amp:
with autocast():
_ = asr_model(input_signal=test_batch[0], input_signal_length=test_batch[1])
else:
_ = asr_model(input_signal=test_batch[0], input_signal_length=test_batch[1])
if i >= args.num_calib_batch:
break
# Save calibrated model(s)
model_name = args.asr_model.replace(".nemo", "") if args.asr_model.endswith(".nemo") else args.asr_model
if not args.calibrator == "histogram":
compute_amax(asr_model, method="max")
asr_model.save_to(F"{model_name}-max-{args.num_calib_batch*args.batch_size}.nemo")
else:
for percentile in args.percentile:
print(F"{percentile} percentile calibration")
compute_amax(asr_model, method="percentile")
asr_model.save_to(F"{model_name}-percentile-{percentile}-{args.num_calib_batch*args.batch_size}.nemo")
for method in ["mse", "entropy"]:
print(F"{method} calibration")
compute_amax(asr_model, method=method)
asr_model.save_to(F"{model_name}-{method}-{args.num_calib_batch*args.batch_size}.nemo")
def compute_amax(model, **kwargs):
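    """Load the calibrated amax values into every TensorQuantizer module."""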
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator):
module.load_calib_amax()
else:
module.load_calib_amax(**kwargs)
print(F"{name:40}: {module}")
if can_gpu:
model.cuda()
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| [
"[email protected]"
]
| |
5e3fae95e1de68e3ee68cb9335bfce8908637b86 | d5796258828bf3e12abc8def1ad1828aa14b4cfc | /apps/courses/migrations/0005_video_url.py | b491a64be7002102502b318d5ee3e0948700ff8e | []
| no_license | ghjan/imooc2 | fbd3b1478df12a2961c77ee05f15cffc9cc26097 | 4652ba68db80577466f72ef1cda087c820144879 | refs/heads/master | 2020-03-18T16:12:28.828646 | 2018-11-28T07:52:14 | 2018-11-28T07:52:14 | 134,952,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-26 11:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0004_course_tag'),
]
operations = [
migrations.AddField(
model_name='video',
name='url',
field=models.URLField(default='', verbose_name='访问地址'),
),
]
| [
"[email protected]"
]
| |
471d50f0791bed7f3468efcec297cac090555298 | 567c54ba9176581a5d5e1ae65212a6e87a604f0b | /wsgi/pico/pico/urls.py | 10f56e5d61f05915d1344f8a55c10f6a8cc5c632 | []
| no_license | andrewidya/pico | e0641433e1e63ab865fe65924c32c687c75b8d83 | 4a0e8ff885601004aa92ba05d204e3fe6bd90731 | refs/heads/master | 2021-01-10T13:46:20.543152 | 2015-12-08T13:14:40 | 2015-12-08T13:14:40 | 45,040,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.sitemaps import CMSSitemap
from django.conf import settings
from django.conf.urls import * # NOQA
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
handler404 = 'pico.views.page_not_found'
#handler500 = 'pico.views.server_error'
admin.autodiscover()
#urlpatterns = patterns('',
# url(r'^i18n/', include('django.conf.urls.i18n')),
#)
urlpatterns = i18n_patterns('',
url(r'^admin/', include(admin.site.urls)), # NOQA
url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': {'cmspages': CMSSitemap}}, name='sitemap-xml'),
url(r'^select2/', include('django_select2.urls')),
url(r'^', include('cms.urls')),
url(r'^taggit_autosuggest/', include('taggit_autosuggest.urls')),
url(r'^pico_blog/', include('pico_blog.urls', namespace='pico_blog')),
)
# This is only needed when using runserver.
if settings.DEBUG:
urlpatterns = patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', # NOQA
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
) + staticfiles_urlpatterns() + urlpatterns # NOQA
| [
"[email protected]"
]
| |
41ca6333a252f2922ade21e51fce832cc16380cd | d1ddb9e9e75d42986eba239550364cff3d8f5203 | /google-cloud-sdk/lib/surface/compute/backend_buckets/update.py | e0ebec42cade87a67f1e4d9830e24530ac96f7af | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | bopopescu/searchparty | 8ecd702af0d610a7ad3a8df9c4d448f76f46c450 | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | refs/heads/master | 2022-11-19T14:44:55.421926 | 2017-07-28T14:55:43 | 2017-07-28T14:55:43 | 282,495,798 | 0 | 0 | Apache-2.0 | 2020-07-25T17:48:53 | 2020-07-25T17:48:52 | null | UTF-8 | Python | false | false | 3,509 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for updating backend buckets."""
from apitools.base.py import encoding
from googlecloudsdk.api_lib.compute import backend_buckets_utils
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute.backend_buckets import flags as backend_buckets_flags
from googlecloudsdk.core import log
class Update(base.UpdateCommand):
"""Update a backend bucket.
*{command}* is used to update backend buckets.
"""
BACKEND_BUCKET_ARG = None
@staticmethod
def Args(parser):
backend_buckets_utils.AddUpdatableArgs(Update, parser, 'update')
backend_buckets_flags.GCS_BUCKET_ARG.AddArgument(parser)
def GetGetRequest(self, client, backend_bucket_ref):
return (
client.apitools_client.backendBuckets,
'Get',
client.messages.ComputeBackendBucketsGetRequest(
project=backend_bucket_ref.project,
backendBucket=backend_bucket_ref.Name()))
def GetSetRequest(self, client, backend_bucket_ref, replacement):
return (
client.apitools_client.backendBuckets,
'Update',
client.messages.ComputeBackendBucketsUpdateRequest(
project=backend_bucket_ref.project,
backendBucket=backend_bucket_ref.Name(),
backendBucketResource=replacement))
def Modify(self, args, existing):
replacement = encoding.CopyProtoMessage(existing)
if args.description:
replacement.description = args.description
elif args.description == '': # pylint: disable=g-explicit-bool-comparison
replacement.description = None
if args.gcs_bucket_name:
replacement.bucketName = args.gcs_bucket_name
if args.enable_cdn is not None:
replacement.enableCdn = args.enable_cdn
return replacement
def Run(self, args):
if not any([
args.description is not None,
args.gcs_bucket_name is not None,
args.enable_cdn is not None,
]):
raise exceptions.ToolException('At least one property must be modified.')
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
backend_bucket_ref = Update.BACKEND_BUCKET_ARG.ResolveAsResource(
args, holder.resources)
get_request = self.GetGetRequest(client, backend_bucket_ref)
objects = client.MakeRequests([get_request])
new_object = self.Modify(args, objects[0])
# If existing object is equal to the proposed object or if
# Modify() returns None, then there is no work to be done, so we
# print the resource and return.
if objects[0] == new_object:
log.status.Print(
'No change requested; skipping update for [{0}].'.format(
objects[0].name))
return objects
return client.MakeRequests(
[self.GetSetRequest(client, backend_bucket_ref, new_object)])
| [
"[email protected]"
]
| |
6f42842cd6d4e2a15cbf9d8d790618bcd0159ee0 | 86e904c75d0140eea3e4169d216955e1c34801b3 | /python06/otherlist/tuple.py | 08901db550a2d42cbcae9e73445464e794c265e3 | []
| no_license | reharmony/cloudpython | d62f61749e5b5862d3b81e449d5154e188a14d21 | 98e033e537d763ba86d162f58d0fe8f64249a291 | refs/heads/master | 2020-04-29T16:58:55.281917 | 2019-05-15T12:11:43 | 2019-05-15T12:11:43 | 176,281,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | # Tuple practice
data1 = (1,2,3)
print(data1)
print(data1[0])
print(data1[0:2])
print(data1[1:])
print(data1[:2])
print()
data2 = "나는 파이썬 프로그래머입니다."
print(data2[0])
print(data2[0:3])
print()
print(len(data2))
data2[0] = "너"
print(data2)
| [
"[email protected]"
]
| |
f0471cb047e073636ed9b5e06738bc66e893f364 | a4a01e251b194f6d3c6654a2947a33fec2c03e80 | /PythonWeb/Django/1809/djangoproject/djangodemo03/index/migrations/0005_book_isactive.py | f1824439227422bb93b902a60723b849ee4ecca1 | []
| no_license | demo112/1809 | 033019043e2e95ebc637b40eaf11c76bfd089626 | e22972229e5e7831dce2aae0b53ce19a6e3bb106 | refs/heads/master | 2020-04-09T07:10:49.906231 | 2019-02-27T13:08:45 | 2019-02-27T13:08:45 | 160,143,869 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-01-15 06:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('index', '0004_author_book'),
]
operations = [
migrations.AddField(
model_name='book',
name='isActive',
field=models.BooleanField(default=True),
),
]
| [
"[email protected]"
]
| |
ef1e1182e010d8e86a23b996d3675af834a646b9 | 77a37559730c9228c6ae9c530dc80b8488080c23 | /src/my_plagin/scripts/getoff.py | 776c307efdf299d807b2958f889c159a44ee4cc4 | []
| no_license | tdtce/quadrotor | f01e889ef1252ef5e28fc146521a057ead6fa62e | 64677c9c0c461f5bc7ef73b922d5cd912c2e6783 | refs/heads/master | 2020-03-10T17:06:53.133096 | 2018-05-19T16:42:21 | 2018-05-19T16:42:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
import math
from geometry_msgs.msg import Twist # message for topic /cmd_vel
from geometry_msgs.msg import Vector3
import sys
def cmd_command():
msg = Vector3(-5.0, 2.0, 1.0)
#rospy.loginfo(cmd)
return msg
def open_loop():
rospy.init_node("getoff", anonymous=True, disable_signals=True)
quad_vel = rospy.Publisher("/intermediate_state", Vector3, queue_size=1)
rate = rospy.Rate(10)
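    # Publish the fixed setpoint at 10 Hz until the node is shut down.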
while not rospy.is_shutdown():
quad_vel.publish(cmd_command())
rate.sleep()
if __name__ == "__main__":
try:
open_loop()
except rospy.ROSInterruptException:
pass
| [
"[email protected]"
]
| |
e57e48ae6f83a97426f4e9b7b4dac4ea7bc018d9 | 51da71a26628a3c6d1814e6da38f5c48f3101d9b | /uri/1174.py | e4d914cf072f646eca444af1f90e1b8b071a2f5b | []
| no_license | da-ferreira/uri-online-judge | 279156249a1b0be49a7b29e6dbce85a293a47df1 | 6ec97122df3cb453ea26e0c9f9206a2e470ba37d | refs/heads/main | 2023-03-30T11:47:05.120388 | 2021-04-02T19:45:25 | 2021-04-02T19:45:25 | 309,744,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py |
vetor = []
for i in range(100):
vetor.append(float(input()))
for i in range(len(vetor)):
if vetor[i] <= 10:
print('A[{}] = {:.1f}'.format(i, vetor[i]))
| [
"[email protected]"
]
| |
bfc11d3a1f557d7017409f177799342afc40ecc8 | 826a8aeb87cb074938b2056ada22c89b9bd9276c | /serve.py | 004adc1f26237b22a69263d9b3e9026761ba3a59 | []
| no_license | priyom/priyomdb2 | ce441d755d021c838684aba705b3fb905461ca9f | 47deecab60febd427af692149788d37cd9f770ba | refs/heads/master | 2020-07-04T01:59:29.506148 | 2014-03-03T11:51:14 | 2014-03-03T11:51:14 | 25,634,647 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | #!/usr/bin/python3
if __name__ == "__main__":
import argparse
import logging
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--app-file",
default="app.wsgi")
parser.add_argument(
"-p", "--port",
type=int,
default=8080)
parser.add_argument(
"-v", "--verbose",
action="count",
dest="verbosity",
default=0)
args = parser.parse_args()
logging.basicConfig(level=logging.ERROR, format='%(levelname)-8s %(message)s')
if args.verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif args.verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
elif args.verbosity >= 1:
logging.getLogger().setLevel(logging.WARNING)
with open(args.app_file, "r") as f:
code = compile(f.read(), args.app_file, 'exec')
locals_dict = {}
exec(code, globals(), locals_dict)
container = tornado.wsgi.WSGIContainer(locals_dict["application"])
server = tornado.httpserver.HTTPServer(container)
server.listen(args.port)
print("serving on port {}".format(args.port))
tornado.ioloop.IOLoop.instance().start()
| [
"[email protected]"
]
| |
f7e19b16b6647eb623e4fae8467f79c39b656c7b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_191/ch78_2020_04_12_20_17_38_556773.py | ba021a809518ee7a4dbb3ecf4bc6fe0b3537e17a | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | import math
v=True
j={}
while v:
n=input('nome')
if n=='sair':
v=False
else:
a=float(input('acele'))
j[n]=a
l={}
for i,u in j.items():
t=math.sqrt(200/u)
l[i]=t
v=list(l.values())
k=list(l.keys())
print(k[v.index(min(v))],min(v)) | [
"[email protected]"
]
| |
193ff6392df5dd4435f1d59cf1f51002b58aace6 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_22359.py | 9f9e50568fed7f7401e8c7ecdc5d72c124a37a6f | []
| no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | # Python AttributeError: 'module' object has no attribute 'DIST_LT2'
(dist_transform, labels) = cv2.distanceTransform(opening,cv2.cv.CV_DIST_L2,5)
| [
"[email protected]"
]
| |
482a8858558f606aab6c71a41ac79a62af32faa4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03337/s150510433.py | fda17f4d3d34cc307fe022e909e277f692423ff1 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | import sys
import math
import itertools
import bisect
from copy import copy
from collections import deque,Counter
from decimal import Decimal
def s(): return input()
def i(): return int(input())
def S(): return input().split()
def I(): return map(int,input().split())
def L(): return list(input().split())
def l(): return list(map(int,input().split()))
def lcm(a,b): return a*b//math.gcd(a,b)
sys.setrecursionlimit(10 ** 9)
mod = 10**9+7
a,b = I()
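# Output the largest of a+b, a-b and a*b.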
print(max(a+b,a-b,a*b)) | [
"[email protected]"
]
| |
8270b057878ea47f1b8c898a9f6f688170c9102c | 56d56b40dd7202e07b475b03cebd3d6fb2f58441 | /safi/app/wsgi/endpoints/subjectchallenges.py | eb5290580896381d40f8df878002573a1d23895a | []
| no_license | wizardsofindustry/quantum-safi | 7284db981d14777a46d5372fa0080b1d72d8cd80 | 6c97ae544444e90753e375ecc68f25534d97764a | refs/heads/master | 2020-03-23T03:10:50.171082 | 2018-11-23T21:59:34 | 2018-11-23T21:59:34 | 141,014,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | import ioc
import sq.interfaces.http
class SubjectChallengesEndpoint(sq.interfaces.http.Endpoint):
"""Deserializes, serializes and validates the structure of the input and output
(requests and response) to its configured URL endpoint, which exposes the
following functionality:
Retrieve the Factors that may be used for interim authentication challenges.
A :class:`SubjectChallengesEndpoint` validates the structure of the request headers,
URL parameters, query parameters and entity prior to forwarding the
request to its handler (controller).
The handler function (e.g., :meth:`~SubjectChallengeCtrl.get()`) may,
instead of a :class:`~sq.interfaces.http.Response` object, return a tuple
or a dictionary. The :class:`SubjectChallengesEndpoint` instance will interpret
these return values as follows:
- If the return value is a :class:`dict`, then the endpoint assumes that
the response code is ``200`` and the object should be included as the
response body, serialized using the default content type. It is
considered an error condition if no serializer is specified for
this content type.
- If the return value is a :class:`tuple` with a length of 2, and the
first element is an :class:`int`, it is considered to hold
``(status_code, body)``.
- Otherwise, for more granular control over the response, a
:class:`~sq.interfaces.http.Response` object may be returned.
If the response body is not a string, it will be serialized using the
best-matching content type in the client ``Accept`` header. If no
match is found, the client receives a ``406`` response.
    During serialization, a schema may be selected by :class:`SubjectChallengesEndpoint`
based on the response status code and content type, if one was defined in
the OpenAPI definition for this API endpoint.
"""
pattern = "/factors/<gsid>/challenges"
ctrl = ioc.class_property("SubjectChallengeCtrl")
# pylint: skip-file
# !!! SG MANAGED FILE -- DO NOT EDIT -- VERSION: !!!
| [
"[email protected]"
]
| |
219e13e5ab834ea0754e17eb93bbdb8a5976dc2e | 088276a2b02f74493c6303cbf17573957e1c2b3e | /HW_GRAPH/HW3/3_greedy_modularity.py | 18393893e922e021514f5e28ff1d6fe81c9bb799 | []
| no_license | naikiki87/python | 38f3ec9ed55b48df136708ad8e90e4358d536ca3 | 3c75cace24258c84b682e06033130ee627f7883c | refs/heads/master | 2023-06-05T09:49:51.931345 | 2021-06-30T04:35:41 | 2021-06-30T04:35:41 | 268,022,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,368 | py | import networkx as nx
from networkx.algorithms.community.quality import modularity
from networkx.utils.mapped_queue import MappedQueue
import config
import time
start = time.time()
def greedy_modularity_communities(G, weight=None):
# Count nodes and edges
N = len(G.nodes())
m = sum([d.get("weight", 1) for u, v, d in G.edges(data=True)])
q0 = 1.0 / (2.0 * m)
# Map node labels to contiguous integers
label_for_node = {i: v for i, v in enumerate(G.nodes())}
node_for_label = {label_for_node[i]: i for i in range(N)}
# Calculate degrees
k_for_label = G.degree(G.nodes(), weight=weight)
k = [k_for_label[label_for_node[i]] for i in range(N)]
# Initialize community and merge lists
communities = {i: frozenset([i]) for i in range(N)}
merges = []
# Initial modularity
partition = [[label_for_node[x] for x in c] for c in communities.values()]
q_cnm = modularity(G, partition)
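    # a[i]: fraction of edge endpoints attached to community i (k_i / 2m)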
a = [k[i] * q0 for i in range(N)]
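    # dq_dict[i][j]: modularity gain dQ from merging singleton communities i
    # and j; for connected pairs this is 1/m - k_i*k_j / (2*m**2)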
dq_dict = {
i: {
j: 2 * q0 - 2 * k[i] * k[j] * q0 * q0
for j in [node_for_label[u] for u in G.neighbors(label_for_node[i])]
if j != i
}
for i in range(N)
}
dq_heap = [
MappedQueue([(-dq, i, j) for j, dq in dq_dict[i].items()]) for i in range(N)
]
H = MappedQueue([dq_heap[i].h[0] for i in range(N) if len(dq_heap[i]) > 0])
# Merge communities until we can't improve modularity
while len(H) > 1:
# Find best merge
# Remove from heap of row maxes
# Ties will be broken by choosing the pair with lowest min community id
try:
dq, i, j = H.pop()
except IndexError:
break
dq = -dq
# Remove best merge from row i heap
dq_heap[i].pop()
# Push new row max onto H
if len(dq_heap[i]) > 0:
H.push(dq_heap[i].h[0])
# If this element was also at the root of row j, we need to remove the
# duplicate entry from H
if dq_heap[j].h[0] == (-dq, j, i):
H.remove((-dq, j, i))
# Remove best merge from row j heap
dq_heap[j].remove((-dq, j, i))
# Push new row max onto H
if len(dq_heap[j]) > 0:
H.push(dq_heap[j].h[0])
else:
# Duplicate wasn't in H, just remove from row j heap
dq_heap[j].remove((-dq, j, i))
# Stop when change is non-positive
if dq <= 0:
break
# Perform merge
communities[j] = frozenset(communities[i] | communities[j])
del communities[i]
merges.append((i, j, dq))
# New modularity
q_cnm += dq
# Get list of communities connected to merged communities
i_set = set(dq_dict[i].keys())
j_set = set(dq_dict[j].keys())
all_set = (i_set | j_set) - {i, j}
both_set = i_set & j_set
# Merge i into j and update dQ
for k in all_set:
# Calculate new dq value
if k in both_set:
dq_jk = dq_dict[j][k] + dq_dict[i][k]
elif k in j_set:
dq_jk = dq_dict[j][k] - 2.0 * a[i] * a[k]
else:
# k in i_set
dq_jk = dq_dict[i][k] - 2.0 * a[j] * a[k]
# Update rows j and k
for row, col in [(j, k), (k, j)]:
# Save old value for finding heap index
if k in j_set:
d_old = (-dq_dict[row][col], row, col)
else:
d_old = None
# Update dict for j,k only (i is removed below)
dq_dict[row][col] = dq_jk
# Save old max of per-row heap
if len(dq_heap[row]) > 0:
d_oldmax = dq_heap[row].h[0]
else:
d_oldmax = None
# Add/update heaps
d = (-dq_jk, row, col)
if d_old is None:
# We're creating a new nonzero element, add to heap
dq_heap[row].push(d)
else:
# Update existing element in per-row heap
dq_heap[row].update(d_old, d)
# Update heap of row maxes if necessary
if d_oldmax is None:
# No entries previously in this row, push new max
H.push(d)
else:
# We've updated an entry in this row, has the max changed?
if dq_heap[row].h[0] != d_oldmax:
H.update(d_oldmax, dq_heap[row].h[0])
# Remove row/col i from matrix
i_neighbors = dq_dict[i].keys()
for k in i_neighbors:
# Remove from dict
dq_old = dq_dict[k][i]
del dq_dict[k][i]
# Remove from heaps if we haven't already
if k != j:
# Remove both row and column
for row, col in [(k, i), (i, k)]:
# Check if replaced dq is row max
d_old = (-dq_old, row, col)
if dq_heap[row].h[0] == d_old:
# Update per-row heap and heap of row maxes
dq_heap[row].remove(d_old)
H.remove(d_old)
# Update row max
if len(dq_heap[row]) > 0:
H.push(dq_heap[row].h[0])
else:
# Only update per-row heap
dq_heap[row].remove(d_old)
del dq_dict[i]
# Mark row i as deleted, but keep placeholder
dq_heap[i] = MappedQueue()
# Merge i into j and update a
a[j] += a[i]
a[i] = 0
communities = [
frozenset([label_for_node[i] for i in c]) for c in communities.values()
]
return sorted(communities, key=len, reverse=True)
def naive_greedy_modularity_communities(G):
# First create one community for each node
communities = list([frozenset([u]) for u in G.nodes()])
# Track merges
merges = []
# Greedily merge communities until no improvement is possible
old_modularity = None
new_modularity = modularity(G, communities)
while old_modularity is None or new_modularity > old_modularity:
# Save modularity for comparison
old_modularity = new_modularity
# Find best pair to merge
trial_communities = list(communities)
to_merge = None
for i, u in enumerate(communities):
for j, v in enumerate(communities):
# Skip i=j and empty communities
if j <= i or len(u) == 0 or len(v) == 0:
continue
# Merge communities u and v
trial_communities[j] = u | v
trial_communities[i] = frozenset([])
trial_modularity = modularity(G, trial_communities)
if trial_modularity >= new_modularity:
# Check if strictly better or tie
if trial_modularity > new_modularity:
# Found new best, save modularity and group indexes
new_modularity = trial_modularity
to_merge = (i, j, new_modularity - old_modularity)
elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]):
# Break ties by choosing pair with lowest min id
new_modularity = trial_modularity
to_merge = (i, j, new_modularity - old_modularity)
# Un-merge
trial_communities[i] = u
trial_communities[j] = v
if to_merge is not None:
# If the best merge improves modularity, use it
merges.append(to_merge)
i, j, dq = to_merge
u, v = communities[i], communities[j]
communities[j] = u | v
communities[i] = frozenset([])
# Remove empty communities and sort
communities = [c for c in communities if len(c) > 0]
yield from sorted(communities, key=lambda x: len(x), reverse=True)
## generate Graph G
G = config.G
c = list(greedy_modularity_communities(G))
for i in range(len(c)):
print(i, ' : ', c[i])
print("time : ", time.time() - start) | [
"[email protected]"
]
| |
6752105ec117800345237119406cf6949287fc2a | c2008671b9902adfd5444607ead35ebe9f33ebda | /pico/yay.py | 6db68296f63d178b98d45e8d08bd67190a30e769 | []
| no_license | joelburton/secret-pico | 08fb73e8354dc810656bdfe1fb2c943abfba1fc5 | 0705489798c6ded9d54f785e7c86b3421f6ba87a | refs/heads/main | 2023-03-16T06:50:49.366872 | 2021-03-05T17:08:17 | 2021-03-05T17:08:17 | 343,303,331 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | """Yay!"""
from common import rainbow, oled_page, joel_msg, mark, url
def start():
mark("yay")
oled_page("Yay!")
rainbow()
print("""
You did it! It was fun for me to lead you through this.
And here's a personal message from Joel for you:
--
{joel_msg}
--
You can learn more, as well as get the table of contents for everything, along
with permission to dig into that mysterious envelope, at:
{url}
<3 Pico, Joel, and Fluffy
""".format(joel_msg=joel_msg, url=url("yay"))) | [
"[email protected]"
]
| |
8f8ac91f2bc59a3ba97c42c10fc4c389efdacaca | bec623f2fab5bafc95eb5bd95e7527e06f6eeafe | /django-shared/private_messages/utils.py | bc292ce9750e2ee0c41d087232e5725d000f47c4 | []
| no_license | riyanhax/a-demo | d714735a8b59eceeb9cd59f788a008bfb4861790 | 302324dccc135f55d92fb705c58314c55fed22aa | refs/heads/master | 2022-01-21T07:24:56.468973 | 2017-10-12T13:48:55 | 2017-10-12T13:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,069 | py | from django.utils.text import wrap
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
from django.template import Context, loader
from django.template.loader import render_to_string
from django.conf import settings
# favour django-mailer but fall back to django.core.mail
from notification.models import get_from_email
if "mailer" in settings.INSTALLED_APPS:
from mailer import send_mail
else:
from django.core.mail import send_mail
def format_quote(text):
"""
Wraps text at 55 chars and prepends each
line with `> `.
Used for quoting messages in replies.
"""
lines = wrap(text, 55).split('\n')
for i, line in enumerate(lines):
lines[i] = "> %s" % line
return '\n'.join(lines)
def new_message_email(sender, instance, signal,
subject_prefix=_(u'New Message: %(subject)s'),
template_name="messages/new_message.html",
default_protocol=None,
*args, **kwargs):
"""
This function sends an email and is called via Django's signal framework.
Optional arguments:
``template_name``: the template to use
``subject_prefix``: prefix for the email subject.
``default_protocol``: default protocol in site URL passed to template
"""
if default_protocol is None:
default_protocol = getattr(settings, 'DEFAULT_HTTP_PROTOCOL', 'http')
if 'created' in kwargs and kwargs['created']:
try:
current_domain = Site.objects.get_current().domain
subject = subject_prefix % {'subject': instance.subject}
message = render_to_string(template_name, {
'site_url': '%s://%s' % (default_protocol, current_domain),
'message': instance,
})
if instance.recipient.email != "":
send_mail(subject, message, get_from_email(instance.recipient.profile.registered_from),
[instance.recipient.email,])
except Exception, e:
#print e
pass #fail silently
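# Typically wired up via Django's post_save signal, e.g. (sketch; the Message
# model name is an assumption):
#   signals.post_save.connect(new_message_email, sender=Message)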
| [
"ibalyko@ubuntu-server-16-04"
]
| ibalyko@ubuntu-server-16-04 |
17ae827cd8a089e7fe6471407c7d6f3424ac2078 | 41586d36dd07c06860b9808c760e2b0212ed846b | /hardware/library/aufs-headers/actions.py | 98601510ff4ca8e2ec68a21d6b5148f725093a73 | []
| no_license | SulinOS/SulinRepository | 4d5551861f57bc1f4bec6879dfe28ce68c7c125d | 9686811a1e06080f63199233561a922fe1f78d67 | refs/heads/master | 2021-06-15T21:34:25.039979 | 2021-06-05T13:43:34 | 2021-06-05T13:43:34 | 207,672,864 | 6 | 3 | null | 2019-12-06T08:11:22 | 2019-09-10T22:16:17 | Python | UTF-8 | Python | false | false | 450 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Suleyman POYRAZ (Zaryob)
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from inary.actionsapi import autotools
from inary.actionsapi import inarytools
from inary.actionsapi import shelltools
from inary.actionsapi import get
def install():
inarytools.insinto("/usr/include/linux", "include/linux/aufs_type.h")
| [
"[email protected]"
]
|