| column | dtype |
|---|---|
| blob_id | string (length 40–40) |
| directory_id | string (length 40–40) |
| path | string (length 3–616) |
| content_id | string (length 40–40) |
| detected_licenses | sequence (length 0–112) |
| license_type | string (2 classes) |
| repo_name | string (length 5–115) |
| snapshot_id | string (length 40–40) |
| revision_id | string (length 40–40) |
| branch_name | string (777 classes) |
| visit_date | timestamp[us] (2015-08-06 10:31:46 – 2023-09-06 10:44:38) |
| revision_date | timestamp[us] (1970-01-01 02:38:32 – 2037-05-03 13:00:00) |
| committer_date | timestamp[us] (1970-01-01 02:38:32 – 2023-09-06 01:08:06) |
| github_id | int64 (4.92k – 681M, nullable) |
| star_events_count | int64 (0 – 209k) |
| fork_events_count | int64 (0 – 110k) |
| gha_license_id | string (22 classes) |
| gha_event_created_at | timestamp[us] (2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable) |
| gha_created_at | timestamp[us] (2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable) |
| gha_language | string (149 classes) |
| src_encoding | string (26 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (3 – 10.2M) |
| extension | string (188 classes) |
| content | string (length 3 – 10.2M) |
| authors | sequence (length 1–1) |
| author_id | string (length 1–132) |
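A minimal sketch of how rows with this schema could be loaded for inspection (the `datasets` library call and the dataset path are assumptions, not stated in this dump):

```python
from datasets import load_dataset

# Stream the split so the large "content" column is not materialized up front.
ds = load_dataset("org/code-files", split="train", streaming=True)  # hypothetical path

for row in ds.take(2):
    # Each row pairs repo metadata with the raw file text in "content".
    print(row["repo_name"], row["path"], row["license_type"], len(row["content"]))
```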
2ac05a8ce20667dc6ca62cd5a23ac3d6825a9e76 | c1fd96e92379b7a5eda183a0e9522d3bc8a1cd3d | /kaa/filetype/python/pythonmode.py | cc86751f85387bc8ba2e2653a1a6c95f79cbeac4 | [
"MIT"
] | permissive | okazu-dm/kaa | 3b2c59652f2468ed4a78d764667f7f896e759a33 | 3326100c64ca6f638db6361ca72bc56a4aef7b8f | refs/heads/master | 2020-05-23T10:09:05.153521 | 2013-10-22T13:05:45 | 2013-10-22T13:05:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,653 | py | import keyword, copy
from kaa.filetype.default import defaultmode, theme
from kaa.highlight import Tokenizer, Keywords, Span
from kaa.theme import Theme, Style
from gappedbuf import re as gre
from kaa.command import Commands, command, norec, norerun
from kaa.keyboard import *
PythonThemes = {
'default':
Theme([
Style('python-bytes', 'blue', 'default'),
])
}
PYTHONMENU = [
['&Comment', None, 'code.region.linecomment'],
['&Uncomment', None, 'code.region.unlinecomment'],
]
python_code_keys = {
((alt, 'm'), ('c')): 'menu.python.code',
}
class PythonMode(defaultmode.DefaultMode):
MODENAME = 'Python'
re_begin_block = gre.compile(r"[^#]*:\s*(#.*)?$")
LINE_COMMENT = '#'
def init_keybind(self):
super().init_keybind()
self.register_keys(self.keybind, [python_code_keys])
def init_menu(self):
super().init_menu()
self.menu['CODE'] = copy.deepcopy(PYTHONMENU)
def init_themes(self):
super().init_themes()
self.themes.append(PythonThemes)
def init_tokenizers(self):
self.tokenizers = [Tokenizer([
Keywords('python-statement', 'keyword', keyword.kwlist),
Span('python-comment', 'comment', r'\#', '$', escape='\\'),
Span('python-string31', 'string', 'r?"""', '"""', escape='\\'),
Span('python-string32', 'string', "r?'''", "'''", escape='\\'),
Span('python-string11', 'string', 'r?"', '"', escape='\\'),
Span('python-string12', 'string', "r?'", "'", escape='\\'),
Span('python-bytes31', 'python-bytes', '(br?|r?b)"""', '"""', escape='\\'),
Span('python-bytes32', 'python-bytes', "(br?|r?b)'''", "'''", escape='\\'),
Span('python-bytes11', 'python-bytes', '(br?|r?b)"', '"', escape='\\'),
Span('python-bytes12', 'python-bytes', "(br?|r?b)'", "'", escape='\\'),
])]
RE_BEGIN_NEWBLOCK = gre.compile(r"[^#]*\:\s*(#.*)?$", gre.M)
def on_auto_indent(self, wnd):
pos = wnd.cursor.pos
tol = self.document.gettol(pos)
m = self.RE_BEGIN_NEWBLOCK.match(self.document.buf, tol, pos)
if not m:
super().on_auto_indent(wnd)
else:
f, t = self.get_indent_range(pos)
t = min(t, pos)
cols = self.calc_cols(f, t)
indent = self.build_indent_str(cols+self.indent_width)
indent = '\n'+indent
self.edit_commands.insert_string(wnd, pos, indent,
update_cursor=False)
wnd.cursor.setpos(pos+len(indent))
wnd.cursor.savecol()
| [
"[email protected]"
] | |
a2f142591850d088c6f62a70894d53b1e1240482 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-2831.py | 4512483516b5634c39d0789325e695f5c4f83524 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,350 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"[email protected]"
] | |
8a9f2f260ce87cc91474a5b8be78abd0ed4c103b | ab50920ebb8d9679230c13b8f91998e47e9f4f82 | /samples/server/petstore/python-blueplanet/app/openapi_server/models/order.py | e222f5ee0069dbb383352d37536a24db5e77ec3c | [
"Apache-2.0"
] | permissive | oxidecomputer/openapi-generator | f50ee17579b02a35d30894f16a4d98dc81f8b06b | f8770d7c3388d9f1a5069a7f37378aeadcb81e16 | refs/heads/master | 2023-08-25T09:24:27.666296 | 2021-02-25T15:36:35 | 2021-02-25T15:36:35 | 334,329,847 | 6 | 0 | Apache-2.0 | 2022-10-14T05:05:39 | 2021-01-30T04:46:13 | Java | UTF-8 | Python | false | false | 4,935 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from app.openapi_server.models.base_model_ import Model
from openapi_server import util
class Order(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id: int=None, pet_id: int=None, quantity: int=None, ship_date: datetime=None, status: str=None, complete: bool=False): # noqa: E501
"""Order - a model defined in Swagger
:param id: The id of this Order. # noqa: E501
:type id: int
:param pet_id: The pet_id of this Order. # noqa: E501
:type pet_id: int
:param quantity: The quantity of this Order. # noqa: E501
:type quantity: int
:param ship_date: The ship_date of this Order. # noqa: E501
:type ship_date: datetime
:param status: The status of this Order. # noqa: E501
:type status: str
:param complete: The complete of this Order. # noqa: E501
:type complete: bool
"""
self.swagger_types = {
'id': int,
'pet_id': int,
'quantity': int,
'ship_date': datetime,
'status': str,
'complete': bool
}
self.attribute_map = {
'id': 'id',
'pet_id': 'petId',
'quantity': 'quantity',
'ship_date': 'shipDate',
'status': 'status',
'complete': 'complete'
}
self._id = id
self._pet_id = pet_id
self._quantity = quantity
self._ship_date = ship_date
self._status = status
self._complete = complete
@classmethod
def from_dict(cls, dikt) -> 'Order':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Order of this Order. # noqa: E501
:rtype: Order
"""
return util.deserialize_model(dikt, cls)
@property
def id(self) -> int:
"""Gets the id of this Order.
:return: The id of this Order.
:rtype: int
"""
return self._id
@id.setter
def id(self, id: int):
"""Sets the id of this Order.
:param id: The id of this Order.
:type id: int
"""
self._id = id
@property
def pet_id(self) -> int:
"""Gets the pet_id of this Order.
:return: The pet_id of this Order.
:rtype: int
"""
return self._pet_id
@pet_id.setter
def pet_id(self, pet_id: int):
"""Sets the pet_id of this Order.
:param pet_id: The pet_id of this Order.
:type pet_id: int
"""
self._pet_id = pet_id
@property
def quantity(self) -> int:
"""Gets the quantity of this Order.
:return: The quantity of this Order.
:rtype: int
"""
return self._quantity
@quantity.setter
def quantity(self, quantity: int):
"""Sets the quantity of this Order.
:param quantity: The quantity of this Order.
:type quantity: int
"""
self._quantity = quantity
@property
def ship_date(self) -> datetime:
"""Gets the ship_date of this Order.
:return: The ship_date of this Order.
:rtype: datetime
"""
return self._ship_date
@ship_date.setter
def ship_date(self, ship_date: datetime):
"""Sets the ship_date of this Order.
:param ship_date: The ship_date of this Order.
:type ship_date: datetime
"""
self._ship_date = ship_date
@property
def status(self) -> str:
"""Gets the status of this Order.
Order Status # noqa: E501
:return: The status of this Order.
:rtype: str
"""
return self._status
@status.setter
def status(self, status: str):
"""Sets the status of this Order.
Order Status # noqa: E501
:param status: The status of this Order.
:type status: str
"""
allowed_values = ["placed", "approved", "delivered"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}"
.format(status, allowed_values)
)
self._status = status
@property
def complete(self) -> bool:
"""Gets the complete of this Order.
:return: The complete of this Order.
:rtype: bool
"""
return self._complete
@complete.setter
def complete(self, complete: bool):
"""Sets the complete of this Order.
:param complete: The complete of this Order.
:type complete: bool
"""
self._complete = complete
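# Brief usage sketch (added for illustration; the field values are assumptions):
#
#     order = Order(id=1, quantity=2)
#     order.status = 'placed'    # accepted: one of 'placed', 'approved', 'delivered'
#     order.status = 'shipped'   # raises ValueError: not in allowed_values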
| [
"[email protected]"
] | |
db53a329ef4705bdbd512dcc044c8a8a69cba074 | f6003f9f25dcc182e9fbce7a96d0dabb9341744c | /Exercícios/Lista 6 - Seção 13 - Leitura e escrita em arquivo/Questão 20 - Enunciado no código .py | 6605ae8200a5865776d234e52d03e8f1a7195514 | [] | no_license | henriquecl/Aprendendo_Python | 60a87959714f82894e996c06b0a1b767838c38fc | 672029855431795defafd7e20e8da319bf34e502 | refs/heads/master | 2023-06-08T10:48:13.667893 | 2021-06-22T00:55:14 | 2021-06-22T00:55:14 | 261,029,613 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | """
Question 20 - Create a program that receives the number of students as input. Read into one list the students'
names, while a second one holds their final grades. Create a file that stores, on each line, a student's name and
final grade. The name must have at most 40 characters; if it has fewer than 40, pad it with blank spaces.
"""
alunos = []
notas = []
texto = ''
while True:
    nome = input('Enter the student name, type "sair" to stop\n')
if nome == 'sair':
break
    nota = input("Enter the student's grade: ")
alunos.append(nome)
notas.append(nota)
for i2 in range(len(alunos)):
if len(alunos[i2]) < 40:
alunos[i2] = alunos[i2] + ' '*(40 - len(alunos[i2]))
for i3 in range(len(alunos)):
    texto = texto + alunos[i3] + 'Grade:' + notas[i3] + '\n'
with open('questao20.txt', 'w', encoding='UTF-8') as arquivo:
arquivo.write(texto)
| [
"[email protected]"
] | |
b40616b31df98d43d1b6a084948c93946dd914ac | c2dd6b06b56a4db596e196b77a072caeb53042ce | /python_modules/dagster/dagster_tests/cli_tests/test_workspace_config_schema.py | 82bc9e9410785d896141188fad8721e511b626fe | [
"Apache-2.0"
] | permissive | shasha79/dagster | ed8beac9a134dc22e3c23b0db3bb60884fe2e1de | b138d2454eb4a7f7e19a9d1763fa6c06bbb239e6 | refs/heads/master | 2022-11-11T16:59:11.144130 | 2020-07-03T16:44:19 | 2020-07-03T16:44:19 | 276,888,663 | 0 | 0 | Apache-2.0 | 2020-07-03T11:55:59 | 2020-07-03T11:55:58 | null | UTF-8 | Python | false | false | 3,371 | py | import yaml
from dagster.cli.workspace.config_schema import validate_workspace_config
def _validate_yaml_contents(yaml_contents):
return validate_workspace_config(yaml.safe_load(yaml_contents))
def test_repository_yaml_parsing():
valid_yaml_contents = '''
repository:
module: some_module
fn: a_repo
'''
assert _validate_yaml_contents(valid_yaml_contents).success
invalid_yaml_contents = '''
repository:
module: some_module
wrong: a_repo
'''
assert not _validate_yaml_contents(invalid_yaml_contents).success
def test_python_file():
terse_workspace_yaml = '''
load_from:
- python_file: a_file.py
'''
assert _validate_yaml_contents(terse_workspace_yaml).success
nested_workspace_yaml = '''
load_from:
- python_file:
relative_path: a_file.py
'''
assert _validate_yaml_contents(nested_workspace_yaml).success
nested_workspace_yaml_with_def_name = '''
load_from:
- python_file:
relative_path: a_file.py
attribute: repo_symbol
'''
assert _validate_yaml_contents(nested_workspace_yaml_with_def_name).success
nested_workspace_yaml_with_def_name_and_location = '''
load_from:
- python_file:
relative_path: a_file.py
attribute: repo_symbol
location_name: some_location
'''
assert _validate_yaml_contents(nested_workspace_yaml_with_def_name_and_location).success
def test_python_module():
terse_workspace_yaml = '''
load_from:
- python_module: a_module
'''
assert _validate_yaml_contents(terse_workspace_yaml).success
nested_workspace_yaml = '''
load_from:
- python_module:
module_name: a_module
'''
assert _validate_yaml_contents(nested_workspace_yaml).success
nested_workspace_yaml_with_def_name = '''
load_from:
- python_module:
module_name: a_module
attribute: repo_symbol
'''
assert _validate_yaml_contents(nested_workspace_yaml_with_def_name).success
nested_workspace_yaml_with_def_name_and_location = '''
load_from:
- python_module:
module_name: a_module
attribute: repo_symbol
location_name: some_location
'''
assert _validate_yaml_contents(nested_workspace_yaml_with_def_name_and_location).success
def test_cannot_do_both():
both_yaml = '''
load_from:
- python_module: a_module
python_file: a_file.py
'''
assert not _validate_yaml_contents(both_yaml).success
def test_load_both():
both_yaml = '''
load_from:
- python_module: a_module
- python_file: a_file.py
'''
assert _validate_yaml_contents(both_yaml).success
def test_load_python_environment_with_file():
python_environment_yaml_with_file = '''
load_from:
- python_environment:
executable_path: /path/to/venv/bin/python
target:
python_file: file_valid_in_that_env.py
'''
validation_result = _validate_yaml_contents(python_environment_yaml_with_file)
assert validation_result.success
def test_load_python_environment_with_module():
python_environment_yaml_with_module = '''
load_from:
- python_environment:
executable_path: /path/to/venv/bin/python
target:
python_module: module_valid_in_that_env.py
'''
validation_result = _validate_yaml_contents(python_environment_yaml_with_module)
assert validation_result.success
| [
"[email protected]"
] | |
689df46f7b895f0325dd2aa9407ac9eaec2f709b | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/다양한 출력포맷_20200705141045.py | b9bbcf137e8d3e41db97353267569be16ddbb422 | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # Leave the blanks as spaces, right-align, and reserve 10 characters in total
print("{0: >10}".format(500))
# Show a + sign for positive numbers and a - sign for negative numbers
print("{0: >+10}".format(500))
print("{0: >+10}".format(-500))
# Left-align and fill the blanks with _
print("{0:_<+10}".format(500))
# Put a comma every 3 digits
print("{0:,}".format(100000000000))
# Put a comma every 3 digits and attach a +/- sign
print("{0:+,}".format(100000000000))
print("{0:+,}".format(-100000000000))
# Put a comma every 3 digits, attach the sign, and reserve the width
# Having lots of money makes you happy, so fill the blanks with ^
print("{0:^<+30,}".format(100000000000))
| [
"[email protected]"
] | |
0bed65bcd8fe1c0b2dba68f4c6e191e1c0dd7d79 | 01d38140093f41e6afbd67445dbca78d9220d7fd | /linear_regression_learner.py | 05b5f705f8a1be190a199b6dcdb5c38254cfec16 | [] | no_license | boyko11/LinRegTorch | e174eb1452646b52f9c7c9161ee30f45630ef8b6 | a8f05fe67b4be3ee9e885194b5b750371b1cb5e1 | refs/heads/master | 2022-07-04T04:36:04.136287 | 2020-05-10T21:15:14 | 2020-05-10T21:15:14 | 262,623,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | import numpy as np
import torch
class LinearRegressionLearner():
def __init__(self, theta_size, learning_rate=0.00001):
self.theta_tensor = torch.randn(theta_size, 1, requires_grad=True)
self.loss_function = torch.nn.L1Loss()
self.learning_rate = learning_rate
self.loss_history = []
def predict(self, features_tensor):
return torch.mm(features_tensor, self.theta_tensor)
def calculate_loss(self, predictions_tensor, labels_tensor):
return self.loss_function(predictions_tensor, labels_tensor)
def train(self, features_tensor, labels_tensor, epochs=1000):
for i in range(epochs):
predictions = self.predict(features_tensor)
loss = self.calculate_loss(predictions, labels_tensor)
self.loss_history.append(loss)
loss.backward()
with torch.no_grad():
self.theta_tensor -= self.theta_tensor.grad * self.learning_rate
self.theta_tensor.grad.zero_()
| [
"[email protected]"
] | |
f0db017322cd95c267fcbaddaf60360bec86248e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/405/usersdata/284/75780/submittedfiles/exe11.py | 487a2312007a81920fb10875f75153e3da8fe793 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # -*- coding: utf-8 -*-
a = int(input('enter the number: '))
# reject anything that is not an 8-digit number
if a > 99999999 or a < 10000000:
    print("don't know")
else:
    d1 = a//(10**7)
    d2 = (a-(d1*(10**7)))//(10**6)
    d3 = (a-(d1*(10**7))-(d2*(10**6)))//(10**5)
    d4 = (a-(d1*(10**7))-(d2*(10**6))-(d3*(10**5)))//(10**4)
    d5 = (a-(d1*(10**7))-(d2*(10**6))-(d3*(10**5))-(d4*(10**4)))//(10**3)
    d6 = (a-(d1*(10**7))-(d2*(10**6))-(d3*(10**5))-(d4*(10**4))-(d5*(10**3)))//(10**2)
    d7 = (a-(d1*(10**7))-(d2*(10**6))-(d3*(10**5))-(d4*(10**4))-(d5*(10**3))-(d6*(10**2)))//(10**1)
    d8 = (a-(d1*(10**7))-(d2*(10**6))-(d3*(10**5))-(d4*(10**4))-(d5*(10**3))-(d6*(10**2))-(d7*(10**1)))//1
    print(d1)
    print(d2)
    print(d3)
    print(d4)
    print(d5)
    print(d6)
    print(d7)
    print(d8)
| [
"[email protected]"
] | |
16a211c171b197fac8281ce6fa826d216b8534b8 | 3af6960c805e9903eb27c09d8bc7ebc77f5928fe | /problems/0173_Binary_Search_Tree_Iterator/solution.py | 7f652418a02ce3af736afa21f1de7714c75c320d | [] | no_license | romain-li/leetcode | b3c8d9d4473eebd039af16ad2d4d99abc2768bdd | 5e82b69bd041c2c168d75cb9179a8cbd7bf0173e | refs/heads/master | 2020-06-04T20:05:03.592558 | 2015-06-08T18:05:03 | 2015-06-08T18:05:03 | 27,431,664 | 2 | 1 | null | 2015-06-08T18:05:04 | 2014-12-02T12:31:58 | Python | UTF-8 | Python | false | false | 567 | py | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class BSTIterator:
    # @param root, a binary search tree's root node
    def __init__(self, root):
        self.stack = []
        self._push_left(root)  # stack the left spine; the smallest node ends up on top
    def _push_left(self, node):
        while node:
            self.stack.append(node)
            node = node.left
    # @return a boolean, whether we have a next smallest number
    def hasNext(self):
        return len(self.stack) > 0
    # @return an integer, the next smallest number
    def next(self):
        node = self.stack.pop()
        self._push_left(node.right)  # the successor is in the right subtree's left spine
        return node.val
# Your BSTIterator will be called like this:
# i, v = BSTIterator(root), []
# while i.hasNext(): v.append(i.next()) | [
"[email protected]"
] | |
0a58cf2911a659203311090431b380b9748431c3 | 90574ae9759f98f8687cd3e842a6b4301578baea | /batbelt/utils.py | fddd41a996904a6a1cf6a42f8e2d5dc3ff6eefd7 | [
"Zlib"
] | permissive | sametmax/Bat-belt | 88a52a1d89e5dac8bfd69e26a0f106bc0520fdea | 372117e3876328f84804a296ee9636dee1e82206 | refs/heads/master | 2021-05-16T02:29:13.264688 | 2015-09-18T16:59:26 | 2015-09-18T16:59:26 | 6,219,068 | 19 | 3 | null | 2017-05-18T06:03:15 | 2012-10-14T22:55:42 | Python | UTF-8 | Python | false | false | 2,792 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
"""
The infamous utils.py module filled with functions you don't where else
to put.
"""
import sys
import os
from datetime import datetime
CLASSIC_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
CLASSIC_DATETIME_PATTERN = r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}'
def to_timestamp(dt):
"""
Return a timestamp for the given datetime object.
Example:
>>> import datetime
>>> to_timestamp(datetime.datetime(2000, 1, 1, 1, 1, 1, 1))
946688461.000001
"""
return (dt - datetime(1970, 1, 1)).total_seconds()
class ImportableItems(list):
def __init__(self, *args, **kwargs):
super(ImportableItems, self).__init__(*args, **kwargs)
self.non_importable_items = {}
def append(self, item_name):
self.non_importable_items.pop(item_name, None)
super(ImportableItems, self).append(item_name)
def import_list(*args):
"""
Allow to create easily a __all__ listing for a module.
Returns a value for __all__ and a decorator to add anything
to it easily.
"""
importable_items = ImportableItems()
importable_items.non_importable_items.update(sys._getframe(1).f_globals)
for item in args:
importable_items.append(item)
def importable(func, name=None):
if name is None:
try:
name = func.__name__
except AttributeError:
raise ValueError('You must provide a name for '
'this item: %s' % repr(func))
importable_items.append(name)
return func
return importable_items, importable
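# Hedged usage sketch for import_list (the module below is illustrative,
# not from batbelt itself):
#
#     __all__, importable = import_list('SOME_CONSTANT')
#
#     @importable
#     def public_helper():
#         pass
#
#     # __all__ now lists 'SOME_CONSTANT' and 'public_helper'.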
def add_to_pythonpath(path, starting_point='.', insertion_index=None):
"""
Add the directory to the sys.path.
You can pass an absolute or a relative path to it.
If you choose to use a relative path, it will be relative to
`starting_point` by default, which is set to '.'.
You may want to set it to something like __file__ (the basename will
be stripped, and the current file's parent directory will be used
as a starting point, which is probably what you expect in the
first place).
:example:
>>> add_to_pythonpath('../..', __file__)
"""
if not os.path.isabs(path):
if os.path.isfile(starting_point):
starting_point = os.path.dirname(starting_point)
path = os.path.join(starting_point, path)
path = os.path.realpath(os.path.expandvars(os.path.expanduser(path)))
if path not in sys.path:
if insertion_index is None:
sys.path.append(path)
else:
sys.path.insert(insertion_index, path)
| [
"[email protected]"
] | |
df404f3b8ffb691f89f96c0f76927a6c6e7be13b | c85238daac6141efea2895485b9b18fc08cf4cf2 | /PathTracking/lqr/unicycle_model.py | 6e55e37b422e9be8ec8346a25d0b4cf7a5720654 | [
"MIT"
] | permissive | RyoheiTakahashi/PythonRobotics | 6543529ff677a68792cfc0a336aa9992d163ec96 | bee232e84dedcfe0b1dc494ce86c67130b42a8c8 | refs/heads/master | 2021-01-02T09:00:24.091192 | 2017-07-31T06:50:07 | 2017-07-31T06:50:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
author Atsushi Sakai
"""
import math
dt = 0.1 # [s]
L = 2.9 # [m]
class State:
def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0):
self.x = x
self.y = y
self.yaw = yaw
self.v = v
def update(state, a, delta):
state.x = state.x + state.v * math.cos(state.yaw) * dt
state.y = state.y + state.v * math.sin(state.yaw) * dt
state.yaw = state.yaw + state.v / L * math.tan(delta) * dt
state.v = state.v + a * dt
return state
if __name__ == '__main__':
print("start unicycle simulation")
import matplotlib.pyplot as plt
T = 100
a = [1.0] * T
delta = [math.radians(1.0)] * T
# print(delta)
# print(a, delta)
state = State()
x = []
y = []
yaw = []
v = []
for (ai, di) in zip(a, delta):
state = update(state, ai, di)
x.append(state.x)
y.append(state.y)
yaw.append(state.yaw)
v.append(state.v)
flg, ax = plt.subplots(1)
plt.plot(x, y)
plt.axis("equal")
plt.grid(True)
flg, ax = plt.subplots(1)
plt.plot(v)
plt.grid(True)
plt.show()
| [
"[email protected]"
] | |
75f0cc9ced2b0b3b7f516afcd8b9a89bfbef64ea | d4a8bedc9c1c9897b3e07a3c9067f7c89c63be5f | /python_builtins/sort_by_size.py | cf8b0e967cb9cab9f931467b0f72881a53f9bdbd | [] | no_license | udoyen/python_refresher | 816fe6264dbc21ce7a3697eb0f96aa8f9f402032 | 8e542d8e6221b041fc945f5770f25d5da03d8f6a | refs/heads/master | 2023-05-25T19:51:55.938397 | 2019-11-19T17:22:30 | 2019-11-19T17:22:30 | 222,283,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from .sort_on_length import lensort
n = [1, 2, 3, 5, 6, 4, 10, 11, 12, 13, 14]
s = ['python', 'perl', 'java', 'c', 'haskell', 'ruby']
def sort_by_size(n):
    # If the list contains any strings, delegate to lensort (sorts by length).
    if any(isinstance(i, str) for i in n):
        lensort(n)
    else:
        # Plain bubble sort for numbers, in place.
        for i in range(len(n)):
            for j in range(len(n) - 1 - i):
                if n[j] > n[j + 1]:
                    n[j], n[j + 1] = n[j + 1], n[j]
        print(n)
if __name__ == '__main__' and __package__ is None:
    from os import sys, path
    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
    sort_by_size(n) | [
"[email protected]"
] | |
9538737de1675e3a3124e5e10f51eac9c9db50e4 | 0a34747e5a9b94fbb12baebfa2452d15abebcc26 | /191212/venv/Scripts/pip-script.py | 9745a77b688257eee314c2e5d033bb4e9d461c36 | [] | no_license | GyuReeKim/PycharmProjects_winter-vacation | 51772407f19a6f2a9e308be884d80b787aa006d0 | 1cc9e66ea5819a1b2ea5fdbda0257a2314e3ec95 | refs/heads/master | 2020-10-01T08:56:38.413469 | 2019-12-22T23:58:59 | 2019-12-22T23:58:59 | 226,757,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #!C:\Users\student\PycharmProjects\191212\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
05297da59c8dfa9824b8284de359353b5ce477a1 | d926c87644a2861d3c4edf36c1e1692acec38fb5 | /src/api/forms.py | fca77e5fde7cb8423ea759b5d0b4c6fcd89f62d6 | [] | no_license | mainhith97/AG-BE | 1058aee4bce0888da07a30fa44802115851b5e6e | 908b6e542a3219a233417f8f061241985265c777 | refs/heads/master | 2023-01-09T02:54:12.888094 | 2019-12-12T04:09:03 | 2019-12-12T04:09:03 | 213,387,121 | 0 | 0 | null | 2022-12-27T15:36:51 | 2019-10-07T13:13:13 | Python | UTF-8 | Python | false | false | 1,265 | py | #!/usr/bin/env python
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import ReadOnlyPasswordHashField
User = get_user_model()
class UserAdminChangeForm(forms.ModelForm):
password = ReadOnlyPasswordHashField()
class Meta:
model = User
fields = ('username', 'password', 'active', 'admin')
def clean_password(self):
return self.initial["password"]
class UserAdminCreationForm(forms.ModelForm):
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('username', 'admin', 'staff')
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
user = super(UserAdminCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
| [
"[email protected]"
] | |
3549f90d7b101b2118ff612c4fe503a21f99d31b | 238702bd275d9a8171c6e479c6ad1bbaa1ee3158 | /app/blog/models.py | 4dec50a6791965d9f31409b8992f01e302a8a1d8 | [] | no_license | maro99/djangogirls_pt_for_test | aebf03672741786ea74686efac21303961bf3987 | 4400aed75640273d8adbe7b1592607ccb23b42c2 | refs/heads/master | 2020-03-19T17:46:27.767327 | 2018-06-10T15:02:17 | 2018-06-10T15:02:17 | 136,775,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | from django.conf import settings
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField(blank=True)
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date=timezone.now()
self.save()
def __str__(self):
return self.title
| [
"[email protected]"
] | |
78d0879d94181fd01651e04f648a10bce0e425af | 7e01c039f2427d434a4ef44a1b9dc0ea21db65ba | /venv/lib/python3.8/site-packages/django/db/models/indexes.py | 972bc52b46db22cea0eb15a3e5064bdecbd5302e | [] | no_license | dmfranz/Spike-exercise | 09f8051163d2a63dfbc3f75da2de0a1bbbbb122d | 83971e95a72d504f629778fece2cdfb953e5d08b | refs/heads/main | 2023-08-23T04:18:43.934471 | 2021-10-11T04:54:28 | 2021-10-11T04:54:28 | 413,568,735 | 0 | 1 | null | 2021-10-11T04:36:22 | 2021-10-04T20:10:01 | Python | UTF-8 | Python | false | false | 11,548 | py | from django.db.backends.utils import names_digest, split_identifier
from django.db.models.expressions import Col, ExpressionList, F, Func, OrderBy
from django.db.models.functions import Collate
from django.db.models.query_utils import Q
from django.db.models.sql import Query
from django.utils.functional import partition
__all__ = ['Index']
class Index:
suffix = 'idx'
# The max length of the name of the index (restricted to 30 for
# cross-database compatibility with Oracle)
max_name_length = 30
def __init__(
self,
*expressions,
fields=(),
name=None,
db_tablespace=None,
opclasses=(),
condition=None,
include=None,
):
if opclasses and not name:
raise ValueError('An index must be named to use opclasses.')
if not isinstance(condition, (type(None), Q)):
raise ValueError('Index.condition must be a Q instance.')
if condition and not name:
raise ValueError('An index must be named to use condition.')
if not isinstance(fields, (list, tuple)):
raise ValueError('Index.fields must be a list or tuple.')
if not isinstance(opclasses, (list, tuple)):
raise ValueError('Index.opclasses must be a list or tuple.')
if not expressions and not fields:
raise ValueError(
'At least one field or expression is required to define an '
'index.'
)
if expressions and fields:
raise ValueError(
'Index.fields and expressions are mutually exclusive.',
)
if expressions and not name:
raise ValueError('An index must be named to use expressions.')
if expressions and opclasses:
raise ValueError(
'Index.opclasses cannot be used with expressions. Use '
'django.contrib.postgres.indexes.OpClass() instead.'
)
if opclasses and len(fields) != len(opclasses):
raise ValueError('Index.fields and Index.opclasses must have the same number of elements.')
if fields and not all(isinstance(field, str) for field in fields):
raise ValueError('Index.fields must contain only strings with field names.')
if include and not name:
raise ValueError('A covering index must be named.')
if not isinstance(include, (type(None), list, tuple)):
raise ValueError('Index.include must be a list or tuple.')
self.fields = list(fields)
# A list of 2-tuple with the field name and ordering ('' or 'DESC').
self.fields_orders = [
(field_name[1:], 'DESC') if field_name.startswith('-') else (field_name, '')
for field_name in self.fields
]
self.name = name or ''
self.db_tablespace = db_tablespace
self.opclasses = opclasses
self.condition = condition
self.include = tuple(include) if include else ()
self.expressions = tuple(
F(expression) if isinstance(expression, str) else expression
for expression in expressions
)
@property
def contains_expressions(self):
return bool(self.expressions)
def _get_condition_sql(self, model, schema_editor):
if self.condition is None:
return None
query = Query(model=model, alias_cols=False)
where = query.build_where(self.condition)
compiler = query.get_compiler(connection=schema_editor.connection)
sql, params = where.as_sql(compiler, schema_editor.connection)
return sql % tuple(schema_editor.quote_value(p) for p in params)
def create_sql(self, model, schema_editor, using='', **kwargs):
include = [model._meta.get_field(field_name).column for field_name in self.include]
condition = self._get_condition_sql(model, schema_editor)
if self.expressions:
index_expressions = []
for expression in self.expressions:
index_expression = IndexExpression(expression)
index_expression.set_wrapper_classes(schema_editor.connection)
index_expressions.append(index_expression)
expressions = ExpressionList(*index_expressions).resolve_expression(
Query(model, alias_cols=False),
)
fields = None
col_suffixes = None
else:
fields = [
model._meta.get_field(field_name)
for field_name, _ in self.fields_orders
]
col_suffixes = [order[1] for order in self.fields_orders]
expressions = None
return schema_editor._create_index_sql(
model, fields=fields, name=self.name, using=using,
db_tablespace=self.db_tablespace, col_suffixes=col_suffixes,
opclasses=self.opclasses, condition=condition, include=include,
expressions=expressions, **kwargs,
)
def remove_sql(self, model, schema_editor, **kwargs):
return schema_editor._delete_index_sql(model, self.name, **kwargs)
def deconstruct(self):
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
path = path.replace('django.db.models.indexes', 'django.db.models')
kwargs = {'name': self.name}
if self.fields:
kwargs['fields'] = self.fields
if self.db_tablespace is not None:
kwargs['db_tablespace'] = self.db_tablespace
if self.opclasses:
kwargs['opclasses'] = self.opclasses
if self.condition:
kwargs['condition'] = self.condition
if self.include:
kwargs['include'] = self.include
return (path, self.expressions, kwargs)
def clone(self):
"""Create a copy of this Index."""
_, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def set_name_with_model(self, model):
"""
Generate a unique name for the index.
The name is divided into 3 parts - table name (12 chars), field name
(8 chars) and unique hash + suffix (10 chars). Each part is made to
fit its size by truncating the excess length.
"""
_, table_name = split_identifier(model._meta.db_table)
column_names = [model._meta.get_field(field_name).column for field_name, order in self.fields_orders]
column_names_with_order = [
(('-%s' if order else '%s') % column_name)
for column_name, (field_name, order) in zip(column_names, self.fields_orders)
]
# The length of the parts of the name is based on the default max
# length of 30 characters.
hash_data = [table_name] + column_names_with_order + [self.suffix]
self.name = '%s_%s_%s' % (
table_name[:11],
column_names[0][:7],
'%s_%s' % (names_digest(*hash_data, length=6), self.suffix),
)
assert len(self.name) <= self.max_name_length, (
'Index too long for multiple database support. Is self.suffix '
'longer than 3 characters?'
)
if self.name[0] == '_' or self.name[0].isdigit():
self.name = 'D%s' % self.name[1:]
def __repr__(self):
return '<%s:%s%s%s%s%s>' % (
self.__class__.__name__,
'' if not self.fields else " fields='%s'" % ', '.join(self.fields),
'' if not self.expressions else " expressions='%s'" % ', '.join([
str(expression) for expression in self.expressions
]),
'' if self.condition is None else ' condition=%s' % self.condition,
'' if not self.include else " include='%s'" % ', '.join(self.include),
'' if not self.opclasses else " opclasses='%s'" % ', '.join(self.opclasses),
)
def __eq__(self, other):
if self.__class__ == other.__class__:
return self.deconstruct() == other.deconstruct()
return NotImplemented
class IndexExpression(Func):
"""Order and wrap expressions for CREATE INDEX statements."""
template = '%(expressions)s'
wrapper_classes = (OrderBy, Collate)
def set_wrapper_classes(self, connection=None):
# Some databases (e.g. MySQL) treats COLLATE as an indexed expression.
if connection and connection.features.collate_as_index_expression:
self.wrapper_classes = tuple([
wrapper_cls
for wrapper_cls in self.wrapper_classes
if wrapper_cls is not Collate
])
@classmethod
def register_wrappers(cls, *wrapper_classes):
cls.wrapper_classes = wrapper_classes
def resolve_expression(
self,
query=None,
allow_joins=True,
reuse=None,
summarize=False,
for_save=False,
):
expressions = list(self.flatten())
# Split expressions and wrappers.
index_expressions, wrappers = partition(
lambda e: isinstance(e, self.wrapper_classes),
expressions,
)
wrapper_types = [type(wrapper) for wrapper in wrappers]
if len(wrapper_types) != len(set(wrapper_types)):
raise ValueError(
"Multiple references to %s can't be used in an indexed "
"expression." % ', '.join([
wrapper_cls.__qualname__ for wrapper_cls in self.wrapper_classes
])
)
if expressions[1:len(wrappers) + 1] != wrappers:
raise ValueError(
'%s must be topmost expressions in an indexed expression.'
% ', '.join([
wrapper_cls.__qualname__ for wrapper_cls in self.wrapper_classes
])
)
# Wrap expressions in parentheses if they are not column references.
root_expression = index_expressions[1]
resolve_root_expression = root_expression.resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
if not isinstance(resolve_root_expression, Col):
root_expression = Func(root_expression, template='(%(expressions)s)')
if wrappers:
# Order wrappers and set their expressions.
wrappers = sorted(
wrappers,
key=lambda w: self.wrapper_classes.index(type(w)),
)
wrappers = [wrapper.copy() for wrapper in wrappers]
for i, wrapper in enumerate(wrappers[:-1]):
wrapper.set_source_expressions([wrappers[i + 1]])
# Set the root expression on the deepest wrapper.
wrappers[-1].set_source_expressions([root_expression])
self.set_source_expressions([wrappers[0]])
else:
# Use the root expression, if there are no wrappers.
self.set_source_expressions([root_expression])
return super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
def as_sqlite(self, compiler, connection, **extra_context):
# Casting to numeric is unnecessary.
return self.as_sql(compiler, connection, **extra_context)
| [
"[email protected]"
] | |
a466328f675d05531ab0713a6dafb6c78b0f3ff7 | c6ae1b415c5de367677dceaecd5cd983365609e0 | /Atividades/Roteiro 5 - While/Programas/Roteiro 5 Questão 7.py | 0e3f5b5b3eb25ddead15c1c27823d585c58d4985 | [] | no_license | JardelBrandon/Algoritmos_e_Programacao | 8f00548ed9706cfeb3ad2b2ac6db0b9d2eb0f13c | 66784a567d85cf340d50400a14ea6915779a1304 | refs/heads/master | 2021-07-11T14:36:09.260443 | 2017-10-12T20:55:41 | 2017-10-12T20:55:41 | 106,738,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | #7. Write a program that reads integers from standard input until a negative
#or even number is entered.
while True:
    num = int(input("Enter any integer: "))
    print(num)
    if num < 0:
        print(num, "Negative number, done!")
        break
    if num % 2 == 0:
        print(num, "Even number, done!")
        break
# The program's algorithm performs the following steps:
# It enters a repetition loop by stating that the while condition is True
# It defines the stopping conditions through mathematical comparisons
# It prints the numbers on screen according to the imposed condition
# When one of the conditions is not met,
# the program prints the entered number followed by the quoted message and ends,
# fulfilling what the question asks
"[email protected]"
] | |
65a3424eb666d1b95d64e170a548eba3ee06e8ea | 1bd3076902117867ec048210905195ba2aaaaa6b | /exercise/leetcode/python_src/by2017_Sep/Leet081.py | 031e578b7c55dcfc5b24e1ee98786a96056655d6 | [] | no_license | SS4G/AlgorithmTraining | d75987929f1f86cd5735bc146e86b76c7747a1ab | 7a1c3aba65f338f6e11afd2864dabd2b26142b6c | refs/heads/master | 2021-01-17T20:54:31.120884 | 2020-06-03T15:04:10 | 2020-06-03T15:04:10 | 84,150,587 | 2 | 0 | null | 2017-10-19T11:50:38 | 2017-03-07T03:33:04 | Python | UTF-8 | Python | false | false | 196 | py | class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: bool
"""
return target in set(nums)
| [
"[email protected]"
] | |
4c05e2be63b355c4b1e20763ff2b62cee65c0990 | 2ef5b78a1a750ee33d86f36bba176796163e3933 | /demo5/forms.py | 4f222179f56bc614355a243b4e1381dc87a68970 | [] | no_license | LIZEJU/flask-demo | 08f8419757dc4902239b89b3df9ea71ce918ad26 | 6ae201e3cc078b7a3fd18fb6d114b0b83f1b4b41 | refs/heads/master | 2020-09-27T05:58:09.742198 | 2020-01-30T11:04:41 | 2020-01-30T11:04:41 | 226,445,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | #encoding:utf-8
# Import the Form base classes
from flask_wtf import Form
from wtforms import Form as WTForm
# Import the form field classes
from wtforms import StringField, PasswordField, FileField
# Import the form validator classes
from wtforms.validators import DataRequired, Length, InputRequired
from flask_wtf.file import FileRequired, FileAllowed
from flask_wtf.form import FlaskForm
# Login form class, inherits from the flask_wtf Form base class
class BaseLogin(Form):
    # username
    name = StringField('name', validators=[DataRequired(message='Username must not be empty'), Length(6, 16, message='Length must be between 6 and 16')], render_kw={'placeholder': 'Enter username'})
    # password
    password = PasswordField('password', validators=[DataRequired(message='Password must not be empty'), Length(6, 16, message='Length must be between 6 and 16')], render_kw={'placeholder': 'Enter password'})
# Upload form class (kept on the plain wtforms Form, as in the original file)
class UploadForm(WTForm):
    file = FileField(validators=[FileRequired(),  # FileRequired: a file must be uploaded
                                 FileAllowed(['jpg', 'png', 'gif'])])  # FileAllowed: only the listed formats are accepted
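# A minimal view sketch for the forms above (added for illustration; the app
# object, secret key, and template name are assumptions, not part of this demo):
#
#     from flask import Flask, render_template
#     app = Flask(__name__)
#     app.config['SECRET_KEY'] = 'dev'  # flask_wtf forms need a secret key for CSRF
#
#     @app.route('/login', methods=['GET', 'POST'])
#     def login():
#         form = BaseLogin()
#         if form.validate_on_submit():
#             return 'welcome, %s' % form.name.data
#         return render_template('login.html', form=form)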
| [
"[email protected]"
] | |
2f6d03026c09a7ce173a67c3e1969dc363abb6b6 | e6bc1f55371786dad70313eb468a3ccf6000edaf | /Extras/matrix/Correct/s030.py | c58235fbf2b318bd0371e500ca863583fa7ce813 | [] | no_license | prateksha/Source-Code-Similarity-Measurement | 9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0 | fb371b837917794d260a219a1ca09c46a5b15962 | refs/heads/master | 2023-01-04T07:49:25.138827 | 2020-10-25T14:43:57 | 2020-10-25T14:43:57 | 285,744,963 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | #!/usr/bin/python3
# MatrixError
# Your code - begin
class MatrixError(Exception):
    pass
# Your code - end
class Matrix:
    def __init__(self, m):
        # check if m is empty
        if len(m) == 0:
            raise MatrixError("List Empty")
        # check if all rows are non-empty lists of the same length
        for row in m:
            if not isinstance(row, list):
                raise MatrixError("Row is not a list")
            if len(row) == 0:
                raise MatrixError("Row is empty")
            if len(row) != len(m[0]):
                raise MatrixError("Rows are not of the same length")
        # create matrix attribute using deep copy
        self._matrix = Matrix.deep_copy(m)
    # method matrix - to return the matrix through deep copy
    def matrix(self):
        return Matrix.deep_copy(self._matrix)
    # method dimensions - to return the dimensions, i.e. number of rows and number of columns as a tuple
    def dimensions(self):
        return (len(self._matrix), len(self._matrix[0]))
    # method add - to add two matrices
    def add(self, m):
        if self.dimensions() != m.dimensions():
            raise MatrixError("Dimension mismatch")
        return Matrix([[a + b for a, b in zip(row, other_row)]
                       for row, other_row in zip(self._matrix, m.matrix())])
    # method multiply - to multiply two matrices (element-wise, matching the
    # equal-sized operands in the demo below)
    def multiply(self, m):
        if self.dimensions() != m.dimensions():
            raise MatrixError("Dimension mismatch")
        return Matrix([[a * b for a, b in zip(row, other_row)]
                       for row, other_row in zip(self._matrix, m.matrix())])
    # method transpose - to find the matrix transpose
    def transpose(self):
        return Matrix([list(col) for col in zip(*self._matrix)])
    # static method to carry out deep copy of lists
    @staticmethod
    def deep_copy(m):
        return [row[:] for row in m]
    def __str__(self):
        return str(self.matrix())
if __name__ == "__main__":
m1 = Matrix([[1, 2, 3], [3, 4, 5]])
m2 = Matrix([[10, 20, 30], [30, 40, 50]])
print("sum1 = ", str(m1.add(m2)))
print("sum2 = ", str(m2.add(m1)))
print("product1 = ", m1.multiply(m2))
| [
"[email protected]"
] | |
f59576e84f7ea29af8679e9e3cc8f1dd93b936f5 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/minWindow_20200618160003.py | 227eef656edcd857940c47b680c4c4aa56bf8fce | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # for this question we have two pointers
# left pointer
# right pointer
# we move the right pointer and maintain the position of the left pointer
# when we find the word we move the left pointer
# store that word its shortest form
# we keep moving the right pointer
# sz,azjskfzts
def minWindow(str1, str2):
    # str1: the characters to cover; str2: the string to scan
    from collections import Counter
    need, missing = Counter(str1), len(str1)
    left = start = end = 0
    for right, char in enumerate(str2, 1):
        if need[char] > 0:
            missing -= 1
        need[char] -= 1
        if missing == 0:  # window covers str1: shrink from the left, then record
            while need[str2[left]] < 0:
                need[str2[left]] += 1
                left += 1
            if end == 0 or right - left < end - start:
                start, end = left, right
    return str2[start:end]
minWindow("sz", "azjskfzts")
| [
"[email protected]"
] | |
a422adb8377e27145052c403040b4814541e9b36 | f60b964dc39ba54bb84f1c4949be3b91a92b8346 | /order_api/views.py | e3feee2c73e66f7674f853b4df1efd45d9a9edbb | [
"Apache-2.0"
] | permissive | jiejiang/courier | 4b0b4fc56c5510228ffcc4de51b074c7aff9502f | 6fdeaf041c77dba0f97e206adb7b0cded9674d3d | refs/heads/master | 2022-11-30T14:24:53.950502 | 2019-12-06T16:42:00 | 2019-12-06T16:42:00 | 195,387,643 | 0 | 0 | Apache-2.0 | 2022-11-22T01:22:33 | 2019-07-05T10:08:19 | Python | false | false | 14,754 | py | # -*- coding: utf-8 -*-
import urllib, PyPDF2, sys, os, json
from cStringIO import StringIO
from wsgiref.util import FileWrapper
from decimal import Decimal
from django.conf import settings
from django.db import transaction
from django.http.response import StreamingHttpResponse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import render
from issue_order.models import Profile
from rest_framework import viewsets, permissions
from order_api.models import Product, Request, PackageItem, Package, Route
from order_api.serializers import ProductSerializer, RequestSerializer, PackageSerializer, WaybillSerializer, \
RouteSerializer
from order_api.permissions import IsApiUser, IsPackageOwner, IsRequestOwner
from rest_framework.exceptions import APIException, status, NotFound, ParseError, ValidationError
from rest_framework.generics import get_object_or_404
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from issue_order.courier_systems import list_cities, get_user_prices, calculate_package_price
from order_api.tasks import get_system_config
from track_order.tracking import query_tracking_info, DBError
class Pagination(PageNumberPagination):
page_size = 10
page_size_query_param = 'page_size'
max_page_size = 100
class URLConfMixin(object):
def dispatch(self, request, *args, **kwargs):
request.urlconf = "order_api.urls"
return super(URLConfMixin, self).dispatch(request, *args, **kwargs)
class ProductViewSet(URLConfMixin,
viewsets.mixins.ListModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
queryset = Product.objects.exclude(is_enabled=False).order_by('name')
serializer_class = ProductSerializer
permission_classes = (permissions.IsAuthenticated, IsApiUser,)
lookup_field = "barcode"
lookup_value_regex = r"[\w\-]+"
class RouteViewSet(URLConfMixin,
viewsets.mixins.ListModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
queryset = Route.objects.exclude(is_enabled=False).order_by('name')
serializer_class = RouteSerializer
permission_classes = (permissions.IsAuthenticated, IsApiUser,)
lookup_field = 'code'
lookup_value_regex = r"[\w\-]+"
class RequestViewSet(URLConfMixin,
viewsets.mixins.CreateModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.mixins.ListModelMixin,
viewsets.GenericViewSet):
serializer_class = RequestSerializer
permission_classes = (permissions.IsAuthenticated, IsApiUser, IsRequestOwner)
lookup_field = "request_no"
lookup_value_regex = r"[\w\-]+"
pagination_class = Pagination
def get_queryset(self):
return Request.objects.filter(owner=self.request.user).order_by('-id')
def perform_create(self, serializer):
system = settings.API_SYSTEM
try:
data = serializer.validated_data
prices = get_user_prices(self.request.user, system)
if not prices:
raise APIException(_(u"价格未配置"), code=status.HTTP_500_INTERNAL_SERVER_ERROR)
with transaction.atomic():
packages = data.pop('packages')
route_code = data.get('route', {}).get('code', None)
route = get_object_or_404(Route, code=route_code) if route_code else None
if route_code and (not route or not route.is_enabled):
raise APIException(_(u"route不可用:%s" % route_code), code=status.HTTP_500_INTERNAL_SERVER_ERROR)
serializer.save(owner=self.request.user, test_mode=self.request.user.api_profile.api_test_mode,
route=route)
for i, _package in enumerate(packages):
packageitem_set = _package.pop('packageitem_set')
package = Package.objects.create(request=serializer.instance, **_package)
for j, item in enumerate(packageitem_set):
if not 'product' in item:
raise ValidationError, _(u"第%d个包裹第%d个产品信息无效" % (i+1, j+1))
if 'barcode' in item['product']:
product = get_object_or_404(Product, barcode=item['product']['barcode'])
elif 'name' in item['product']:
product = get_object_or_404(Product, name=item['product']['name'])
else:
raise ValidationError, _(u"第%d个包裹第%d个产品barcode或者name信息无效" % (i+1, j+1))
PackageItem.objects.create(product=product, package=package, count=item['count'])
try:
package.cost = calculate_package_price(prices, package)
except Exception, inst:
raise APIException(detail=_(u"第%d个包裹异常: %s" % (i+1, inst.message)))
package.save()
total_cost = serializer.instance.total_cost
if total_cost < 0:
raise APIException, _(u"无效订单费用£%.2f" % total_cost)
#charge if not in test mode
if not serializer.instance.test_mode and total_cost > 0:
if self.request.user.profile.credit < total_cost:
raise APIException, _(u"余额(£%.2f)不足订单费用£%.2f" % (self.request.user.profile.credit, total_cost))
profile = Profile.objects.select_for_update().get(user=self.request.user)
if profile.credit < total_cost:
raise APIException, _(u"余额(£%.2f)不足订单费用£%.2f" % (profile.credit, total_cost))
profile.credit -= total_cost
profile.save()
serializer.instance.request_no = "".join(
(getattr(settings, "REQUEST_ID_PREFIX", ""),
timezone.localtime(serializer.instance.creation_date).strftime('%Y%m%d'),
str(serializer.instance.id).zfill(8))
)
serializer.instance.save()
except Exception as inst:
if not isinstance(inst, APIException):
import traceback
traceback.print_exc(file=sys.stderr)
raise APIException(detail={'detail': 'Server Internal Error'},
code=status.HTTP_500_INTERNAL_SERVER_ERROR)
raise inst
class PackageViewSet(URLConfMixin,
viewsets.ReadOnlyModelViewSet):
serializer_class = PackageSerializer
permission_classes = (permissions.IsAuthenticated, IsApiUser, IsPackageOwner)
lookup_field = "slug"
lookup_value_regex = r"[\w\-]+"
pagination_class = Pagination
def get_queryset(self):
return Package.objects.filter(request__owner=self.request.user).exclude(tracking_no__isnull=True).order_by('-id')
class WaybillViewSet(URLConfMixin,
viewsets.ReadOnlyModelViewSet):
serializer_class = WaybillSerializer
permission_classes = (permissions.IsAuthenticated, IsApiUser, IsPackageOwner)
lookup_field = "slug"
lookup_value_regex = r"[\w\-]+"
pagination_class = Pagination
def get_queryset(self):
return Package.objects.filter(request__owner=self.request.user).exclude(tracking_no__isnull=True).order_by('-id')
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
response = StreamingHttpResponse(FileWrapper(instance.waybill_file, 8192), content_type='application/octet-stream')
response['Content-Length'] = instance.waybill_file.size
response['Content-Disposition'] = "attachment; filename=%s" % urllib.quote(instance.slug + '.pdf')
return response
class RequestWaybillViewSet(URLConfMixin,
viewsets.mixins.ListModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
serializer_class = RequestSerializer
permission_classes = (permissions.IsAuthenticated, IsApiUser, IsRequestOwner)
lookup_field = "request_no"
lookup_value_regex = r"[\w\-]+"
def get_queryset(self):
return Request.objects.filter(owner=self.request.user).order_by('-id')
def list(self, request, *args, **kwargs):
raise NotFound()
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
merger = PyPDF2.PdfFileMerger()
for package in instance.packages.order_by('id'):
merger.append(PyPDF2.PdfFileReader(package.waybill_file.file))
merger_data = StringIO()
merger.write(merger_data)
file_size = merger_data.tell()
merger_data.seek(0)
response = StreamingHttpResponse(FileWrapper(merger_data, 8192), content_type='application/octet-stream')
response['Content-Length'] = file_size
response['Content-Disposition'] = "attachment; filename=%s" % urllib.quote(instance.request_no + '.pdf')
return response
class CityViewSet(viewsets.mixins.ListModelMixin,
viewsets.ViewSet):
permission_classes = (permissions.IsAuthenticated, IsApiUser,)
def list(self, request, *args, **kwargs):
try:
# system_config = get_system_config("yunda")
# return Response(
# list_cities(system_config['url_base'], system_config['user_name'], system_config['password']).json())
cities_json = os.path.join(os.path.dirname(os.path.realpath(__file__)), "json", "cities.json")
with open(cities_json) as f:
data = json.load(f)
return Response(data)
except Exception:
raise APIException(
detail={'detail': 'City list error!'},
code=status.HTTP_500_INTERNAL_SERVER_ERROR)
class TrackingViewSet(URLConfMixin,
viewsets.mixins.ListModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.ViewSet):
permission_classes = (permissions.IsAuthenticated, IsApiUser,)
def list(self, request, *args, **kwargs):
raise NotFound()
def retrieve(self, request, *args, **kwargs):
if 'pk' not in kwargs:
raise NotFound()
if not Package.objects.exclude(request__test_mode=True)\
.filter(tracking_no=kwargs['pk'], request__owner=request.user).first():
if Package.objects.exclude(request__test_mode=False)\
.filter(tracking_no=kwargs['pk'], request__owner=request.user).first():
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "json",
"tracking_sample.json")) as f:
data = json.load(f)
data['timestamp'] = timezone.now()
data['tracking_no'] = kwargs['pk']
return Response(data)
raise NotFound()
order_number = kwargs['pk']
try:
now = timezone.now()
record, agent_items = query_tracking_info(order_number, now)
pre_agent_items = []
if record.creation_time and record.creation_time < now:
pre_agent_items.append({
'time': record.creation_time,
'detail':
_(u"英国包裹信息与面单已生成") if record.route == 'yunda'
else _(u"【英国】包裹信息与面单已生成")
})
if record.departure_time and record.departure_time < now:
pre_agent_items.append({
'time': record.departure_time,
'detail':
_(u"英国离开处理中心发往中国广州") if record.route == 'yunda'
else _(u"【英国】离开处理中心发往中国") if record.route == 'xian'
else _(u"【英国】离开处理中心发往中国广州")
})
if record.landed_time and record.landed_time < now:
pre_agent_items.append({
'time': record.landed_time,
'detail':
_(u"广州市到达广州白云机场 运往海关申报清关") if record.route == 'yunda'
else _(u"【西安市】到达西安咸阳国际机场 进行转关") if record.route == 'xian'
else _(u"【广州市】到达广州白云机场 运往海关申报清关")
})
if record.custom_clearance_time and record.custom_clearance_time < now:
pre_agent_items.append({
'time': record.custom_clearance_time,
'detail':
_(u"江门市到达海关快件处理中心 进行清关") if record.route == 'yunda'
else _(u"【西安市】到达海关快件处理中心 进行申报清关") if record.route == 'xian'
else _(u"【江门市】到达海关快件处理中心 进行清关")
})
data = {
'timestamp': now,
'tracking_no': order_number,
'progress': pre_agent_items + list(agent_items),
'delivered': record.status == record.Status.ARRIVED
}
return Response(data)
except DBError:
raise APIException(detail=u"System busy, please try again later!", code=status.HTTP_503_SERVICE_UNAVAILABLE)
except NotFound:
raise
except Exception as inst:
import traceback
traceback.print_exc(file=sys.stderr)
raise APIException(detail=inst.message, code=status.HTTP_500_INTERNAL_SERVER_ERROR)
class AccountViewSet(viewsets.mixins.ListModelMixin,
viewsets.ViewSet):
permission_classes = (permissions.IsAuthenticated, IsApiUser,)
CENTS = Decimal('0.01')
def list(self, request, *args, **kwargs):
return Response({
'username': request.user.username,
'credit': request.user.profile.credit.quantize(self.CENTS),
'currency': 'GBP'
})
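# --- Editor's note (not part of the original module): a minimal sketch of URL
# wiring for the viewsets above, assuming Django REST framework's router API.
# The URL prefixes are illustrative; on DRF >= 3.9 the keyword is `basename`
# rather than `base_name`.
from rest_framework import routers

router = routers.DefaultRouter()
router.register(r'products', ProductViewSet, base_name='product')
router.register(r'routes', RouteViewSet, base_name='route')
router.register(r'requests', RequestViewSet, base_name='request')
router.register(r'packages', PackageViewSet, base_name='package')
router.register(r'tracking', TrackingViewSet, base_name='tracking')
urlpatterns = router.urls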
| [
"[email protected]"
] | |
b96ac1eb8024992f0b224485416d49af4bfde378 | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/host_virtual_nic_spec.py | c946f61307dcc38838e49156a3cc38c9e730a3f0 | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostVirtualNicSpec(vim, *args, **kwargs):
'''This data object type describes the VirtualNic configuration containing both
the configured properties on a VirtualNic and identification information.'''
obj = vim.client.factory.create('{urn:vim25}HostVirtualNicSpec')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
raise IndexError('Expected at least 1 arguments got: %d' % len(args))
required = [ ]
optional = [ 'distributedVirtualPort', 'ip', 'mac', 'mtu', 'portgroup', 'tsoEnabled',
'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
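# --- Editor's note: an illustrative call (not part of the generated module).
# It assumes `vim` is an established pyvisdk connection and `ip_config` is a
# HostIpConfig object built beforehand; the MAC/MTU values are placeholders.
# spec = HostVirtualNicSpec(vim, ip=ip_config, mac='00:50:56:aa:bb:cc',
#                           mtu=1500, portgroup='VM Network')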
| [
"[email protected]"
] | |
e1b1868d804ba8bd5d7700debafa0a87b49a39ef | caaf1b0754db1e676c37a6f1e58f19183754e654 | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2021_09_01/operations/_private_endpoint_connections_operations.py | 78db6ee6160edab569f105e1ee00ec65560d434c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | rdomenzain/azure-sdk-for-python | 45dfb39121a0abda048c22e7309733a56259f525 | 58984255aeb904346b6958c5ba742749a2cc7d1b | refs/heads/master | 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 | MIT | 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null | UTF-8 | Python | false | false | 28,374 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
"privateEndpointConnectionName": _SERIALIZER.url(
"private_endpoint_connection_name", private_endpoint_connection_name, "str"
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
"privateEndpointConnectionName": _SERIALIZER.url(
"private_endpoint_connection_name", private_endpoint_connection_name, "str"
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str", min_length=1),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
"privateEndpointConnectionName": _SERIALIZER.url(
"private_endpoint_connection_name", private_endpoint_connection_name, "str"
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
class PrivateEndpointConnectionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2021_09_01.ContainerServiceClient`'s
:attr:`private_endpoint_connections` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnectionListResult:
"""Gets a list of private endpoint connections in the specified managed cluster.
To learn more about private clusters, see:
https://docs.microsoft.com/azure/aks/private-clusters.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnectionListResult or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.PrivateEndpointConnectionListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-09-01"))
cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None)
request = build_list_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections"
}
@distributed_trace
def get(
self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Gets the specified private endpoint connection.
To learn more about private clusters, see:
https://docs.microsoft.com/azure/aks/private-clusters.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-09-01"))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@overload
def update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
parameters: _models.PrivateEndpointConnection,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Updates a private endpoint connection.
Updates a private endpoint connection.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param parameters: The updated private endpoint connection. Required.
:type parameters: ~azure.mgmt.containerservice.v2021_09_01.models.PrivateEndpointConnection
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Updates a private endpoint connection.
Updates a private endpoint connection.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param parameters: The updated private endpoint connection. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
parameters: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Updates a private endpoint connection.
Updates a private endpoint connection.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param parameters: The updated private endpoint connection. Is either a
PrivateEndpointConnection type or a IO type. Required.
:type parameters: ~azure.mgmt.containerservice.v2021_09_01.models.PrivateEndpointConnection or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-09-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "PrivateEndpointConnection")
request = build_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-09-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@distributed_trace
def begin_delete(
self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Deletes a private endpoint connection.
Deletes a private endpoint connection.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-09-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
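# --- Editor's note (not part of the generated client): a minimal usage sketch.
# It assumes valid Azure credentials plus an existing managed cluster; the
# subscription, resource-group and cluster names below are placeholders.
from azure.identity import DefaultAzureCredential
from azure.mgmt.containerservice.v2021_09_01 import ContainerServiceClient

client = ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>")
connections = client.private_endpoint_connections.list("my-resource-group", "my-cluster")
for connection in connections.value or []:
    print(connection.name)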
| [
"[email protected]"
] | |
d4b95ac7eb2b29dd8d737aed9ed3f2ef6766e308 | 67c957b74e22bb191b9941cd753642516f32f27d | /name_uniq.py | bec11f03d47fcfd69317da3b4e58ddeaa4ccd673 | [] | no_license | chenchiyuan/jobeasy | a1ddc567901234b96d69658791f280bdfca43215 | 0a9cb465e1f8b4068069330b58f418890bde407b | refs/heads/master | 2016-09-05T20:58:11.679848 | 2014-04-08T07:18:27 | 2014-04-08T07:18:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,301 | py | # -*- coding: utf-8 -*-
# __author__ = chenchiyuan
# Deduplicate by POI name; must be run on the internal network
from __future__ import division, unicode_literals, print_function
import requests
import json
import base64
class ExcelHelper(object):
@classmethod
def write(cls, path, data, encoding="utf-8"):
import xlwt
workbook = xlwt.Workbook(encoding)
worksheet = workbook.add_sheet('sheet1')
for i, line in enumerate(data):
for j, text in enumerate(line):
worksheet.write(i, j, label=text)
workbook.save(path)
@classmethod
def read(cls, path):
import xlrd
workbook = xlrd.open_workbook(path)
sheet = workbook.sheets()[0]
for row in range(1, sheet.nrows):
yield sheet.cell(row, 0).value, sheet.cell(row, 1).value.strip()
def get_data(url):
headers = {
"host": "place.map.baidu.com",
}
r = requests.get(url, headers=headers)
return r.content
def name_to_bid(name):
url = "http://api.map.baidu.com/?qt=s&wd=%s&rn=10&ie=utf-8&oue=1&res=api&c=131" % name
data = json.loads(get_data(url))
try:
result = data['content'][0]['primary_uid']
except Exception:
try:
hot_city = data['content'][0]['code']
except Exception:
print(url)
raise Exception()
url = "http://api.map.baidu.com/?qt=s&wd=%s&rn=10&ie=utf-8&oue=1&res=api&c=%s" % (name, hot_city)
data = json.loads(get_data(url))
try:
result = data['content'][0]['primary_uid']
except Exception:
print(url)
raise Exception()
return result
def call_curl(url):
import subprocess
proc = subprocess.Popen(["curl", "--header", 'Host: place.map.baidu.com', url], stdout=subprocess.PIPE)
(out, err) = proc.communicate()
return out
def get_poi(x, y):
url = "http://api.map.baidu.com/ag/coord/convert?from=5&to=2&x=%s&y=%s" % (x, y)
json_data = json.loads(get_data(url))
return base64.decodestring(json_data['x']), base64.decodestring(json_data['y'])
def gen_info(bid):
url = "http://cq01-map-place00.cq01.baidu.com:8881/1/di/0/get.json?qt=13&nc=1&uid_list=%s" % bid
json_data = json.loads(call_curl(url))
data = json_data['data'][bid]
name = data['name']
address = data['address']
phone = data['phone']
city_name = data['city_name']
x, y = get_poi(data['point_x'], data['point_y'])
return {
"name": name,
"address": address,
"phone": phone,
"point_x": x,
"point_y": y,
"city_name": city_name
}
def parse_name(name):
bid = name_to_bid(name)
return gen_info(bid)
def parse_names(path, names):
data = []
for data_id, name in names:
try:
info = parse_name(name)
line = [data_id, info['name'], info['city_name'], info['address'], info['phone'], info['point_y'], info['point_x'], ]
except Exception:
line = [data_id, name, "", "", "", "", ""]
data.append(line)
ExcelHelper.write(path, data)
if __name__ == "__main__":
from_path = "/Users/shadow/Desktop/imax.xlsx"
to_path = "//Users/shadow/Desktop/result.xlsx"
names = ExcelHelper.read(from_path)
parse_names(to_path, names)
| [
"[email protected]"
] | |
31404902b27fdb152d6280d2618358c91f58c56e | 6b791247919f7de90c8402abcca64b32edd7a29b | /lib/coginvasion/gags/Geyser.py | 49fb02d48689e8123aa8b4780d597b0a706f7552 | [
"Apache-2.0"
] | permissive | theclashingfritz/Cog-Invasion-Online-Dump | a9bce15c9f37b6776cecd80b309f3c9ec5b1ec36 | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | refs/heads/master | 2021-01-04T06:44:04.295001 | 2020-02-14T05:23:01 | 2020-02-14T05:23:01 | 240,434,213 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,238 | py | # uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.gags.Geyser
from lib.coginvasion.gags.SquirtGag import SquirtGag
from lib.coginvasion.gags.ChargeUpGag import ChargeUpGag
from lib.coginvasion.gags import GagGlobals
from lib.coginvasion.globals import CIGlobals
from direct.interval.IntervalGlobal import Sequence, Wait, Func, LerpScaleInterval
from direct.interval.IntervalGlobal import ActorInterval, LerpPosInterval, Parallel
from direct.interval.IntervalGlobal import SoundInterval
from panda3d.core import Point3
class Geyser(SquirtGag, ChargeUpGag):
def __init__(self):
SquirtGag.__init__(self, CIGlobals.Geyser, GagGlobals.getProp(5, 'geyser'), 105, GagGlobals.GEYSER_HIT_SFX, None, None, None, None, None, None, 1, 1)
ChargeUpGag.__init__(self, 24, 10, 50, 0.5, maxCogs=4)
self.setImage('phase_3.5/maps/geyser.png')
self.entities = []
self.timeout = 3.0
def start(self):
SquirtGag.start(self)
ChargeUpGag.start(self, self.avatar)
def unEquip(self):
SquirtGag.unEquip(self)
ChargeUpGag.unEquip(self)
def buildGeyser(self):
def clearNodes(entity, paths):
for i in xrange(paths.getNumPaths()):
paths[i].removeNode()
geyserWater = loader.loadModel(self.model)
waterRemoveSet = geyserWater.findAllMatches('**/hole')
waterRemoveSet.addPathsFrom(geyserWater.findAllMatches('**/shadow'))
clearNodes(geyserWater, waterRemoveSet)
geyserMound = loader.loadModel(self.model)
moundRemoveSet = geyserMound.findAllMatches('**/Splash*')
moundRemoveSet.addPathsFrom(geyserMound.findAllMatches('**/spout'))
clearNodes(geyserMound, moundRemoveSet)
entitySet = [
geyserWater, geyserMound]
self.entities.append(entitySet)
return entitySet
def removeEntity(self, entity):
for iEntity in self.entities:
if iEntity == entity:
self.entities.remove(iEntity)
def onActivate(self, ignore, cog):
self.startEntity(self.buildGeyser(), cog)
def startEntity(self, entity, cog):
geyserHold = 1.5
scaleUpPoint = Point3(1.8, 1.8, 1.8)
geyserWater = entity[0]
geyserMound = entity[1]
def showEntity(entity, cog):
entity.reparentTo(render)
entity.setPos(cog.getPos())
def __getGeyserTrack():
track = Sequence(Func(showEntity, geyserMound, cog), Func(showEntity, geyserWater, cog), LerpScaleInterval(geyserWater, 1.0, scaleUpPoint, startScale=GagGlobals.PNT3NEAR0), Wait(0.5 * geyserHold), LerpScaleInterval(geyserWater, 0.5, GagGlobals.PNT3NEAR0, startScale=scaleUpPoint), LerpScaleInterval(geyserMound, 0.5, GagGlobals.PNT3NEAR0), Func(geyserWater.removeNode), Func(geyserMound.removeNode), Func(self.removeEntity, entity))
return track
def __getCogTrack():
def handleHit():
if self.isLocal():
cog.sendUpdate('hitByGag', [self.getID()])
startPos = cog.getPos(render)
cogFloat = Point3(0, 0, 14)
cogEndPos = Point3(startPos[0] + cogFloat[0], startPos[1] + cogFloat[1], startPos[2] + cogFloat[2])
suitType = cog.suitPlan.getSuitType()
if suitType == 'A':
startFlailFrame = 16
endFlailFrame = 16
else:
startFlailFrame = 15
endFlailFrame = 15
track = Sequence()
track.append(Func(cog.d_disableMovement))
track.append(Wait(0.5))
slipIval = Sequence(ActorInterval(cog, 'slip-backward', playRate=0.5, startFrame=0, endFrame=startFlailFrame - 1), Func(cog.pingpong, 'slip-backward', fromFrame=startFlailFrame, toFrame=endFlailFrame), Wait(0.5), Parallel(ActorInterval(cog, 'slip-backward', playRate=1.0, startFrame=endFlailFrame), Func(cog.startRay), Func(handleHit)))
slipUp = LerpPosInterval(cog, 1.1, cogEndPos, startPos=startPos, fluid=1)
slipDn = LerpPosInterval(cog, 0.6, startPos, startPos=cogEndPos, fluid=1)
geyserMotion = Sequence(slipUp, slipDn)
track.append(Parallel(slipIval, geyserMotion))
if cog.getHealth() - self.getDamage() <= 0:
track.append(Func(cog.d_enableMovement))
return track
if entity and cog:
track = Sequence()
track.append(Parallel(SoundInterval(self.hitSfx, node=self.avatar), Parallel(__getGeyserTrack(), __getCogTrack())))
track.start()
def release(self):
ChargeUpGag.release(self)
self.reset()
if self.isLocal():
base.localAvatar.sendUpdate('usedGag', [self.id])
cogs = ChargeUpGag.getSelectedCogs(self)
for cog in cogs:
if cog.getHealth() > 0:
geyser = self.buildGeyser()
self.startEntity(geyser, cog)
self.avatar.d_trapActivate(self.getID(), self.avatar.doId, 0, cog.doId) | [
"[email protected]"
] | |
5ab9f533edd3366b5f611708dc5b6eb8004b0626 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_3/models/bucket_s3_specific_performance.py | eb1e8397e06d6a677cb0d00173e040eaf24a5570 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 9,447 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_3 import models
class BucketS3SpecificPerformance(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'id': 'str',
'others_per_sec': 'float',
'read_buckets_per_sec': 'float',
'read_objects_per_sec': 'float',
'write_buckets_per_sec': 'float',
'write_objects_per_sec': 'float',
'time': 'int',
'usec_per_other_op': 'float',
'usec_per_read_bucket_op': 'float',
'usec_per_read_object_op': 'float',
'usec_per_write_bucket_op': 'float',
'usec_per_write_object_op': 'float'
}
attribute_map = {
'name': 'name',
'id': 'id',
'others_per_sec': 'others_per_sec',
'read_buckets_per_sec': 'read_buckets_per_sec',
'read_objects_per_sec': 'read_objects_per_sec',
'write_buckets_per_sec': 'write_buckets_per_sec',
'write_objects_per_sec': 'write_objects_per_sec',
'time': 'time',
'usec_per_other_op': 'usec_per_other_op',
'usec_per_read_bucket_op': 'usec_per_read_bucket_op',
'usec_per_read_object_op': 'usec_per_read_object_op',
'usec_per_write_bucket_op': 'usec_per_write_bucket_op',
'usec_per_write_object_op': 'usec_per_write_object_op'
}
required_args = {
}
def __init__(
self,
name=None, # type: str
id=None, # type: str
others_per_sec=None, # type: float
read_buckets_per_sec=None, # type: float
read_objects_per_sec=None, # type: float
write_buckets_per_sec=None, # type: float
write_objects_per_sec=None, # type: float
time=None, # type: int
usec_per_other_op=None, # type: float
usec_per_read_bucket_op=None, # type: float
usec_per_read_object_op=None, # type: float
usec_per_write_bucket_op=None, # type: float
usec_per_write_object_op=None, # type: float
):
"""
Keyword args:
name (str): Name of the object (e.g., a file system or snapshot).
id (str): A non-modifiable, globally unique ID chosen by the system.
others_per_sec (float): Other operations processed per second.
read_buckets_per_sec (float): Read buckets requests processed per second.
read_objects_per_sec (float): Read object requests processed per second.
write_buckets_per_sec (float): Write buckets requests processed per second.
write_objects_per_sec (float): Write object requests processed per second.
time (int): Sample time in milliseconds since UNIX epoch.
usec_per_other_op (float): Average time, measured in microseconds, it takes the array to process other operations.
usec_per_read_bucket_op (float): Average time, measured in microseconds, it takes the array to process a read bucket request.
usec_per_read_object_op (float): Average time, measured in microseconds, it takes the array to process a read object request.
usec_per_write_bucket_op (float): Average time, measured in microseconds, it takes the array to process a write bucket request.
usec_per_write_object_op (float): Average time, measured in microseconds, it takes the array to process a write object request.
"""
if name is not None:
self.name = name
if id is not None:
self.id = id
if others_per_sec is not None:
self.others_per_sec = others_per_sec
if read_buckets_per_sec is not None:
self.read_buckets_per_sec = read_buckets_per_sec
if read_objects_per_sec is not None:
self.read_objects_per_sec = read_objects_per_sec
if write_buckets_per_sec is not None:
self.write_buckets_per_sec = write_buckets_per_sec
if write_objects_per_sec is not None:
self.write_objects_per_sec = write_objects_per_sec
if time is not None:
self.time = time
if usec_per_other_op is not None:
self.usec_per_other_op = usec_per_other_op
if usec_per_read_bucket_op is not None:
self.usec_per_read_bucket_op = usec_per_read_bucket_op
if usec_per_read_object_op is not None:
self.usec_per_read_object_op = usec_per_read_object_op
if usec_per_write_bucket_op is not None:
self.usec_per_write_bucket_op = usec_per_write_bucket_op
if usec_per_write_object_op is not None:
self.usec_per_write_object_op = usec_per_write_object_op
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `BucketS3SpecificPerformance`".format(key))
if key == "others_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `others_per_sec`, must be a value greater than or equal to `0.0`")
if key == "read_buckets_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `read_buckets_per_sec`, must be a value greater than or equal to `0.0`")
if key == "read_objects_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `read_objects_per_sec`, must be a value greater than or equal to `0.0`")
if key == "write_buckets_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `write_buckets_per_sec`, must be a value greater than or equal to `0.0`")
if key == "write_objects_per_sec" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `write_objects_per_sec`, must be a value greater than or equal to `0.0`")
if key == "usec_per_other_op" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `usec_per_other_op`, must be a value greater than or equal to `0.0`")
if key == "usec_per_read_bucket_op" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `usec_per_read_bucket_op`, must be a value greater than or equal to `0.0`")
if key == "usec_per_read_object_op" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `usec_per_read_object_op`, must be a value greater than or equal to `0.0`")
if key == "usec_per_write_bucket_op" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `usec_per_write_bucket_op`, must be a value greater than or equal to `0.0`")
if key == "usec_per_write_object_op" and value is not None:
if value < 0.0:
raise ValueError("Invalid value for `usec_per_write_object_op`, must be a value greater than or equal to `0.0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BucketS3SpecificPerformance, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BucketS3SpecificPerformance):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
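# --- Editor's note: a small illustrative round-trip (not part of the generated
# model); the metric values are invented. Note that negative rates/latencies
# are rejected by __setattr__ above with a ValueError.
perf = BucketS3SpecificPerformance(name='bucket1',
                                   read_objects_per_sec=120.0,
                                   usec_per_read_object_op=850.0)
print(perf.to_dict())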
| [
"[email protected]"
] | |
0c708aa7731e0845288916df910173d7240ce581 | 4560d7e3aa3be65ffaf102e780b44d6fab51cfd7 | /fastapi_pagination/bases.py | c2726bfa5e3f5b2dd7437896047d9906fa145ce4 | [
"MIT"
] | permissive | mathbeal/fastapi-pagination | 55235bcfb72feebdd0bad7e6bc7fcd3ba028e0bd | 485acf9862316d4ca58657fa6896a9469e419387 | refs/heads/main | 2023-03-17T22:45:19.649532 | 2021-03-01T08:07:59 | 2021-03-01T08:07:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Generic, Sequence, Type, TypeVar
from pydantic.generics import GenericModel
from typing_extensions import Protocol
if TYPE_CHECKING:
from .params import LimitOffsetPaginationParams # pragma no cover
T = TypeVar("T")
C = TypeVar("C")
class AbstractParams(Protocol):
@abstractmethod
def to_limit_offset(self) -> LimitOffsetPaginationParams:
pass # pragma: no cover
class AbstractPage(GenericModel, Generic[T], ABC):
@classmethod
@abstractmethod
def create(cls: Type[C], items: Sequence[T], total: int, params: AbstractParams) -> C:
pass # pragma: no cover
class Config:
arbitrary_types_allowed = True
__all__ = [
"AbstractPage",
"AbstractParams",
]
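# --- Editor's note: a minimal concrete page, sketched here for illustration
# and not part of this module. It satisfies the AbstractPage contract; the
# class and field names are the editor's own.
class SimplePage(AbstractPage[T], Generic[T]):
    items: Sequence[T]
    total: int

    @classmethod
    def create(cls, items: Sequence[T], total: int, params: AbstractParams) -> "SimplePage[T]":
        # params carries limit/offset via to_limit_offset(); this sketch only
        # stores the resolved items and total.
        return cls(items=items, total=total)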
| [
"[email protected]"
] | |
29f89be27029aebc1b5e3b25b5f07fc2d21f16d7 | 41c605bf3a002a757cb2344cff526d7a7ae56ea9 | /plotly/graph_objs/cone/hoverlabel/__init__.py | b9d86aa282e915c730ed36932a930d79960abee1 | [
"MIT"
] | permissive | Jonathan-MW/plotly.py | 9674b90b5de11fd9089e6afefd04b57bc4587829 | 7528c00772f44dee24c0df7e15d70a4852f171a8 | refs/heads/master | 2020-05-30T06:04:13.621478 | 2019-05-31T10:34:15 | 2019-05-31T10:34:15 | 189,571,988 | 2 | 0 | MIT | 2019-05-31T09:59:53 | 2019-05-31T09:59:53 | null | UTF-8 | Python | false | false | 10,981 | py |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['familysrc']
@familysrc.setter
def familysrc(self, val):
self['familysrc'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'cone.hoverlabel'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.cone.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
"""
super(Font, self).__init__('font')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.cone.hoverlabel.Font
constructor must be a dict or
an instance of plotly.graph_objs.cone.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.cone.hoverlabel import (font as v_font)
# Initialize validators
# ---------------------
self._validators['color'] = v_font.ColorValidator()
self._validators['colorsrc'] = v_font.ColorsrcValidator()
self._validators['family'] = v_font.FamilyValidator()
self._validators['familysrc'] = v_font.FamilysrcValidator()
self._validators['size'] = v_font.SizeValidator()
self._validators['sizesrc'] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('family', None)
self['family'] = family if family is not None else _v
_v = arg.pop('familysrc', None)
self['familysrc'] = familysrc if familysrc is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
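# A minimal, hedged usage sketch. It assumes this generated class is reached
# through the usual public plotly graph_objects API (the import path and the
# magic-underscore form below are conventions of that API, not definitions
# from this file):
#
#     import plotly.graph_objects as go
#
#     fig = go.Figure(go.Cone(x=[1], y=[1], z=[1], u=[1], v=[1], w=[0]))
#     # hoverlabel.font takes the family/size (and *src) properties above
#     fig.update_traces(hoverlabel_font=dict(family="Open Sans, Arial", size=12))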
| [
"[email protected]"
] | |
d36bbdba4d86a8634ff9481655aa289e125af4a7 | 1e013dc5f0de0f61e27f2867557803a01c01f4da | /Language/python/module/shutit/login_server.py | c64484bac2ae2c39af84814e4875ba008058b876 | [] | no_license | chengyi818/kata | a2941ce8675c6e7a47169a0eae4c757d3f6f5bf9 | a7cb7ad499037bcc168aaa0eaba857b33c04ef14 | refs/heads/master | 2023-04-10T18:39:09.518433 | 2023-01-08T15:22:12 | 2023-01-08T15:22:12 | 53,040,540 | 1 | 0 | null | 2023-03-25T00:46:51 | 2016-03-03T10:06:58 | C++ | UTF-8 | Python | false | false | 314 | py | #!/usr/bin/env python3
# Author: ChengYi
# Mail: [email protected]
# created time: Fri 30 Jun 2017 09:45:45 AM CST
import shutit
session = shutit.create_session('bash')
session.login('ssh [email protected]', user='you',
password="password")
session.send('hostname', echo=True)
session.logout()
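# The same session API extends to further hops; a sketch assuming the host
# and credentials below (which are hypothetical):
#
#     session.login('ssh admin@jumphost', user='admin', password='password')
#     session.send('uptime', echo=True)
#     session.logout()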
| [
"[email protected]"
] | |
8aac425c473f78bdd1e3a956e3e20aea659288ae | e17e40dbb6ed8caaac5c23de29071b403637f5ae | /transformers_keras/tokenizers/space_tokenizer_test.py | d14ad108c617d1d66aa0f29a709712be005d2ca3 | [
"Apache-2.0"
] | permissive | Linessiex/transformers-keras | cb739075c8daab39d52dc6cd6bafe5e45f8259be | 0bb576db356f575390815dc64840b78b8ecf6227 | refs/heads/master | 2020-11-25T05:58:09.448200 | 2019-09-23T09:13:59 | 2019-09-23T09:13:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,893 | py | import os
import unittest
import tensorflow as tf
from transformers_keras.tokenizers.space_tokenizer import SpaceTokenizer
class SpaceTokenizerTest(unittest.TestCase):
def buildTokenizer(self):
tokenizer = SpaceTokenizer()
corpus = ['train.tgt.txt']
corpus = [os.path.join('testdata', f) for f in corpus]
tokenizer.build_from_corpus(corpus)
return tokenizer
def testTokenize(self):
tokenizer = self.buildTokenizer()
print(tokenizer.token2id_dict)
print(tokenizer.id2token_dict)
print(tokenizer.vocab_size)
def testConvertTokens2Ids(self):
tokenizer = self.buildTokenizer()
        print('token2id dict: ', tokenizer.token2id_dict)
words = tf.constant(['I', 'am', 'a', 'developer'])
v = tokenizer.encode(words)
print(v)
def testConvertIds2Tokens(self):
tokenizer = self.buildTokenizer()
print('id2token dict: ', tokenizer.id2token_dict)
ids = tf.constant([1, 0, 2, 3, 4], dtype=tf.dtypes.int64)
v = tokenizer.decode(ids)
print(v)
def testSaveVocabFile(self):
tokenizer = self.buildTokenizer()
tokenizer.save_to_vocab('testdata/vocab.test.txt')
def testBuildFromVocab(self):
print('============start build from vocab=============')
tokenizer = SpaceTokenizer()
tokenizer.build_from_vocab('testdata/vocab.test.txt')
print('token2id dict: ', tokenizer.token2id_dict)
print('id2token dict: ', tokenizer.id2token_dict)
words = tf.constant(['I', 'am', 'a', 'developer'])
v0 = tokenizer.encode(words)
print(v0)
ids = tf.constant([1, 0, 2, 3, 4], dtype=tf.dtypes.int64)
v1 = tokenizer.decode(ids)
print(v1)
print('============end build from vocab=============')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
3742ae3ea8651e93bf19112552eca41e07d13d17 | 211fdc0564d9b003a82bb880d2e422dac85a5752 | /correcciones/segunda_parte/examen_2/ExamenII-12-11499/ExamenII-12-11499/conjunto.py | 3618d6877e24566f9f12339ab0226e071bff4c12 | [] | no_license | dvdalilue/ci2692_ene_mar_2017 | 45fa1833b4b3b49a1e5be33e58f01cb23bb2d6aa | 1690e6429c2c5ec167d505642d3344b249257475 | refs/heads/master | 2021-01-20T01:32:43.668461 | 2017-04-24T22:50:01 | 2017-04-24T22:50:01 | 89,292,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py |
"""
Implementacion de el TAD Conjunto, con una estructura conjunto que representa
una lista enlazada simple, que va a contener elementos de tipo entero
Autor: Orlando Chaparro Carnet: 12-11499
Ultima modificacion:
Jueves, 30 de marzo de 2017 - Semana 12
"""
class Elemento:
def __init__(self, e, n):
"""
Crea un elemento que sera agregado a la lista enlazada
"""
self.elemento = e
self.next = n
class Conjunto:
def crearConjunto(self):
"""
        Creates a new set. Since the Set ADT is implemented as a class,
        this operation plays the role of the class constructor.
"""
self.head = None
self.tail = None
self.cantidadElem = 0
def agregar(self, e):
"""
Agrega un elemento de tipo entero al conjunto
"""
NuevoElemento = Elemento(elemento, None)
if self.cantidadElem == 0:
self.head = NuevoElemento
self.tail = NuevoElemento
self.cantidadElem += 1
elif self.cantidadElem >= 1:
self.tail.next = NuevoElemento
self.cantidadElem += 1
def pertenece(self, e):
"""
Determina si un elemento esta o no en el conjunto. Si el elemento ya forma parte del conjunto,
retorna True, en caso contrario retorna False
"""
aux = self.head
if self.cantidadElem == 0:
print("No existen elementos en el conjunto finito")
return False
while aux is not None and aux.elemento != e:
aux = aux.next
if aux is None:
return False
if aux.elemento == e
return True
def union(self, conjunto):
        """
        Returns a new set with the elements of both sets (duplicates are kept).
        """
        if self.cantidadElem == 0:
            return conjunto
        ConjuntoFinal = Conjunto()
        ConjuntoFinal.crearConjunto()
        Aux1 = self.head
        Aux2 = conjunto.head
        while Aux1 is not None:
            ConjuntoFinal.agregar(Aux1.elemento)
            Aux1 = Aux1.next
        while Aux2 is not None:
            ConjuntoFinal.agregar(Aux2.elemento)
            Aux2 = Aux2.next
        return ConjuntoFinal
def interseccion(self, conjunto):
        """
        Returns a new set with the elements that belong to both sets.
        """
        ConjuntoFinal = Conjunto()
        ConjuntoFinal.crearConjunto()
        if self.cantidadElem == 0:
            return ConjuntoFinal
        Aux1 = self.head
        while Aux1 is not None:
            if conjunto.pertenece(Aux1.elemento):
                ConjuntoFinal.agregar(Aux1.elemento)
            Aux1 = Aux1.next
        return ConjuntoFinal
def Mostrar(self):
aux = self.head
while aux is not None:
            print(aux.elemento)
aux = aux.next
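# Minimal usage sketch (hypothetical values, relying on the fixes above; note
# that union keeps duplicates because agregar never calls pertenece):
#
#     A = Conjunto(); A.crearConjunto()
#     B = Conjunto(); B.crearConjunto()
#     for x in (1, 2, 3): A.agregar(x)
#     for x in (2, 3, 4): B.agregar(x)
#     A.union(B).Mostrar()         # 1 2 3 2 3 4
#     A.interseccion(B).Mostrar()  # 2 3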
| [
"[email protected]"
] | |
4ebb914c4cd0607646f94e6605538d7f8cdd6278 | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/notifications/__init__.py | a892dc177947a1b63dc28a0a33fdf5cc5a9ed245 | [
"MIT"
] | permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 558 | py | from bitmovin_api_sdk.notifications.notifications_api import NotificationsApi
from bitmovin_api_sdk.notifications.webhooks.webhooks_api import WebhooksApi
from bitmovin_api_sdk.notifications.states.states_api import StatesApi
from bitmovin_api_sdk.notifications.emails.emails_api import EmailsApi
from bitmovin_api_sdk.notifications.notification_list_query_params import NotificationListQueryParams
from bitmovin_api_sdk.notifications.notification_state_entry_list_by_notification_id_query_params import NotificationStateEntryListByNotificationIdQueryParams
| [
"[email protected]"
] | |
6e1d6cd2a8bfea5fab3e963a27b3b7ae78265dc3 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/cosmosdb/aaz/latest/cosmosdb/postgres/configuration/server/__cmd_group.py | d961af574f0abaf5e29fdf103bfbd24bcdb8d564 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 694 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command_group(
"cosmosdb postgres configuration server",
is_preview=True,
)
class __CMDGroup(AAZCommandGroup):
"""Manage Azure Cosmos DB for PostgreSQL server configurations.
"""
pass
__all__ = ["__CMDGroup"]
| [
"[email protected]"
] | |
e564ecc4fbbc90fe1f74ddf416902f8e65510058 | de265eba4074121d53295550fb901016df9f5556 | /django_service/Sample/management/commands/custom_createsuperuser.py | 8ff37a94427784c22a3759a0bb6ad61912f559d0 | [] | no_license | 1shikawa/django-service-ecs | d0ef169746c1baaf5eca3bb10d338b89969a10b8 | dca521ebbeb812f6692cb77aedd727029d22e39c | refs/heads/master | 2023-05-01T22:01:18.249844 | 2019-12-20T07:24:12 | 2019-12-20T07:24:12 | 183,669,797 | 0 | 0 | null | 2023-04-21T20:38:30 | 2019-04-26T17:33:30 | Python | UTF-8 | Python | false | false | 1,168 | py | from django.contrib.auth.management.commands import createsuperuser
from django.core.management import CommandError
class Command(createsuperuser.Command):
help = 'Create a superuser with a password non-interactively'
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--password', dest='password', default=None,
help='Specifies the password for the superuser.',
)
def handle(self, *args, **options):
options.setdefault('interactive', False)
        email = options.get('email')
        password = options.get('password')
        database = options.get('database')
        # The user model here is email-based, so only these two are checked
        if not (email and password):
            raise CommandError('--email and --password are required options')
user_data = {
'email': email,
'password': password,
}
exists = self.UserModel._default_manager.db_manager(database).filter(email=email).exists()
if not exists:
self.UserModel._default_manager.db_manager(database).create_superuser(**user_data)
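# Example invocation, assuming this module sits in an installed app's
# management/commands/ directory so Django discovers it (the email and
# password values below are hypothetical):
#
#     python manage.py custom_createsuperuser --email [email protected] --password s3cret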
| [
"[email protected]"
] | |
4b230710f1856750e68e261ce7c5ace7bc6562c8 | 027643c9ca72efe7210bac72f62d977b842b1228 | /official/vision/beta/configs/video_classification.py | 1edbe22a74fcc0a2d75402f76e5e65c23a622758 | [
"Apache-2.0"
] | permissive | anshkumar/models | 88a60e49f9563447e7a4d3ef9d8c462d4e01d6b3 | a2f397a86995a0c6f9c30d956e21c9a63af6d3e6 | refs/heads/master | 2023-05-01T00:17:37.877511 | 2021-05-07T16:06:03 | 2021-05-07T16:06:03 | 278,812,620 | 0 | 0 | Apache-2.0 | 2021-05-04T09:56:57 | 2020-07-11T07:35:03 | null | UTF-8 | Python | false | false | 11,873 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Video classification configuration definition."""
from typing import Optional, Tuple
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.beta.configs import backbones_3d
from official.vision.beta.configs import common
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""The base configuration for building datasets."""
name: Optional[str] = None
file_type: Optional[str] = 'tfrecord'
compressed_input: bool = False
split: str = 'train'
variant_name: Optional[str] = None
feature_shape: Tuple[int, ...] = (64, 224, 224, 3)
temporal_stride: int = 1
random_stride_range: int = 0
num_test_clips: int = 1
num_test_crops: int = 1
num_classes: int = -1
num_examples: int = -1
global_batch_size: int = 128
data_format: str = 'channels_last'
dtype: str = 'float32'
one_hot: bool = True
shuffle_buffer_size: int = 64
cache: bool = False
input_path: str = ''
is_training: bool = True
cycle_length: int = 10
drop_remainder: bool = True
min_image_size: int = 256
is_multilabel: bool = False
output_audio: bool = False
audio_feature: str = ''
audio_feature_shape: Tuple[int, ...] = (-1,)
aug_min_aspect_ratio: float = 0.5
aug_max_aspect_ratio: float = 2.0
aug_min_area_ratio: float = 0.49
aug_max_area_ratio: float = 1.0
aug_type: Optional[str] = None # 'autoaug', 'randaug', or None
image_field_key: str = 'image/encoded'
label_field_key: str = 'clip/label/index'
def kinetics400(is_training):
"""Generated Kinectics 400 dataset configs."""
return DataConfig(
name='kinetics400',
num_classes=400,
is_training=is_training,
split='train' if is_training else 'valid',
drop_remainder=is_training,
num_examples=215570 if is_training else 17706,
feature_shape=(64, 224, 224, 3) if is_training else (250, 224, 224, 3))
def kinetics600(is_training):
"""Generated Kinectics 600 dataset configs."""
return DataConfig(
name='kinetics600',
num_classes=600,
is_training=is_training,
split='train' if is_training else 'valid',
drop_remainder=is_training,
num_examples=366016 if is_training else 27780,
feature_shape=(64, 224, 224, 3) if is_training else (250, 224, 224, 3))
def kinetics700(is_training):
"""Generated Kinectics 600 dataset configs."""
return DataConfig(
name='kinetics700',
num_classes=700,
is_training=is_training,
split='train' if is_training else 'valid',
drop_remainder=is_training,
num_examples=522883 if is_training else 33441,
feature_shape=(64, 224, 224, 3) if is_training else (250, 224, 224, 3))
def kinetics700_2020(is_training):
"""Generated Kinectics 600 dataset configs."""
return DataConfig(
name='kinetics700',
num_classes=700,
is_training=is_training,
split='train' if is_training else 'valid',
drop_remainder=is_training,
num_examples=535982 if is_training else 33640,
feature_shape=(64, 224, 224, 3) if is_training else (250, 224, 224, 3))
@dataclasses.dataclass
class VideoClassificationModel(hyperparams.Config):
"""The model config."""
model_type: str = 'video_classification'
backbone: backbones_3d.Backbone3D = backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50())
norm_activation: common.NormActivation = common.NormActivation(
use_sync_bn=False)
dropout_rate: float = 0.2
aggregate_endpoints: bool = False
@dataclasses.dataclass
class Losses(hyperparams.Config):
one_hot: bool = True
label_smoothing: float = 0.0
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class Metrics(hyperparams.Config):
use_per_class_recall: bool = False
@dataclasses.dataclass
class VideoClassificationTask(cfg.TaskConfig):
"""The task config."""
model: VideoClassificationModel = VideoClassificationModel()
train_data: DataConfig = DataConfig(is_training=True, drop_remainder=True)
validation_data: DataConfig = DataConfig(
is_training=False, drop_remainder=False)
losses: Losses = Losses()
metrics: Metrics = Metrics()
def add_trainer(experiment: cfg.ExperimentConfig,
train_batch_size: int,
eval_batch_size: int,
learning_rate: float = 1.6,
train_epochs: int = 44,
warmup_epochs: int = 5):
"""Add and config a trainer to the experiment config."""
if experiment.task.train_data.num_examples <= 0:
raise ValueError('Wrong train dataset size {!r}'.format(
experiment.task.train_data))
if experiment.task.validation_data.num_examples <= 0:
raise ValueError('Wrong validation dataset size {!r}'.format(
experiment.task.validation_data))
experiment.task.train_data.global_batch_size = train_batch_size
experiment.task.validation_data.global_batch_size = eval_batch_size
steps_per_epoch = experiment.task.train_data.num_examples // train_batch_size
experiment.trainer = cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=train_epochs * steps_per_epoch,
validation_steps=experiment.task.validation_data.num_examples //
eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9,
'nesterov': True,
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': learning_rate,
'decay_steps': train_epochs * steps_per_epoch,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': warmup_epochs * steps_per_epoch,
'warmup_learning_rate': 0
}
}
}))
return experiment
@exp_factory.register_config_factory('video_classification')
def video_classification() -> cfg.ExperimentConfig:
"""Video classification general."""
return cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=VideoClassificationTask(),
trainer=cfg.TrainerConfig(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
@exp_factory.register_config_factory('video_classification_kinetics400')
def video_classification_kinetics400() -> cfg.ExperimentConfig:
"""Video classification on Kinectics 400 with resnet."""
train_dataset = kinetics400(is_training=True)
validation_dataset = kinetics400(is_training=False)
task = VideoClassificationTask(
model=VideoClassificationModel(
backbone=backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=train_dataset,
validation_data=validation_dataset)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=task,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
add_trainer(config, train_batch_size=1024, eval_batch_size=64)
return config
@exp_factory.register_config_factory('video_classification_kinetics600')
def video_classification_kinetics600() -> cfg.ExperimentConfig:
"""Video classification on Kinectics 600 with resnet."""
train_dataset = kinetics600(is_training=True)
validation_dataset = kinetics600(is_training=False)
task = VideoClassificationTask(
model=VideoClassificationModel(
backbone=backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=train_dataset,
validation_data=validation_dataset)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=task,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
add_trainer(config, train_batch_size=1024, eval_batch_size=64)
return config
@exp_factory.register_config_factory('video_classification_kinetics700')
def video_classification_kinetics700() -> cfg.ExperimentConfig:
"""Video classification on Kinectics 700 with resnet."""
train_dataset = kinetics700(is_training=True)
validation_dataset = kinetics700(is_training=False)
task = VideoClassificationTask(
model=VideoClassificationModel(
backbone=backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=train_dataset,
validation_data=validation_dataset)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=task,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
add_trainer(config, train_batch_size=1024, eval_batch_size=64)
return config
@exp_factory.register_config_factory('video_classification_kinetics700_2020')
def video_classification_kinetics700_2020() -> cfg.ExperimentConfig:
"""Video classification on Kinectics 700 2020 with resnet."""
train_dataset = kinetics700_2020(is_training=True)
validation_dataset = kinetics700_2020(is_training=False)
task = VideoClassificationTask(
model=VideoClassificationModel(
backbone=backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=train_dataset,
validation_data=validation_dataset)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=task,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
add_trainer(config, train_batch_size=1024, eval_batch_size=64)
return config
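# Minimal sketch of consuming one of the experiments registered above; the
# input paths are hypothetical and the data must already be in the expected
# TFRecord layout:
#
#     from official.core import exp_factory
#
#     config = exp_factory.get_exp_config('video_classification_kinetics400')
#     config.task.train_data.input_path = '/data/k400/train*'       # hypothetical
#     config.task.validation_data.input_path = '/data/k400/valid*'  # hypothetical
#     config.validate()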
| [
"[email protected]"
] | |
92ac7bd164345c50dbb3b66e3cf7e6b526225049 | 1ab7b3f2aa63de8488ce7c466a67d367771aa1f2 | /Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/pandas/tests/indexes/ranges/test_range.py | 3bd3f6cc09db7b9910b1ed6ecfcbaefee8015372 | [
"MIT"
] | permissive | icl-rocketry/Avionics | 9d39aeb11aba11115826fd73357b415026a7adad | 95b7a061eabd6f2b607fba79e007186030f02720 | refs/heads/master | 2022-07-30T07:54:10.642930 | 2022-07-10T12:19:10 | 2022-07-10T12:19:10 | 216,184,670 | 9 | 1 | MIT | 2022-06-27T10:17:06 | 2019-10-19T09:57:07 | C++ | UTF-8 | Python | false | false | 16,498 | py | import numpy as np
import pytest
from pandas.core.dtypes.common import ensure_platform_int
import pandas as pd
from pandas import Float64Index, Index, Int64Index, RangeIndex
import pandas._testing as tm
from ..test_numeric import Numeric
# aliases to make some tests easier to read
RI = RangeIndex
I64 = Int64Index
F64 = Float64Index
OI = Index
class TestRangeIndex(Numeric):
_holder = RangeIndex
_compat_props = ["shape", "ndim", "size"]
@pytest.fixture(
params=[
RangeIndex(start=0, stop=20, step=2, name="foo"),
RangeIndex(start=18, stop=-1, step=-2, name="bar"),
],
ids=["index_inc", "index_dec"],
)
def index(self, request):
return request.param
def create_index(self) -> RangeIndex:
return RangeIndex(start=0, stop=20, step=2)
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_too_many_names(self):
index = self.create_index()
with pytest.raises(ValueError, match="^Length"):
index.names = ["roger", "harold"]
@pytest.mark.parametrize(
"index, start, stop, step",
[
(RangeIndex(5), 0, 5, 1),
(RangeIndex(0, 5), 0, 5, 1),
(RangeIndex(5, step=2), 0, 5, 2),
(RangeIndex(1, 5, 2), 1, 5, 2),
],
)
def test_start_stop_step_attrs(self, index, start, stop, step):
# GH 25710
assert index.start == start
assert index.stop == stop
assert index.step == step
@pytest.mark.parametrize("attr_name", ["_start", "_stop", "_step"])
def test_deprecated_start_stop_step_attrs(self, attr_name):
# GH 26581
idx = self.create_index()
with tm.assert_produces_warning(FutureWarning):
getattr(idx, attr_name)
def test_copy(self):
i = RangeIndex(5, name="Foo")
i_copy = i.copy()
assert i_copy is not i
assert i_copy.identical(i)
assert i_copy._range == range(0, 5, 1)
assert i_copy.name == "Foo"
def test_repr(self):
i = RangeIndex(5, name="Foo")
result = repr(i)
expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
i = RangeIndex(5, 0, -1)
result = repr(i)
expected = "RangeIndex(start=5, stop=0, step=-1)"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
def test_insert(self):
idx = RangeIndex(5, name="Foo")
result = idx[1:4]
# test 0th element
tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]))
# GH 18295 (test missing)
expected = Float64Index([0, np.nan, 1, 2, 3, 4])
for na in (np.nan, pd.NaT, None):
result = RangeIndex(5).insert(1, na)
tm.assert_index_equal(result, expected)
def test_delete(self):
idx = RangeIndex(5, name="Foo")
expected = idx[1:].astype(int)
result = idx.delete(0)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
expected = idx[:-1].astype(int)
result = idx.delete(-1)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
msg = "index 5 is out of bounds for axis 0 with size 5"
with pytest.raises((IndexError, ValueError), match=msg):
# either depending on numpy version
result = idx.delete(len(idx))
def test_view(self):
i = RangeIndex(0, name="Foo")
i_view = i.view()
assert i_view.name == "Foo"
i_view = i.view("i8")
tm.assert_numpy_array_equal(i.values, i_view)
i_view = i.view(RangeIndex)
tm.assert_index_equal(i, i_view)
def test_dtype(self):
index = self.create_index()
assert index.dtype == np.int64
def test_cached_data(self):
# GH 26565, GH26617
# Calling RangeIndex._data caches an int64 array of the same length at
# self._cached_data. This test checks whether _cached_data has been set
idx = RangeIndex(0, 100, 10)
assert idx._cached_data is None
repr(idx)
assert idx._cached_data is None
str(idx)
assert idx._cached_data is None
idx.get_loc(20)
assert idx._cached_data is None
90 in idx
assert idx._cached_data is None
91 in idx
assert idx._cached_data is None
idx.all()
assert idx._cached_data is None
idx.any()
assert idx._cached_data is None
idx.format()
assert idx._cache == {}
df = pd.DataFrame({"a": range(10)}, index=idx)
str(df)
assert idx._cache == {}
df.loc[50]
assert idx._cached_data is None
with pytest.raises(KeyError, match="51"):
df.loc[51]
assert idx._cached_data is None
df.loc[10:50]
assert idx._cached_data is None
df.iloc[5:10]
assert idx._cached_data is None
# actually calling idx._data
assert isinstance(idx._data, np.ndarray)
assert isinstance(idx._cached_data, np.ndarray)
def test_is_monotonic(self):
index = RangeIndex(0, 20, 2)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is False
index = RangeIndex(4, 0, -1)
assert index.is_monotonic is False
assert index._is_strictly_monotonic_increasing is False
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 2)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(2, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
def test_equals_range(self):
equiv_pairs = [
(RangeIndex(0, 9, 2), RangeIndex(0, 10, 2)),
(RangeIndex(0), RangeIndex(1, -1, 3)),
(RangeIndex(1, 2, 3), RangeIndex(1, 3, 4)),
(RangeIndex(0, -9, -2), RangeIndex(0, -10, -2)),
]
for left, right in equiv_pairs:
assert left.equals(right)
assert right.equals(left)
def test_logical_compat(self):
idx = self.create_index()
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def test_identical(self):
index = self.create_index()
i = Index(index.copy())
assert i.identical(index)
# we don't allow object dtype for RangeIndex
if isinstance(index, RangeIndex):
return
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
i = index.copy(dtype=object)
i = i.rename("foo")
same_values = Index(i, dtype=object)
assert same_values.identical(index.copy(dtype=object))
assert not i.identical(index)
assert Index(same_values, name="foo", dtype=object).identical(i)
assert not index.copy(dtype=object).identical(index.copy(dtype="int64"))
def test_nbytes(self):
# memory savings vs int index
i = RangeIndex(0, 1000)
assert i.nbytes < i._int64index.nbytes / 10
# constant memory usage
i2 = RangeIndex(0, 10)
assert i.nbytes == i2.nbytes
@pytest.mark.parametrize(
"start,stop,step",
[
# can't
("foo", "bar", "baz"),
# shouldn't
("0", "1", "2"),
],
)
def test_cant_or_shouldnt_cast(self, start, stop, step):
msg = f"Wrong type {type(start)} for value {start}"
with pytest.raises(TypeError, match=msg):
RangeIndex(start, stop, step)
def test_view_index(self):
index = self.create_index()
index.view(Index)
def test_prevent_casting(self):
index = self.create_index()
result = index.astype("O")
assert result.dtype == np.object_
def test_repr_roundtrip(self):
index = self.create_index()
tm.assert_index_equal(eval(repr(index)), index)
def test_slice_keep_name(self):
idx = RangeIndex(1, 2, name="asdf")
assert idx.name == idx[1:].name
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = RangeIndex(5)
# float conversions
arr = np.arange(5, dtype="int64") * 3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx, expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx, expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5, dtype="float64")
result = fidx - a
tm.assert_index_equal(result, expected)
expected = Float64Index(-arr)
a = np.zeros(5, dtype="float64")
result = a - fidx
tm.assert_index_equal(result, expected)
def test_has_duplicates(self, index):
assert index.is_unique
assert not index.has_duplicates
def test_extended_gcd(self):
index = self.create_index()
result = index._extended_gcd(6, 10)
assert result[0] == result[1] * 6 + result[2] * 10
assert 2 == result[0]
result = index._extended_gcd(10, 6)
assert 2 == result[1] * 10 + result[2] * 6
assert 2 == result[0]
def test_min_fitting_element(self):
result = RangeIndex(0, 20, 2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(1, 6)._min_fitting_element(1)
assert 1 == result
result = RangeIndex(18, -2, -2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(5, 0, -1)._min_fitting_element(1)
assert 1 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._min_fitting_element(big_num)
assert big_num == result
def test_max_fitting_element(self):
result = RangeIndex(0, 20, 2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(1, 6)._max_fitting_element(4)
assert 4 == result
result = RangeIndex(18, -2, -2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(5, 0, -1)._max_fitting_element(4)
assert 4 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._max_fitting_element(big_num)
assert big_num == result
def test_pickle_compat_construction(self):
# RangeIndex() is a valid constructor
pass
def test_slice_specialised(self):
index = self.create_index()
index.name = "foo"
# scalar indexing
res = index[1]
expected = 2
assert res == expected
res = index[-1]
expected = 18
assert res == expected
# slicing
# slice value completion
index_slice = index[:]
expected = index
tm.assert_index_equal(index_slice, expected)
# positive slice values
index_slice = index[7:10:2]
expected = Index(np.array([14, 18]), name="foo")
tm.assert_index_equal(index_slice, expected)
# negative slice values
index_slice = index[-1:-5:-2]
expected = Index(np.array([18, 14]), name="foo")
tm.assert_index_equal(index_slice, expected)
# stop overshoot
index_slice = index[2:100:4]
expected = Index(np.array([4, 12]), name="foo")
tm.assert_index_equal(index_slice, expected)
# reverse
index_slice = index[::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[-8::-1]
expected = Index(np.array([4, 2, 0]), name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[-40::-1]
expected = Index(np.array([], dtype=np.int64), name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[40::-1]
expected = Index(index.values[40::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[10::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
@pytest.mark.parametrize("step", set(range(-5, 6)) - {0})
def test_len_specialised(self, step):
# make sure that our len is the same as np.arange calc
start, stop = (0, 5) if step > 0 else (5, 0)
arr = np.arange(start, stop, step)
index = RangeIndex(start, stop, step)
assert len(index) == len(arr)
index = RangeIndex(stop, start, step)
assert len(index) == 0
@pytest.fixture(
params=[
([RI(1, 12, 5)], RI(1, 12, 5)),
([RI(0, 6, 4)], RI(0, 6, 4)),
([RI(1, 3), RI(3, 7)], RI(1, 7)),
([RI(1, 5, 2), RI(5, 6)], RI(1, 6, 2)),
([RI(1, 3, 2), RI(4, 7, 3)], RI(1, 7, 3)),
([RI(-4, 3, 2), RI(4, 7, 2)], RI(-4, 7, 2)),
([RI(-4, -8), RI(-8, -12)], RI(0, 0)),
([RI(-4, -8), RI(3, -4)], RI(0, 0)),
([RI(-4, -8), RI(3, 5)], RI(3, 5)),
([RI(-4, -2), RI(3, 5)], I64([-4, -3, 3, 4])),
([RI(-2), RI(3, 5)], RI(3, 5)),
([RI(2), RI(2)], I64([0, 1, 0, 1])),
([RI(2), RI(2, 5), RI(5, 8, 4)], RI(0, 6)),
([RI(2), RI(3, 5), RI(5, 8, 4)], I64([0, 1, 3, 4, 5])),
([RI(-2, 2), RI(2, 5), RI(5, 8, 4)], RI(-2, 6)),
([RI(3), I64([-1, 3, 15])], I64([0, 1, 2, -1, 3, 15])),
([RI(3), F64([-1, 3.1, 15.0])], F64([0, 1, 2, -1, 3.1, 15.0])),
([RI(3), OI(["a", None, 14])], OI([0, 1, 2, "a", None, 14])),
([RI(3, 1), OI(["a", None, 14])], OI(["a", None, 14])),
]
)
def appends(self, request):
"""Inputs and expected outputs for RangeIndex.append test"""
return request.param
def test_append(self, appends):
# GH16212
indices, expected = appends
result = indices[0].append(indices[1:])
tm.assert_index_equal(result, expected, exact=True)
if len(indices) == 2:
# Append single item rather than list
result2 = indices[0].append(indices[1])
tm.assert_index_equal(result2, expected, exact=True)
def test_engineless_lookup(self):
# GH 16685
# Standard lookup on RangeIndex should not require the engine to be
# created
idx = RangeIndex(2, 10, 3)
assert idx.get_loc(5) == 1
tm.assert_numpy_array_equal(
idx.get_indexer([2, 8]), ensure_platform_int(np.array([0, 2]))
)
with pytest.raises(KeyError, match="3"):
idx.get_loc(3)
assert "_engine" not in idx._cache
# Different types of scalars can be excluded immediately, no need to
# use the _engine
with pytest.raises(KeyError, match="'a'"):
idx.get_loc("a")
assert "_engine" not in idx._cache
def test_format_empty(self):
# GH35712
empty_idx = self._holder(0)
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
| [
"[email protected]"
] | |
7e90e0748a097082ae08503e7105547f19da0ae5 | 7482036919de3e698fb752486f74c78922fb342d | /backend/manage.py | 61f319e61ffe0dc371b6d1d9bed8db5a05f36a9a | [] | no_license | crowdbotics-apps/spring-star-27540 | 6c14339a0f1d6e5e409f809180049ce3085a70ef | 267d17dbe4d773f2e83fcef592e92f78c16d4000 | refs/heads/master | 2023-05-02T22:58:42.678703 | 2021-05-29T01:56:54 | 2021-05-29T01:56:54 | 371,858,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'spring_star_27540.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
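# Typical entry points once dependencies are installed (standard Django
# management commands, run from the project root):
#
#     python manage.py migrate
#     python manage.py runserver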
| [
"[email protected]"
] | |
d97dcb6e0ca0834d47adde8295161d6df7c1effb | 7a88fc18f30d5dd3ac935877d4d9268a56c296be | /di_website/publications/migrations/0007_auto_20190924_1349.py | 1907659178d26314ad37e2f4f1ba61a1d7f5b83d | [] | no_license | devinit/DIwebsite-redesign | 745a480b7ba0feffa34dc664548ee4c5a7b4d470 | 9ec46823c67cdd4f35be255896bf30d8f6362666 | refs/heads/develop | 2023-08-30T04:06:20.951203 | 2023-08-07T12:06:07 | 2023-08-07T12:06:07 | 184,287,370 | 1 | 0 | null | 2023-08-28T14:34:57 | 2019-04-30T15:29:25 | HTML | UTF-8 | Python | false | false | 1,077 | py | # Generated by Django 2.2.2 on 2019-09-24 13:49
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('publications', '0006_auto_20190919_1544'),
]
operations = [
migrations.AddField(
model_name='shortpublicationpage',
name='published_date',
field=models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='This date will be used for display and ordering'),
),
migrations.AlterField(
model_name='legacypublicationpage',
name='published_date',
field=models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='This date will be used for display and ordering'),
),
migrations.AlterField(
model_name='publicationpage',
name='published_date',
field=models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='This date will be used for display and ordering'),
),
]
| [
"[email protected]"
] | |
26a9092923ba18920ccdad2427001c1ad995f102 | aee1878ba3e31a36c805025c662ab55a45003898 | /model_zoo/preprocess.py | 2ab21774d485dedb6137ca9f7b4dea35f9469a68 | [
"MIT"
] | permissive | boluoyu/ModelZoo | 4a40c1215200ddcf5e96554bf2474593cfe60f1e | e8906d5c5195c1f6ebdc46e69fa8cd0439317c60 | refs/heads/master | 2020-06-15T17:29:18.108553 | 2019-02-08T16:20:07 | 2019-02-08T16:20:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from sklearn.preprocessing import StandardScaler
def standardize(fit_data, extra_data=None):
"""
standardize data
:param fit_data: data to fit and transform
:param extra_data: extra data to transform
:return:
"""
s = StandardScaler()
s.fit(fit_data)
fit_data = s.transform(fit_data)
    if extra_data is not None:
extra_data = s.transform(extra_data)
return fit_data, extra_data
return fit_data
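# Minimal usage sketch (hypothetical arrays):
#
#     import numpy as np
#     train = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
#     test = np.array([[2.0, 15.0]])
#     train_s, test_s = standardize(train, test)  # test scaled with train's stats
#     train_only = standardize(train)             # returns one transformed array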
| [
"[email protected]"
] | |
311e80aa2ff395e8c37abb76ae904d71bc65e325 | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /cv/super_resolution/ttsr/pytorch/model/SearchTransfer.py | 2b5783ca0517680d2052d57305968c26bdac5edf | [
"Apache-2.0",
"MIT"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 2,276 | py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class SearchTransfer(nn.Module):
def __init__(self):
super(SearchTransfer, self).__init__()
def bis(self, input, dim, index):
# batch index select
# input: [N, ?, ?, ...]
# dim: scalar > 0
# index: [N, idx]
views = [input.size(0)] + [1 if i!=dim else -1 for i in range(1, len(input.size()))]
expanse = list(input.size())
expanse[0] = -1
expanse[dim] = -1
index = index.view(views).expand(expanse)
return torch.gather(input, dim, index)
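        # e.g. input (N, C*k*k, L_ref), dim=2, index (N, L_lr)
        #   -> output (N, C*k*k, L_lr): per batch item, gather L_lr columns
    # Minimal shape sketch for forward() (hypothetical tensors; lv1/lv2/lv3
    # are features at 1x, 1/2x and 1/4x of the input resolution, matching the
    # TTSR feature pyramid this module is used with):
    #
    #     m = SearchTransfer()
    #     lrsr  = torch.randn(2, 256, 40, 40)   # upsampled-LR lv3 features
    #     refsr = torch.randn(2, 256, 40, 40)   # down/up-sampled ref lv3 features
    #     r1 = torch.randn(2, 64, 160, 160)     # ref lv1 features
    #     r2 = torch.randn(2, 128, 80, 80)      # ref lv2 features
    #     r3 = torch.randn(2, 256, 40, 40)      # ref lv3 features
    #     S, T3, T2, T1 = m(lrsr, refsr, r1, r2, r3)
    #     # S: (2,1,40,40)  T3: (2,256,40,40)  T2: (2,128,80,80)  T1: (2,64,160,160)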
def forward(self, lrsr_lv3, refsr_lv3, ref_lv1, ref_lv2, ref_lv3):
### search
lrsr_lv3_unfold = F.unfold(lrsr_lv3, kernel_size=(3, 3), padding=1)
refsr_lv3_unfold = F.unfold(refsr_lv3, kernel_size=(3, 3), padding=1)
refsr_lv3_unfold = refsr_lv3_unfold.permute(0, 2, 1)
refsr_lv3_unfold = F.normalize(refsr_lv3_unfold, dim=2) # [N, Hr*Wr, C*k*k]
lrsr_lv3_unfold = F.normalize(lrsr_lv3_unfold, dim=1) # [N, C*k*k, H*W]
R_lv3 = torch.bmm(refsr_lv3_unfold, lrsr_lv3_unfold) #[N, Hr*Wr, H*W]
R_lv3_star, R_lv3_star_arg = torch.max(R_lv3, dim=1) #[N, H*W]
### transfer
ref_lv3_unfold = F.unfold(ref_lv3, kernel_size=(3, 3), padding=1)
ref_lv2_unfold = F.unfold(ref_lv2, kernel_size=(6, 6), padding=2, stride=2)
ref_lv1_unfold = F.unfold(ref_lv1, kernel_size=(12, 12), padding=4, stride=4)
T_lv3_unfold = self.bis(ref_lv3_unfold, 2, R_lv3_star_arg)
T_lv2_unfold = self.bis(ref_lv2_unfold, 2, R_lv3_star_arg)
T_lv1_unfold = self.bis(ref_lv1_unfold, 2, R_lv3_star_arg)
T_lv3 = F.fold(T_lv3_unfold, output_size=lrsr_lv3.size()[-2:], kernel_size=(3,3), padding=1) / (3.*3.)
T_lv2 = F.fold(T_lv2_unfold, output_size=(lrsr_lv3.size(2)*2, lrsr_lv3.size(3)*2), kernel_size=(6,6), padding=2, stride=2) / (3.*3.)
T_lv1 = F.fold(T_lv1_unfold, output_size=(lrsr_lv3.size(2)*4, lrsr_lv3.size(3)*4), kernel_size=(12,12), padding=4, stride=4) / (3.*3.)
S = R_lv3_star.view(R_lv3_star.size(0), 1, lrsr_lv3.size(2), lrsr_lv3.size(3))
return S, T_lv3, T_lv2, T_lv1 | [
"[email protected]"
] | |
d0b5c2f160a1d45560730b6f89d15a84bc44bfc6 | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/guy_job_party/work/morning_team/minute/city.py | cd73017ec214a164aca8b7076627ba35a74cba8f | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,594 | py | package SDK;
import java.io.File;
import java.io.FileOutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.Future;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.EndpointAPI;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.LUISAuthoringClient;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.LUISAuthoringManager;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.models.ApplicationCreateObject;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.models.ApplicationPublishObject;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.models.BatchLabelExample;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.models.CompositeEntityModel;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.models.EnqueueTrainingResponse;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.models.EntityLabelObject;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.models.ExampleLabelObject;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.models.HierarchicalEntityModel;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.models.ModelTrainingInfo;
import com.microsoft.azure.cognitiveservices.language.luis.authoring.models.ProductionOrStagingEndpointInfo;
import com.microsoft.azure.cognitiveservices.language.luis.runtime.LuisRuntimeAPI;
import com.microsoft.azure.cognitiveservices.language.luis.runtime.LuisRuntimeManager;
import com.microsoft.azure.cognitiveservices.language.luis.runtime.models.EntityModel;
import com.microsoft.azure.cognitiveservices.language.luis.runtime.models.LuisResult;
import com.microsoft.azure.cognitiveservices.language.spellcheck.BingSpellCheckAPI;
import com.microsoft.azure.cognitiveservices.language.spellcheck.BingSpellCheckManager;
import com.microsoft.azure.cognitiveservices.language.spellcheck.BingSpellCheckOperations;
import com.microsoft.azure.cognitiveservices.language.spellcheck.models.SpellCheck;
import com.microsoft.azure.cognitiveservices.language.spellcheck.models.SpellCheckerOptionalParameter;
import com.microsoft.azure.cognitiveservices.language.spellcheck.models.SpellingFlaggedToken;
import com.microsoft.azure.cognitiveservices.language.spellcheck.models.SpellingTokenSuggestion;
import com.microsoft.azure.cognitiveservices.language.textanalytics.TextAnalytics;
import com.microsoft.azure.cognitiveservices.language.textanalytics.TextAnalyticsAPI;
import com.microsoft.azure.cognitiveservices.language.textanalytics.TextAnalyticsManager;
import com.microsoft.azure.cognitiveservices.language.textanalytics.models.AzureRegions;
import com.microsoft.azure.cognitiveservices.language.textanalytics.models.DetectLanguageOptionalParameter;
import com.microsoft.azure.cognitiveservices.language.textanalytics.models.DetectedLanguage;
import com.microsoft.azure.cognitiveservices.language.textanalytics.models.Input;
import com.microsoft.azure.cognitiveservices.language.textanalytics.models.LanguageBatchResult;
import com.microsoft.azure.cognitiveservices.language.textanalytics.models.LanguageBatchResultItem;
import com.microsoft.azure.cognitiveservices.vision.contentmoderator.ContentModeratorClient;
import com.microsoft.azure.cognitiveservices.vision.contentmoderator.ContentModeratorManager;
import com.microsoft.azure.cognitiveservices.vision.contentmoderator.TextModerations;
import com.microsoft.azure.cognitiveservices.vision.contentmoderator.models.AzureRegionBaseUrl;
import com.microsoft.azure.cognitiveservices.vision.contentmoderator.models.Classification;
import com.microsoft.azure.cognitiveservices.vision.contentmoderator.models.Screen;
import com.microsoft.azure.cognitiveservices.vision.contentmoderator.models.ScreenTextOptionalParameter;
import com.microsoft.cognitiveservices.speech.CancellationDetails;
import com.microsoft.cognitiveservices.speech.CancellationReason;
import com.microsoft.cognitiveservices.speech.ResultReason;
import com.microsoft.cognitiveservices.speech.SpeechConfig;
import com.microsoft.cognitiveservices.speech.SpeechRecognitionResult;
import com.microsoft.cognitiveservices.speech.SpeechRecognizer;
import qNa.GetAnswer;
import speechSDK.tts.AudioOutputFormat;
import speechSDK.tts.Gender;
import speechSDK.tts.TTSService;
import textTranslator.Translate;
public class ConversationAIPipelineSDK {
static String versionId = "0.1";
static UUID appId;
static String appEndpoint;
static String luisAuthoringKey;
// Step 1: Speech Service (Speech To Text)
private static String recognizeSpeech(String speechSubscriptionKey, String serviceRegion) {
        assert(!"29c164e5eb7fa60031af567d55299497".equals(speechSubscriptionKey)); // compare string contents, not references
assert(serviceRegion != null);
String recognizedSpeech = "";
try {
SpeechConfig config = SpeechConfig.fromSubscription(speechSubscriptionKey, serviceRegion);
assert(config != null);
SpeechRecognizer reco = new SpeechRecognizer(config);
assert(reco != null);
System.out.println("Say something...");
Future<SpeechRecognitionResult> task = reco.recognizeOnceAsync();
assert(task != null);
SpeechRecognitionResult result = task.get();
assert(result != null);
if (result.getReason() == ResultReason.RecognizedSpeech) {
recognizedSpeech = result.getText();
System.out.println("We recognized: " + recognizedSpeech);
}
else if (result.getReason() == ResultReason.NoMatch) {
System.out.println("NOMATCH: Speech could not be recognized.");
System.exit(1);
}
else if (result.getReason() == ResultReason.Canceled) {
CancellationDetails cancellation = CancellationDetails.fromResult(result);
System.out.println("CANCELED: Reason=" + cancellation.getReason());
if (cancellation.getReason() == CancellationReason.Error) {
System.out.println("CANCELED: ErrorCode=" + cancellation.getErrorCode());
System.out.println("CANCELED: ErrorDetails=" + cancellation.getErrorDetails());
System.out.println("CANCELED: Did you update the subscription info?");
}
System.exit(1);
}
reco.close();
} catch (Exception ex) {
System.out.println("Unexpected exception: " + ex.getMessage());
assert(false);
System.exit(1);
}
return recognizedSpeech;
}
// Step 2: Text Analytics (Language Detection)
private static String detactFirstLanguage(String text, String subscriptionKey) throws Exception {
assert(text != null);
assert(subscriptionKey != null);
TextAnalyticsAPI taAPI = TextAnalyticsManager.authenticate(AzureRegions.WESTCENTRALUS, subscriptionKey);
assert(taAPI != null);
TextAnalytics ta = taAPI.textAnalytics();
assert(ta != null);
Input input = new Input();
assert(input != null);
input.withId("1").withText(text);
List<Input> documents = new ArrayList<>();
documents.add(input);
DetectLanguageOptionalParameter detectLanguageOptionalParameter = new DetectLanguageOptionalParameter();
assert(detectLanguageOptionalParameter != null);
detectLanguageOptionalParameter.withDocuments(documents);
LanguageBatchResult languageBatchResult = ta.detectLanguage(detectLanguageOptionalParameter);
assert(languageBatchResult != null);
List<LanguageBatchResultItem> resp = languageBatchResult.documents();
assert(resp != null);
for (LanguageBatchResultItem LanguageBatchResultItem : resp) {
assert(LanguageBatchResultItem != null);
List<DetectedLanguage> detectedLanguages = LanguageBatchResultItem.detectedLanguages();
for (DetectedLanguage lang : detectedLanguages) {
assert(lang != null);
String langName = lang.iso6391Name();
if (langName != null && !langName.isEmpty()) {
return langName;
}
}
}
throw new Exception("Error: no match language found");
}
// Step 3: Text Translator (Translate Text) Missing SDK
private static String translateText(String text, String translateTo, String subscriptionKey) {
assert(text != null);
assert(translateTo != null);
assert(subscriptionKey != null);
String translatedText = text;
try {
Translate translateRequest = new Translate(subscriptionKey);
translatedText = Translate.getTranslatedText(translateRequest.Post(text, translateTo));
} catch (Exception e) {
System.out.format("<Text Translator> Error: %s", e.getMessage());
System.exit(1);
}
return translatedText;
}
// Step 4: Bing Spell Check (Spell Check)
private static String spellCheck(String market, String mode, String text, String subscriptionKey) {
assert(market != null);
assert(mode != null);
assert(text != null);
assert(subscriptionKey != null);
BingSpellCheckAPI bingSpellCheckAPI = BingSpellCheckManager.authenticate(subscriptionKey);
assert(bingSpellCheckAPI != null);
BingSpellCheckOperations bingSpellCheckOperations = bingSpellCheckAPI.bingSpellCheckOperations();
assert(bingSpellCheckOperations != null);
SpellCheckerOptionalParameter spellCheckerOptionalParameter = new SpellCheckerOptionalParameter();
assert(spellCheckerOptionalParameter != null);
spellCheckerOptionalParameter.withMarket(market).withMode(mode);
SpellCheck spellCheck = bingSpellCheckOperations.spellChecker(text, spellCheckerOptionalParameter);
assert(spellCheck != null);
List<SpellingFlaggedToken> spellingFlaggedTokens = spellCheck.flaggedTokens();
assert(spellingFlaggedTokens != null);
System.out.println("Spelling flagged tokens size = " + spellingFlaggedTokens.size());
if (spellingFlaggedTokens.size() == 0) {
return text;
}
String tempText = text;
for (SpellingFlaggedToken spellingFlaggedToken : spellingFlaggedTokens) {
System.out.println("token = " + spellingFlaggedToken.token());
List<SpellingTokenSuggestion> suggestions = spellingFlaggedToken.suggestions();
assert(suggestions != null);
for (SpellingTokenSuggestion spellingTokenSuggestion : suggestions) {
assert(spellingTokenSuggestion != null);
System.out.println("suggestion = " + spellingTokenSuggestion.suggestion()
+ ", score = " + spellingTokenSuggestion.score());
String sug = spellingTokenSuggestion.suggestion();
if (sug != null && !sug.isEmpty()) {
tempText.replaceAll(spellingFlaggedToken.token(), sug);
break;
}
}
}
return tempText;
}
// Step 5: Content Moderator
private static String contentModerator(String text, String subscriptionKey) {
assert(text != null);
assert(subscriptionKey != null);
ContentModeratorClient contentModeratorClient = ContentModeratorManager.authenticate(AzureRegionBaseUrl.WESTCENTRALUSAPICOGNITIVEMICROSOFTCOM, subscriptionKey);
assert(contentModeratorClient != null);
TextModerations textModerations = contentModeratorClient.textModerations();
assert(textModerations != null);
ScreenTextOptionalParameter screenTextOptionalParameter = new ScreenTextOptionalParameter();
assert(screenTextOptionalParameter != null);
screenTextOptionalParameter.withAutocorrect(true).withPII(true).withClassify(true);
Screen screen = textModerations.screenText("text/plain", text.getBytes(), screenTextOptionalParameter);
assert(screen != null);
System.out.println("auto corrected text = " + screen.autoCorrectedText());
System.out.println("language = " + screen.language());
System.out.println("normalized text = " + screen.normalizedText());
System.out.println("original text = " + screen.originalText());
Classification classification = screen.classification();
if (classification == null) {
return "Not an appropriate sentences";
} else {
System.out.println("review recommended = " + classification.reviewRecommended());
if (classification.reviewRecommended()) {
return "Review Recommended: category1 score = " + classification.category1().score() + ", category2 score = " + classification.category2().score() + ", category3 score = " + classification.category3().score() ;
}
}
return screen.autoCorrectedText();
}
// Step 6: LUIS
private static String luis(String text, String luisAuthoringKey) {
try {
LUISAuthoringClient authoringClient = LUISAuthoringManager.authenticate(EndpointAPI.US_WEST, luisAuthoringKey);
assert(authoringClient != null);
System.out.println("Result of run Luis Authoring = " + runLuisAuthoring(authoringClient));
LuisRuntimeAPI runtimeClient = LuisRuntimeManager
.authenticate(com.microsoft.azure.cognitiveservices.language.luis.runtime.EndpointAPI.US_WEST, luisAuthoringKey);
assert(runtimeClient != null);
return runLuisRuntimeSample(runtimeClient, text);
} catch (Exception e) {
System.out.println("<LUIS> Erorr : " + e.getMessage());
e.printStackTrace();
}
return "";
}
private static boolean runLuisAuthoring(LUISAuthoringClient authoringClient) {
try {
try {
appId = authoringClient.apps().add(new ApplicationCreateObject()
.withName("FlightExample")
.withInitialVersionId(versionId)
.withCulture("en-us")
);
} catch (Exception ex) {
System.out.println("<LUIS - runLuisAuthoring> Error: " + ex.getMessage());
return false;
}
System.out.println("Created Application " + appId.toString());
String destinationName = "Destination";
UUID destinationId = authoringClient.models().addEntity()
.withAppId(appId)
.withVersionId(versionId)
.withName(destinationName)
.execute();
System.out.println("Created simple entity " + destinationName + " with ID " +
destinationId.toString());
String className = "Class";
UUID classId = authoringClient.models().addHierarchicalEntity(appId, versionId,
new HierarchicalEntityModel()
.withName(className)
.withChildren(Arrays.asList("First", "Business", "Economy")));
System.out.println("Created hierarchical entity " + className + " with ID " + classId.toString());
//=============================================================
// This will create the "Flight" composite entity including "Class" and "Destination"
System.out.println("Creating the \"Flight\" composite entity including \"Class\" and \"Destination\".");
String flightName = "Flight";
UUID flightId = authoringClient.models().addCompositeEntity(appId, versionId, new CompositeEntityModel()
.withName(flightName)
.withChildren(Arrays.asList(className, destinationName)));
System.out.println("Created composite entity " + flightName + "with ID " + flightId.toString());
//=============================================================
// This will create a new "FindFlights" intent including the following utterances
System.out.println("Creating a new \"FindFlights\" intent with two utterances");
String utteranceFindEconomyToMadrid = "find flights in economy to Madrid";
String utteranceFindFirstToLondon = "find flights to London in first class";
String utteranceFindEconomyToParis = "find flights to Paris in economy class";
String intentName = "FindFlights";
            UUID intentId = authoringClient.models().addIntent()
.withAppId(appId)
.withVersionId(versionId)
.withName(intentName)
.execute();
System.out.println("Created intent " + intentName + "with ID " + intendId.toString());
//=============================================================
// This will build an EntityLabel Object
System.out.println("Building an EntityLabel Object");
ExampleLabelObject exampleLabelObject1 = new ExampleLabelObject()
.withText(utteranceFindEconomyToMadrid)
.withIntentName(intentName)
.withEntityLabels(Arrays.asList(
getEntityLabelObject(utteranceFindEconomyToMadrid, "Flight", "economy to Madrid"),
getEntityLabelObject(utteranceFindEconomyToMadrid, "Destination", "Madrid"),
getEntityLabelObject(utteranceFindEconomyToMadrid, "Class", "economy")
));
ExampleLabelObject exampleLabelObject2 = new ExampleLabelObject()
.withText(utteranceFindFirstToLondon)
.withIntentName(intentName)
.withEntityLabels(Arrays.asList(
getEntityLabelObject(utteranceFindFirstToLondon, "Flight", "London in first class in first class"),
getEntityLabelObject(utteranceFindFirstToLondon, "Destination", "London in first class"),
getEntityLabelObject(utteranceFindFirstToLondon, "Class", "first")
));
ExampleLabelObject exampleLabelObject3 = new ExampleLabelObject()
.withText(utteranceFindEconomyToParis)
.withIntentName(intentName)
.withEntityLabels(Arrays.asList(
getEntityLabelObject(utteranceFindEconomyToParis, "Flight", "Paris in economy class"),
getEntityLabelObject(utteranceFindEconomyToParis, "Destination", "Paris"),
getEntityLabelObject(utteranceFindEconomyToParis, "Class", "economy")
));
List<BatchLabelExample> utterancesResult = authoringClient.examples()
.batch(appId, versionId, Arrays.asList(exampleLabelObject1, exampleLabelObject2, exampleLabelObject3));
System.out.println("Utterances added to the " + intentName + " intent");
//=============================================================
// This will start training the application.
System.out.println("Training the application");
EnqueueTrainingResponse trainingResult = authoringClient.trains().trainVersion(appId, versionId);
boolean isTrained = trainingResult.status().equals("UpToDate");
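            // Poll once per second until every model reports "UpToDate" or "Success".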
while (!isTrained) {
Thread.sleep(1000);
List<ModelTrainingInfo> status = authoringClient.trains().getStatus(appId, versionId);
isTrained = true;
for (ModelTrainingInfo modelTrainingInfo : status) {
if (!modelTrainingInfo.details().status().equals("UpToDate") && !modelTrainingInfo.details().status().equals("Success")) {
isTrained = false;
break;
}
}
}
//=============================================================
// This will start publishing the application.
System.out.println("Publishing the application");
ProductionOrStagingEndpointInfo publishResult = authoringClient.apps().publish(appId, new ApplicationPublishObject()
.withVersionId(versionId)
.withIsStaging(false)
.withRegion("westus")
);
appEndpoint = publishResult.endpointUrl() + "?subscription-key=" + luisAuthoringKey + "&q=";
System.out.println("Your app is published. You can now go to test it on " + appEndpoint);
return true;
} catch (Exception f) {
System.out.println(f.getMessage());
f.printStackTrace();
}
return false;
}
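    // Builds an entity label by locating the value inside the utterance; String.indexOf
    // returns -1 when the value is not an exact substring, so labels must match the text verbatim.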
private static EntityLabelObject getEntityLabelObject(String utterance, String entityName, String value) {
return new EntityLabelObject()
.withEntityName(entityName)
.withStartCharIndex(utterance.indexOf(value))
.withEndCharIndex(utterance.indexOf(value) + value.length());
}
/**
* Main function which runs the runtime part of the sample.
*
* @param runtimeClient instance of the LUIS Runtime API client
* @return true if sample runs successfully
*/
private static String runLuisRuntimeSample(LuisRuntimeAPI runtimeClient, String text) {
try {
            // TODO: replace the application ID each time you republish the application
appId = UUID.fromString("223c3a69-c59a-4e53-b163-181c6d25dbf2");
//=============================================================
// This will execute a LUIS prediction for a "find second class flight to new york" utterance
String query = text;
LuisResult predictionResult = runtimeClient.predictions().resolve()
.withAppId(appId.toString())
.withQuery(query)
.execute();
System.out.println("Executing query: " + query);
String intentEntities = "";
if (predictionResult != null && predictionResult.topScoringIntent() != null) {
System.out.format("Detected intent \"%s\" with the score %f%%\n", predictionResult.topScoringIntent().intent(), predictionResult.topScoringIntent().score() * 100);
predictionResult.entities();
if (predictionResult.entities() != null && predictionResult.entities().size() > 0) {
StringBuilder sb = new StringBuilder();
sb.append(predictionResult.topScoringIntent().intent());
for (EntityModel entityModel : predictionResult.entities()) {
sb.append(".").append(entityModel.type()).append(".").append(entityModel.entity());
System.out.format("\tFound entity \"%s\" with type %s\n", entityModel.entity(), entityModel.type());
}
System.out.println("sb = " + sb.toString());
intentEntities = sb.toString();
} else {
System.out.println("\tNo entities were found.");
}
} else {
System.out.println("Intent not found.");
}
return intentEntities;
} catch (Exception f) {
System.out.println(f.getMessage());
f.printStackTrace();
}
return "";
}
// Step 7: QnA Maker (Retrieve Response) Missing Java SDK
    // Step 8: Text Translator (Translate Text) Missing Translator SDK in Maven repo
// Step 9: Text to Speech (Still using API)
private static void textToSpeech(String textToSynthesize, String language, String subscriptionKey) {
assert(textToSynthesize != null);
assert(subscriptionKey != null);
String outputFormat = AudioOutputFormat.Riff24Khz16BitMonoPcm;
// String deviceLanguage = "en-US";
String deviceLanguage = language;
String subscriptionKey = "b4570c9e889d0961c10df517ce2d730f"
String genderName = Gender.Male;
String voiceName = "Microsoft Server Speech Text to Speech Voice (en-US, Guy24KRUS)";
try{
byte[] audioBuffer = TTSService.Synthesize(textToSynthesize, outputFormat, deviceLanguage, genderName, voiceName, subscriptionKey);
// write the pcm data to the file
String outputWave = ".\\output.pcm";
File outputAudio = new File(outputWave);
FileOutputStream fstream = new FileOutputStream(outputAudio);
fstream.write(audioBuffer);
fstream.flush();
fstream.close();
// specify the audio format
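            // Riff24Khz16BitMonoPcm: 24 kHz, 16-bit signed PCM, mono (2-byte frames), little-endian.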
AudioFormat audioFormat = new AudioFormat(
AudioFormat.Encoding.PCM_SIGNED,
24000,
16,
1,
1 * 2,
24000,
false);
AudioInputStream audioInputStream = AudioSystem.getAudioInputStream(new File(outputWave));
DataLine.Info dataLineInfo = new DataLine.Info(SourceDataLine.class,
audioFormat, AudioSystem.NOT_SPECIFIED);
SourceDataLine sourceDataLine = (SourceDataLine) AudioSystem
.getLine(dataLineInfo);
sourceDataLine.open(audioFormat);
sourceDataLine.start();
System.out.println("start to play the wave:");
/*
* read the audio data and send to mixer
*/
int count;
byte tempBuffer[] = new byte[4096];
            while ((count = audioInputStream.read(tempBuffer, 0, tempBuffer.length)) > 0) {
sourceDataLine.write(tempBuffer, 0, count);
}
sourceDataLine.drain();
sourceDataLine.close();
audioInputStream.close();
}catch(Exception e){
e.printStackTrace();
}
}
public static void main(String[] args) {
// Add all supported language
Map<String, String> languageMap = new HashMap<String, String>();
languageMap.put("en", "en-US");
languageMap.put("fr", "fr-FR");
System.out.println(
"Please try to ask one of below questions: \n"
+ "1, \"find flights to London in first class\" (ans: london in first class, testing testing) \n"
+ "2, \"find flights to Paris in economy class\" (ans: hello paris) \n"
);
/**
* 1, Process/Filter Speech Stream
* */
// Step 1: Speech Service (Speech To Text)
String SPEECH = System.getenv("Speech");
System.out.println("\n---------------Step 1: Speech to text---------------");
String recognizedText = recognizeSpeech(SPEECH, "westus");
System.out.println("recognized text = " + recognizedText);
// Step 2: Text Analytics (Language Detection)
String TEXT_ANALYSTICS = System.getenv("TextAnalytics");
System.out.println("\n---------------Step 2: Text Analytics---------------");
String detectLangResp="";
try {
detectLangResp = detactFirstLanguage(recognizedText, TEXT_ANALYSTICS);
System.out.println("detect language = " + detectLangResp);
} catch (Exception ex) {
System.out.println("<Text Analytics> " + ex.getMessage());
System.exit(1);
}
// Step 3: Text Translator (Translate Text) // Missing Translator SDK in Maven repo
String TRANSLATOR = System.getenv("Translator");
System.out.println("\n---------------Step 3: Text Translator------[SDK MISSING]---------");
recognizedText = translateText(recognizedText, "en", TRANSLATOR);
System.out.println("translated top ans is, " + recognizedText);
// Step 4: Bing Spell Check (Spell Check)
String SPELL_CHECK = System.getenv("SpellCheck");
System.out.println("\n---------------Step 4: Spelling check---------------");
String correctedText = spellCheck(languageMap.get(detectLangResp), "proof", recognizedText, SPELL_CHECK);
System.out.println("corrected text = " + correctedText);
// Step 5: Content Moderator (Explicit Content Rec)
String CONTENT_MODERATOR = System.getenv("ContentModerator");
System.out.println("\n---------------Step 5: content moderator---------------");
String moderatedText = contentModerator(correctedText, CONTENT_MODERATOR);
System.out.println("content moderated text = " + moderatedText);
/**
* 2, Retrieve Response
*
* using QnA(https://www.qnamaker.ai/) to create Knowledge base
**/
// Step 6: LUIS (Recognize Intent)
String LUIS = System.getenv("LUIS");
System.out.println("\n---------------Step 6: LUIS---------------");
String question = luis(moderatedText, LUIS);
// Step 7: QnA Maker (Retrieve Response) Missing Java SDK
System.out.println("\n---------------Step 7: QnA Maker--------[SDK MISSING]-------");
String ans = GetAnswer.getAns(question);
String topAns = GetAnswer.getTopAns(ans);
System.out.println("answer = " + topAns);
/**
* 3, Generate Output
**/
        // Step 8: Text Translator (Translate Text)
System.out.println("\n---------------Step 8: Text Translator------[SDK MISSING]---------");
topAns = translateText(topAns, detectLangResp, TRANSLATOR);
System.out.println("translated top ans is, " + topAns);
        // Step 9: Speech Service (Text To Speech) // Missing SDK API: some classes are missing, but the AudioInputStream class was found
System.out.println("\n---------------Step 9: Text to Speech-------[SDK MISSING]--------");
textToSpeech(topAns, languageMap.get(detectLangResp), SPEECH);
System.out.println("---------------- End of Conversation AI Pipeline --------------------");
}
}
| [
"[email protected]"
] | |
c91819f9ce17f0642a4a7e15dc0d0cc7c34ed6c1 | fccc9acd62447941a49313c01fcf324cd07e832a | /exe115/sistema.py | 358cfb5e914db10289609b7d41eb882cee96b3dc | [] | no_license | paulovictor1997/Python | fba884ea19ed996c6f884f3fcd3d49c5a34cfd3d | 671d381673796919a19582bed9d0ee70ec5a8bea | refs/heads/master | 2023-04-29T18:01:55.451258 | 2021-05-19T15:12:09 | 2021-05-19T15:12:09 | 354,154,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | from exe115.lib.interface import *
from exe115.lib.arquivo import *
from time import sleep
arq = 'doc.txt'
if not arquivoExiste(arq):
criarArquivo(arq)
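# Create the data file on first run, then loop over the menu until the user picks option 3.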
while True:
    resposta = menu(['View registered people','Register','EXIT'])
    if resposta == 1:
        # This lists the contents of the file
        lerArquivo(arq)
    elif resposta == 2:
        # Register a new person
        cabeçalho('New Registration')
        nome = str(input('Name : '))
        idade = leiaInt('Age : ')
        cadastrar(arq, nome, idade)
    elif resposta == 3:
        cabeçalho('\033[34mExiting... Come back soon!\033[m')
        break
    else:
        print('\033[31mError... Please type again!\033[m')
    sleep(1) | [
"[email protected]"
] | |
a9fa385bdf2356beab9851c83ca610512a6d532c | cb3bce599e657188c30366adb0af3007ff9b8f96 | /src/network/ex29-1.py | eeab288fc33fad3fc9b2440e19e26bdfdd2f8b06 | [] | no_license | skk4/python_study | 534339e6c378d686c29af6d81429c472fca19d6d | 4bdd2a50f4bdfd28fdb89a881cb2ebb9eac26987 | refs/heads/master | 2021-01-01T04:36:52.037184 | 2017-12-08T01:04:27 | 2017-12-08T01:04:27 | 97,207,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | '''
Created on 2017.7.18
@author: Administrator
'''
import socket
'''
a_tuple = [('1', ('2', '22', '222'), '3'),('4',('5', '55', '555'), '6'), ('1', ('7', '77', '777'), '3')]
print [x[1][1] for x in a_tuple]
'''
localip = socket.gethostname()
print localip
fqdn = socket.getfqdn(localip)
print fqdn
result = socket.getaddrinfo(localip, None, 0, socket.SOCK_STREAM)
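# getaddrinfo returns (family, type, proto, canonname, sockaddr) tuples; sockaddr[0] is the IP address.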
ips = [x[4][0] for x in result]
new_ips = ', '.join(ips)
print new_ips
| [
"[email protected]"
] | |
ffa80e415fc11487ad0892a4cbe94af86d8ea98d | 32226e72c8cbaa734b2bdee081c2a2d4d0322702 | /railrl/pythonplusplus.py | 13a5ca3173dd994bc741d65fcb56a3b482ebac76 | [
"MIT"
] | permissive | Asap7772/rail-rl-franka-eval | 2b1cbad7adae958b3b53930a837df8a31ab885dc | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | refs/heads/master | 2022-11-15T07:08:33.416025 | 2020-07-12T22:05:32 | 2020-07-12T22:05:32 | 279,155,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,147 | py | """
General purpose Python functions.
TODO(vitchyr): probably move this to its own module, not under railrl
"""
import math
import sys
import collections
import itertools
def identity(x):
return x
def clip_magnitude(value, magnitude):
"""
Clip the magnitude of value to be within some value.
:param value:
:param magnitude:
:return:
"""
return min(max(value, -magnitude), magnitude)
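# e.g. clip_magnitude(5, 3) == 3 and clip_magnitude(-5, 3) == -3.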
def is_numeric(x):
return not isinstance(x, bool) and (
isinstance(x, int) or isinstance(x, float)
)
"""
Dictionary methods
"""
def dot_map_dict_to_nested_dict(dot_map_dict):
"""
Convert something like
```
{
'one.two.three.four': 4,
'one.six.seven.eight': None,
'five.nine.ten': 10,
'five.zero': 'foo',
}
```
into its corresponding nested dict.
http://stackoverflow.com/questions/16547643/convert-a-list-of-delimited-strings-to-a-tree-nested-dict-using-python
:param dot_map_dict:
:return:
"""
tree = {}
for key, item in dot_map_dict.items():
split_keys = key.split('.')
if len(split_keys) == 1:
if key in tree:
raise ValueError("Duplicate key: {}".format(key))
tree[key] = item
else:
t = tree
for sub_key in split_keys[:-1]:
t = t.setdefault(sub_key, {})
last_key = split_keys[-1]
if not isinstance(t, dict):
raise TypeError(
"Key inside dot map must point to dictionary: {}".format(
key
)
)
if last_key in t:
raise ValueError("Duplicate key: {}".format(last_key))
t[last_key] = item
return tree
def nested_dict_to_dot_map_dict(d, parent_key=''):
"""
Convert a recursive dictionary into a flat, dot-map dictionary.
:param d: e.g. {'a': {'b': 2, 'c': 3}}
:param parent_key: Used for recursion
:return: e.g. {'a.b': 2, 'a.c': 3}
"""
items = []
for k, v in d.items():
new_key = parent_key + "." + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(nested_dict_to_dot_map_dict(v, new_key).items())
else:
items.append((new_key, v))
return dict(items)
def merge_recursive_dicts(a, b, path=None,
ignore_duplicate_keys_in_second_dict=False):
"""
Merge two dicts that may have nested dicts.
"""
if path is None: path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_recursive_dicts(a[key], b[key], path + [str(key)],
ignore_duplicate_keys_in_second_dict=ignore_duplicate_keys_in_second_dict)
elif a[key] == b[key]:
print("Same value for key: {}".format(key))
else:
duplicate_key = '.'.join(path + [str(key)])
if ignore_duplicate_keys_in_second_dict:
print("duplicate key ignored: {}".format(duplicate_key))
else:
raise Exception(
'Duplicate keys at {}'.format(duplicate_key)
)
else:
a[key] = b[key]
return a
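# Example (sketch): merge_recursive_dicts({'a': {'b': 1}}, {'a': {'c': 2}}) yields
# {'a': {'b': 1, 'c': 2}}; differing scalars under the same path raise unless
# ignore_duplicate_keys_in_second_dict=True.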
def dict_of_list__to__list_of_dicts(dict, n_items):
"""
```
x = {'foo': [3, 4, 5], 'bar': [1, 2, 3]}
ppp.dict_of_list__to__list_of_dicts(x, 3)
# Output:
# [
# {'foo': 3, 'bar': 1},
# {'foo': 4, 'bar': 2},
# {'foo': 5, 'bar': 3},
# ]
```
:param dict:
:param n_items:
:return:
"""
new_dicts = [{} for _ in range(n_items)]
for key, values in dict.items():
for i in range(n_items):
new_dicts[i][key] = values[i]
return new_dicts
def list_of_dicts__to__dict_of_lists(lst, enforce_consistent_keys=True):
"""
```
x = [
{'foo': 3, 'bar': 1},
{'foo': 4, 'bar': 2},
{'foo': 5, 'bar': 3},
]
ppp.list_of_dicts__to__dict_of_lists(x)
# Output:
# {'foo': [3, 4, 5], 'bar': [1, 2, 3]}
```
"""
if len(lst) == 0:
return {}
keys = lst[0].keys()
output_dict = collections.defaultdict(list)
for d in lst:
if set(d.keys()) != set(keys):
print("dropping some keys", d.keys())
if enforce_consistent_keys:
assert set(d.keys()) == set(keys)
for k in keys:
output_dict[k].append(d[k])
return output_dict
def safe_json(data):
if data is None:
return True
elif isinstance(data, (bool, int, float)):
return True
elif isinstance(data, (tuple, list)):
return all(safe_json(x) for x in data)
elif isinstance(data, dict):
return all(isinstance(k, str) and safe_json(v) for k, v in data.items())
return False
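# e.g. safe_json([1, 2.0, None]) is True, while safe_json({1: 'x'}) is False
# because dict keys must be strings.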
def dict_to_safe_json(d, sort=False):
"""
Convert each value in the dictionary into a JSON'able primitive.
:param d:
:return:
"""
if isinstance(d, collections.OrderedDict):
new_d = collections.OrderedDict()
else:
new_d = {}
for key, item in d.items():
if safe_json(item):
new_d[key] = item
else:
if (
isinstance(item, dict)
or isinstance(item, collections.OrderedDict)
):
new_d[key] = dict_to_safe_json(item, sort=sort)
else:
new_d[key] = str(item)
if sort:
return collections.OrderedDict(sorted(new_d.items()))
else:
return new_d
def recursive_items(dictionary):
"""
Get all (key, item) recursively in a potentially recursive dictionary.
Usage:
```
x = {
'foo' : {
'bar' : 5
}
}
recursive_items(x)
# output:
# ('foo', {'bar' : 5})
# ('bar', 5)
```
:param dictionary:
:return:
"""
for key, value in dictionary.items():
yield key, value
if type(value) is dict:
yield from recursive_items(value)
# TODO(vitchyr): test methods/classes below
"""
Itertools++
"""
def treemap(f, *args, **kwargs):
"""
Recursively apply a function to a data structure.
Usage:
```
def add(x, y, constant=0):
return x + y + constant
tree1 = (
[1, 2],
{'foo': 3, 'bar': 4},
5
)
tree2 = (
[6, 7],
{'foo': 8, 'bar': 9},
10
)
treemap(add, tree1, tree2, constant=100)
```
will return
```
(
[107, 109],
{'foo': 111, 'bar': 113},
115
)
```
Currently only supports Mapping and Iterable data structures.
:param f: Function to apply.
:param args: Data structures over which to apply the function.
:param kwargs: key-word arguments that are passed to the base function
directly.
:return:
"""
if len(args) == 0:
return f(**kwargs)
if isinstance(args[0], collections.Mapping):
return type(args[0])({
k: treemap(f, *tuple(d[k] for d in args), **kwargs)
for k in args[0]
})
elif isinstance(args[0], collections.Iterable):
return type(args[0])(treemap(f, *a, **kwargs) for a in zip(*args))
else:
return f(*args, **kwargs)
def filter_recursive(x_or_iterable):
"""
Filter out elements that are Falsy (where bool(x) is False) from
potentially recursive lists.
:param x_or_iterable: An element or a list.
:return: If x_or_iterable is not an Iterable, then return x_or_iterable.
Otherwise, return a filtered version of x_or_iterable.
"""
if isinstance(x_or_iterable, list):
new_items = []
for sub_elem in x_or_iterable:
filtered_sub_elem = filter_recursive(sub_elem)
if filtered_sub_elem is not None and not (
isinstance(filtered_sub_elem, list) and
len(filtered_sub_elem) == 0
):
new_items.append(filtered_sub_elem)
return new_items
else:
return x_or_iterable
def batch(iterable, n=1):
"""
Split an interable into batches of size `n`. If `n` does not evenly divide
`iterable`, the last slice will be smaller.
https://stackoverflow.com/questions/8290397/how-to-split-an-iterable-in-constant-size-chunks
Usage:
```
for i in batch(range(0,10), 3):
print i
[0,1,2]
[3,4,5]
[6,7,8]
[9]
```
"""
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
def takespread(sequence, num):
"""
Get `num` elements from the sequence that are as spread out as possible.
https://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n
:param sequence:
:param num:
:return:
"""
length = float(len(sequence))
for i in range(num):
yield sequence[int(math.ceil(i * length / num))]
"""
Custom Classes
"""
class IntIdDict(collections.defaultdict):
"""
Automatically assign int IDs to hashable objects.
Usage:
```
id_map = IntIdDict()
print(id_map['a'])
print(id_map['b'])
print(id_map['c'])
print(id_map['a'])
print(id_map['b'])
print(id_map['a'])
print('')
    print(id_map.reverse_id(0))
    print(id_map.reverse_id(1))
    print(id_map.reverse_id(2))
    ```
    Output:
    ```
    0
    1
    2
    0
    1
    0
'a'
'b'
'c'
```
:return:
"""
def __init__(self, **kwargs):
c = itertools.count()
self.inverse_dict = {}
super().__init__(lambda: next(c), **kwargs)
def __getitem__(self, y):
int_id = super().__getitem__(y)
self.inverse_dict[int_id] = y
return int_id
def reverse_id(self, int_id):
return self.inverse_dict[int_id]
class ConditionTimer(object):
"""
A timer that goes off after the a fixed time period.
The catch: you need to poll it and provide it the time!
Usage:
```
    timer = ConditionTimer(100) # next check will be true at 100
timer.check(90) # False
timer.check(110) # True. Next check will go off at 110 + 100 = 210
timer.check(205) # False
timer.check(210) # True
```
"""
def __init__(self, trigger_period):
"""
:param trigger_period: If None or 0, `check` will always return False.
"""
self.last_time_triggered = 0
if trigger_period is None:
trigger_period = 0
self.trigger_period = trigger_period
def check(self, time):
if self.always_false:
return False
if time - self.last_time_triggered >= self.trigger_period:
self.last_time_triggered = time
return True
else:
return False
@property
def always_false(self):
return self.trigger_period == 0
def string_tuple_to_string(strings):
if len(strings) == 0:
string = ""
elif len(strings) == 1:
string = strings[0]
else:
string = " ".join([str(s) for s in strings])
return string
class _Logger(object):
def __init__(self):
self.n_chars = 0
self.lines = []
def print_over(self, *strings):
"""
Remove anything printed in the last printover call. Then print `string`
"""
string = string_tuple_to_string(strings)
sys.stdout.write("\r" * self.n_chars)
sys.stdout.write(string)
sys.stdout.flush()
self.n_chars = len(string)
def newline(self):
sys.stdout.write("\n")
sys.stdout.flush()
self.n_chars = 0
line_logger = _Logger()
def find_key_recursive(obj, key):
if key in obj:
return obj[key]
for k, v in obj.items():
if isinstance(v, dict):
result = find_key_recursive(v, key)
if result is not None:
return result
| [
"[email protected]"
] | |
250187df27f88d6af5ce301cfd96f7a72e3f38cd | b48764e6684ffbd73b0043dc889c013860642e8d | /1학기/퍼스널컬러.py | 8691df840217d0d309f14ff4a0e152e708a3cc16 | [] | no_license | tanghee/Programming-Python- | c6d32a1e49d5c95c8359aeb8775cb52cc665167a | eb402357ad31638d867042e76af507bc6c67a0b4 | refs/heads/master | 2022-03-27T07:27:18.888660 | 2019-12-10T02:06:41 | 2019-12-10T02:06:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,778 | py | #2416임태희
#18개의 질문을 통해 자가진단을 진행합니다
#예를 들어, Q1의 답이 0이라면 Q4의 질문으로 답이 1이라면 Q2의 질문으로 넘어가는 방식입니다.
#함수 하나씩 질문을 넣어서 해당 질문의 답에 따라서 다음 함수를 호출해주는 방식으로 진행합니다.
print("FIND YOUR COLOR!")
print("♡쉽고 빠른 자가진단으로 나만의 컬러를 찾아보세요♡\n")
#함수 q1() ~ q18()과 spring(), summer(), autumn(), winter()를 생성해 줍니다.
#자가진단 결과를 spring(), summer(), autumn(), winter() 함수를 호출해서 알려준다.
#봄웜톤
def spring():
print("당신은 ☆봄웜톤☆ 입니다!")
print("이 타입의 사람들은 생기발랄하고 젊은 느낌을 줍니다.")
print("피부색 >> 복숭아빛의 밝고 노란빛의 투명한 피부를 가지고 있습니다.")
print("머리색 >> 대체로 눈동자색과 비슷한 밝은 갈색으로 윤기나고 찰랑찰랑한 머릿결을 가지고 있습니다.")
print("눈동자색 >> 밝은갈색으로 빛이 나고 맑아 보입니다.")
print("대표연예인 >> 수지, 설리, 아이유가 있습니다.\n")
#여름쿨톤
def summer():
print("당신은 ☆여름쿨톤☆ 입니다!")
print("이 타입의 사람들은 우아하고 여성스러운 느낌을 줍니다.")
print("피부색 >> 핑크빛이 도는 혈색이 좋은 피부를 가지고 있습니다.")
print("머리색 >> 약간 부시시한 회갈색 머리카락을 가지고 있습니다.")
print("눈동자색 >> 차분하고 부드러운 갈색의 눈동자를 가지고 있습니다.")
print("대표연예인 >> 손예진, 김하늘이 있습니다.\n")
#가을웜톤
def autumn():
print("당신은 ☆가을웜톤☆ 입니다!")
print("이 타입의 사람들은 어른스럽고 차분한 이미지를 가지고 있습니다.")
print("피부색 >> 노르스름하며 윤기가 없고, 얼굴의 혈색이 없는 편입니다.")
print("머리색 >> 윤기가 없는 짙은갈색입니다.")
print("눈동자색 >> 짙고 깊이감있는 짙은 황갈색 계열입니다.")
print("대표연예인 >> 이효리, 박정아, 탕웨이가 있습니다.\n")
#겨울쿨톤
def winter():
print("당신은 ☆겨울쿨톤☆ 입니다!")
print("이 타입의 사람들은 심플하면서 모던한 스타일로 도회적입니다.")
print("피부색 >> 희고 푸른빛을 지니고 있어 차갑고 창백해 보입니다.")
print("머리색 >> 푸른빛이 도는 짙은갈색이나 검은색입니다.")
print("눈동자색 >> 검은색이나 짙은회갈색입니다.")
print("대표연예인 >> 김혜수, 선우선이 있습니다.\n")
def q1():
>당신의 피부톤은">
    answer = input(">>What is your skin tone? (0: fair, 1: dark) : ")
if(answer == "0"):
q4()
elif(answer == "1"):
q2()
else:
q1()
def q2():
>당신의 눈동자색은">
    answer = input(">>What color are your eyes? (0: black, 1: dark brown, 2: light brown) : ")
if(answer == "0"):
q5()
elif(answer == "1"):
q5()
elif(answer == "2"):
q3()
else:
q2()
def q3():
>당신과 잘 어울리는 아이섀도우">
    answer = input(">>Which eyeshadow family suits you best? (0: gray tones, 1: brown tones) : ")
if(answer == "0"):
q5()
elif(answer == "1"):
q11()
else:
q3()
def q4():
>당신의 눈 인상은">
    answer = input(">>What impression do your eyes give? (0: strong, 1: soft) : ")
if(answer == "0"):
q5()
elif(answer == "1"):
q7()
else:
q4()
def q5():
>당신에게 어울리는 분홍색은">
    answer = input(">>Which pink suits you? (0: hot pink, 1: coral pink) : ")
if(answer == "0"):
q10()
elif(answer == "1"):
q8()
else:
q5()
def q6():
>연분홍색이나">
    answer = input(">>Do cozy, lovely colors like light pink or light yellow suit you? (0: they suit me, 1: they do not) : ")
if(answer == "0"):
q17()
elif(answer == "1"):
q14()
else:
q6()
def q7():
>화장하지">
    answer = input(">>How do you look in black clothes with no makeup? (0: my features stand out clearly, 1: my complexion looks unhealthy) : ")
if(answer == "0"):
q10()
elif(answer == "1"):
q5()
else:
q7()
def q8():
>당신에게 잘 어울리는 액세서리는">
    answer = input(">>Which accessories suit you best? (0: gold pieces, 1: silver pieces) : ")
if(answer == "0"):
q6()
elif(answer == "1"):
q9()
else:
q8()
def q9():
>황토색">
    answer = input(">>Do calm, refined colors like ochre, mustard, moss, or reddish brown suit you? (0: they suit me, 1: they do not) : ")
if(answer == "0"):
q15()
elif(answer == "1"):
q6()
else:
q9()
def q10():
>당신의 첫인상은">
    answer = input(">>What is your first impression like? (0: strong, 1: soft, 2: ordinary) : ")
if(answer == "0"):
q13()
elif(answer == "1"):
q11()
elif(answer == "2"):
q8()
else:
q10()
def q11():
>햇볕에">
    answer = input(">>What happens to your skin in the sun? (0: it tans easily, 1: it does not tan easily, 2: neither applies) : ")
if(answer == "0"):
q9()
elif(answer == "1"):
q8()
elif(answer == "2"):
q12()
else:
q11()
def q12():
>상대방이">
    answer = input(">>How do others see you? (0: friendly and soft, 1: strong and cold) : ")
if(answer == "0"):
q17()
elif(answer == "1"):
q14()
else:
q12()
def q13():
>당신과 잘 어울리는 색은">
    answer = input(">>Which colors suit you best? (0: vivid primary colors, 1: soft pastel colors) : ")
if(answer == "0"):
q14()
elif(answer == "1"):
q8()
else:
q13()
def q14():
>당신의 얼굴 가까이에">
    answer = input(">>Held close to your face, which flower suits you best? (0: a red rose, 1: a pink tulip) : ")
if(answer == "0"):
q18()
elif(answer == "1"):
q17()
else:
q14()
def q15():
>당신의 머리색은">
    answer = input(">>What is your hair color? (0: dark brown, 1: deep black, 2: light brown, 3: soft black) : ")
if(answer == "0"):
q18()
elif(answer == "1"):
q18()
elif(answer == "2"):
q14()
elif(answer == "3"):
q14()
else:
q15()
def q16():
>당신의 얼굴은">
    answer = input(">>Does your face look young for your age? (0: yes, 1: no) : ")
if(answer == "0"):
print("\n")
spring()
elif(answer == "1"):
print("\n")
autumn()
else:
q16()
def q17():
>당신에게 잘 어울리는 니트색상은">
    answer = input(">>Which knit color suits you best? (0: a warm color with a yellow cast, 1: a cool color with a blue cast) : ")
if(answer == "0"):
q16()
elif(answer == "1"):
print("\n")
summer()
else:
q17()
def q18():
>당신이 어두운색">
    answer = input(">>If you wore a dark suit, which color would suit you? (0: black tones, 1: gray tones, 2: dark brown tones) : ")
if(answer == "0"):
print("\n")
winter()
elif(answer == "1"):
print("\n")
winter()
elif(answer == "2"):
print("\n")
autumn()
else:
q18()
# The program starts by calling q1().
q1() | [
"[email protected]"
] | |
760e6204b7ba9ca5639eb67bc4f8cc5c7db2f082 | 9e419006675f6991480f350017798a4b3e0ccbd8 | /borrow/borrow/wsgi.py | a341999c24e547fbe6d5387a7a8e21b63835555e | [
"MIT"
] | permissive | chairco/django-tutorial-borrow | 263ccb7305f29d4bcc3139bd65605df70ec30498 | 28f747e115feabdcbd96d15fbc73f1c5d22236cd | refs/heads/master | 2021-01-17T19:51:56.967984 | 2016-07-12T04:41:17 | 2016-07-12T04:41:17 | 63,126,576 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | """
WSGI config for borrow project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "borrow.settings")
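# Wrap the WSGI application with dj-static's Cling so static files are served by the app process.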
application = Cling(get_wsgi_application())
#application = get_wsgi_application()
| [
"[email protected]"
] | |
57e1c158e17a6f159aaf900eef82d7f9d995f7ef | 10b3d1ce02eaa4908dc16ca378ddfb1955b2d625 | /MV3D_TF_release/lib/utils/construct_voxel.py | 456ee612b2788efdc122a5a6282577d0c3396a2d | [
"MIT",
"BSD-3-Clause"
] | permissive | ZiningWang/Sparse_Pooling | 7281aa0d974849eac8c48faa5ba08519b091ef6e | f46882832d0e2fed5ab4a0af15cead44fd3c6faa | refs/heads/master | 2023-05-26T08:47:16.232822 | 2023-05-20T08:39:11 | 2023-05-20T08:39:11 | 141,640,800 | 56 | 21 | null | null | null | null | UTF-8 | Python | false | false | 6,772 | py | import numpy as np
import matplotlib.pyplot as plt
import time
import os,sys
#add library to the system path
lib_path = os.path.abspath(os.path.join('lib'))
sys.path.append(lib_path)
from utils.transform import calib_to_P,clip3DwithinImage,projectToImage,lidar_to_camera
from utils.config_voxels import cfg
side_range = (cfg.Y_MIN, cfg.Y_MAX-0.01)
fwd_range = (cfg.X_MIN, cfg.X_MAX-0.01)
height_range = (cfg.Z_MIN, cfg.Z_MAX-0.01) #
res = cfg.VOXEL_X_SIZE
zres = cfg.VOXEL_Z_SIZE
NUM_VOXEL_FEATURES = 7
MAX_NUM_POINTS=cfg.VOXEL_POINT_COUNT
'''
def lidar_bv_append(scan,calib,img_size,in_camera_frame=False):
#return the additional data for MV3D_img
if not(in_camera_frame):
P = calib_to_P(calib)
indices = clip3DwithinImage(scan[:,0:3].transpose(),P,img_size)
scan = scan[indices,:]
else:
P = calib_to_P(calib,from_camera=True)
bv_index,scan_filtered,bv_size = point_in_bv_indexes(scan)
N = bv_index.shape[0]
img_points = projectToImage(scan_filtered,P)
img_index = np.round(img_points).astype(int)
img_index = np.vstack((img_index,np.zeros((1,N))))
return {'bv_index':bv_index,'img_index':img_index,'bv_size':bv_size,'img_size':img_size}
'''
def point_cloud_2_top_sparse(points,
res=res,
zres=zres,
side_range=side_range, # left-most to right-most
fwd_range=fwd_range, # back-most to forward-most
height_range=height_range, # bottom-most to upper-most
top_count = None,
to_camera_frame = False,
points_in_cam = False,
calib=None,
img_size = [0,0],
augmentation=False,
img_index2=None
):
""" Creates an birds eye view representation of the point cloud data for MV3D.
WZN: NOTE to get maximum speed, should feed all LIDARs to the function because we wisely initialize the grid
"""
#t0 = time.time()
if to_camera_frame:
indices = clip3DwithinImage(points[:,0:3].transpose(),P,img_size)
points = points[indices,:]
img_index2 = img_index2[:,indices]
points[:,0:3] = lidar_to_camera(points[:,0:3].transpose(),calib)
points_in_cam = True
if points_in_cam:
points = points[:,[2,0,1,3]] #forward, side, height
#x_points = points[:, 1]
#y_points = points[:, 2]
#z_points = points[:, 0]
else:
assert False, 'Wrong, cannot process LIDAR coordinate points'
points[:,1] = -points[:1]
#x_points = points[:, 0]
#y_points = -points[:, 1]
#z_points = points[:, 2]
# INITIALIZE EMPTY ARRAY - of the dimensions we want
x_max = int((side_range[1] - side_range[0]) / res)
y_max = int((fwd_range[1] - fwd_range[0]) / res)
z_max = int((height_range[1] - height_range[0]) / zres)
voxel_full_size = np.array([ z_max+1, x_max+1, y_max+1])
'''
if top_count is None:
top_count = np.zeros([y_max+1, x_max+1, z_max+1],dtype=int)-1
else:
assert x_max==(top_count.shape[1]-1) and y_max==(top_count.shape[0]-1), 'shape mismatch of top_count, %d vs. %d and %d vs. %d'%(x_max,top_count.shape[1]-1,y_max,top_count.shape[0]-1)
'''
f_filt = np.logical_and(
(points[:, 0] > fwd_range[0]), (points[:, 0] < fwd_range[1]))
s_filt = np.logical_and(
(points[:, 1] > side_range[0]), (points[:, 1] < side_range[1]))
z_filt = np.logical_and(
(points[:, 2] > height_range[0]), (points[:, 2] < height_range[1]))
filter = np.logical_and(np.logical_and(f_filt, s_filt),z_filt)
#print np.sum(f_filt),np.sum(s_filt),np.sum(z_filt)
points_filt = points[filter,:] #fwd,side,height
img_index2 = img_index2[:,filter]
xyz_points = points_filt[:, 0:3]
xyz_img = np.zeros_like(xyz_points,dtype=int)
#points_filt = points_filt.tolist()
#reflectance = points_filt[:,3]
#print 'init time: ', time.time()-t0
#t0 = time.time()
counter_all = 0
counter_voxels = 0
# CONVERT TO PIXEL POSITION VALUES - Based on resolution
# SHIFT PIXELS TO HAVE MINIMUM BE (0,0)
# floor & ceil used to prevent anything being rounded to below 0 after
xyz_img[:,0] = (((xyz_points[:,1]-side_range[0]) / res).astype(np.int32)) # x axis is -y in LIDAR
xyz_img[:,1] = (((xyz_points[:,0]-fwd_range[0]) / res).astype(np.int32)) # y axis is -x in LIDAR
xyz_img[:,2] = (((xyz_points[:,2]-height_range[0]) / zres).astype(np.int32))
#print xyz_img.shape
unique_xyz,indices_inv = np.unique(xyz_img,axis=0,return_inverse=True,return_counts=False)
counter_voxels = unique_xyz.shape[0]
top_sparse = np.zeros([counter_voxels,MAX_NUM_POINTS,NUM_VOXEL_FEATURES])
    #WZN: the first column is always 0, which indicates the batch number!!! IMPORTANT
indices_and_count_sparse = np.zeros([counter_voxels,5],dtype=int)#.tolist()
indices_and_count_sparse[:,1:4] = unique_xyz[:,[2,0,1]] # voxel shape is 1x10(updpw)x200(side)x240(fwd) for network
#indices_and_count_sparse = np.array([[0]*4]*counter_voxels)
#print indices_and_count_sparse.shape
filt_indices = []
for j in range(xyz_img.shape[0]):
sparse_index = indices_inv[j]
num_points = indices_and_count_sparse[sparse_index,-1]
if num_points<MAX_NUM_POINTS:
top_sparse[sparse_index,num_points,0:4] = points_filt[j]
indices_and_count_sparse[sparse_index,-1] += 1
filt_indices.append(j)
top_sparse[:,:,4:7] = top_sparse[:,:,0:3]-np.expand_dims(np.sum(top_sparse[:,:,0:3],axis=1)/indices_and_count_sparse[:,4:5],1)
    # so for coordinates, it is [y_img(from z_cam), x_img(from x_cam), z_img(from y_cam)], but for features it is [z_cam(x_lidar),x_cam(-y_lidar),y_cam(z_lidar)]
voxel_dict = {'feature_buffer': top_sparse,
'coordinate_buffer': indices_and_count_sparse[:,0:4],
'number_buffer': indices_and_count_sparse[:,-1]}
#construct image indexes
if points_in_cam:
P = calib_to_P(calib,from_camera=True)
else:
assert False, 'Wrong, cannot process LIDAR coordinate points'
img_index2 = img_index2[:,filt_indices]
N = img_index2.shape[1]
img_index = np.vstack((img_index2,np.zeros((1,N)).astype(int)))
bv_index = xyz_img[filt_indices,:][:,[1,0]]
M_val = 1.0/(indices_and_count_sparse[indices_inv[filt_indices],-1])
return voxel_dict,voxel_full_size,img_index,bv_index,M_val | [
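# Minimal usage sketch (hypothetical names): given an (N, 4) camera-frame cloud `pts`,
# a parsed calibration dict `calib`, and 2xN image indexes `idx2`:
# voxel_dict, size, img_idx, bv_idx, M = point_cloud_2_top_sparse(
#     pts, points_in_cam=True, calib=calib, img_size=[375, 1242], img_index2=idx2)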
"[email protected]"
] | |
bbf8704f9a342da4a6242959c75449f67e72b2f1 | 9450d31f41d59f238d4db6b10ac4819cfce8a32b | /run_game.py | 4a5cabddccc61a400caf445609b916ed03c2f6ab | [
"BSD-3-Clause"
] | permissive | Python-Repository-Hub/pyweek24 | 4ed4959cecd0ac55e2fa68756cbdf15aa1f246af | 284dc9c1a152fca8e39cf9d637f089ab772b3afd | refs/heads/master | 2022-06-15T02:51:14.652778 | 2018-02-13T19:23:21 | 2018-02-13T19:23:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | import sys
if sys.version_info < (3, 6):
sys.exit(
"This game requires Python 3.6 or later."
)
import os
from pathlib import Path
dist = Path(__file__).parent.resolve()
src = str(dist / 'src')
sys.path.insert(0, src)
os.chdir(src)
try:
import main
except ImportError:
import traceback
traceback.print_exc()
req = dist / 'requirements.txt'
sys.exit(
"""
Please ensure you have the following packages installed:
%s
You can run 'pip install -r requirements.txt' to install these (currently this
will require a compiler to be configured).
You will also require AVBin from
https://avbin.github.io/AVbin/Download.html
""" % req.read_text()
)
| [
"[email protected]"
] | |
bba69b0ee5109c3789eb94c4c13b6199daacbf77 | a138092a4fd0bd46e21fade96fea5dfba7742e20 | /scratches/dicts.py | 028103db5eaeb53cb84611165d1ed1e15a955fb8 | [] | no_license | DuaneNielsen/CartPoleQ | 979c00ca15bbac0719ba9197bedffe829f322561 | 202b82cf2b04aaa63965277c326413c62f188ed2 | refs/heads/master | 2020-03-25T07:27:22.651681 | 2018-09-07T01:40:35 | 2018-09-07T01:40:35 | 143,562,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py |
dict = {}
def increment():
if 'key' not in dict:
dict['key'] = 1
else:
dict['key'] += 1
print(dict['key'])
increment()
increment() | [
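# The first call stores 1, the second increments it, so this prints 1 then 2.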
"[email protected]"
] | |
044e3599fa084cf436a5af3b58daa4d5d4aafefc | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Flask/FlaskIntroduction/env/lib/python3.6/site-packages/jinja2/idtracking.py | e507483177b9aae92710ac72877a042a2f540cb0 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d866c34b322f180ac40462e4a2f2e4a847e6631996b047fc737419c0ce2e36cc
size 9197
| [
"[email protected]"
] | |
4ff89d0aec60651bf2ed85d48c0862f3c24b27ea | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/r86.py | 8fe6454077c475b77f020a7b0ffd6d8ea3b6ce9b | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'r86':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
ee32622758a3d1e38bdf665d2426e7b04d9517db | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /HyLkfdagDGc99ZhbF_2.py | 5cbf4778f6f92984c2090e629a1ebc045c4aaa40 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | """
Create a function that takes a number `n` (integer greater than zero) as an
argument, and returns `2` if `n` is odd and `8` if `n` is even.
You can only use the following arithmetic operators: addition of numbers `+`,
subtraction of numbers `-`, multiplication of number `*`, division of number
`/`, and exponentiation `**`.
You are not allowed to use any other methods in this challenge (i.e. no if
statements, comparison operators, etc).
### Examples
f(1) ➞ 2
f(2) ➞ 8
f(3) ➞ 2
### Notes
N/A
"""
def f(n):
return 5 + 3 * (-1) ** ((n / 2 - n // 2) * 2)
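# Worked check: for odd n, n / 2 - n // 2 == 0.5, so the exponent is 1 and
# (-1) ** 1 == -1, giving 5 + 3 * (-1) == 2; for even n the fractional part is
# 0, so (-1) ** 0 == 1 and the result is 5 + 3 * 1 == 8.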
| [
"[email protected]"
] | |
1ed343ca0e6cbe3de6a727240f40e690f376f493 | cf89f6250926d993df642d1204e82f73529500e9 | /app/backend/views.py | 2a6d11060a3e88b1cfa6089bf7e89809881ae2e8 | [] | no_license | gurnitha/django-login-logout | ce05911608122fa7dc560c5c230f2ed7a8ba95e0 | 3be89a432e2e9f9299ba670fce319f96e0af0cfa | refs/heads/main | 2023-04-15T11:34:28.639319 | 2021-05-02T04:33:00 | 2021-05-02T04:33:00 | 363,554,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,552 | py | # app/backend/views.py
# from django.shortcuts import render
# from django.contrib.auth import authenticate, login, logout
# from django.contrib import messages
# from django.http import HttpResponseRedirect
# from django.urls import reverse
# # Create your views here.
# def homePageAdmin(request):
# return render(request, 'backend/home.html')
# #adminLogin
# def adminLogin(request):
# # Get the input (username and password) from the login form
# username=request.POST.get('username')
# password=request.POST.get('password')
# # Authenticate user input(credentials)
# user=authenticate(
# request=request,username=username,password=password)
# # If user exist in the database
# if user is not None:
# login(request=request, user=user)
# return HttpResponseRedirect(reverse('home_admin'))
# # If user is not exist in the database
# else:
# messages.error(request, 'Invalid login detail!')
# return HttpResponseRedirect(reverse('login_page'))
# def adminLoginProcess(request):
# return render(request, 'backend/login_process.html')
# START FROM ZERO AGAIN
# app/dashboard/views.py
from django.shortcuts import render
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse
'''import login_required module to add conditionl to user login'''
from django.contrib.auth.decorators import login_required
# Create your views here.
'''Only loggeg in user can access the admin dashboard'''
@login_required(login_url='/admin/')
def adminHome(request):
messages.success(request, 'Logged in successfully!')
return render(request, 'backend/home.html')
def adminLogin(request):
return render(request, 'backend/login.html')
def adminLoginProcess(request):
# Get input from the login form
username=request.POST.get('username')
password=request.POST.get('password')
# Authenticate user credentials
user=authenticate(
request=request,
username=username,
password=password)
# If user exist
if user is not None:
login(request=request, user=user)
return HttpResponseRedirect(reverse('admin_home'))
# If user not exist
else:
messages.error(
request, 'Login error! Invalid login detail!')
return HttpResponseRedirect(reverse('admin_login'))
def adminLogoutProcess(request):
logout(request)
messages.success(request, 'Logged out successfully!')
return HttpResponseRedirect(reverse('admin_login'))
| [
"[email protected]"
] | |
0d2b7781d78e09fb307adba23b53e92b6e127e33 | e3b9aa9b17ebb55e53dbc4fa9d1f49c3a56c6488 | /microsoft_teams/icon_microsoft_teams/triggers/__init__.py | a7ec027c112c3744e743b15fed58e24d229634a5 | [
"MIT"
] | permissive | OSSSP/insightconnect-plugins | ab7c77f91c46bd66b10db9da1cd7571dfc048ab7 | 846758dab745170cf1a8c146211a8bea9592e8ff | refs/heads/master | 2023-04-06T23:57:28.449617 | 2020-03-18T01:24:28 | 2020-03-18T01:24:28 | 248,185,529 | 1 | 0 | MIT | 2023-04-04T00:12:18 | 2020-03-18T09:14:53 | null | UTF-8 | Python | false | false | 101 | py | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .new_message_received.trigger import NewMessageReceived
| [
"[email protected]"
] | |
641d1c9fb4ce72e07ab9d93104d0a361c98c83e6 | d71978ac89d21de391174c4a6f96edc38142b51f | /src/front/form.py | 2ca88661ab5304c968d1f95b72f6182ceb74a0ee | [] | no_license | mzakany23/django-calendar-app | 4d5fd4f4f5e835f0b81904ea5040f43f2f7e4cb0 | 1cedb92214e9649bb1c9ebcd64e78a66b1d5232b | refs/heads/master | 2016-09-05T23:39:40.026293 | 2015-03-27T00:21:47 | 2015-03-27T00:21:47 | 32,084,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | from django import forms
class LoginForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(
attrs={
'placeholder' : 'Username',
'class' : 'form-control',
'id' : 'inputEmail',
}))
password = forms.CharField(widget=forms.PasswordInput(
attrs={
'placeholder' : 'Password',
'class' : 'form-control',
'id' : 'inputPassword',
}))
| [
"[email protected]"
] | |
e775477bbdf1b10bfe37482cb0347cb2b9f64e68 | 776cf3b0f5865c8639692e1256abb5ad493c9f92 | /__old_stuff/pga/pga_no_sort/ga.py | 44c16031213aaa30fc75f982ad3221ebed52c595 | [] | no_license | ralphbean/ms-thesis | 90afb1d5729d83f1910d8dec2e6d4c65d0304bc0 | 3fea08aa069d735fb7048afbab37bb429800fb48 | refs/heads/master | 2021-01-19T11:28:14.382925 | 2012-01-25T15:24:54 | 2012-01-25T15:24:54 | 3,265,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,793 | py | #!/usr/bin/python
from time import time
from random import random, randint
import shelve, sys, gc
import crossover as cvr
import mutation as mut
import metrics as mtr
import selection as sel
# A constant:
first = False
num_trials = 3
max_gens = 3000
# Print things to stdout and log things that need logging
def IO_update(ID, generation, pop, max_gens):
print "\rL:", mtr.fitness(pop[0]), "a:", pop[0]['amplitude'],
print "%", 100.0 * float(generation)/(max_gens-1),
print " First:", first,
sys.stdout.flush()
# Print stuff to stdout:
# print ID
# print "generation: ", generation
# print " best:", mtr.fitness(pop[0]), "a:", pop[0]['amplitude']
# print " secn:", mtr.fitness(pop[1]), "a:", pop[1]['amplitude']
# print " wrst:", mtr.fitness(pop[-1]), "a:", pop[-1]['amplitude']
# Log stuff to file with shelve
d = shelve.open("dat/" + str(ID) + "." + str(generation) + ".pop" )
d['pop'] = pop
d.close()
def initialize_pop():
# Some initialization constants:
lower_size = 2
upper_size = 50
num = 100
pop = []
for j in range(num):
print "\rInitializing Population %", 100*float(j)/(num-1),
sys.stdout.flush()
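        # Each organism: a variable-length genome of weights in [-1, 1), a zeroed
        # 3-element state vector, and an amplitude drawn from [0.05, 0.15).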
org = { 'org':
[[random()*2-1 for i in range(randint(lower_size, upper_size))],
[0,0,0]],
'amplitude' : random() * 0.1 + 0.05 }
org['fitness'] = mtr.fitness(org)
pop.append(org)
print " Done."
return pop
def handle_args():
if len(sys.argv) != 5:
print "Usage:"
print " ga.py <comparator> <crossover> <mutation> <selection>"
sys.exit(1)
cmp_fnc = mtr.fn_list[int(sys.argv[1])]
c_over_op = cvr.fn_list[int(sys.argv[2])]
select_op = sel.fn_list[int(sys.argv[4])]
return cmp_fnc, c_over_op, select_op
def do_experiment(cmp_fnc, c_over_op, select_op, trial, force=False):
ID = str(cmp_fnc)+"."+str(c_over_op)+"."+str(select_op)+"."+str(trial)
cmp_fnc = mtr.fn_list[cmp_fnc]
c_over_op = cvr.fn_list[c_over_op]
select_op = sel.fn_list[select_op]
print "ID:", ID,
print str(cmp_fnc)[10:-15],str(c_over_op)[10:-15],str(select_op)[10:-15]
pop = None
generation = 0
while ( generation < max_gens ):
# First check to see if this experiment is already done...
d = shelve.open("dat/"+ID+"." + str(generation) + ".pop")
if 'pop' in d:
prog = 100.0 * float(generation)/(max_gens-1)
print "\rAlready computed. Skipping ahead. %",prog," f:",first,
sys.stdout.flush() # Update our percentage ticker.
generation = generation + 1 # Advance the generation counter.
pop = d['pop'] # Load that population into memory.
d.close()
continue
d.close()
# Initialize our population if we haven't already
if not pop:
pop = initialize_pop()
# Otherwise we need to compute!
pop.sort(lambda x,y : mtr.comparator(x,y, cmp_fnc) ) # Eval and sort
IO_update(ID, generation, pop, max_gens) # Spit out status
pop = select_op(pop, c_over_op, cmp_fnc) # Breed
generation = generation + 1 # Tick
# Forcibly revaluate the fitness of the hero.
try:
del pop[0]['fitness']
except KeyError:
pass
print " Done."
def combinations():
combins = []
for i in range(len(mtr.fn_list)):
for j in range(len(cvr.fn_list)):
for k in range(len(sel.fn_list)):
for l in range(num_trials):
combins.append([i,j,k,l])
if first:
combins = combins[:len(combins)/2]
else:
combins = combins[len(combins)/2:]
print "Total number of combinations: ", len(combins)
return combins
if __name__ == '__main__':
times = []
combins = combinations()
for i in range(len(combins)):
cmp_fnc, c_over_op, select_op, trial = combins[i]
start = time()
results = do_experiment(cmp_fnc, c_over_op, select_op, trial)
times.append(time() - start)
print "Trial:", times[-1]/(60**2), "(h).",
avg = sum(times)/(60**2 * len(times))
print "Average:", avg, "(h). GC:", gc.get_count()
p_done = 100*float(i+1)/(len(combins))
h_elap = sum(times)/(60**2)
print "%",p_done,"done with entire experiment.", h_elap, "(h) elapsed."
h_left = h_elap*(100-p_done)/p_done
print "Expect to be done in", h_left, "(h)."
print
# Get the function pointers from the arg list
#cmp_fnc, c_over_op, select_op = handle_args()
#do_experiment(cmp_fnc, c_over_op, select_op, 0)
| [
"[email protected]"
] | |
c2397ffc084b0e11f98d99e0cc1d0f65d8625303 | 356fb16011048aa62fcdbed574f310ac7ae37fa3 | /discograph/library/RelationGrapher.py | 2e337d487aaf341d4fef32f183834c847cf855d8 | [
"MIT"
] | permissive | inostia/discograph | 622470dbc4739ada9ea430a73a473bbe22f5db0a | 21f3d842582ff488069233258161202fa3c9cf69 | refs/heads/master | 2021-01-17T23:17:29.176444 | 2015-10-02T20:16:10 | 2015-10-02T20:16:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,294 | py | # -*- encoding: utf-8 -*-
import collections
import re
import six
from discograph.library.mongo.Artist import Artist
from discograph.library.mongo.CreditRole import CreditRole
from discograph.library.mongo.Label import Label
from discograph.library.sqlite.SqliteArtist import SqliteArtist
from discograph.library.sqlite.SqliteLabel import SqliteLabel
from discograph.library.sqlite.SqliteRelation import SqliteRelation
class RelationGrapher(object):
### CLASS VARIABLES ###
word_pattern = re.compile('\s+')
### INITIALIZER ###
def __init__(
self,
center_entity,
cache=None,
degree=3,
max_links=None,
max_nodes=None,
role_names=None,
):
prototype = (Artist, Label, SqliteArtist, SqliteLabel)
assert isinstance(center_entity, prototype)
self.center_entity = center_entity
degree = int(degree)
assert 0 < degree
self.degree = degree
self.cache = cache
if max_links is not None:
max_links = int(max_links)
assert 0 < max_links
self.max_links = max_links
if max_nodes is not None:
max_nodes = int(max_nodes)
assert 0 < max_nodes
self.max_nodes = max_nodes
role_names = role_names or None
if role_names:
if isinstance(role_names, six.string_types):
role_names = (role_names,)
elif not isinstance(role_names, collections.Iterable):
role_names = (role_names,)
role_names = tuple(role_names)
assert all(_ in CreditRole.all_credit_roles
for _ in role_names)
self.role_names = role_names
@classmethod
def get_link_key(cls, link):
source_type, source_id = link['source']
if source_type == 1:
source_type = 'artist'
else:
source_type = 'label'
target_type, target_id = link['target']
if target_type == 1:
target_type = 'artist'
else:
target_type = 'label'
return '{}-{}-{}-{}-{}'.format(
source_type,
source_id,
cls.word_pattern.sub('-', link['role']).lower(),
target_type,
target_id,
)
def relation_to_link(self, relation):
link = relation.copy()
entity_one_id = link['entity_one_id']
entity_one_type = link['entity_one_type']
#entity_one_type = Relation.EntityType(entity_one_type)
#entity_one_type = entity_one_type.name.lower()
source_key = (entity_one_type, entity_one_id)
link['source'] = source_key
entity_two_id = link['entity_two_id']
entity_two_type = link['entity_two_type']
#entity_two_type = Relation.EntityType(entity_two_type)
#entity_two_type = entity_two_type.name.lower()
target_key = (entity_two_type, entity_two_id)
link['target'] = target_key
link['role'] = link['role_name']
link['key'] = self.get_link_key(link)
if '_id' in link: del(link['_id'])
if 'country' in link: del(link['country'])
if 'entity_one_id' in link: del(link['entity_one_id'])
if 'entity_one_type' in link: del(link['entity_one_type'])
if 'entity_two_id' in link: del(link['entity_two_id'])
if 'entity_two_type' in link: del(link['entity_two_type'])
if 'id' in link: del(link['id'])
if 'role_name' in link: del(link['role_name'])
if 'category' in link and not link.get('category'):
del(link['category'])
if 'subcategory' in link and not link.get('subcategory'):
del(link['subcategory'])
if 'genres' in link and not link.get('genres'):
del(link['genres'])
if 'random' in link:
del(link['random'])
if 'release_id' in link and not link.get('release_id'):
del(link['release_id'])
if 'styles' in link and not link.get('styles'):
del(link['styles'])
if 'year' in link and not link.get('year'):
del(link['year'])
return link
def entity_key_to_node(self, entity_key, distance):
node = dict(distance=distance, missing=0, members=set(), aliases=set())
node['id'] = entity_key[1]
if entity_key[0] == 1:
node['type'] = 'artist'
else:
node['type'] = 'label'
node['key'] = '{}-{}'.format(node['type'], node['id'])
node['links'] = set()
return node
def collect_entities_2(self):
original_role_names = self.role_names or ()
provisional_role_names = set(original_role_names)
provisional_role_names.update(['Alias', 'Member Of'])
provisional_role_names = sorted(provisional_role_names)
if type(self.center_entity).__name__.endswith('Artist'):
initial_key = (1, self.center_entity.discogs_id)
else:
initial_key = (2, self.center_entity.discogs_id)
entity_keys_to_visit = set([initial_key])
links = dict()
nodes = dict()
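        # SQLite allows at most 999 bound variables per statement; reserve slots for
        # the role-name parameters, then halve because each entity key binds a
        # (type, id) pair.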
entity_query_cap = 999
entity_query_cap -= (1 + len(provisional_role_names)) * 2
entity_query_cap //= 2
break_on_next_loop = False
for distance in range(self.degree + 1):
current_entity_keys_to_visit = list(entity_keys_to_visit)
for key in current_entity_keys_to_visit:
nodes.setdefault(key, self.entity_key_to_node(key, distance))
#print(' At distance {}:'.format(distance))
#print(' {} new nodes'.format(
# len(current_entity_keys_to_visit)))
#print(' {} old nodes'.format(
# len(nodes) - len(current_entity_keys_to_visit)))
#print(' {} old links'.format(len(links)))
if break_on_next_loop:
#print(' Leaving search loop.')
break
if (
1 < distance and
self.max_nodes and
self.max_nodes <= len(nodes)
):
#print(' Maxed out node count.')
break_on_next_loop = True
entity_keys_to_visit.clear()
relations = []
range_stop = len(current_entity_keys_to_visit)
for start in range(0, range_stop, entity_query_cap):
# Split into multiple queries to avoid variable maximum.
stop = start + entity_query_cap
#print(' Querying: {} to {} of {} new nodes'.format(
# start, stop, len(current_entity_keys_to_visit)
# ))
entity_key_slice = current_entity_keys_to_visit[start:stop]
relations.extend(SqliteRelation.search_multi(
entity_key_slice,
role_names=provisional_role_names,
))
for relation in relations:
e1k = (relation['entity_one_type'], relation['entity_one_id'])
e2k = (relation['entity_two_type'], relation['entity_two_id'])
if e1k not in nodes:
entity_keys_to_visit.add(e1k)
nodes[e1k] = self.entity_key_to_node(e1k, distance + 1)
if e2k not in nodes:
entity_keys_to_visit.add(e2k)
nodes[e2k] = self.entity_key_to_node(e2k, distance + 1)
if relation['role_name'] == 'Alias':
nodes[e1k]['aliases'].add(e2k[1])
nodes[e2k]['aliases'].add(e1k[1])
elif relation['role_name'] in ('Member Of', 'Sublabel Of'):
nodes[e2k]['members'].add(e1k[1])
if relation['role_name'] not in original_role_names:
continue
link = self.relation_to_link(relation)
link['distance'] = min(
nodes[e1k]['distance'],
nodes[e2k]['distance'],
)
links[link['key']] = link
nodes[e1k]['links'].add(link['key'])
nodes[e2k]['links'].add(link['key'])
#print(' Collected: {} / {}'.format(len(nodes), len(links)))
# Query node names.
artist_ids = []
label_ids = []
for entity_type, entity_id in nodes.keys():
if entity_type == 1:
artist_ids.append(entity_id)
else:
label_ids.append(entity_id)
artists = []
for i in range(0, len(artist_ids), 999):
query = (SqliteArtist
.select()
.where(SqliteArtist.id.in_(artist_ids[i:i + 999]))
)
artists.extend(query)
labels = []
        for i in range(0, len(label_ids), 999):
query = (SqliteLabel
.select()
.where(SqliteLabel.id.in_(label_ids[i:i + 999]))
)
labels.extend(query)
for artist in artists:
nodes[(1, artist.id)]['name'] = artist.name
for label in labels:
nodes[(2, label.id)]['name'] = label.name
# Prune nameless nodes.
for node in tuple(nodes.values()):
if not node.get('name'):
self.prune_node(node, nodes, links, update_missing_count=False)
#print(' Pruning nameless: {} / {}'.format(len(nodes), len(links)))
# Prune unvisited nodes and links.
for key in entity_keys_to_visit:
node = nodes.get(key)
self.prune_node(node, nodes, links)
#print(' Pruned unvisited: {} / {}'.format(
# len(nodes), len(links)))
# Prune nodes beyond maximum.
if self.max_nodes:
nodes_to_prune = sorted(nodes.values(),
key=lambda x: (x['distance'], x['id']),
)[self.max_nodes:]
for node in nodes_to_prune:
self.prune_node(node, nodes, links)
#print(' Pruned by max nodes: {} / {}'.format(
# len(nodes), len(links)))
# Prune links beyond maximum.
if self.max_links:
links_to_prune = sorted(links.values(),
key=self.link_sorter,
)[self.max_links:]
for link in links_to_prune:
self.prune_link(link, nodes, links)
#print(' Pruned by max links: {} / {}'.format(
# len(nodes), len(links)))
#print('Finally: {} / {}'.format(len(nodes), len(links)))
return nodes, links
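    # (Added note, not in the original.) prune_link() and prune_node() below
    # are mutually recursive: removing a link detaches it from its endpoint
    # nodes and prunes any node left without links, which in turn prunes that
    # node's remaining links, cascading until the graph is consistent again.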
def prune_link(self, link, nodes, links, update_missing_count=True):
if link is None:
return
if link['key'] in links:
del(links[link['key']])
source_node = nodes.get(link['source'])
if source_node is not None:
if update_missing_count:
source_node['missing'] += 1
source_node['links'].remove(link['key'])
if not source_node['links']:
self.prune_node(source_node, nodes, links,
update_missing_count=update_missing_count)
target_node = nodes.get(link['target'])
if target_node is not None:
if update_missing_count:
target_node['missing'] += 1
target_node['links'].remove(link['key'])
if not target_node['links']:
self.prune_node(target_node, nodes, links,
update_missing_count=update_missing_count)
def prune_node(self, node, nodes, links, update_missing_count=True):
if node is None:
return
if node['type'] == 'artist':
key = (1, node['id'])
else:
key = (2, node['id'])
if key in nodes:
del(nodes[key])
for link_key in node['links'].copy():
link = links.get(link_key)
self.prune_link(link, nodes, links,
update_missing_count=update_missing_count)
def get_network_2(self):
nodes, links = self.collect_entities_2()
cluster_count = 0
cluster_map = {}
for node in nodes.values():
cluster = None
if node['aliases']:
if node['id'] not in cluster_map:
cluster_count += 1
cluster_map[node['id']] = cluster_count
for alias_id in node['aliases']:
cluster_map[alias_id] = cluster_count
cluster = cluster_map[node['id']]
if not node['aliases']:
del(node['aliases'])
else:
node['aliases'] = tuple(sorted(node['aliases']))
if cluster is not None:
node['cluster'] = cluster
node['size'] = len(node.pop('members'))
node['links'] = tuple(sorted(node['links']))
links = tuple(sorted(links.values(),
key=lambda x: (
x['source'],
x['role'],
x['target'],
x.get('release_id')
)))
for link in links:
if link['source'][0] == 1:
link['source'] = 'artist-{}'.format(link['source'][1])
else:
link['source'] = 'label-{}'.format(link['source'][1])
if link['target'][0] == 1:
link['target'] = 'artist-{}'.format(link['target'][1])
else:
link['target'] = 'label-{}'.format(link['target'][1])
nodes = tuple(sorted(nodes.values(),
key=lambda x: (x['type'], x['id'])))
if type(self.center_entity) in (Artist, SqliteArtist):
center = 'artist-{}'.format(self.center_entity.discogs_id)
else:
center = 'label-{}'.format(self.center_entity.discogs_id)
network = {
'center': center,
'nodes': nodes,
'links': links,
}
return network
@staticmethod
def link_sorter(link):
role = 2
if link['role'] == 'Alias':
role = 0
elif link['role'] == 'Member Of':
role = 1
return link['distance'], role, link['key'] | [
"[email protected]"
] | |
3986965c2d0a784332a7d222363a1d17a565161e | 9abe914e718155f3a560915c56a55996155159fb | /orders/migrations/0001_initial.py | bc9759f5039dba1b8d785e8084892eb818553505 | [] | no_license | Chaoslecion123/Tienda-Django | 07c8e2abe8cf659a4fce910c2b8fc858d9276e3b | d06e0c789bab69472d0931b2322e7da4f2eaa3bd | refs/heads/master | 2022-03-28T18:07:38.083362 | 2019-12-11T20:01:59 | 2019-12-11T20:01:59 | 227,241,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | # Generated by Django 3.0 on 2019-12-06 23:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import orders.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('carts', '0002_cart_products'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[(orders.models.OrderStatus['CREATED'], 'CREATED'), (orders.models.OrderStatus['PAYED'], 'PAYED'), (orders.models.OrderStatus['COMPLETED'], 'COMPLETED'), (orders.models.OrderStatus['CANCELED'], 'CANCELED')], default=orders.models.OrderStatus['CREATED'], max_length=50)),
('shopping_total', models.DecimalField(decimal_places=2, default=5, max_digits=8)),
('total', models.DecimalField(decimal_places=2, default=0, max_digits=8)),
('created_at', models.DateTimeField(auto_now_add=True)),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='carts.Cart')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
0e62466e00c864e812b457df2fb1c13bd2d2c7fc | c6627895fbfae1ef714f10202eecb6ccce6e9af3 | /test/test_v3_stop_response.py | 2f7441ff90ce3cb2055bc1f14760e2bc98b1b3ef | [
"MIT"
] | permissive | richardjkendall/ptv-api-client | c34f08f986de903d43ee50e5d2f39a6258c0e1fd | 9b1a3882ebee8cef363e688a56b90e2643799a88 | refs/heads/master | 2020-07-15T03:25:04.571357 | 2019-09-13T19:38:31 | 2019-09-13T19:38:31 | 205,468,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,973 | py | # coding: utf-8
"""
PTV Timetable API - Version 3
The PTV Timetable API provides direct access to Public Transport Victoria’s public transport timetable data. The API returns scheduled timetable, route and stop data for all metropolitan and regional train, tram and bus services in Victoria, including Night Network(Night Train and Night Tram data are included in metropolitan train and tram services data, respectively, whereas Night Bus is a separate route type). The API also returns real-time data for metropolitan train, tram and bus services (where this data is made available to PTV), as well as disruption information, stop facility information, and access to myki ticket outlet data. This Swagger is for Version 3 of the PTV Timetable API. By using this documentation you agree to comply with the licence and terms of service. Train timetable data is updated daily, while the remaining data is updated weekly, taking into account any planned timetable changes (for example, due to holidays or planned disruptions). The PTV timetable API is the same API used by PTV for its apps. To access the most up to date data PTV has (including real-time data) you must use the API dynamically. You can access the PTV Timetable API through a HTTP or HTTPS interface, as follows: base URL / version number / API name / query string The base URL is either: * http://timetableapi.ptv.vic.gov.au or * https://timetableapi.ptv.vic.gov.au The Swagger JSON file is available at http://timetableapi.ptv.vic.gov.au/swagger/docs/v3 Frequently asked questions are available on the PTV website at http://ptv.vic.gov.au/apifaq Links to the following information are also provided on the PTV website at http://ptv.vic.gov.au/ptv-timetable-api/ * How to register for an API key and calculate a signature * PTV Timetable API V2 to V3 Migration Guide * Documentation for Version 2 of the PTV Timetable API * PTV Timetable API Data Quality Statement All information about how to use the API is in this documentation. PTV cannot provide technical support for the API. Credits: This page has been based on Steve Bennett's http://opentransportdata.org/, used with permission. # noqa: E501
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import ptvapi
from ptvapi.models.v3_stop_response import V3StopResponse # noqa: E501
from ptvapi.rest import ApiException
class TestV3StopResponse(unittest.TestCase):
"""V3StopResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV3StopResponse(self):
"""Test V3StopResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = ptvapi.models.v3_stop_response.V3StopResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
2fa1ed31a07f2af23528b5c43403571d54f6cc0a | 00c9124de185eead1d28dc190e1e125e5b4ceb26 | /Pyto Mac/PyObjC/Quartz/QuartzFilters/_metadata.py | e2ec80683f20af0307984bc22985baf5d9fa4c51 | [
"MIT"
] | permissive | cclauss/Pyto | 028b9e9c3c2fefa815a4d6831e25f5ebae5b14f1 | 1c4ccc47e3a91e996bf6ec38c527d244de2cf7ed | refs/heads/master | 2020-04-23T22:09:21.151598 | 2019-03-02T00:44:20 | 2019-03-02T00:44:20 | 171,491,840 | 0 | 0 | MIT | 2019-02-19T14:49:25 | 2019-02-19T14:49:25 | null | UTF-8 | Python | false | false | 1,987 | py | # This file is generated by objective.metadata
#
# Last update: Tue Jun 26 07:59:02 2018
import objc, sys
if sys.maxsize > 2 ** 32:
def sel32or64(a, b): return b
else:
def sel32or64(a, b): return a
if sys.byteorder == 'little':
def littleOrBig(a, b): return a
else:
def littleOrBig(a, b): return b
misc = {
}
constants = '''$globalUpdateOK@Z$kQuartzFilterApplicationDomain$kQuartzFilterManagerDidAddFilterNotification$kQuartzFilterManagerDidModifyFilterNotification$kQuartzFilterManagerDidRemoveFilterNotification$kQuartzFilterManagerDidSelectFilterNotification$kQuartzFilterPDFWorkflowDomain$kQuartzFilterPrintingDomain$'''
enums = '''$$'''
misc.update({})
r = objc.registerMetaDataForSelector
objc._updatingMetadata(True)
try:
r(b'NSObject', b'quartzFilterManager:didAddFilter:', {'retval': {'type': b'v'}, 'arguments': {2: {'type': b'@'}, 3: {'type': b'@'}}})
r(b'NSObject', b'quartzFilterManager:didModifyFilter:', {'retval': {'type': b'v'}, 'arguments': {2: {'type': b'@'}, 3: {'type': b'@'}}})
r(b'NSObject', b'quartzFilterManager:didRemoveFilter:', {'retval': {'type': b'v'}, 'arguments': {2: {'type': b'@'}, 3: {'type': b'@'}}})
r(b'NSObject', b'quartzFilterManager:didSelectFilter:', {'retval': {'type': b'v'}, 'arguments': {2: {'type': b'@'}, 3: {'type': b'@'}}})
r(b'QuartzFilter', b'applyToContext:', {'retval': {'type': b'Z'}})
r(b'QuartzFilterManager', b'selectFilter:', {'retval': {'type': b'Z'}})
finally:
objc._updatingMetadata(False)
protocols={'QuartzFilterManagerDelegate': objc.informal_protocol('QuartzFilterManagerDelegate', [objc.selector(None, b'quartzFilterManager:didSelectFilter:', b'v@:@@', isRequired=False), objc.selector(None, b'quartzFilterManager:didAddFilter:', b'v@:@@', isRequired=False), objc.selector(None, b'quartzFilterManager:didModifyFilter:', b'v@:@@', isRequired=False), objc.selector(None, b'quartzFilterManager:didRemoveFilter:', b'v@:@@', isRequired=False)])}
expressions = {}
# END OF FILE
| [
"[email protected]"
] | |
0229304f52f6ac1a1a145202bfd50b7ebcab2f13 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03353/s439050403.py | 217d9912ac078b47e9ed9f82f7964e6a159318bd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | S = input()
K = int(input())
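# (Added commentary, not in the original submission.) The loop below appears
# to build the K lexicographically smallest *distinct* substrings of S: each
# pass appends the smallest letter not yet collected, then every substring of
# length 2..11 starting at an occurrence of that letter. Since K is small,
# only short substrings can rank among the K smallest.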
Ans = []
while len(Ans) < K:
for i in range(26):
if chr(97+i) in S and chr(97+i) not in Ans:
f = chr(97 + i)
Ans.append(f)
break
for i, s in enumerate(S):
if s == f:
for j in range(2, 2 + 10):
if i + j > len(S):
break
if S[i:i+j] not in Ans:
Ans.append(S[i:i+j])
Ans.sort()
print(Ans[K-1])
| [
"[email protected]"
] | |
f3279e01111cd6a9a00bf027fab2f05f90273b17 | e59f257d5735cae8cf7bb46d52792aa7371c9dae | /venv/lib/python3.7/site-packages/stripe/version.py | fb126cc98a54cad9bfdc5579b360c82f55cdb733 | [] | no_license | okumujustine/django-eccomerce-website | 95499049dd4e46513c25a0fe6e6b82cf69d2080b | 00c1ca600af5faa89829702044cc9f329bbc8b66 | refs/heads/master | 2022-12-08T23:29:19.453109 | 2021-05-31T11:23:22 | 2021-05-31T11:23:22 | 242,557,196 | 1 | 2 | null | 2022-12-08T03:44:29 | 2020-02-23T17:08:21 | Python | UTF-8 | Python | false | false | 19 | py | VERSION = "2.42.0"
| [
"[email protected]"
] | |
f1a7a3948b8324c5db96b653272eb3ff8269f357 | 7d93e4f8a9475ada3edd770263ccb8cd98a9e73d | /tonetutor_webapi/settings.py | 4578eab178976962e18525b426db619d3143ad69 | [] | no_license | JivanAmara/tonetutor-webapi | 78a7c03fe69aa392db508358040369ecf0973050 | adf23af0dd12e5dd967695621146cd67ac5a416c | refs/heads/master | 2021-04-30T04:46:46.336909 | 2018-02-20T00:17:56 | 2018-02-20T00:22:32 | 121,543,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,145 | py | """
Django settings for tonetutor_webapi project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# This is the host to pass tone check requests through to.
AUDIO_PROTOCOL = os.environ.get('AUDIO_PROTOCOL', 'http://')
AUDIO_HOST = os.environ.get('AUDIO_HOST', 'www.mandarintt.com')
AUDIO_PATH = os.environ.get('AUDIO_PATH', '/audio/')
LOG_FILEPATH = '/var/log/tonetutor_webapi.log'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'PAGE_SIZE': 10
}
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', 'Not really the secret key.')
# SECURITY WARNING: don't run with debug turned on in production!
# Environment variables are strings; parse the flag explicitly so that values
# like 'False' do not evaluate truthy.
DEBUG = os.environ.get('DEBUG', 'False').lower() in ('1', 'true', 'yes')
ALLOWED_HOSTS = ['api.mandarintt.com', 'test-api.mandarintt.com', 'www.mandarintt.com', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webapi',
'rest_framework',
'rest_framework.authtoken',
'syllable_samples',
'tonerecorder',
'hanzi_basics',
'corsheaders',
'usermgmt',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'tonetutor_webapi.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tonetutor_webapi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DBPASS = os.environ.get('DB_PASS')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'webvdc', # Or path to database file if using sqlite3.
'USER': 'webvdc', # Not used with sqlite3.
'PASSWORD': DBPASS, # Not used with sqlite3.
'HOST': 'database-host', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/tonetutor_webapi-static'
MEDIA_URL = '/'
# This should be a volume mapped to the shared media root on host system
MEDIA_ROOT = '/mnt/data-volume/tonetutor-media/'
# Subdirectory of MEDIA_ROOT where attempt audio gets stored.
SYLLABLE_AUDIO_DIR = 'audio-files'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': LOG_FILEPATH,
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| [
"[email protected]"
] | |
f08d6c183b27872465286126635966459c3bd299 | bc7cd6689a8052d442ded8e876de1e5f22bfad6c | /lsml/core/datasets_handler.py | 6d8f39686c4778cbfbe07610f2e691176e6fa08c | [
"BSD-3-Clause"
] | permissive | tor4z/level-set-machine-learning | 3a359e0d55137f3c0a9cbcaf25048c61573abd25 | 38460e514d48f3424bb8d3bd58cb3eb330153e64 | refs/heads/master | 2022-04-08T08:04:27.200188 | 2020-01-26T03:09:56 | 2020-01-26T03:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,629 | py | from collections import namedtuple
import contextlib
import logging
import os
import h5py
import numpy
import skfmm
_logger_name = __name__.rsplit('.', 1)[-1]
logger = logging.getLogger(_logger_name)
TRAINING_DATASET_KEY = 'training'
VALIDATION_DATASET_KEY = 'validation'
TESTING_DATASET_KEY = 'testing'
DATASET_KEYS = (
TRAINING_DATASET_KEY,
VALIDATION_DATASET_KEY,
TESTING_DATASET_KEY
)
EXAMPLE_KEY = "example-{:d}"
IMAGE_KEY = "image"
SEGMENTATION_KEY = "segmentation"
DISTANCE_TRANSFORM_KEY = "distance-transform"
# Yielded by dataset manager generators
DatasetExample = namedtuple(
'DatasetExample',
['index', 'key', 'img', 'seg', 'dist', 'dx'])
class DatasetsHandler:
""" Handles internal dataset operations during model fitting
"""
def __init__(self, h5_file, imgs=None, segs=None, dx=None, compress=True):
""" Initialize a dataset manager
Parameters
----------
h5_file: str
A (possibly already existing) hdf5 dataset
imgs: List(ndarray), default=None
            List of images. Necessary when `h5_file` does not exist.
segs: List(ndarray), default=None
List of respective segmentations for :code:`imgs`. Necessary when
            `h5_file` does not exist.
dx: List(ndarray), default=None
The delta spacing along the axis directions in the provided
images and segmentations. Used when `h5_file` doesn't exist. In
that case, the default of None uses all ones.
compress: bool, default=True
When the image and segmentation data are stored in the hdf5 file
this flag indicates whether or not to use compression.
Note
----
        Either :code:`h5_file` should be the name of an existing h5 file with
        appropriate structure (see :meth:`convert_to_hdf5`), or `imgs`
        and `segs` should be non-None, in which case the hdf5 file will be
        created from the provided images and segmentations and will be named
        with the argument `h5_file`.
"""
self.h5_file = os.path.abspath(h5_file)
self.datasets = {
dataset_key: [] for dataset_key in self._iterate_dataset_keys()}
if not os.path.exists(self.h5_file):
if imgs is None or segs is None:
msg = ("Provided `h5_file` {} doesn't exist but no image or "
"segmentation data provided")
raise ValueError(msg.format(h5_file))
# Perform the conversion to hdf5
self.convert_to_hdf5(
imgs=imgs, segs=segs, dx=dx, compress=compress)
with h5py.File(self.h5_file, mode='r') as hf:
self.n_examples = len(hf.keys())
def convert_to_hdf5(self, imgs, segs, dx=None, compress=True):
""" Convert a dataset of images and boolean segmentations
to hdf5 format, which is required for the level set routine.
        The format, assuming `hf` is an h5py `File`, is as follows::
            'example-i'
                |_ 'image'
                |_ 'segmentation'
                |_ 'distance-transform'
                |_ attrs
                    |_ 'dx'
Parameters
----------
imgs: list of ndarray
The list of image examples for the dataset
segs: list of ndarray
The list of image examples for the dataset
dx: list of ndarray, shape=(n_examples, img.ndim), default=None
The resolutions along each axis for each image. The default (None)
assumes the resolution is 1 along each axis direction, but this
might not be the case for anisotropic data.
compress: bool, default=True
If True, :code:`gzip` compression with default compression
options (level=4) is used for the images and segmentations.
"""
# Check if the file already exists and abort if so.
if os.path.exists(self.h5_file):
msg = "Dataset already exists at {}"
raise FileExistsError(msg.format(self.h5_file))
# Setup some variables
n_examples = len(imgs)
ndim = imgs[0].ndim
compress_method = "gzip" if compress else None
######################
# Input validation
if len(imgs) != len(segs):
msg = "Mismatch in number of examples: imgs ({}), segs ({})"
raise ValueError(msg.format(len(imgs), len(segs)))
for i in range(n_examples):
img = imgs[i]
seg = segs[i]
# Validate image data type
if img.dtype != numpy.float:
msg = "imgs[{}] (dtype {}) was not float"
raise TypeError(msg.format(i, img.dtype))
# Validate segmentation data type
if seg.dtype != numpy.bool:
msg = "seg[{}] (dtype {}) was not bool"
raise TypeError(msg.format(i, seg.dtype))
if img.ndim != ndim:
msg = "imgs[{}] (ndim={}) did not have correct dimensions ({})"
raise ValueError(msg.format(i, img.ndim, ndim))
if seg.ndim != ndim:
msg = "segs[{}] (ndim={}) did not have correct dimensions ({})"
raise ValueError(msg.format(i, seg.ndim, ndim))
if img.shape != seg.shape:
msg = "imgs[{}] shape {} does not match segs[{}] shape {}"
raise ValueError(msg.format(i, img.shape, i, seg.shape))
# Check dx if provided and is correct shape.
if dx is None:
dx = numpy.ones((n_examples, ndim), dtype=numpy.float)
else:
if dx.shape != (n_examples, ndim):
msg = "`dx` was shape {} but should be shape {}"
raise ValueError(msg.format(dx.shape, (n_examples, ndim)))
# End input validation
##########################
hf = h5py.File(self.h5_file, mode='w')
for i in range(n_examples):
msg = "Creating dataset entry {} / {}"
logger.info(msg.format(i+1, n_examples))
# Create a group for the i'th example
g = hf.create_group(EXAMPLE_KEY.format(i))
# Store the i'th image
g.create_dataset(IMAGE_KEY,
data=imgs[i], compression=compress_method)
# Store the i'th segmentation
g.create_dataset(SEGMENTATION_KEY,
data=segs[i], compression=compress_method)
# Compute the signed distance transform of the ground-truth
# segmentation and store it.
dist = skfmm.distance(2*segs[i].astype(numpy.float)-1, dx=dx[i])
g.create_dataset(DISTANCE_TRANSFORM_KEY,
data=dist, compression=compress_method)
# Store the delta terms as an attribute.
g.attrs['dx'] = dx[i]
# Close up shop
hf.close()
def assign_examples_to_datasets(
self, training, validation, testing, subset_size, random_state):
""" Assign the dataset example keys to training, validation,
or testing
training: float, or list of int
A probability value or a list of indices of examples that belong
to the training dataset
validation: float, or list of int
A probability value or a list of indices of examples that belong
to the validation dataset
testing: float, or list of int
A probability value or a list of indices of examples that belong
to the testing dataset
subset_size: int or None
If datasets are randomly partitioned, then the full dataset
is first down-sampled to be `subset_size` before partitioning
random_state: numpy.random.RandomState, default=None
The random state is used only to perform the randomized split
into when training/validation/testing are provided as probability
values
"""
if not random_state:
random_state = numpy.random.RandomState()
msg = ("RandomState not provided; results will "
"not be reproducible")
logger.warning(msg)
elif not isinstance(random_state, numpy.random.RandomState):
msg = "`random_state` ({}) not instance numpy.random.RandomState"
raise TypeError(msg.format(type(random_state)))
if all([isinstance(item, float)
for item in (training, validation, testing)]):
# Random split
self.assign_examples_randomly(
probabilities=(training, validation, testing),
subset_size=subset_size,
random_state=random_state)
elif all([
(isinstance(index_list, list) and
[isinstance(index, int) for index in index_list])
for index_list in (training, validation, testing)
]):
# Each is list of ints
self.assign_examples_by_indices(
training_dataset_indices=training,
validation_dataset_indices=validation,
testing_dataset_indices=testing)
else:
# Bad values supplied
msg = ("`training`, `validation`, and `testing` should be "
"all floats or all list of ints")
raise ValueError(msg)
def assign_examples_by_indices(self,
training_dataset_indices,
validation_dataset_indices,
testing_dataset_indices):
""" Specify which of the data should belong to training, validation,
and testing datasets. Automatic randomization is possible: see keyword
argument parameters.
Parameters
----------
training_dataset_indices: list of integers
The list of indices of examples that belong to the training dataset
validation_dataset_indices: list of integers
The list of indices of examples that belong to the validation
dataset
testing_dataset_indices: list of integers
The list of indices of examples that belong to the testing dataset
"""
if not all([isinstance(index, int)
for index in training_dataset_indices]):
msg = "Training data indices must be a list of integers"
raise ValueError(msg)
if not all([isinstance(index, int)
for index in validation_dataset_indices]):
msg = "Validation data indices must be a list of integers"
raise ValueError(msg)
if not all([isinstance(index, int)
for index in testing_dataset_indices]):
msg = "Training data indices must be a list of integers"
raise ValueError(msg)
self.datasets[TRAINING_DATASET_KEY] = [
self._example_key_from_index(index)
for index in training_dataset_indices
]
self.datasets[VALIDATION_DATASET_KEY] = [
self._example_key_from_index(index)
for index in validation_dataset_indices
]
self.datasets[TESTING_DATASET_KEY] = [
self._example_key_from_index(index)
for index in testing_dataset_indices
]
def assign_examples_randomly(self, probabilities,
subset_size, random_state):
""" Assign examples randomly into training, validation, and testing
Parameters
----------
probabilities: 3-tuple of floats
The probability of being placed in the training, validation
or testing
subset_size: int
If provided, then should be less than or equal to
:code:`len(keys)`. If given, then :code:`keys` is first
sub-sampled by :code:`subset_size`
before splitting.
random_state: numpy.random.RandomState
Provide for reproducible results
"""
with self.open_h5_file() as hf:
keys = list(hf.keys())
if subset_size is not None and subset_size > len(keys):
raise ValueError("`subset_size` must be <= `len(keys)`")
if subset_size is None:
subset_size = len(keys)
sub_keys = random_state.choice(keys, replace=False, size=subset_size)
n_keys = len(sub_keys)
        # This generates a matrix of shape `(n_keys, 3)` where each row is an
        # indicator vector marking the dataset into which the key with the
        # corresponding row index should be placed.
indicators = random_state.multinomial(
n=1, pvals=probabilities, size=n_keys)
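        # (Added illustration.) E.g. with pvals=(0.7, 0.15, 0.15), a row of
        # [0, 1, 0] assigns the corresponding key to the validation dataset
        # (index 1 in DATASET_KEYS).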
# Get the dataset keys for iteration
dataset_keys = self._iterate_dataset_keys()
for idataset_key, dataset_key in enumerate(dataset_keys):
# Include example indexes when the indicator array is 1
# in the respective slot for the given dataset index in the
# outer for loop
self.datasets[dataset_key] = [
self._example_key_from_index(index)
for index, indicator in enumerate(indicators)
if list(indicator).index(1) == idataset_key
]
@contextlib.contextmanager
def open_h5_file(self):
""" Opens the data file
"""
h5 = None
try:
h5 = h5py.File(self.h5_file, mode='r')
yield h5
finally:
if h5:
h5.close()
def _iterate_dataset_keys(self):
""" Iterates through the dataset keys
"""
for dataset_key in DATASET_KEYS:
yield dataset_key
def _example_key_from_index(self, index):
""" Get the example key for the corresponding index
"""
return EXAMPLE_KEY.format(index)
def get_dataset_for_example_key(self, example_key):
""" Get the dataset for the corresponding example key
Returns
-------
dataset_key: str or None
One of TRAINING_DATASET_KEY, VALIDATION_DATASET_KEY, or
TESTING_DATASET_KEY if found; otherwise, returns None.
"""
if self.in_training_dataset(example_key):
return TRAINING_DATASET_KEY
elif self.in_validation_dataset(example_key):
return VALIDATION_DATASET_KEY
elif self.in_testing_dataset(example_key):
return TESTING_DATASET_KEY
else:
return None
def iterate_keys(self, dataset_key=None):
for i in range(self.n_examples):
example_key = self._example_key_from_index(i)
if (not dataset_key or self._in_dataset(
example_key=example_key, dataset_key=dataset_key)):
yield example_key
def get_example_by_index(self, index):
""" Get the `DatasetExample` corresponding to `index`
"""
with self.open_h5_file() as hf:
example_key = self._example_key_from_index(index)
example = DatasetExample(
index=index,
key=example_key,
img=hf[example_key][IMAGE_KEY][...],
seg=hf[example_key][SEGMENTATION_KEY][...],
dist=hf[example_key][DISTANCE_TRANSFORM_KEY][...],
dx=hf[example_key].attrs['dx']
)
return example
def _get_example_by_key(self, example_key):
with self.open_h5_file() as hf:
example = DatasetExample(
index=int(example_key.split('-')[-1]),
key=example_key,
img=hf[example_key][IMAGE_KEY][...],
seg=hf[example_key][SEGMENTATION_KEY][...],
dist=hf[example_key][DISTANCE_TRANSFORM_KEY][...],
dx=hf[example_key].attrs['dx']
)
return example
def iterate_examples(self, dataset_key=None):
""" Iterates through the hdf5 dataset
Parameters
----------
dataset_key: str, default=None
Limit the iterations to the given dataset; None yields all examples
Returns
-------
dataset: generator
The return generator returns
`(i, key, img[i], seg[i], dist[i], dx[i])`
at each iteration, where i is the index and key is the
key into the hdf5 dataset for the respective index
"""
with self.open_h5_file() as hf:
for i in range(self.n_examples):
example_key = self._example_key_from_index(i)
# Skip if the example is not in the desired dataset
if (dataset_key and
not self._in_dataset(example_key, dataset_key)):
continue
yield DatasetExample(
index=i,
key=example_key,
img=hf[example_key][IMAGE_KEY][...],
seg=hf[example_key][SEGMENTATION_KEY][...],
dist=hf[example_key][DISTANCE_TRANSFORM_KEY][...],
dx=hf[example_key].attrs['dx']
)
def _in_dataset(self, example_key, dataset_key):
return example_key in self.datasets[dataset_key]
def in_training_dataset(self, example_key):
""" Returns True if example key is in the training dataset
"""
return self._in_dataset(example_key, TRAINING_DATASET_KEY)
def in_validation_dataset(self, example_key):
""" Returns True if example key is in the validation dataset
"""
return self._in_dataset(example_key, VALIDATION_DATASET_KEY)
def in_testing_dataset(self, example_key):
""" Returns True if example key is in the testing dataset
"""
return self._in_dataset(example_key, TESTING_DATASET_KEY)
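# Illustrative usage sketch (added commentary; not part of the original
# module). The file name, image sizes, and split probabilities below are
# hypothetical -- this only demonstrates the intended call sequence.
def _sketch_datasets_handler_usage():
    rng = numpy.random.RandomState(0)
    imgs = [rng.rand(8, 8) for _ in range(10)]   # float-valued images
    segs = [img > 0.5 for img in imgs]           # boolean segmentations
    handler = DatasetsHandler('sketch-dataset.h5', imgs=imgs, segs=segs)
    handler.assign_examples_randomly(
        probabilities=(0.6, 0.2, 0.2), subset_size=None, random_state=rng)
    for example in handler.iterate_examples(dataset_key=TRAINING_DATASET_KEY):
        print(example.key, example.img.shape)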
class DatasetProxy:
""" A proxy object attached to a level set machine learning model
after fitting so that we can do things like `model.training_data[0]`"""
def __init__(self, datasets_handler, dataset_key):
self._datasets_handler = datasets_handler
self._dataset_key = dataset_key
def __len__(self):
return len(self._datasets_handler.datasets[self._dataset_key])
def __getitem__(self, item):
if not isinstance(item, int):
raise KeyError
key = self._datasets_handler.datasets[self._dataset_key][item]
return self._datasets_handler._get_example_by_key(key)
def __iter__(self):
return (self[i] for i in range(len(self)))
| [
"[email protected]"
] | |
693783e7d62a487f14e10d74a7a450d92bdb14ce | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/models/model_test.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/profiler/internal/__init__.py | 2393af118399d8838b4a147efeb326dc2d1ae32d | [
"Apache-2.0"
] | permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 188 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/profiler/internal/__init__.py | [
"[email protected]"
] | |
1e623db6fe6a57f23ac6227363e9c2893aae634c | 0fe0ffe29ca6f76c6f15c85c8d82b09beaada246 | /tools/perf/page_sets/dual_browser_story.py | debaf60207cbf2997a297a58ebe5cda647c48d1f | [] | no_license | hanpfei/chromium-net | 4dc8fd48cf3b05d89b11dc121f9c3abdd3ba962e | 9df8ce98c2a14fb60c2f581853011e32eb4bed0f | refs/heads/master | 2023-07-08T15:28:01.033104 | 2023-06-14T13:02:39 | 2023-06-14T13:02:39 | 65,541,033 | 297 | 73 | null | 2022-11-02T23:33:48 | 2016-08-12T09:25:34 | C++ | UTF-8 | Python | false | false | 9,668 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import re
import sys
import urllib
from telemetry import decorators
from telemetry import story as story_module
# TODO(perezju): Remove references to telementry.internal when
# https://github.com/catapult-project/catapult/issues/2102 is resolved.
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_finder_exceptions
from telemetry.util import wpr_modes
from page_sets.top_10_mobile import URL_LIST
GOOGLE_SEARCH = 'https://www.google.co.uk/search?'
SEARCH_QUERIES = [
'science',
'cat pictures',
'1600 Amphitheatre Pkwy, Mountain View, CA',
'tom hanks',
'weather 94110',
'goog',
'population of california',
'sfo jfk flights',
'movies 94110',
'tip on 100 bill'
]
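# (Added example.) Each query above is composed into a story URL below via
# GOOGLE_SEARCH + urllib.urlencode({'q': query}); e.g. 'cat pictures' yields
# 'https://www.google.co.uk/search?q=cat+pictures'.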
def _OptionsForBrowser(browser_type, finder_options):
"""Return options used to get a browser of the given type.
TODO(perezju): Currently this clones the finder_options passed via the
command line to telemetry. When browser_options are split appart from
finder_options (crbug.com/570348) we will be able to construct our own
browser_options as needed.
"""
finder_options = finder_options.Copy()
finder_options.browser_type = browser_type
finder_options.browser_executable = None
finder_options.browser_options.browser_type = browser_type
return finder_options
class MultiBrowserSharedState(story_module.SharedState):
def __init__(self, test, finder_options, story_set):
"""A shared state to run a test involving multiple browsers.
The story_set is expected to include SinglePage instances (class defined
below) mapping each page to a browser on which to run. The state
requires at least one page to run on the 'default' browser, i.e. the
browser selected from the command line by the user.
"""
super(MultiBrowserSharedState, self).__init__(
test, finder_options, story_set)
self._platform = None
self._story_set = story_set
self._possible_browsers = {}
# We use an ordered dict to record the order in which browsers appear on
# the story set. However, browsers are not created yet.
self._browsers_created = False
self._browsers = collections.OrderedDict(
(s.browser_type, None) for s in story_set)
self._current_story = None
self._current_browser = None
self._current_tab = None
possible_browser = self._PrepareBrowser('default', finder_options)
if not possible_browser:
raise browser_finder_exceptions.BrowserFinderException(
'No browser found.\n\nAvailable browsers:\n%s\n' %
'\n'.join(browser_finder.GetAllAvailableBrowserTypes(finder_options)))
if not finder_options.run_disabled_tests:
self._CheckTestEnabled(test, possible_browser)
extra_browser_types = set(story.browser_type for story in story_set)
extra_browser_types.remove('default') # Must include 'default' browser.
for browser_type in extra_browser_types:
options = _OptionsForBrowser(browser_type, finder_options)
if not self._PrepareBrowser(browser_type, options):
logging.warning('Skipping %s (%s) because %s browser is not available',
test.__name__, str(test), browser_type)
logging.warning('Install %s to be able to run the test.', browser_type)
sys.exit(0)
# TODO(crbug/404771): Move network controller options out of
# browser_options and into finder_options.
browser_options = finder_options.browser_options
if finder_options.use_live_sites:
wpr_mode = wpr_modes.WPR_OFF
elif browser_options.wpr_mode == wpr_modes.WPR_RECORD:
wpr_mode = wpr_modes.WPR_RECORD
else:
wpr_mode = wpr_modes.WPR_REPLAY
self.platform.network_controller.Open(wpr_mode,
browser_options.extra_wpr_args)
@property
def current_tab(self):
return self._current_tab
@property
def platform(self):
return self._platform
def _CheckTestEnabled(self, test, possible_browser):
should_skip, msg = decorators.ShouldSkip(test, possible_browser)
if should_skip:
logging.warning(msg)
logging.warning('You are trying to run a disabled test.')
logging.warning(
'Pass --also-run-disabled-tests to squelch this message.')
sys.exit(0)
def _PrepareBrowser(self, browser_type, options):
"""Add a browser to the dict of possible browsers.
TODO(perezju): When available, use the GetBrowserForPlatform API instead.
See: crbug.com/570348
Returns:
The possible browser if found, or None otherwise.
"""
possible_browser = browser_finder.FindBrowser(options)
if possible_browser is None:
return None
if self._platform is None:
self._platform = possible_browser.platform
else:
assert self._platform is possible_browser.platform
self._possible_browsers[browser_type] = (possible_browser, options)
return possible_browser
def _CreateAllBrowsersIfNeeeded(self):
"""Launch all browsers needed for the story set, if not already done.
This ensures that all browsers are alive during the whole duration of the
benchmark and, therefore, e.g. memory dumps are always provided for all
of them.
"""
if self._browsers_created:
return
for browser_type in self._browsers:
possible_browser, options = self._possible_browsers[browser_type]
self._browsers[browser_type] = possible_browser.Create(options)
self._browsers_created = True
def _CloseAllBrowsers(self):
"""Close all of the browsers that were launched for this benchmark."""
if not self._browsers_created:
return
for browser_type, browser in self._browsers.iteritems():
try:
browser.Close()
except Exception:
logging.exception('Error while closing %s browser', browser_type)
self._browsers[browser_type] = None
self._browsers_created = False
def CanRunStory(self, _):
return True
def WillRunStory(self, story):
self._current_story = story
self.platform.network_controller.StartReplay(
self._story_set.WprFilePathForStory(story),
story.make_javascript_deterministic)
# Note: browsers need to be created after replay has been started.
self._CreateAllBrowsersIfNeeeded()
self._current_browser = self._browsers[story.browser_type]
self._current_browser.Foreground()
self._current_tab = self._current_browser.foreground_tab
def RunStory(self, _):
self._current_story.Run(self)
def DidRunStory(self, _):
self._current_story = None
def TakeMemoryMeasurement(self):
self.current_tab.action_runner.ForceGarbageCollection()
self.platform.FlushEntireSystemCache()
if not self.platform.tracing_controller.is_tracing_running:
return # Tracing is not running, e.g., when recording a WPR archive.
for browser_type, browser in self._browsers.iteritems():
if not browser.DumpMemory():
logging.error('Unable to dump memory for %s', browser_type)
def TearDownState(self):
self.platform.network_controller.Close()
self._CloseAllBrowsers()
def DumpStateUponFailure(self, unused_story, unused_results):
if self._browsers:
for browser_type, browser in self._browsers.iteritems():
logging.info('vvvvv BROWSER STATE BELOW FOR \'%s\' vvvvv', browser_type)
browser.DumpStateUponFailure()
else:
logging.warning('Cannot dump browser states: No browsers.')
class SinglePage(story_module.Story):
def __init__(self, name, url, browser_type, phase):
"""A story that associates a particular page with a browser to view it.
Args:
name: A string with the name of the page as it will appear reported,
e.g., on results and dashboards.
url: A string with the url of the page to load.
browser_type: A string identifying the browser where this page should be
displayed. Accepts the same strings as the command line --browser
option (e.g. 'android-webview'), and the special value 'default' to
select the browser chosen by the user on the command line.
"""
super(SinglePage, self).__init__(MultiBrowserSharedState, name=name)
self._url = url
self._browser_type = browser_type
self.grouping_keys['phase'] = phase
@property
def url(self):
return self._url
@property
def browser_type(self):
return self._browser_type
def Run(self, shared_state):
shared_state.current_tab.Navigate(self._url)
shared_state.current_tab.WaitForDocumentReadyStateToBeComplete()
shared_state.TakeMemoryMeasurement()
class DualBrowserStorySet(story_module.StorySet):
"""A story set that switches back and forth between two browsers."""
def __init__(self):
super(DualBrowserStorySet, self).__init__(
archive_data_file='data/dual_browser_story.json',
cloud_storage_bucket=story_module.PARTNER_BUCKET)
for query, url in zip(SEARCH_QUERIES, URL_LIST):
# Stories that run on the android-webview browser.
self.AddStory(SinglePage(
            name='google_%s' % re.sub(r'\W+', '_', query.lower()),
url=GOOGLE_SEARCH + urllib.urlencode({'q': query}),
browser_type='android-webview',
phase='on_webview'))
# Stories that run on the browser selected by command line options.
self.AddStory(SinglePage(
            name=re.sub(r'\W+', '_', url),
url=url,
browser_type='default',
phase='on_chrome'))
| [
"[email protected]"
] | |
6bd19bea2e70c55542a03dfa7a782c7d2dfd879c | 2e8db6ce133756ebe9998a48f98072e2133af792 | /users/admin.py | c74af4fc5bb95c0218144cc7205abe0d7dd7c842 | [] | no_license | KadogoKenya/JWT_TEST | b76f015e65e3950af4b35eebde165700fb8fed03 | 35aa2f9c5b11fa827fbc8565530be597c1f4c2d5 | refs/heads/master | 2023-03-26T20:55:19.301952 | 2021-03-25T07:56:52 | 2021-03-25T07:56:52 | 349,028,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,094 | py | from django.contrib import admin
from django import forms
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
# Register your models here.
# admin.site.register(UserManager,UserAdmin)
from django.contrib.auth.models import Group
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from users.models import User
# from customauth.models import MyUser
class UserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('email', 'username')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = User
fields = ('email', 'password', 'username', 'is_active', 'is_admin')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class UserAdmin(BaseUserAdmin):
# The forms to add and change user instances
form = UserChangeForm
add_form = UserCreationForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ('email', 'username', 'is_admin')
list_filter = ('is_admin',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
('Personal info', {'fields': ('username',)}),
('Permissions', {'fields': ('is_admin',)}),
)
# add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
# overrides get_fieldsets to use this attribute when creating a user.
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'username', 'password1', 'password2')}
),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
admin.site.register(User,UserAdmin)
admin.site.unregister(Group)
| [
"[email protected]"
] | |
1897bf0e769aee4e932c3b17a1669983468986ba | b501a5eae1018c1c26caa96793c6ee17865ebb2d | /Networking/socket/socket_multicast_receiver.py | 0d53559902dd07fb86eb6ef43f80a3e62f3943dd | [] | no_license | jincurry/standard_Library_Learn | 12b02f9e86d31ca574bb6863aefc95d63cc558fc | 6c7197f12747456e0f1f3efd09667682a2d1a567 | refs/heads/master | 2022-10-26T07:28:36.545847 | 2018-05-04T12:54:50 | 2018-05-04T12:54:50 | 125,447,397 | 0 | 1 | null | 2022-10-02T17:21:50 | 2018-03-16T01:32:50 | Python | UTF-8 | Python | false | false | 638 | py | import socket
import struct
import sys
multicast_group = '224.3.29.71'
server_address = ('', 10000)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(server_address)
group = socket.inet_aton(multicast_group)
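# struct.pack('4sL', ...) builds the C-level ip_mreq structure expected by
# IP_ADD_MEMBERSHIP: the 4-byte packed multicast group address followed by
# the local interface (INADDR_ANY lets the kernel pick the interface).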
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock.setsockopt(
socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
mreq
)
while True:
print('\n waiting to receive message')
data, address = sock.recvfrom(1024)
print('received {} bytes from {}'.format(
len(data), address
))
print(data)
print('sending acknowledgement to ', address)
sock.sendto(b'ack', address)
| [
"[email protected]"
] | |
dd1bb84b3f09f1d5438cfa643cf57a0b86ad6d4d | c658b7eed69edfb1a7610694fe7b8e60a5005b7c | /test/functional/test_framework/blocktools.py | 9c2958f076ad5d7002953c87f035d1bf308c0885 | [
"MIT"
] | permissive | wolfoxonly/coc | 0864a6dce2c36d703d93e9b2fb201f599d6db4bd | ff8584518c6979db412aec82e6528a4e37077da2 | refs/heads/master | 2021-01-24T16:52:14.665824 | 2018-04-28T10:00:42 | 2018-04-28T10:00:42 | 123,215,964 | 0 | 0 | MIT | 2018-02-28T02:15:20 | 2018-02-28T02:15:20 | null | UTF-8 | Python | false | false | 3,934 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Crowncoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from .mininode import *
from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
def get_witness_script(witness_root, witness_nonce):
witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(witness_nonce)))
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
return CScript([OP_RETURN, output_data])
# According to BIP141, blocks with witness rules active must commit to the
# hash of all in-block transactions including witness.
def add_witness_commitment(block, nonce=0):
# First calculate the merkle root of the block's
# transactions, with witnesses.
witness_nonce = nonce
witness_root = block.calc_witness_merkle_root()
# witness_nonce should go to coinbase witness.
block.vtx[0].wit.vtxinwit = [CTxInWitness()]
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
# witness commitment is the last OP_RETURN output in coinbase
block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(int(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
# Create a coinbase transaction, assuming no miner fees.
# If pubkey is passed in, the coinbase output will be a P2PK output;
# otherwise an anyone-can-spend output.
def create_coinbase(height, pubkey = None):
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
ser_string(serialize_script_num(height)), 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50 * COIN
halvings = int(height/150) # regtest
coinbaseoutput.nValue >>= halvings
if (pubkey != None):
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [ coinbaseoutput ]
coinbase.calc_sha256()
return coinbase
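# Illustrative helper (added; not part of the original utilities): a common
# pattern is to chain the functions above to build the next block on a given
# parent. `hashprev` and `height` are supplied by the caller.
def create_block_with_coinbase(hashprev, height, nTime=None):
    coinbase = create_coinbase(height)
    return create_block(hashprev, coinbase, nTime=nTime)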
# Create a transaction.
# If the scriptPubKey is not specified, make it anyone-can-spend.
def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, scriptPubKey))
tx.calc_sha256()
return tx
def get_legacy_sigopcount_block(block, fAccurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, fAccurate)
return count
def get_legacy_sigopcount_tx(tx, fAccurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(fAccurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
return count
| [
"[email protected]"
] | |
210f9371a43aa3890b7486dc0f209676644979f8 | 87bae60470bbe5316d7da8bc4a8709e33b40e2b5 | /whatsnew/south_migrations/0004_auto__del_field_whatsnew_released__add_field_whatsnew_enabled__chg_fie.py | b1dc20a303abe6f65e0843cb1077e8a5bf63afa1 | [] | no_license | saxix/django-whatsnew | c11f0d5fa87e5e1c5c7648e8162bd39c64e69302 | 68b33e5e2599a858e00eda53e1c13a503e1b3856 | refs/heads/develop | 2021-01-19T12:39:41.876635 | 2015-01-28T16:18:29 | 2015-01-28T16:18:29 | 18,416,313 | 0 | 2 | null | 2015-01-28T16:18:30 | 2014-04-03T20:00:33 | Python | UTF-8 | Python | false | false | 1,524 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'WhatsNew.released'
db.delete_column(u'whatsnew_whatsnew', 'released')
# Adding field 'WhatsNew.enabled'
db.add_column(u'whatsnew_whatsnew', 'enabled',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Changing field 'WhatsNew.version'
db.alter_column(u'whatsnew_whatsnew', 'version', self.gf('whatsnew.fields.VersionField')(max_length=50))
def backwards(self, orm):
# Adding field 'WhatsNew.released'
db.add_column(u'whatsnew_whatsnew', 'released',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Deleting field 'WhatsNew.enabled'
db.delete_column(u'whatsnew_whatsnew', 'enabled')
models = {
u'whatsnew.whatsnew': {
'Meta': {'object_name': 'WhatsNew'},
'content': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'version': ('whatsnew.fields.VersionField', [], {'max_length': '50'})
}
}
complete_apps = ['whatsnew']
| [
"[email protected]"
] | |
1248dbd0c2b9a530f886af3cabee8148160d28b7 | c972024b36470ea42a01075cc3dc5df2ab2defcc | /mysite/radpress/urls.py | e576bc6e9464e86f98204a0e8b8649c90615b75f | [] | no_license | davidrae/abacus-direct | 1c55bed4639716080b77c03359d981fdd3363027 | dc09e2345a01ec36f6a8e2adf1dba12f11cb55ad | refs/heads/master | 2016-08-11T12:49:15.407345 | 2015-11-03T15:50:16 | 2015-11-03T15:50:16 | 44,256,109 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,579 | py | from django.conf.urls import patterns, url
from radpress.views import (
ArticleArchiveView, ArticleDetailView, ArticleListView, PreviewView,
PageDetailView, SearchView, ZenModeView, ZenModeUpdateView, TagListView,
GenericTagListView, index, users, reports, alerts, museums, privacy,
solutions, wan, carpark, contact)
from radpress.feeds import ArticleFeed
urlpatterns = patterns(
'radpress.views',
#url(r'^$',
# view=TagListView.as_view(),
# name='radpress-article-list'),
url(r'^$', 'index', name='radpress_home'),
url(r'^tags/$',
view=ArticleArchiveView.as_view(),
name='radpress-article-tags'),
url(r'^archives/$',
view=GenericTagListView.as_view(),
name='radpress-article-archive'),
url(r'^detail/(?P<slug>[-\w]+)/$',
view=ArticleDetailView.as_view(),
name='radpress-article-detail'),
url(r'^p/(?P<slug>[-\w]+)/$',
view=PageDetailView.as_view(),
name='radpress-page-detail'),
url(r'^preview/$',
view=PreviewView.as_view(),
name='radpress-preview'),
url(r'^search/$',
view=SearchView.as_view(),
name='radpress-search'),
url(r'^zen/$',
view=ZenModeView.as_view(),
name='radpress-zen-mode'),
url(r'zen/(?P<pk>\d+)/$',
view=ZenModeUpdateView.as_view(),
name='radpress-zen-mode-update'),
url(r'^rss/$',
view=ArticleFeed(),
name='radpress-rss'),
url(r'^rss/(?P<tags>[-/\w]+)/$',
view=ArticleFeed(),
name='radpress-rss'),
#url(r'^$',
#view=ArticleListView.as_view(),
#name='radpress-article-list'),
url(r'^$',
'index',
name='radpress-home'),
#url(r'^detail/features/$',
# view=ArticleDetailView.as_view(),
# name='radpress-features'),
url(r'^about/$',
'about',
name='radpress-about-us'),
url(r'^contact/$',
'contact',
name='radpress-contact'),
# Features pages
url(r'features/$',
'features',
name='radpress-features'),
url(r'^features/users/$',
'users',
name='radpress-users'),
url(r'^features/live/$',
'live',
name='radpress-live'),
url(r'^features/reports/$',
'reports',
name='radpress-reports'),
url(r'^features/auto/$',
'auto',
name='radpress-auto'),
url(r'^features/alerts/$',
'alerts',
name='radpress-alerts'),
# Solutions
url(r'^solutions/$',
'solutions',
name='radpress-solutions'),
url(r'^solutions/wan$',
'wan',
name='radpress-wan'),
url(r'^solutions/cloud$',
'cloud',
name='radpress-cloud'),
url(r'^solutions/smart-devices$',
'smart',
name='radpress-smart'),
# Industries
    url(r'^industries/$',
'industries',
name='radpress-industries'),
url(r'^industries/retail/$',
'retail',
name='radpress-retail'),
url(r'^industries/museums/$',
'museums',
name='radpress-museum'),
url(r'^industries/carparks/$',
'carpark',
name='radpress-carpark'),
url(r'^airport/$',
'airports',
name='radpress-airport'),
# Footer links
url(r'^privacy-policy/$',
'privacy',
name='radpress-privacy'),
url(r'^site-map/$',
'sitemap',
name='radpress-sitemap'),
url(r'^terms-of-use/$',
'terms',
name='radpress-terms'),
)
| [
"[email protected]"
] | |
a704ca8f1db8d26dc296c050b61bbfdec012c64f | 03ff28a6004ba92f3b04c88bebabb503bed0ad0c | /main/migrations/0016_remove_assistantprofile_bio.py | 092e6aec88360f9c741c9f29399cd654c36c95ea | [
"MIT"
] | permissive | mzazakeith/Therapy101 | ebf8ff945f04dc04c7d05fb5cc9f923bca861c88 | be00dd988c6b636f52b57638e70c89da3acbf1a3 | refs/heads/master | 2023-01-08T09:12:13.882424 | 2018-10-01T15:26:25 | 2018-10-01T15:26:25 | 144,596,894 | 0 | 0 | MIT | 2023-01-04T10:58:09 | 2018-08-13T15:12:55 | Python | UTF-8 | Python | false | false | 402 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-24 06:18
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0015_assistantprofile_location'),
]
operations = [
migrations.RemoveField(
model_name='assistantprofile',
name='bio',
),
]
| [
"[email protected]"
] | |
eb74a3aaaba45d89b54d606ce9db62d206c45759 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractLnnwnwarriorCom.py | d57e00464e808b7880e3602ef2d5057707f95b04 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 545 | py |
def extractLnnwnwarriorCom(item):
'''
Parser for 'lnnwnwarrior.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
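	# Map known feed tags to (series name, translation type); the extract/build helpers are assumed to be provided by the surrounding codebase.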
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| [
"[email protected]"
] | |
d8503c77ebac2cd8fe3ea0088bc0ea6508e354d6 | b9a1dfcde3851847531b031b99df8bf96edc72be | /0x0F-python-object_relational_mapping/101-relationship_states_cities_list.py | 7c723017eb5f8ce8a9f2b5ae496e97a47d0db301 | [] | no_license | JackWanaCode/holbertonschool-higher_level_programming | c304eba4039dc188d9f2383ae93791be786360b9 | 8c2b5a612aad968f7dcb7bbfdb8a1791650dce8f | refs/heads/master | 2020-03-28T11:19:50.763159 | 2019-02-22T07:40:56 | 2019-02-22T07:40:56 | 148,202,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | #!/usr/bin/python3
"""Start link class to table in database
"""
import sys
from relationship_state import Base, State
from relationship_city import City
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.
format(sys.argv[1], sys.argv[2], sys.argv[3]),
pool_pre_ping=True)
Base.metadata.create_all(engine, checkfirst=True)
Session = sessionmaker(bind=engine)
session = Session()
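    # Walk every State (ordered by id) and print its cities through the State.cities relationship.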
for st in (session.query(State).order_by(State.id).all()):
print("{}: {}".format(st.id, st.name))
for ct in st.cities:
print("\t{}: {}".format(ct.id, ct.name))
session.commit()
session.close()
| [
"[email protected]"
] | |
cf9ba6f2882fafb0adabf0b721a0fceacb2e24c3 | 99b6faa1e31b9b18755e90070e24787632cd4776 | /apps/postcrash/models.py | 04e994a4e29da3b868fa2f4acc4dd2ea173ff84f | [] | no_license | taliasman/kitsune | d6743ef9e5b26951a87638a963e7429abf1d0327 | f8085205eef143011adb4c52d1f183da06c1c58e | refs/heads/master | 2021-05-28T19:50:40.670060 | 2013-03-11T13:55:15 | 2013-03-11T13:55:15 | 8,706,741 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | from django.db import models
from sumo.models import ModelBase
from wiki.models import Document
class Signature(ModelBase):
signature = models.CharField(max_length=255, db_index=True, unique=True)
document = models.ForeignKey(Document)
def __unicode__(self):
return u'<%s> %s' % (self.signature, self.document.title)
def get_absolute_url(self):
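        # The document URL begins with a locale segment (e.g. /en-US/); drop that
        # first path segment to build a locale-independent URL.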
doc = self.document.get_absolute_url().lstrip('/')
_, _, url = doc.partition('/')
return u'/' + url
| [
"[email protected]"
] | |
bb16decea6b3859bd8afa70e6ca5507d66be55e0 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2844/60891/272575.py | 29c0553ab2f29823241003b7ebce0900d08eee6b | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | n_t = [int(i) for i in input().split()]
n = n_t[0]
t = n_t[1]
a = [int(i) for i in input().split()]
max_n = []
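# Brute force: for each start index i, greedily take consecutive items while they still fit in the budget t.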
for i in range(n):
remain = t
count = 0
j = i
while j < n and remain >= a[j]:
remain -= a[j]
count += 1
j += 1
max_n.append(count)
print(max(max_n)) | [
"[email protected]"
] | |
684061fd2083f4bf639a545dbc9e11253cac2a80 | 7101b0c62f0a060ace2ce03b2785145cd720180c | /e3d/scene_management/SceneClass.py | d148b5fcd1cad063b87b6edff78351bc144ab92d | [
"MIT"
] | permissive | jr-garcia/Engendro3D | 5c5c1e931b390df415b8af2cba86521df82726a1 | fcfd81abab7a7b883be74bc28e2f07fdcd039ef5 | refs/heads/master | 2023-06-09T04:06:52.395785 | 2023-05-23T12:19:08 | 2023-05-23T12:19:08 | 87,677,215 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 15,043 | py | from ..Base3DObjectClass import DefaultObjectParameters
from ..update_management.pickleableMethods import updateLightTransformation, updateModelTransformation
from ..update_management.renderingListCreation import getOrderedModelInstances
from .LightClass import light, lightTypesEnum
from .SkyboxClass import Skybox
from ..backends.base_backend import DrawingData, InstanceData
from ..cameras.SimpleCameraClass import SimpleCamera
from ..commonValues import *
from ..model_management.ModelInstanceClass import ModelInstance
from ..model_management.AnimationModule import Animation
from ..physics_management.physicsModule import bodyShapesEnum, ScenePhysics
from ..model_management.MaterialClass import Material
from cycgkit.cgtypes import mat4
from bullet.bullet import ACTIVE_TAG, WANTS_DEACTIVATION
class Scene(object):
def get_ID(self):
return self._iID
ID = property(fget=get_ID)
def __init__(self, ID, engine, gravity, resolution):
self._models = {}
self._lights = {}
self._defaultCamera = SimpleCamera([0, 0, 0], [0, 0, 0], ID='defaultcam')
self.currentCamera = self._defaultCamera
self.__models = engine.models
self.sounds = engine.sounds
self._engine = engine
self._iID = ID
self._lastUpdate = 0
self.__skipped = 0
self.__frames = 0
self.beforeUpdateCallback = None
self._sky = None
self._instancesOrderedByModel = []
self._DebugColors = [[1, 0, 0], [0, 1, .3], [0, 0, 1], [1, 1, 0], [0, 1, 1], [1, 0, 1], [.0, .5, 0], [1, .5, 0]]
self.__currentDebugColor = 0
self.bottom = -500
self._material = Material()
self._material.diffuseColor = vec3(0.23, 0.34, 0.65)
self.ambientColor = [v / 3.0 for v in self._material.diffuseColor]
self._fogType = 0
self._fogColor = self._material.diffuseColor
self._fogStart = 300.0
self._fogEnd = 500.0
self.physics = ScenePhysics(gravity, resolution)
self._currentTransformations = None
self._currentModel = None
def _getBGColor(self):
return self._material.diffuseColor
def _setBGColor(self, val):
self._material.diffuseColor = val
self._fogColor = self._material.diffuseColor
bgColor = property(_getBGColor, _setBGColor)
def __repr__(self):
return self.ID
def hasModel(self, ID):
return ID in self._models
def addModel(self, modelID, IDInScene, position, rotation, uniformScale, animationQuality=1,
shape=bodyShapesEnum.auto, mass=None, isDynamic=False, materialsList=None):
"""
        @param animationQuality: The quality of the animation stepping. The higher
        the value, the more frames are fetched from the animations. If animations look
        incomplete or show jumps, raise this value. Must be equal to or higher than 0. Default 1.
@param modelID: The ID of a model previously loaded with engine.models.loadModel.
@param IDInScene: The ID that this instance will have in the scene.
@param shape: One of bodyShapesEnum
@type animationQuality: int
@rtype : ModelInstance
@type IDInScene: str
@type modelID: str
@type shape: bodyShapesEnum
"""
if self.hasModel(IDInScene):
raise NameError("An object with the same ID already exists in the scene:\n" + IDInScene)
model = self.__models._getModel(modelID)
if model:
if shape == bodyShapesEnum.auto:
shape = model._preShape
materials = model.materials if materialsList is None else materialsList
modelIns = ModelInstance(materials, modelID, self._engine, IDInScene, animationQuality, position, rotation,
uniformScale, shape, mass, isDynamic)
self.__currentDebugColor += 1
if self.__currentDebugColor > len(self._DebugColors) - 1:
self.__currentDebugColor = 0
modelIns.debugColor = list(self._DebugColors[self.__currentDebugColor])
self._models[IDInScene] = modelIns
self._instancesOrderedByModel = getOrderedModelInstances(self)
self.physics.addRigidObject(modelIns.physicsBody)
if not modelIns.physicsBody.isDynamic:
modelIns.physicsBody._reBuildMass(0.0)
return modelIns
else:
raise KeyError(
"Error adding model. The specified ID ('" + modelID + "') does not exist.\nTry loading the model before calling addModel.")
def addLight(self, ltype=lightTypesEnum.directional, position=None, rotation=None, ID=''):
"""
Insert a light into the scene.
If no ID is given, a default will be created.
@rtype : Engendro3D.LightClass.light
@type ID: str
"""
if ID == '':
lid = 0
ID = 'light{}'.format(lid)
while ID in self._lights:
lid += 1
ID = 'light{}'.format(lid)
elif ID in self._lights:
            raise KeyError('A light with this ID already exists.')
l = light(ltype, position, rotation, ID)
self._lights[ID] = l
return l
def removeModel(self, sceneID):
"""
@type sceneID: str
"""
self._models.pop(sceneID)
self._instancesOrderedByModel = getOrderedModelInstances(self)
def removeLight(self, ID):
"""
@type ID: str
"""
self._lights.pop(ID)
def flush(self):
"""
Removes all objects from the scene.
"""
self._models.clear()
self._lights.clear()
def _modelsUpdate(self):
        # Todo: implement per model callbacks
for m in self._models.values():
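            # Sync the render-side position/rotation from the rigid body's world transform.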
if m.physicsBody.isDynamic and not m.physicsBody._beyondBoundary:
transform = m.physicsBody._motion.getWorldTransform()
pos = vec3(bulletVectorToList(transform.getOrigin() - Vector3(m._pOffset[0], m._pOffset[1], m._pOffset[2])))
rot = vec3(bulletQuatToRotList(transform.getRotation()))
m._position = pos
m._rotation = rot
m._dirty = True
m._update() # todo: reimplement threaded update with stronger method
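                # Bodies that fall below the scene's bottom plane leave the simulation and are hidden.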
if m._position[1] <= self.bottom and not m.physicsBody._beyondBoundary:
self.physics.removeRigidObject(m.physicsBody)
m.physicsBody._phyUpdWait = 0
m.visible = False
m.physicsBody._beyondBoundary = True
else:
for Sn in m._attachedSounds.values():
Sn.soundSource.position = list(m._position)
def _UpdateLights(self):
# Todo: implement per model callbacks
for m in self._lights.values():
assert isinstance(m, light)
if m._dirty:
m._update()
if m._position[1] <= self.bottom and not m.physicsBody._beyondBoundary:
self.physics.removeRigidObject(m.physicsBody)
m.physicsBody._phyUpdWait = 0
m.visible = False
def setDefaultSkyBox(self):
self._sky = Skybox('default', self._engine)
self._sky.loadDefault()
def _getSky(self):
return self._sky
def _setSky(self, value):
if value is not None and not isinstance(value, Skybox):
raise TypeError('sky object must be of type \'Skybox\'')
self._sky = value
sky = property(_getSky, _setSky)
@property
def fogType(self):
return self._fogType
@fogType.setter
def fogType(self, value):
self._fogType = value
def update(self, netTime, windowSize):
if self._lastUpdate == 0:
frameTime = 0
else:
frameTime = netTime - self._lastUpdate
if self.beforeUpdateCallback is not None:
self.beforeUpdateCallback([frameTime, netTime])
self.physics.update(frameTime / 1000.0)
self._modelsUpdate()
self._UpdateLights()
if self._sky is not None:
self._sky._update()
self._lastUpdate = netTime
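        # Assemble this frame's drawing data: scene-wide parameters first, then one InstanceData per visible mesh.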
currentModelID = ''
currentModel = None
newDrawingData = DrawingData()
newDrawingData.clearColor = self.bgColor
newDrawingData.sky = self.sky
if self.currentCamera is None:
self.currentCamera = self._defaultCamera
if not self.currentCamera.projectionMatrix:
self.currentCamera.updateFOV(windowSize[0], windowSize[1])
current_view = self.currentCamera._update()
current_projection = self.currentCamera.projectionMatrix
current_zNear = self.currentCamera._p_zNear
current_zFar = self.currentCamera._p_zFar
defaultSceneParams = DefaultSceneParameters()
defaultSceneParams.defaultTexture = self._engine.textures.getDefaultTexture()
defaultSceneParams.zNear = current_zNear
defaultSceneParams.zFar = current_zFar
defaultSceneParams.ambientColor = self.ambientColor
defaultSceneParams.fogType = self._fogType
defaultSceneParams.fogColor = self._fogColor
defaultSceneParams.fogStart = self._fogStart
defaultSceneParams.fogEnd = self._fogEnd
defaultSceneParams.lights = self._lights
defaultSceneParams.cameraPosition = self.currentCamera._position
defaultSceneParams.view = current_view
defaultSceneParams.projection = current_projection
defaultSceneParams.windowSize = vec3(windowSize[0], windowSize[1], 1)
defaultSceneParams.construct()
newDrawingData.defaultSceneParams = defaultSceneParams
for currentModelInstance in self._instancesOrderedByModel:
if currentModelInstance.visible:
if currentModelInstance._baseModelID != currentModelID:
currentModelID = currentModelInstance._baseModelID
currentModel = self._engine.models._getModel(currentModelID)
defaultObjectParams = DefaultObjectParameters()
defaultObjectParams.model = currentModelInstance._transformation
defaultObjectParams.view = current_view
defaultObjectParams.projection = current_projection
defaultObjectParams.hasBones = currentModel.hasBones
defaultObjectParams.construct()
self._currentModel = currentModel
if currentModel.hasBones:
newDrawingData.modelBoneDirs[currentModelID] = currentModel.boneDict
if currentModelInstance._animationID != '':
time = Scene.calculateInstanceAnimationTime(netTime, currentModelInstance, currentModel)
else:
time = None
Scene.extractRenderInfo(currentModelInstance, defaultObjectParams, currentModel.rootNode,
newDrawingData, time, self)
return newDrawingData
@staticmethod
def extractRenderInfo(currentModelInstance, defaultParams, node, newDrawingData, time, scene):
for mesh in node._meshes:
meshid = mesh.ID
transformations = None
if time is not None:
# todo: implement currentModel.hasBones debug bounding box
transformations = scene.getInstanceAnimationTransformations(currentModelInstance, time, mesh)
# self._currentAnimatedBBox.clear()
newDrawingData.meshes.add(mesh)
meshMat = currentModelInstance._materials[mesh._materialIndex]
newDrawingData.instances[meshid].append(
InstanceData(meshMat, defaultParams, transformations, currentModelInstance._baseModelID))
for cnode in node._childNodes:
Scene.extractRenderInfo(currentModelInstance, defaultParams, cnode, newDrawingData, time, scene)
@staticmethod
def calculateInstanceAnimationTime(netTime, currentModelInstance, currentModel):
assert isinstance(currentModelInstance, ModelInstance)
model = currentModelInstance
if model.animState == ModelInstance.animationState.playing:
if model._animationStartupTime == -1:
model._animationStartupTime = netTime
if model._animLastPauseStartup != -1:
model._animationPausedTime += netTime - model._animLastPauseStartup
model._animLastPauseStartup = -1
anim = currentModel.animations[currentModelInstance._animationID]
assert isinstance(anim, Animation)
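            # Convert elapsed wall-clock milliseconds (minus paused time) into animation ticks, wrapped into [0, duration].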
btime = (((netTime - model._animationStartupTime) - model._animationPausedTime) / 1000.0) * anim.ticks
time = btime
while time > anim.duration:
time -= anim.duration
if time < 0:
time += 1.0
adjustedTime = round(time, model.animationQuality)
model._animationLastPlayedFrame = adjustedTime
return adjustedTime
elif model.animState == ModelInstance.animationState.paused:
if model._animationPausedTime == -1:
model._animationPausedTime = netTime
return model._animationLastPlayedFrame
else:
model._animationStartupTime = -1
model._animationPausedTime = -1
def getInstanceAnimationTransformations(self, currentModelInstance, time, mesh):
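        # A negative time means nothing is playing yet, so fall back to the bind pose of the instance's first animation.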
if time < 0:
ID = list(currentModelInstance.getAnimationsList())[0]
anim = self._currentModel.animations[ID]
currentTransformations = self._currentModel.skeleton.getBindPose(anim, mesh)
else:
anim = self._currentModel.animations[currentModelInstance._animationID]
currentTransformations = self._currentModel.skeleton.getAnimationTranformations(anim, time, mesh)
# for b in mesh.boneMinMax.items():
# flatm = currentTransformations[b[0]]
# pointa = flatm * b[1][0]
# self._currentAnimatedBBox.addPoint(pointa)
# pointb = flatm * b[1][1]
# self._currentAnimatedBBox.addPoint(pointb)
return currentTransformations
def terminate(self):
self.physics.terminate()
class DefaultSceneParameters(object):
def __init__(self):
self.zFar = 5000
self.zNear = 1
self.fogType = 0
self.fogColor = vec3(0.23, 0.34, 0.65)
self.fogStart = 300.0
self.fogEnd = 500.0
self.defaultTexture = None
self.ambientColor = [1, 1, 1, 1]
self.lights = {}
self.cameraPosition = None
self.view = None
self.projection = None
self.ViewProjection = None
self.windowSize = vec3(0)
def construct(self):
self.ViewProjection = self.projection * self.view
| [
"[email protected]"
] | |
ec400f0e82a29ba599263f5b9cbb120c712dff95 | b4ce39af031a93354ade80d4206c26992159d7c7 | /Tutorials/Binary Search/Binary Search Function and whileloop.py | 02a4f2dcb91829a7b678557a1157cc2ec7b8f2a9 | [] | no_license | Bibin22/pythonpgms | 4e19c7c62bc9c892db3fd8298c806f9fdfb86832 | e297d5a99db2f1c57e7fc94724af78138057439d | refs/heads/master | 2023-06-15T00:51:14.074564 | 2021-07-12T17:44:47 | 2021-07-12T17:44:47 | 315,982,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | def search(list, n):
l = 0
u = len(list)-1
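    # Classic iterative binary search: halve the range [l, u] each pass; requires a sorted input.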
while l <= u:
mid = (l + u) // 2
if list[mid] == n:
return True
else:
if list[mid] < n:
l = mid+1
else:
u = mid-1
return False
items = [1, 2, 4, 5, 6, 7, 8, 9, 10]
n = int(input("enter a number"))
if search(items, n):
    print("item found")
else:
print("item not found")
| [
"[email protected]"
] | |
90c29bd4121c5b2955daef0224a72300c97b7d67 | 6c82cb2e9bab9931c973433e2e384061e1405fc5 | /app/models/customer.py | 50630e0b62fb5881d174d7094ad36b8febeb566b | [] | no_license | M0r13n/bully-backend | 7153b27552ff2ef25c9ffdf63c55600f3fddcd7b | e9443e10f39a819012d612cd0cd075fb8d75bee2 | refs/heads/master | 2023-03-09T13:38:16.198239 | 2021-02-21T12:39:01 | 2021-02-21T12:39:01 | 291,683,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | from datetime import datetime
from sqlalchemy.orm import relationship
from app.extensions import db
class Customer(db.Model):
__tablename__ = "customer"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
first_name = db.Column(db.String(255), nullable=False, unique=False, index=True)
last_name = db.Column(db.String(255), nullable=False, unique=False, index=True)
street = db.Column(db.String(255), nullable=False, unique=False, index=True)
zip_code = db.Column(db.String(10), nullable=False, unique=False, index=True)
city = db.Column(db.String(255), nullable=False, unique=False, index=True)
tel = db.Column(db.String(64), nullable=True, unique=False, index=True)
email = db.Column(db.String(255), nullable=False, unique=False, index=True)
registered_on = db.Column(db.DateTime, nullable=False, default=datetime.now)
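    # One-to-many link to reservations; the Reservation model is assumed to declare the matching "customer" side.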
reservations = relationship("Reservation", back_populates="customer")
| [
"[email protected]"
] | |
707bf976b0e8935fd7466d48163a9de42563f8b7 | fcc63d65284593a9ad45e28dd8c49445aa4a8d30 | /app/atuo/views.py | b5333f8a6549d3d6935c0375da3a0819034a38b0 | [] | no_license | Hardworking-tester/API_SAMPLE | 0b33a2ee52e4d316775a09c9c897275b26e027c9 | 867f0b289a01fea72081fd74fbf24b2edcfe1d2d | refs/heads/master | 2021-01-23T12:32:36.585842 | 2017-06-23T02:31:39 | 2017-06-23T02:31:39 | 93,167,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,929 | py | # encoding:utf-8
# author:wwg
from flask import *
import time
import urllib2,urllib
from ..action import post,get
from forms import FunctionModelsForm,CaseInformationForm,DataTestForm,ElementLocateForm,CaseInformationEditForm,FunctionModelsEditForm
from app.models import FunctionModelsDb,CaseInformationDb,CaseDataDb,ElementLocateDb,ResultTestDb
from .. import db
from . import auto
import uuid
from ..resultlog import ResultLog
@auto.route('/addModels',methods=['GET','POST'])
def addFunctionModels():
"""新增功能模块视图"""
form=FunctionModelsForm()
if form.validate_on_submit():
model=FunctionModelsDb(id=str(uuid.uuid4()).replace('-',''),name=form.model_name.data)
form.model_name.data=''
db.session.add(model)
db.session.commit()
return render_template('autotemplates/AddFunctionModel.html',form_html=form)
@auto.route('/editModels',methods=['GET','POST'])
def editFunctionModels():
"""编辑功能模块视图"""
form=FunctionModelsEditForm()
query_model_information = db.session.query(FunctionModelsDb.name).all()
# if request.method == "POST":
global filter_name
for m in query_model_information:
if request.form.get(m[0]) != None:
form.model_name.data = request.form.get(m[0])
filter_name=request.form.get(m[0])
if form.validate_on_submit():
update_object = db.session.query(FunctionModelsDb).filter_by(name=filter_name).first()
update_object.name = form.model_name.data
form.model_name.data=''
db.session.commit()
return render_template('autotemplates/EditFunctionModel.html',form_html=form)
@auto.route('/',methods=['GET','POST'])
def index():
"""首页访问视图"""
return render_template('autotemplates/index.html')
@auto.route('/queryModel',methods=['GET','POST'])
def queryModels():
"""查询功能模块视图"""
query_model_name=db.session.query(FunctionModelsDb).all()
return render_template('autotemplates/queryModel.html',model_names=query_model_name)
@auto.route('/addCase',methods=['GET','POST'])
def addCaseInformation():
"""新增测试用例视图"""
form =CaseInformationForm()
if form.validate_on_submit():
id = str(uuid.uuid4()).replace('-', '')
case_number=form.case_number.data
case_summary=form.case_summary.data
model_id_foreign=form.model_name.data
url = form.url.data
post_data = form.post_data.data
post_method = form.post_method.data
case_info=CaseInformationDb(id=id,case_number=case_number,case_summary=case_summary,model_id=model_id_foreign,url=url,post_data=post_data,post_method=post_method)
form.case_number.data=''
form.case_summary.data = ''
form.url.data = ''
form.post_data.data = ''
db.session.add(case_info)
db.session.commit()
return render_template('autotemplates/addCaseInformation.html', form_html=form)
@auto.route('/editCase',methods=['GET','POST'])
def editCaseInformation():
"""编辑测试用例视图"""
form =CaseInformationEditForm()
query_case_information = db.session.query(CaseInformationDb.case_number, CaseInformationDb.case_summary,
CaseInformationDb.url, CaseInformationDb.post_data, CaseInformationDb.post_method,CaseInformationDb.id,CaseInformationDb.model_id).all()
global filter_id
for m in query_case_information:
if request.form.get(m[5]) != None:
form.case_number.data = m[0]
form.case_summary.data = m[1]
form.model_name.data = db.session.query(FunctionModelsDb.id).filter_by(id=m[6]).first()[0]
form.url.data = m[2]
form.post_data.data = m[3]
form.post_method.data = m[4]
filter_id = request.form.get(m[5])
if form.validate_on_submit():
update_case_object = db.session.query(CaseInformationDb).filter_by(id=filter_id).first()
update_case_object.case_number = form.case_number.data
update_case_object.case_summary= form.case_summary.data
update_case_object.url= form.url.data
update_case_object.post_data= form.post_data.data
update_case_object.post_method= form.post_method.data
update_case_object.model_id=form.model_name.data
form.case_number.data = ''
form.case_summary.data = ''
form.url.data = ''
form.post_data.data = ''
db.session.commit()
return render_template('autotemplates/editCaseInformation.html', form_html=form)
@auto.route('/queryCaseInformation',methods=['GET','POST'])
def queryCaseInformation():
"""查询所有测试用例视图"""
case_data=db.session.query(CaseInformationDb.case_number, CaseInformationDb.case_summary,
CaseInformationDb.url, CaseInformationDb.post_data, CaseInformationDb.post_method,CaseInformationDb.id,CaseInformationDb.model_id).all()
set_data=db.session.query(FunctionModelsDb.id,FunctionModelsDb.name).all()
    module_id_name=dict(set_data)  # convert the result set to a dict
return render_template('autotemplates/queryCaseInformation.html',case_informations=case_data,module_id_name=module_id_name)
def getModuleNameById(module_id):
return db.session.query(FunctionModelsDb.name).filter_by(id=module_id).first()[0]
@auto.route('/executeTest',methods=['GET','POST'])
def executeTest():
"""执行测试"""
query_case_information=db.session.query(CaseInformationDb).all()
all_case_information=db.session.query(CaseInformationDb.case_number,CaseInformationDb.case_summary,CaseInformationDb.model_id).all()
if request.method=="POST":
for m in query_case_information:
if str(m) ==request.form.get('idName'):
id = str(uuid.uuid4()).replace('-', '')
case_id_list = db.session.query(CaseInformationDb.id).filter_by(case_number=request.form.get('idName')).all()
case_id= case_id_list[0][0]
post_url=db.session.query(CaseInformationDb.url).filter_by(case_number=request.form.get('idName')).all()[0][0]
send_data=db.session.query(CaseInformationDb.post_data).filter_by(case_number=request.form.get('idName')).all()[0][0]
post_method=db.session.query(CaseInformationDb.post_method).filter_by(case_number=request.form.get('idName')).all()[0][0]
model_id=db.session.query(CaseInformationDb.model_id).filter_by(case_number=request.form.get('idName')).all()[0][0]
case_summary=db.session.query(CaseInformationDb.case_summary).filter_by(case_number=request.form.get('idName')).all()[0][0]
if post_method=='post':
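                        # Note: eval() on stored request data is unsafe with untrusted input; ast.literal_eval would be a safer choice here.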
post_result=post.Post().post(post_url,eval(send_data))
flag=post_result[0]
result_data=post_result[1]
add_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
post_result_data = ResultTestDb(id=id, case_number=request.form.get('idName'), case_result=str(result_data), Result_flag=flag,add_time=add_time)
db.session.add(post_result_data)
db.session.commit()
elif post_method=='get':
get_result=get.Get().get(post_url)
    set_data = db.session.query(FunctionModelsDb.id, FunctionModelsDb.name).all()  # query all function module IDs and names, converted to a dict below
    module_id_name = dict(set_data)  # convert the result set to a dict
return render_template('autotemplates/executeTest.html',case_informations=all_case_information,module_id_name=module_id_name)
@auto.route('/getResult/<name>',methods=['GET','POST'])
def getResult(name):
"""得到测试结果"""
query_case_information=db.session.query(CaseInformationDb).all()
# if request.method=="POST":
for m in query_case_information:
if str(m) ==name:
ResultLog.ResultLog().info(str(m))
case_number=str(m)
result_list = db.session.query(ResultTestDb.id,ResultTestDb.case_number,ResultTestDb.Result_flag,ResultTestDb.case_result,ResultTestDb.add_time,ResultTestDb.image_path).order_by(db.desc(ResultTestDb.add_time)).filter_by(case_number=case_number).all()
return render_template('autotemplates/getResult.html',result_data=result_list)
@auto.route('/getAllResult',methods=['GET','POST'])
def getAllResult():
"""得到全部测试用例最后一次测试结果信息"""
result_data = []
query_case_information=db.session.query(CaseInformationDb).all()
for m in query_case_information:
result_list = db.session.query(ResultTestDb.case_number, ResultTestDb.Result_flag,ResultTestDb.add_time).order_by(db.desc(ResultTestDb.add_time)).filter_by(case_number=str(m)).first()
result_data.append(result_list)
return render_template('autotemplates/getAllResult.html',result_data=result_data)
| [
"[email protected]"
] | |
e89963810a075e160abc281ff7078690ec605237 | eba3e4a3935d6422d1ed85aaf69337f5ba15fc74 | /sqlalchemy-migrate/test/versioning/test_template.py | 72217ac895daee6f61801609b45e5c2471a8ca45 | [] | no_license | arianepaola/tg2jython | 2ae74250ca43b021323ef0951a9763712c2eb3d6 | 971b9c3eb8ca941d1797bb4b458f275bdca5a2cb | refs/heads/master | 2021-01-21T12:07:48.815690 | 2009-03-27T02:38:11 | 2009-03-27T02:38:11 | 160,242 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | from test import fixture
from migrate.versioning.repository import *
import os
class TestPathed(fixture.Base):
def test_templates(self):
"""We can find the path to all repository templates"""
path = str(template)
self.assert_(os.path.exists(path))
def test_repository(self):
"""We can find the path to the default repository"""
path = template.get_repository()
self.assert_(os.path.exists(path))
def test_script(self):
"""We can find the path to the default migration script"""
path = template.get_script()
self.assert_(os.path.exists(path))
| [
"ariane@venus.(none)"
] | ariane@venus.(none) |
576e4dd9db1bd5772535f8baac36843c7d73ac12 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/codewar/_codewarsPython-master/[beta]Mysterious function.py | 6b327c06bd9d4e69da3fa358319279e7d309c1f3 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 118 | py | def get_num(n):
    return sum([2 if c == '8' else 1 for c in str(n) if c in ('0', '6', '8', '9')])
print(get_num(300)) | [
"[email protected]"
] | |
a9ba472f2edf7d86bd6237c5e0a8b230fd58eeb7 | 704da68062145c0e1d016256bbe86f2286c6d149 | /tests/test_models/test_user.py | e14bb600f7e57395687239482e89920eacc3d550 | [] | no_license | TMcMac/AirBnB_clone_old | bf39a4cb80fc39390d2af03861938954c24dc742 | 5130e5b08ca9d301600ea963f126d4e124b305cd | refs/heads/master | 2023-01-03T15:09:53.609497 | 2020-10-29T20:04:41 | 2020-10-29T20:04:41 | 274,945,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,533 | py | #!/usr/bin/python3
"""Unit test for User class"""
import unittest
from models.base_model import BaseModel
from models.user import User
class TestBaseModel(unittest.TestCase):
"""Unit test for User class"""
@classmethod
def setUp(cls):
print('SetupClass')
@classmethod
def tearDown(cls):
print('TearDownClass')
def setUp(self):
"""Unit test setup"""
print('setUp')
self.u1 = User()
self.u2 = User()
def tearDown(self):
"""Unit test tear down"""
del self.u1
del self.u2
def test_init(self):
"""Test for init method"""
print("testing init...")
self.assertIsNotNone(self.u1)
self.assertIsInstance(self.u1, BaseModel)
self.assertIs(type(self.u1), User)
def test_uuid(self):
"""Test for uuid attribute"""
print("testing uuid...")
self.assertTrue(hasattr(self.u1, "id"))
self.assertNotEqual(self.u1.id, self.u2.id)
self.assertIsInstance(self.u1.id, str)
def test_email(self):
"""Test for email attribute"""
print("testing email...")
self.assertTrue(hasattr(self.u1, "email"))
self.assertEqual(self.u1.email, "")
self.assertIsInstance(self.u1.email, str)
def test_first_name(self):
"""Test for first_name attribute"""
print("testing first_name...")
self.assertTrue(hasattr(self.u1, "first_name"))
self.assertEqual(self.u1.first_name, "")
self.assertIsInstance(self.u1.first_name, str)
def test_last_name(self):
"""Test for last_name attribute"""
print("testing last_name...")
self.assertTrue(hasattr(self.u1, "last_name"))
self.assertEqual(self.u1.last_name, "")
self.assertIsInstance(self.u1.last_name, str)
def test_str(self):
"""Test for __str__ method"""
print("testing __str__method...")
result = len(self.u1.__str__())
self.assertTrue(result, 172)
def test_save(self):
"""Test for save method"""
print("testing save method...")
prechange = self.u1.updated_at
self.u1.save()
postchange = self.u1.updated_at
self.assertNotEqual(prechange, postchange)
def test_created_at(self):
"""Test for created at time"""
print("Testing the created at time attr")
self.assertTrue(hasattr(self.u1, "created_at"))
def test_updated_at(self):
"""Test for the updated at time attr"""
print("Testing the updated at time attr")
prechange = self.u1.updated_at
self.u1.save()
postchange = self.u1.updated_at
self.assertNotEqual(prechange, postchange)
def test_kwargs(self):
"""Test for kwargs"""
print("Testing for kwargs")
self.u1.name = "Holberton"
self.u1.my_number = 89
u1_json = self.u1.to_dict()
u2 = User(**u1_json)
self.assertEqual(self.u1.id, u2.id)
self.assertEqual(self.u1.created_at, u2.created_at)
self.assertEqual(self.u1.updated_at, u2.updated_at)
self.assertEqual(self.u1.name, u2.name)
self.assertEqual(self.u1.my_number, u2.my_number)
def test_module_docstring(self):
"""Test for existence of module docstring"""
print("testing module docstring...")
result = len(__import__('models.user').__doc__)
self.assertTrue(result > 0, True)
def test_class_docstring(self):
"""User Class Docstring Test"""
print("test_class_docstring")
result = len(User.__doc__)
self.assertTrue(result > 0, True)
def test_init_docstring(self):
"""User init Docstring Test"""
print("test_init_docstring")
result = len(self.__init__.__doc__)
self.assertTrue(result > 0, True)
def test__str__docstring(self):
"""User __str__ Docstring Test"""
print("testing __str__ docstring...")
result = len(User.__str__.__doc__)
self.assertTrue(result > 0, True)
def test_save_docstring(self):
"""User save method Docstring Test"""
print("testing save docstring...")
result = len(User.save.__doc__)
self.assertTrue(result > 0, True)
def test_to_dict_docstring(self):
"""User to_dict Docstring Test"""
print("testing to_dict docstring...")
result = len(User.to_dict.__doc__)
self.assertTrue(result > 0, True)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
bb7191718af6f79dcc797802896409e5072e9902 | cd118552db7fdc384e91a6a6c0addc2d70e7055f | /virtual/bin/flask | 2a0e7f139cc22cfdf49023d5b60fcc24916a961f | [] | no_license | IreriVIkki/BlogIP | f23098a059a75cb3dba0936b1e850b1aa292bf25 | b2dc00e1ec15958efa78a7098cbc2ffdcdebc26e | refs/heads/master | 2020-03-28T17:33:24.508120 | 2018-09-17T07:03:24 | 2018-09-17T07:03:24 | 148,800,977 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 243 | #!/home/vikki/Documents/BlogIP/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
a48f5d7a47297a3004b4066f232a6791173b237c | 7caa438706a423dd9779a81f8345fcf1ec11e921 | /NXT-Python/pyglet-1.2.4/tests/window/WINDOW_SET_ICON.py | bdeac31fc404c7ca4aa58750eff8f810ef925d2f | [
"BSD-3-Clause"
] | permissive | tamarinvs19/python-learning | 5dd2582f5dc504e19a53e9176677adc5170778b0 | 1e514ad7ca8f3d2e2f785b11b0be4d57696dc1e9 | refs/heads/master | 2021-07-15T13:23:24.238594 | 2021-07-08T07:07:21 | 2021-07-08T07:07:21 | 120,604,826 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | #!/usr/bin/env python
'''Test that window icon can be set.
Expected behaviour:
One window will be opened. It will have an icon depicting a yellow
"A".
Close the window or press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: WINDOW_SET_MOUSE_CURSOR.py 717 2007-03-03 07:04:10Z Alex.Holkner $'
import unittest
from pyglet.gl import *
from pyglet import image
from pyglet import window
from pyglet.window import key
from os.path import join, dirname
icon_file = join(dirname(__file__), 'icon1.png')
class WINDOW_SET_ICON(unittest.TestCase):
def test_set_icon(self):
self.width, self.height = 200, 200
self.w = w = window.Window(self.width, self.height)
w.set_icon(image.load(icon_file))
glClearColor(1, 1, 1, 1)
while not w.has_exit:
glClear(GL_COLOR_BUFFER_BIT)
w.flip()
w.dispatch_events()
w.close()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
97743cb1f373612199548c0a7a0999c2a8642e77 | c30e2b2e1b7876af01afc11e70b9bde66ebc6d6a | /conftest.py | 9c646ac4834f556a9c633eecf8706d0f05ad49ad | [] | no_license | Jumas-Cola/stepik_selenium_test_project | 7aad125be2623520c6bfe93b34438ffd12de4303 | 8bc920a25ff83883dc6dd653f3dec7ea25350f27 | refs/heads/master | 2020-06-11T10:49:45.344813 | 2019-06-28T20:04:08 | 2019-06-28T20:04:08 | 193,936,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def pytest_addoption(parser):
parser.addoption('--language', action='store', default='en-gb',
help='Choose language of browser.\
For example: --language="es"')
@pytest.fixture
def browser(request):
lang = request.config.getoption("language")
options = Options()
options.add_experimental_option('prefs', {'intl.accept_languages': lang})
browser = webdriver.Chrome(options=options)
yield browser
browser.quit()
| [
"[email protected]"
] | |
82f52210db3d2ed623c1aa965475dfac4f433892 | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/surface/config/configurations/list.py | a9236025e57f42c19c3d5a61550bb922e874e63a | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 2,060 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list named configuration."""
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
from googlecloudsdk.core.configurations import named_configs
from googlecloudsdk.core.configurations import properties_file
class List(base.ListCommand):
"""Lists existing named configurations."""
detailed_help = {
'DESCRIPTION': """\
{description}
Run `$ gcloud topic configurations` for an overview of named
configurations.
""",
'EXAMPLES': """\
To list all available configurations, run:
$ {command}
""",
}
@staticmethod
def Args(parser):
base.PAGE_SIZE_FLAG.RemoveFromParser(parser)
base.URI_FLAG.RemoveFromParser(parser)
parser.display_info.AddFormat("""table(
name,
is_active,
properties.core.account,
properties.core.project,
properties.compute.zone:label=DEFAULT_ZONE,
properties.compute.region:label=DEFAULT_REGION)
""")
def Run(self, args):
configs = named_configs.ConfigurationStore.AllConfigs()
for _, config in sorted(configs.iteritems()):
props = properties.VALUES.AllValues(
list_unset=True,
properties_file=properties_file.PropertiesFile([config.file_path]),
only_file_contents=True)
yield {
'name': config.name,
'is_active': config.is_active,
'properties': props,
}
| [
"[email protected]"
] | |
a8ad2ad4050925e02f37ff0841869c81a863dd0c | 14d8418ca5990217be67aee89fdaa310db03fbba | /models/collector_pagination_response.py | f8002358f1839b81d410471e69eb2cd75df52a25 | [
"Apache-2.0"
] | permissive | sachanta/lm-sdk-python | 3a16457bd2d5b880a0d238a88a9d1d5b8d9675f0 | e476d415c7279457f79b5d032a73d950af2fe96b | refs/heads/master | 2023-08-03T08:39:42.842790 | 2021-09-13T07:20:56 | 2021-09-13T07:20:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,155 | py | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. Note: For Python SDKs, the REQUEST parameters can contain camelCase or an underscore. However, the RESPONSE parameters will always contain an underscore. For example, the REQUEST parameter can be testLocation or test_location. The RESPONSE parameter will be test_location. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.collector_base import CollectorBase # noqa: F401,E501
class CollectorPaginationResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'total': 'int',
'search_id': 'str',
'items': 'list[CollectorBase]'
}
attribute_map = {
'total': 'total',
'search_id': 'searchId',
'items': 'items'
}
def __init__(self, total=None, search_id=None, items=None): # noqa: E501
"""CollectorPaginationResponse - a model defined in Swagger""" # noqa: E501
self._total = None
self._search_id = None
self._items = None
self.discriminator = None
if total is not None:
self.total = total
if search_id is not None:
self.search_id = search_id
if items is not None:
self.items = items
@property
def total(self):
"""Gets the total of this CollectorPaginationResponse. # noqa: E501
:return: The total of this CollectorPaginationResponse. # noqa: E501
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this CollectorPaginationResponse.
:param total: The total of this CollectorPaginationResponse. # noqa: E501
:type: int
"""
self._total = total
@property
def search_id(self):
"""Gets the search_id of this CollectorPaginationResponse. # noqa: E501
:return: The search_id of this CollectorPaginationResponse. # noqa: E501
:rtype: str
"""
return self._search_id
@search_id.setter
def search_id(self, search_id):
"""Sets the search_id of this CollectorPaginationResponse.
:param search_id: The search_id of this CollectorPaginationResponse. # noqa: E501
:type: str
"""
self._search_id = search_id
@property
def items(self):
"""Gets the items of this CollectorPaginationResponse. # noqa: E501
:return: The items of this CollectorPaginationResponse. # noqa: E501
:rtype: list[CollectorBase]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this CollectorPaginationResponse.
:param items: The items of this CollectorPaginationResponse. # noqa: E501
:type: list[CollectorBase]
"""
self._items = items
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CollectorPaginationResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CollectorPaginationResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
4fd18aa08ea26bb71cfe0f3e50af1b3194892ff2 | 902dea88ec336851f2c325d44a0dd0eaf411fb87 | /day1/strings/19.py | 0e66b0d530e704999c4e711edd826992dae3e877 | [] | no_license | shobhit-nigam/tsip_pydoge | 34d8e03d0744c2eff7615ae94bd6998739ce2bfd | bff5b24e1e93b5b41dfcb913cee280d1ee53bbf5 | refs/heads/main | 2023-07-19T05:21:35.459518 | 2021-09-06T12:59:22 | 2021-09-06T12:59:22 | 399,728,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | # double & single quotes
#
vara = "I'am Manish"
varb = 'I\'am Manish'
print("vara =", vara)
print("varb =", varb)
# error
#varc = 'pushpa"
| [
"[email protected]"
] | |
7eabf17dbaccbfaf4b782657965ea1bc1aaebdf7 | c317f99691f549b393562db200b1e9504ce11f95 | /algorithms_learn/what_can_be_computed/src/convertSatTo3Sat.py | 4ae664cf883f08c293e4acd4fed2ec99d750493a | [
"CC-BY-4.0"
] | permissive | RRisto/learning | 5349f9d3466150dbec0f4b287c13333b02845b11 | 618648f63a09bf946a50e896de8aed0f68b5144a | refs/heads/master | 2023-09-01T00:47:23.664697 | 2023-08-30T17:56:48 | 2023-08-30T17:56:48 | 102,286,332 | 15 | 24 | null | 2023-07-06T21:22:48 | 2017-09-03T18:42:58 | Jupyter Notebook | UTF-8 | Python | false | false | 6,474 | py | # SISO program convertSatTo3Sat.py
# Convert an instance of SAT into an
# equivalent instance of 3-SAT.
# inString: an instance of SAT, formatted as described in the textbook
# and sat.py.
# returns: an instance of 3SAT in the same string format.
# Example:
# >>> convertSatTo3Sat('(x1 OR x2 OR NOT x3 OR NOT x4)')
# '(d1 OR x1 OR x2) AND (NOT d1 OR NOT x3 OR NOT x4)'
import utils; from utils import rf
import sat
def convertSatTo3Sat(inString):
cnfFormula = sat.readSat(inString)
allVariables = sat.getVariablesAsSet(cnfFormula)
# Repeatedly sweep through the clauses looking for "long" clauses
# (i.e., clauses with more than three literals). We favor
# simplicity and readability over efficiency here. Every time a
# long clause is found, it is removed, split, and replaced in the
# list by two new, shorter clauses. The new clauses are inserted
# at the point where the previous long clause was removed. Then
# we go back to the start of the entire list of clauses and start
# looking for long clauses again. This ends up being quadratic or
# worse, whereas near-linear is possible. But the approach is
# simple to understand and the clauses remain in a logical order.
done = False
while not done:
done = True
for clauseID in range(len(cnfFormula)):
clause = cnfFormula[clauseID]
if len(clause) > 3:
done = False
(newClause1, newClause2) = splitClause(clause, allVariables)
cnfFormula.pop(clauseID)
cnfFormula.insert(clauseID, newClause1)
cnfFormula.insert(clauseID+1, newClause2)
break
return sat.writeSat(cnfFormula)
def splitClause(clause, allVariables):
"""Split a clause using the method described in the textbook.
Args:
clause (dict mapping str to int): Each key is a variable in
the clause and the value is +1 for positive literals, -1
for negative, 0 for both
allVariables (set of str): A set of all variables in use, so
that we can choose dummy variables that are not already in
use.
Returns:
(clause, clause): 2-tuple consisting of two clauses, where
each clause is a dictionary as described in the parameter
above. The two clauses are the result of splitting the
input using the method described in the textbook.
"""
assert len(clause) > 3
numLiterals = len(clause)
dummyVariable = addDummyVariable(allVariables)
# There is no need to sort the variables, but it will give a more
# readable and predictable outcome, since otherwise the order of
# variables in the dictionary will be arbitrary.
sortedClauseVariables = sorted(clause.keys())
newClause1 = dict()
newClause2 = dict()
# Put the first numLiterals-2 literals into newClause1, and the
# last two literals into newClause2.
for i in range(numLiterals):
variable = sortedClauseVariables[i]
posNeg = clause[variable]
if i < numLiterals-2:
newClause1[variable] = posNeg
else:
newClause2[variable] = posNeg
# Add the dummy variable, positive in newClause1 and negative in newClause2
newClause1[dummyVariable] = +1
newClause2[dummyVariable] = -1
return (newClause1, newClause2)
# Create, add, and return a new dummy variable name. Specifically, the
# set allVariables is a set of all current variable names. We find a
# new variable name of the form d1, d2, d3, ... which is not in the
# given set. The new name is added to the set, and the new name is also
# returned. Implemented with a simple linear time algorithm; of
# course we could do better than that if desired.
def addDummyVariable(allVariables):
"""Create, add, and return a new dummy variable name.
Specifically, the set allVariables is a set of all current
variable names. We find a new variable name of the form d1, d2,
d3, ... which is not in the given set. The new name is added to
the set, and the new name is also returned. Implemented with a
simple linear time algorithm; of course we could do better than
that if desired.
Args:
allVariables (set of str): A set of all variables in use, so
that we can choose dummy variables that are not already in
use.
Returns:
str: the new dummy variable name.
"""
i = 1; done = False
while not done:
dummyName = 'd' + str(i)
if dummyName not in allVariables:
allVariables.add(dummyName)
return dummyName
i += 1
def testAddDummyVariable():
formulaStr = '(x1 OR x2 OR NOT x3 OR NOT x4 OR x5) AND (NOT x1 OR NOT x2 OR x3 OR x4) AND (x4 OR NOT x5)'
cnfFormula = sat.readSat(formulaStr)
allVariables = sat.getVariablesAsSet(cnfFormula)
numVars = len(allVariables)
for i in range(5):
dummyName = addDummyVariable(allVariables)
utils.tprint(dummyName, allVariables)
varName = 'd'+str(i+1)
assert varName in allVariables
assert len(allVariables) == numVars + i+1
def testSplitClause():
formulaStr = '(x1 OR x2 OR NOT x3 OR NOT x4 OR x5) AND (NOT x1 OR NOT x2 OR x3 OR x4) AND (x4 OR NOT x5)'
cnfFormula = sat.readSat(formulaStr)
allVariables = sat.getVariablesAsSet(cnfFormula)
result = splitClause(cnfFormula[0], allVariables)
solution = ({'x1': 1, 'd1': 1, 'x3': -1, 'x2': 1}, {'d1': -1, 'x5': 1, 'x4': -1})
utils.tprint('before split:', cnfFormula[0], '\nafter split:', result)
assert result==solution
def testConvertSatTo3Sat():
s0 = '(x1 OR x2 OR NOT x3 OR NOT x4 OR x5) AND (NOT x1 OR NOT x2 OR x3 OR x4) AND (x4 OR NOT x5)'
s0soln = '(d1 OR d2 OR x1) AND (NOT d2 OR x2 OR NOT x3) AND (NOT d1 OR NOT x4 OR x5) AND (d3 OR NOT x1 OR NOT x2) AND (NOT d3 OR x3 OR x4) AND (x4 OR NOT x5)'
s1 = ''
s1soln = ''
s2 = 'x1'
s2soln = '(x1)'
s3 = 'x1 AND NOT x2'
s3soln = '(x1) AND (NOT x2)'
s4 = 'x1 OR NOT x2'
s4soln = '(x1 OR NOT x2)'
testvals = [
(s0, s0soln),
(s1, s1soln),
(s2, s2soln),
(s3, s3soln),
(s4, s4soln),
]
for (inString, soln) in testvals:
utils.tprint('**', inString, '**')
converted = convertSatTo3Sat(inString)
utils.tprint(converted, '\n\n')
assert converted == soln
| [
"[email protected]"
] | |
a6e0a619fb8ccad6c68753739f72ef7217d9a4a8 | 95689182691599b2e74ca33b36d2828a01ec5889 | /proyectos_de_ley/pdl/migrations/0002_proyecto_legislatura.py | f73ae0753f3b06383b38193a5fcf041fd0a05384 | [
"MIT"
] | permissive | proyectosdeley/proyectos_de_ley | 2392c6f3fdefc88d355f37e615ddb5ddc70c5321 | aed3f09dd2e41711bdcb27aec66a1a0d7896bb35 | refs/heads/master | 2021-07-14T12:33:33.793325 | 2020-07-26T19:44:53 | 2020-07-26T19:44:53 | 23,754,905 | 13 | 10 | MIT | 2020-07-26T19:44:54 | 2014-09-07T07:32:53 | Python | UTF-8 | Python | false | false | 492 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-09-02 20:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pdl', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='proyecto',
name='legislatura',
field=models.IntegerField(default=2011, max_length=4),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
941083e324bb5e8f93f3c8d1192cd5dacd22c422 | 75566ef3423c72fe9e73075dfe29df172b65a28c | /src/scraper/database.py | f2d4c88d42deffe9bfa9459831dc37dae9483bca | [] | no_license | bicsi/cpaggregator | 7020663305eeff8690e92da64fc796926b12fe31 | b6459fe33c19dc2020b29470e457f0666b0ff212 | refs/heads/master | 2022-12-12T10:24:38.756844 | 2020-11-11T13:53:30 | 2020-11-11T13:53:30 | 153,731,264 | 2 | 1 | null | 2022-12-08T01:15:28 | 2018-10-19T05:33:06 | CSS | UTF-8 | Python | false | false | 2,178 | py | import os
from pprint import pprint
from pymongo import MongoClient, ReplaceOne
from pymongo.errors import BulkWriteError
from core.logging import log
def __insert_many_silent(coll, iterable, unique_fields):
requests = []
for elem in iterable:
find_dict = {field: elem[field] for field in unique_fields}
requests.append(ReplaceOne(find_dict, elem, upsert=True))
try:
result = coll.bulk_write(requests)
return result.inserted_count
except BulkWriteError as bwe:
for err in bwe.details['writeErrors']:
if err['code'] != 11000:
log.error(bwe.details)
log.error(pprint(iterable))
raise
return bwe.details['nInserted']
def get_db():
if os.environ.get('PRODUCTION'):
connection = MongoClient(os.environ.get('MONGODB_HOST'), int(os.environ.get('MONGODB_PORT')))
db = connection[os.environ.get('MONGODB_NAME')]
db.authenticate(os.environ.get('MONGODB_USER'), os.environ.get('MONGODB_PASS'))
return db
return MongoClient()['competitive']
def insert_report(db, report_id, created_at, report):
coll = db["reports"]
coll.insert({
'report_id': report_id,
'created_at': created_at,
'report': report,
})
def insert_submissions(db, submissions):
return __insert_many_silent(
coll=db["submissions"],
iterable=submissions,
unique_fields=['judge_id', 'submission_id', 'author_id'])
def insert_handles(db, handles):
return __insert_many_silent(
coll=db["handles"],
iterable=handles,
unique_fields=['judge_id', 'handle'])
def find_submissions(db, date_range=None, **query_dict):
coll = db["submissions"]
if date_range is not None:
date_start, date_end = date_range
query_dict.update({
'submitted_on': {
'$gte': date_start,
'$lte': date_end,
}
})
return coll.find(query_dict)
def insert_tasks(db, tasks):
return __insert_many_silent(
coll=db["tasks"],
iterable=tasks,
unique_fields=['judge_id', 'task_id'])
| [
"[email protected]"
] | |
2f2f37808d18c375de2161f33f361a7206bf124d | 29d09c634ffdd8cab13631d62bc6e3ad00df49bf | /Algorithm/swexpert/1249_보급로건설.py | 0f9f48542b53f0796bb44539d57d7850a9539998 | [] | no_license | kim-taewoo/TIL_PUBLIC | f1d32c3b4f46344c1c99f02e95cc6d2a888a0374 | ae86b542f8b1805b5dd103576d6538e3b1f5b9f4 | refs/heads/master | 2021-09-12T04:22:52.219301 | 2021-08-28T16:14:11 | 2021-08-28T16:14:11 | 237,408,159 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | from collections import deque
dr = (-1,0,1,0)
dc = (0,1,0,-1)
T = int(input())
for t in range(1, T+1):
n = int(input())
board = [list(map(int, list(input()))) for _ in range(n)]
chk = [[21470000 for _ in range(n)] for __ in range(n)]
chk[0][0] = 0
q = deque((0,0))
while q:
r, c = q.popleft()
cost = chk[r][c]
for d in range(4):
nr, nc = r + dr[d], c + dc[d]
if 0 <= nr < n and 0 <= nc < n:
ncost = cost + board[nr][nc]
if ncost < chk[nr][nc]:
chk[nr][nc] = ncost
q.append((nr,nc))
print("#{} {}".format(t, chk[n-1][n-1]))
| [
"[email protected]"
] | |
51b36a04caec338e1885e9ea1791c5c6c0d3e2af | d8d1a9b2bec5b2679129c246cf58acc836e4355b | /pytest_resource_path/absolute_path_factory.py | 117b373b80071080391d19d48136aa6e5f7f39ef | [
"MIT"
] | permissive | yukihiko-shinoda/pytest-resource-path | 0ac0d612887f453b793ec114b65eb9613817b5cc | bc56c4b5f2c8f3138baeac7f145717f6a70af7b6 | refs/heads/master | 2023-04-15T21:05:18.643053 | 2021-05-01T04:27:12 | 2021-05-01T04:27:12 | 261,375,368 | 10 | 0 | MIT | 2021-05-01T02:45:39 | 2020-05-05T06:08:49 | Python | UTF-8 | Python | false | false | 1,451 | py | """Implements creating process for absolute path to argument of constructor."""
from pathlib import Path
from types import FunctionType, MethodType
from typing import Union
from pytest_resource_path.exceptions import LogicError
from pytest_resource_path.path_factory import PathFactory
__all__ = ["AbsolutePathFactory"]
class AbsolutePathFactory:
"""Implements creating process for absolute path to argument of constructor."""
def __init__(self, path_target: Path):
self.path_target = path_target
def create_by_function(self, item: Union[MethodType, FunctionType]) -> Path:
path = PathFactory.create_absolute_path_by_function(item)
return self.create_by_path(path)
def create_by_path(self, path: Path) -> Path:
"""Creates absolute path to parh_target."""
index = None
index_tests = None
string_path_tests = str(self.path_target)
for index, part in enumerate(path.parts):
if part == string_path_tests:
index_tests = index
if index is None or index_tests is None:
raise LogicError( # pragma: no cover
"Unexpected path.\n"
"path = " + str(path) + ",\n"
"string_path_tests = " + string_path_tests + ",\n"
"index_tests, " + str(index_tests) + ",\n"
"index = " + str(index)
)
return path.parents[index - index_tests - 1]
| [
"[email protected]"
] | |
b6250476b579a0d0ee5585b0f82e06ea882db68d | de64154c4a968ab8c04390938edc300f2b52f129 | /tests/lldb/runtest.py | 71ee019c66d0c9c37232d6e655065a37bdde5e49 | [
"Apache-2.0"
] | permissive | curliph/NyuziProcessor | 7364a83a52b3f1d461c908a9ff88ee222be08c25 | 2d7cc748a8388a5be4c28d3cb34786bc9f0b801a | refs/heads/master | 2020-04-10T15:16:51.874141 | 2018-12-09T22:40:55 | 2018-12-09T22:40:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,708 | py | #!/usr/bin/env python3
#
# Copyright 2011-2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import subprocess
import sys
sys.path.insert(0, '..')
import test_harness
class EmulatorProcess(object):
"""
This spawns the emulator process and LLDB in MI (machine interface) mode.
    It allows communication with LLDB via stdin and stdout. It has the
__enter__ and __exit__ methods allowing it to be used in the 'with'
construct so it will automatically be torn down when the test is done.
"""
def __init__(self, hexfile):
self.hexfile = hexfile
self.elf_file = os.path.splitext(hexfile)[0] + '.elf'
self.output = None
self.emulator_proc = None
self.lldb_proc = None
self.outstr = None
self.instr = None
def __enter__(self):
emulator_args = [
test_harness.EMULATOR_PATH,
'-m',
'gdb',
'-v',
self.hexfile
]
if test_harness.DEBUG:
self.output = None
else:
self.output = open(os.devnull, 'w')
self.emulator_proc = subprocess.Popen(emulator_args, stdout=self.output,
stderr=subprocess.STDOUT)
lldb_args = [
test_harness.COMPILER_BIN + 'lldb-mi'
]
# XXX race condition: the emulator needs to be ready before
# lldb tries to connect to it.
try:
self.lldb_proc = subprocess.Popen(lldb_args, stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
self.outstr = self.lldb_proc.stdin
self.instr = self.lldb_proc.stdout
except:
test_harness.kill_gently(self.emulator_proc)
raise
return self
def __exit__(self, *unused):
test_harness.kill_gently(self.emulator_proc)
test_harness.kill_gently(self.lldb_proc)
def send_command(self, cmd):
if test_harness.DEBUG:
print('LLDB send: ' + cmd)
self.outstr.write(str.encode(cmd + '\n'))
self.outstr.flush()
return self.wait_response()
def wait_response(self):
response = ''
while True:
response += self.instr.read(1).decode('utf-8')
if response.endswith('^done'):
break
if test_harness.DEBUG:
print('LLDB recv: ' + response)
return response
def wait_stop(self):
current_line = ''
while True:
inchar = self.instr.read(1).decode('utf-8')
current_line += inchar
if inchar == '\n':
if test_harness.DEBUG:
print('LLDB recv: ' + current_line[:-1])
if current_line.startswith('*stopped'):
break
current_line = ''
FRAME_RE = re.compile(
'frame #[0-9]+:( 0x[0-9a-f]+)? [a-zA-Z_\\.0-9]+`(?P<function>[a-zA-Z_0-9][a-zA-Z_0-9]+)')
AT_RE = re.compile(' at (?P<filename>[a-z_A-Z][a-z\\._A-Z]+):(?P<line>[0-9]+)')
def parse_stack_crawl(response):
"""
    Given a text response from the debugger containing a stack crawl, this will
return a list of tuples where each entry represents the function name,
filename, and line number of the call site.
"""
stack_info = []
for line in response.split('\\n'):
frame_match = FRAME_RE.search(line)
if frame_match:
func = frame_match.group('function')
at_match = AT_RE.search(line)
if at_match:
stack_info += [(func, at_match.group('filename'),
int(at_match.group('line')))]
else:
stack_info += [(func, '', 0)]
return stack_info
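
# For illustration (hypothetical lldb-mi output), a backtrace line such as
#   frame #1: 0x000000fc program.elf`func1 + 24 at test_program.c:35
# would be parsed into the tuple ('func1', 'test_program.c', 35), while a
# frame without source information yields e.g. ('do_main', '', 0).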
@test_harness.test(['emulator'])
def lldb(*unused):
"""This mainly validates that LLDB is reading symbols correctly."""
hexfile = test_harness.build_program(
['test_program.c'], opt_level='-O0', cflags=['-g'])
with EmulatorProcess(hexfile) as conn:
conn.send_command('file "' + test_harness.WORK_DIR + '/program.elf"')
conn.send_command('gdb-remote 8000\n')
response = conn.send_command(
'breakpoint set --file test_program.c --line 27')
if 'Breakpoint 1: where = program.elf`func2 + 100 at test_program.c:27' not in response:
raise test_harness.TestException(
'breakpoint: did not find expected value ' + response)
conn.send_command('c')
conn.wait_stop()
expected_stack = [
('func2', 'test_program.c', 27),
('func1', 'test_program.c', 35),
('main', 'test_program.c', 41),
('do_main', '', 0)
]
response = conn.send_command('bt')
crawl = parse_stack_crawl(response)
if crawl != expected_stack:
raise test_harness.TestException(
'stack crawl mismatch ' + str(crawl))
response = conn.send_command('print value')
if '= 67' not in response:
raise test_harness.TestException(
'print value: Did not find expected value ' + response)
response = conn.send_command('print result')
if '= 128' not in response:
raise test_harness.TestException(
'print result: Did not find expected value ' + response)
# Up to previous frame
conn.send_command('frame select --relative=1')
response = conn.send_command('print a')
if '= 12' not in response:
raise test_harness.TestException(
'print a: Did not find expected value ' + response)
response = conn.send_command('print b')
if '= 67' not in response:
raise test_harness.TestException(
'print b: Did not find expected value ' + response)
conn.send_command('step')
conn.wait_stop()
response = conn.send_command('print result')
if '= 64' not in response:
raise test_harness.TestException(
'print b: Did not find expected value ' + response)
test_harness.execute_tests()
| [
"[email protected]"
] | |
1f40a950463b7987030dd8e8eb8b668506c8e112 | 920f0fbb7064f2017ff62da372eaf79ddcc9035b | /lc_ladder/Basic_Algo/data-struture/Kth_Largest_Element.py | 263f9c80c8d59cb0954144b77ec4601158323480 | [] | no_license | JenZhen/LC | b29a1c45d8c905680c7b4ad0017516b3dca80cc4 | 85219de95e41551fce5af816b66643495fe51e01 | refs/heads/master | 2021-06-03T10:03:02.901376 | 2020-08-05T19:44:48 | 2020-08-05T19:44:48 | 104,683,578 | 3 | 1 | null | 2020-08-05T19:44:50 | 2017-09-24T23:30:35 | Python | UTF-8 | Python | false | false | 3,106 | py | #!/usr/bin/python
# http://www.lintcode.com/en/problem/kth-largest-element/
from typing import List  # required by the List[int] annotations in Solution2
# Example
# In array [9,3,2,4,8], the 3rd largest element is 4.
# In array [1,2,3,4,5], the 1st largest element is 5, 2nd largest element is 4, 3rd largest element is 3 and etc.
"""
Algo: QuickSelect
D.S.: Heap, Array
Solution1:
Heap
Find largest number -> max heap
- In python, heapq is min heap. To convert to maxHeap, negate value when push, negate when pop
- In c++, priority queue is max heap.
std::priority_queue<int, std::vector<int>, mycomparison> myPQ
Takes type, container, comparator. use comparator to define min/max/customized heap
Find the Kth element
- option 1: iterate k times
- use fixed size heap
Solution2:
QuickSelect classic template; it also handles arrays that contain duplicate numbers
Time: O(N) - average, O(N ^ 2) - worst case
Space: O(1)
[4, 1, 3, 2] - N = 4; K = 2
Kth largest (K=2) --> 3rd smallest --> idx = 2 (N - K)
Kth smallest (K=2) --> 3rd largest --> idx = 1 (K - 1)
Corner cases:
"""
class Solution1:
# @param k & A a integer and an array
# @return ans a integer
def kthLargestElement(self, k, A):
if A is None or k is None or k == 0:
return None
from heapq import heappush, heappop
heap = []
res = None
def _maxHeapPush(heap, value):
negVal = value * (-1)
heappush(heap, negVal)
def _maxHeapPop(heap):
return heappop(heap) * (-1)
for i in A:
_maxHeapPush(heap, i)
for i in range(k):
val = _maxHeapPop(heap)
if i == k - 1:
res = val
return res
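
# A minimal sketch (not part of the original solutions) of the "fixed size
# heap" option mentioned in the notes above: keep a min-heap of at most k
# elements while scanning, so the heap root ends up being the kth largest.
# Assumes 1 <= k <= len(A).
def kth_largest_with_bounded_heap(A, k):
    import heapq
    heap = []
    for value in A:
        heapq.heappush(heap, value)
        if len(heap) > k:
            heapq.heappop(heap)  # drop the smallest of the k+1 candidates
    return heap[0]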
class Solution2:
def findKthLargest(self, nums: List[int], k: int) -> int:
return self.quick_select(nums, 0, len(nums) - 1, len(nums) - k)
def quick_select(self, nums, l, r, target_idx):
if l == r:
return nums[l]
# find the rightmost element to pivot, could be a random one within l, r
pivot_idx = self.partition(nums, l, r)
if target_idx == pivot_idx:
return nums[target_idx]
elif target_idx < pivot_idx:
            # note: skip the pivot itself when narrowing the range (pivot_idx - 1 here, pivot_idx + 1 below)
return self.quick_select(nums, l, pivot_idx - 1, target_idx)
else:
return self.quick_select(nums, pivot_idx + 1, r, target_idx)
def partition(self, nums, l, r):
        i = l - 1  # i + 1 is where the next number no larger than the pivot should go
pivot_value = nums[r]
for j in range(l, r):
if nums[j] <= pivot_value:
i += 1
nums[i], nums[j] = nums[j], nums[i]
        # finally, don't forget to move the pivot into its final place and return its index
i += 1
nums[i], nums[r] = nums[r], nums[i]
return i
# parition can be
def partition2(self, nums, l, r):
i = l - 1 # i + 1 will be next number less than pivot value
pivot_value = nums[r]
for j in range(l, r + 1):
if nums[j] <= pivot_value:
i += 1
nums[i], nums[j] = nums[j], nums[i]
return i
# Test Cases
if __name__ == "__main__":
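    # Minimal smoke tests (the original file left this block empty); inputs
    # and expected values come from the problem examples cited above.
    assert Solution1().kthLargestElement(3, [9, 3, 2, 4, 8]) == 4
    assert Solution2().findKthLargest([1, 2, 3, 4, 5], 2) == 4
    print("all tests passed")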
| [
"[email protected]"
] | |
b4ca34cc14fe2f16d01b462a5870c0073ce4817b | 3b2940c38412e5216527e35093396470060cca2f | /top/api/rest/TmcGroupAddRequest.py | 0cfb70b6b6c82a626717aa0186205a08ca854def | [] | no_license | akingthink/goods | 842eb09daddc2611868b01ebd6e330e5dd7d50be | ffdb5868a8df5c2935fc6142edcdf4c661c84dca | refs/heads/master | 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | '''
Created by auto_sdk on 2015-01-20 12:44:32
'''
from top.api.base import RestApi
class TmcGroupAddRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.group_name = None
self.nicks = None
def getapiname(self):
return 'taobao.tmc.group.add'
| [
"[email protected]"
] | |
b4e02b0acdf60bf8bfb4af0cb99e11749ca72fa5 | 1384435f0e0cf706db82d0672d5fe9e3bc0cf5a8 | /agilo/scrum/burndown/model.py | 00071987a48bf3f3696fb7530402572861bc68c7 | [] | no_license | djangsters/agilo | 1e85d776ab4ec2fa67a6366e72206bbad2930226 | 1059b76554363004887b2a60953957f413b80bb0 | refs/heads/master | 2016-09-05T12:16:51.476308 | 2013-12-18T21:19:09 | 2013-12-18T21:19:09 | 15,294,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,677 | py | # -*- encoding: utf-8 -*-
# Copyright 2010 Agile42 GmbH, Berlin (Germany)
# Copyright 2011 Agilo Software GmbH All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
from agilo.core import Field, PersistentObject, PersistentObjectModelManager
from agilo.utils import AgiloConfig, Key
from agilo.utils.compat import json
from agilo.utils.days_time import now
from agilo.utils.geometry import Line, Point
from agilo.api.controller import ValuePerTime
__all__ = ['BurndownDataAggregator', 'BurndownDataChange',
'BurndownDataConfirmCommitment', 'BurndownDataConstants',
'BurndownTrendLineGenerator']
class BurndownDataConstants(object):
REMAINING_TIME = Key.REMAINING_TIME
REMAINING_POINTS = Key.REMAINING_POINTS
COMPONENT = Key.COMPONENT
SKIP_AGGREGATION = 'SKIP_AGGREGATION'
DELTAS_BY_COMPONENT = 'DELTAS_BY_COMPONENT'
# REFACT: do we need to add field for backlog-name so we can identify sprints / milestones with the same name? (perhaps not if burndown can only happen in sprints)
class BurndownDataChange(PersistentObject):
class Meta(object):
id = Field(type='integer', primary_key=True, auto_increment=True)
type = Field(db_name='burndown_type')
scope = Field()
when = Field(type='datetime', db_name='timestamp')
# REFACT: Consider splitting metadata tuple (pair) into two fields:
# numeric value and actual metadata dict (component, etc)
value = Field()
def __repr__(self):
return '%s(id=%s, type=%s, scope=%s, when=%s, value=%s)' % (self.__class__.__name__, repr(self.id), repr(self.type), repr(self.scope), repr(self.when), repr(self.value))
@classmethod
def remaining_entry(cls, env, delta, scope, type, when=None, marker_key=None, marker_value=True):
if when is None:
when = now()
instance = cls(env).update_values(
scope=scope,
type=type,
when=when,
delta=delta,
)
if marker_key is not None:
instance.update_marker(marker_key, marker_value)
return instance
@classmethod
def remaining_points_entry(cls, env, delta, scope, when=None, marker_key=None, marker_value=True):
delta = int(delta)
return BurndownDataChange.remaining_entry(env, delta, scope, BurndownDataConstants.REMAINING_POINTS, when, marker_key, marker_value)
@classmethod
def remaining_time_entry(cls, env, delta, scope, when=None, marker_key=None, marker_value=True):
delta = float(delta)
return BurndownDataChange.remaining_entry(env, delta, scope, BurndownDataConstants.REMAINING_TIME, when, marker_key, marker_value)
@classmethod
def create_aggregation_skip_marker(cls, env, scope, when=None):
return cls.remaining_time_entry(env, 0, scope, when, BurndownDataConstants.SKIP_AGGREGATION)
def update_values(self, scope=None, type=None, when=None, delta=None, markers=None):
if scope: self.scope = scope
if type: self.type = type
if when: self.when = when
if delta is not None: self.set_delta(delta)
if markers: self.set_markers(markers)
return self
def parse_microformat(self):
if self.value is None:
return 0, dict()
microformat = json.loads(self.value)
if isinstance(microformat, int) or isinstance(microformat, float):
return microformat, dict()
elif isinstance(microformat, list):
return microformat[0], microformat[1]
else:
raise ValueError('microformat <%s> not supported' % repr(self.value))
def serialize_microformat(self, delta, markers=None):
if markers is not None and len(markers.keys()) != 0:
self.value = json.dumps([delta, markers])
else:
self.value = json.dumps(delta)
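    # Illustrative examples of the stored microformat (the marker key/value
    # here are made up): a bare delta is stored as '2.5', while a delta with
    # markers is stored as '[2.5, {"COMPONENT": "backend"}]'.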
def delta(self):
return self.parse_microformat()[0]
def markers(self):
return self.parse_microformat()[1]
def set_delta(self, a_delta):
self.serialize_microformat(a_delta, self.markers())
def set_markers(self, markers):
self.serialize_microformat(self.delta(), markers)
def update_marker(self, key, value):
markers = self.markers()
markers[key] = value
self.set_markers(markers)
def has_marker(self, marker):
return marker in self.markers()
def marker_value(self, marker):
return self.markers().get(marker)
def set_component_marker(self, component_name):
message = 'need to enable both should_reload_burndown_on_filter_change_when_filtering_by_component and backlog_filter_attribute to save burndown data by component'
assert AgiloConfig(self.env).is_filtered_burndown_enabled(), message
self.update_marker(BurndownDataConstants.COMPONENT, component_name)
def value_fields(self):
"Return fields that are not the primary key"
return filter(lambda field: field != 'id', self._fields)
def save(self):
for attribute_name in self.value_fields():
value = getattr(self, attribute_name)
if value is None:
raise ValueError('Missing value for attribute <%s>' % attribute_name)
return self.super()
class BurndownDataChangeModelManager(PersistentObjectModelManager):
model = BurndownDataChange
class BurndownEntry(ValuePerTime):
remaining_time = property(ValuePerTime._value, ValuePerTime._set_value)
# REFACT: Remove method, use class above
def burndown_entry(when, remaining_time):
return BurndownEntry(remaining_time, when)
class BurndownDataAggregator(object):
def __init__(self, env, remaining_field=BurndownDataConstants.REMAINING_TIME):
self.env = env
self.changes = None
self.duration = None
self.extend_until = None
self.aggregated_changes = None
self.filter_by_component = None
self.remaining_field = remaining_field
def burndown_data_for_sprint(self, sprint, extend_until=None, filter_by_component=None):
changes = self.changes_for_sprint(sprint)
return self.aggregate_changes_with_interval(
changes, timedelta(hours=1),
aggregate_until=sprint.start,
discard_after=sprint.end,
extend_until=extend_until,
filter_by_component=filter_by_component)
def changes_for_sprint(self, sprint):
sprint_name = sprint.name
conditions = dict(scope=sprint_name, type=self.remaining_field)
return BurndownDataChangeModelManager(self.env).select(conditions, order_by=['when', 'id'])
def aggregate_changes_with_interval(self, changes, duration, aggregate_until=None, discard_after=None, extend_until=None, filter_by_component=None):
self.changes = changes
self.duration = duration
self.aggregate_until = aggregate_until
self.discard_after = discard_after
self.extend_until = extend_until
self.aggregated_changes = []
self.filter_by_component = filter_by_component
self._compute_aggregation_for_all_changes()
return self.aggregated_changes
def _compute_aggregation_for_all_changes(self):
self._discard_changes_that_do_not_match_the_filtered_component_if_neccessary()
self._discard_all_changes_after_sprint_end()
if self._has_no_entry():
self.aggregated_changes = []
return
self._append_synthetic_burndown_data_if_neccessary()
self._aggregate_changes_before_sprint_start()
self.aggregated_changes = [self._first_aggregated_change()]
if self._has_one_entry():
return
self._compute_aggregation()
def _discard_changes_that_do_not_match_the_filtered_component_if_neccessary(self):
if not self.filter_by_component:
return
if not AgiloConfig(self.env).is_filtered_burndown_enabled():
raise ValueError("Trying to filter by component %s but burndown filtering is not enabled"
% self.filter_by_component)
def has_component(change):
return change.marker_value(BurndownDataConstants.COMPONENT) == self.filter_by_component\
or change.has_marker(BurndownDataConstants.DELTAS_BY_COMPONENT)
self.changes = filter(has_component, self.changes)
def _has_no_entry(self):
return len(self.changes) == 0
def _append_synthetic_burndown_data_if_neccessary(self):
# REFACT: burndown could be nicer if appending a synthetic change would aggregate
# all changes in the hour before that
# Without swallowing the first entry of course.
if self.extend_until is None or self.changes[-1].when >= self.extend_until:
return
data = BurndownDataChange(self.env)
data.when = self.extend_until
data.set_delta(0)
self.changes.append(data)
def _discard_all_changes_after_sprint_end(self):
if self.discard_after is None:
return
for index, change in enumerate(list(self.changes)):
if change.when >= self.discard_after:
# Kill everything after this point
self.changes = self.changes[:index]
break
def _aggregate_changes_before_sprint_start(self):
if self.aggregate_until is None:
return
changes_before_sprint = filter(lambda change: change.when < self.aggregate_until, self.changes)
if len(changes_before_sprint) == 0:
# no changes before aggregate_until
return
accumulated_delta = reduce(lambda sum, change: sum + change.delta(), changes_before_sprint, 0)
synthetic_change = BurndownDataChange(self.env).update_values(when=self.aggregate_until, delta=accumulated_delta)
self.changes = [synthetic_change] + self.changes[len(changes_before_sprint):]
def _first_aggregated_change(self):
delta = self.changes[0].delta()
should_filter = AgiloConfig(self.env).is_filtered_burndown_enabled()
is_filtering_by_component = self.filter_by_component
is_not_component_itself = not self.changes[0].has_marker(BurndownDataConstants.COMPONENT)
if should_filter and is_filtering_by_component and is_not_component_itself:
by_component = self.changes[0].marker_value(BurndownDataConstants.DELTAS_BY_COMPONENT) or {}
delta = by_component.get(self.filter_by_component, 0)
return burndown_entry(self.changes[0].when, delta)
def _has_one_entry(self):
return len(self.changes) == 1
def _compute_aggregation(self):
current_remaining_time = self._first_aggregated_change().remaining_time
for change in self.changes[1:-1]:
# Whenever a change is found that is at least 1 hour from the last
# aggregation, start a new aggregation and add the current change
# as the last point of the closed aggregation window
# This may disconnect changes that are (almost) at the same
# time into two different aggregated changes
current_remaining_time += change.delta()
if self._should_start_next_aggregation(change):
self.aggregated_changes.append(burndown_entry(change.when, current_remaining_time))
final_remaining_time = current_remaining_time + self.changes[-1].delta()
self.aggregated_changes.append(burndown_entry(self.changes[-1].when, final_remaining_time))
def _should_start_next_aggregation(self, change):
if change.has_marker(BurndownDataConstants.SKIP_AGGREGATION):
return True
return change.when > self.aggregated_changes[-1].when + self.duration
class BurndownTrendLineGenerator(object):
"""
    Takes an aggregated burndown and the date up to which the extrapolation
    should go, and extrapolates the trend of the last three days' worth of work.
"""
def __init__(self, reference_interval=None):
self.reference_interval = reference_interval or timedelta(days=3)
def calculate(self, actual_burndown, a_datetime):
if len(actual_burndown) <= 1:
return []
reference_burndown = self.find_reference_burndown(actual_burndown)
current_burndown = actual_burndown[-1]
reference_point = Point(reference_burndown.when, reference_burndown.remaining_time)
current_point = Point(current_burndown.when, current_burndown.remaining_time)
trend_line = Line.from_two_points(reference_point, current_point)
final_value = trend_line.y_from_x(a_datetime)
return [current_burndown, burndown_entry(a_datetime, final_value)]
def find_reference_burndown(self, actual_burndown):
current_burndown = actual_burndown[-1]
def is_old_enough(a_burndown):
return a_burndown.when < current_burndown.when - self.reference_interval
last_burndown = actual_burndown[-1]
for burndown in reversed(actual_burndown[:-1]):
if is_old_enough(burndown):
return burndown
last_burndown = burndown
return last_burndown
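
# Minimal usage sketch for BurndownTrendLineGenerator (the data points below
# are made up for illustration; burndown_entry is the helper defined above):
#
#   from datetime import datetime, timedelta
#   start = datetime(2011, 1, 3)
#   actual = [burndown_entry(start + timedelta(days=d), 40 - 5 * d)
#             for d in range(5)]
#   trend = BurndownTrendLineGenerator().calculate(actual, start + timedelta(days=8))
#   # -> [the last actual entry, an extrapolated entry at day 8]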
class BurndownDataConfirmCommitment(object):
def __init__(self, env):
self.env = env
self.remaining_field = BurndownDataConstants.REMAINING_TIME
# TODO: if when is given, only take the burndown changes till that time
def confirm_commitment_for_sprint(self, a_sprint, when=None):
if AgiloConfig(self.env).is_filtered_burndown_enabled():
self.aggregate_all_changes_with_deltas_by_components(a_sprint, when=when)
else:
self.aggregate_all_changes(a_sprint, when=when)
def aggregate_all_changes(self, a_sprint, when=None):
summed = self.sum_remaining_time_for_sprint(a_sprint)
self.remove_old_changes_for_sprint(a_sprint)
return self.add_initial_change_for_sprint_with_remaining_time(a_sprint, summed, when=when)
def aggregate_all_changes_with_deltas_by_components(self, a_sprint, when=None):
by_components = self.sum_remaining_time_for_sprint_by_component(a_sprint)
change = self.aggregate_all_changes(a_sprint, when=when)
change.update_marker(BurndownDataConstants.DELTAS_BY_COMPONENT, by_components)
change.save()
def sum_remaining_time_for_sprint(self, a_sprint):
sum = 0
for change in self._changes_for_sprint(a_sprint):
sum += change.delta()
return sum
def sum_remaining_time_for_sprint_by_component(self, a_sprint):
by_component = {}
for change in self._changes_for_sprint(a_sprint):
if change.has_marker(BurndownDataConstants.COMPONENT):
component = change.marker_value(BurndownDataConstants.COMPONENT)
by_component[component] = by_component.get(component, 0) + change.delta()
elif change.has_marker(BurndownDataConstants.DELTAS_BY_COMPONENT):
deltas_by_component = change.marker_value(BurndownDataConstants.DELTAS_BY_COMPONENT)
for component, value in deltas_by_component.items():
by_component.setdefault(component, 0)
by_component[component] += value
return by_component
def remove_old_changes_for_sprint(self, a_sprint):
changes = self._changes_for_sprint(a_sprint)
for change in changes:
change.delete()
def add_initial_change_for_sprint_with_remaining_time(self, a_sprint, a_delta, when=None):
change = BurndownDataChange(self.env)
change.set_delta(a_delta)
change.type = self.remaining_field
change.scope = a_sprint.name
change.when = when or now()
change.save()
return change
def _changes_for_sprint(self, a_sprint):
aggregator = BurndownDataAggregator(self.env)
return aggregator.changes_for_sprint(a_sprint)
# TODO: switch to BurndownDataConstants.COMPONENT everywhere?
# consider changing the marker to COMPONENT_MARKER so there is no confusion | [
"[email protected]"
] | |
023a448e65f13fa1aa110f94679a9a53c6e6c840 | 3506d8c9a8391be52d24cff54f27537a92a7228c | /HackerRank/Strings/Palindrome_Index.py | e01f253f9a8217c583a0c47373b035d3b04a1d7b | [] | no_license | saumya-singh/CodeLab | 04ef2c61c516c417c03c6a510e8b5e6e498fbe5d | 9371f0d6bd45e5592dae25b50f0d04ba45ae67cf | refs/heads/master | 2021-09-12T05:01:17.491312 | 2018-04-14T19:48:40 | 2018-04-14T19:48:40 | 81,596,628 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | #!/bin/python3
#https://www.hackerrank.com/challenges/palindrome-index/problem
import sys
def palindromeIndex(s):
length = len(s)
counter = length//2
i = 0
j = length -1
flag = 0
while i < counter:
if s[i] == s[j]:
i += 1
j -= 1
        else:
            # Mismatch: exactly one of s[i] or s[j] must be removed. Peek at
            # the next characters on each side to decide which removal keeps
            # the palindrome property.
            if s[i] == s[j - 1] and s[i + 1] == s[j - 2]:
                index = j
            elif s[j] == s[i + 1] and s[j - 1] == s[i + 2]:
                index = i
            flag = 1
            break
    if flag == 0:
        index = -1  # the string is already a palindrome
return index
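
# Illustrative trace: for s = "bcbc" the first mismatch is at i=0, j=3
# ('b' vs 'c'); the first lookahead check matches, so the function returns 3
# (removing s[3] leaves the palindrome "bcb"; removing s[0] would also work).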
q = int(input().strip())
answer = []
for i in range(q):
s = input().strip()
result = palindromeIndex(s)
answer.append(result)
for i in answer:
print(i) | [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.