Dataset columns (each record below lists these fields in this order, separated by `|`):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3 to 616)
- content_id: string (length 40)
- detected_licenses: sequence (length 0 to 112)
- license_type: string (2 classes)
- repo_name: string (length 5 to 115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (777 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
- github_id: int64 (4.92k to 681M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
- gha_language: string (149 classes)
- src_encoding: string (26 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (3 to 10.2M)
- extension: string (188 classes)
- content: string (length 3 to 10.2M)
- authors: sequence (length 1)
- author_id: string (length 1 to 132)
9f79014a99de26096629779af1c9279f8319b7b4 | 86813bf514f3e0257f92207f40a68443f08ee44b | /0072 编辑距离/0072 编辑距离.py | 09a71367fdde0319dcd6e517d8d2183aa808a77f | [] | no_license | Aurora-yuan/Leetcode_Python3 | 4ce56679b48862c87addc8cd870cdd525c9d926c | 720bb530850febc2aa67a56a7a0b3a85ab37f415 | refs/heads/master | 2021-07-12T13:23:19.399155 | 2020-10-21T03:14:36 | 2020-10-21T03:14:36 | 212,998,500 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | # label: dynamic programming difficulty: difficult
class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
        # dp[i][j] is the edit distance between the prefixes word1[:i] and word2[:j]
m, n = len(word1), len(word2)
dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
for i in range(m + 1):
dp[i][0] = i
for i in range(n + 1):
dp[0][i] = i
for i in range(1, m + 1):
for j in range(1, n + 1):
if word1[i - 1] == word2[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
else:
                    dp[i][j] = 1 + min(dp[i - 1][j], dp[i - 1][j - 1], dp[i][j - 1])  # insert, replace, delete respectively
return dp[m][n]
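# Quick sanity check of the DP above (hypothetical usage, not part of the original file):
if __name__ == "__main__":
    # "horse" -> "rorse" -> "rose" -> "ros" takes three edits
    print(Solution().minDistance("horse", "ros"))  # expected output: 3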
| [
"[email protected]"
] | |
7ae7e78b80d63d83fad51f24b644042cd5b26dc0 | 128c32834fa8156a25e5693131991525ea33020b | /2016.1/Exércicio LAB DE PROGRAMAÇÃO/Exercício 2016.1/Exemplos_Realizados_em_Sala/TabelaHashOficial.py | 32d11b51e1e5f60f2130fc55a4f5649f77574158 | [] | no_license | wellington16/BSI-UFRPE | 5780e94b4c10b3ee8885d01fc14f4050e6907611 | 268d0e5beabf211df1aa69cbe52ac1e0cb85fe64 | refs/heads/master | 2020-06-30T16:59:59.316415 | 2020-03-10T13:22:31 | 2020-03-10T13:22:31 | 66,642,156 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,527 | py |
class No:
def __init__(self, valor):
self.valor = valor
self.prox = None
self.ant = None
def getValor(self):
return self.valor
    def setValor(self, novodado):
        self.valor = novodado
def getNovValor(self):
return self.prox
def setNovValor(self, novoNo):
self.prox = novoNo
def getAntValor(self):
return self.ant
def setAntValor(self, novoNo):
self.ant = novoNo
class ListEncad:
def __init__(self):
self._inicio = None
self._fim = None
    # Check whether the list is empty
def listVazia(self):
return (self._inicio is None) or (self._fim is None)
    # Insert at the beginning
def InserirNoInicio(self, valor):
NovoNo = No(valor)
if self.listVazia():
self._inicio = self._fim = NovoNo
else:
self._inicio.setAntValor(NovoNo)
NovoNo.setNovValor(self._inicio)
NovoNo.setAntValor(None)
self._inicio = NovoNo
    # Insert at the end
def InserirNoFim(self, valor):
NovoNo = No(valor)
if self.listVazia():
self._inicio = self._fim = NovoNo
else:
self._fim.setNovValor(NovoNo)
NovoNo.setAntValor(self._fim)
NovoNo.setNovValor(None)
self._fim = NovoNo
    # Search for the value
def pesquisar (self, valor):
if self.listVazia():
return None
NoAtual = self._inicio
while NoAtual.getValor() != valor:
NoAtual = NoAtual.getNovValor()
if NoAtual == None:
return "Esse valor não foi encontrado!"
        return NoAtual.getValor()
    # Print function
def __str__(self):
NoAtual = self._inicio
if self.listVazia():
return(" Este valor não existe.")
texto = ''
while NoAtual != None:
            texto += str(NoAtual.getValor()) + " "
print(NoAtual.getValor())
NoAtual = NoAtual.getNovValor()
return texto
    # Remove function
def remover(self, valor):
NoAtual = self._inicio
if self.listVazia():
return None
while NoAtual.getValor() != valor:
NoAtual = NoAtual.getNovValor()
if NoAtual == None:
return "O valor não está na lista"
if self._inicio == self._fim:
self._inicio = self._fim = None
return None
elif NoAtual == self._inicio:
aux = self._inicio.getNovValor()
self._inicio.setNovValor(None)
aux.setNovValor(None)
self._inicio = aux
elif NoAtual == self._fim:
aux = self._fim.getAntValor()
self._fim.setAntValor(None)
aux.setNovValor(None)
self._fim = aux
else:
aux = NoAtual.getAntValor()
aux2 = NoAtual.getNovValor()
aux2.setAntValor(aux)
aux.setNovValor(aux2)
    # Function to empty the list
def esvaziarList(self):
self._inicio = self._fim = None
class Pilha(ListEncad):
    # Remove from the end of the stack (pop)
def desempilhar(self):
if self.listVazia():
return
else:
UltmValNo = self._fim.getValor()
if self._inicio is self._fim:
                self._inicio = self._fim = None
else:
aux1 = self._fim.getAntValor()
self._fim.setAntValor(None)
aux1.setNovValor(None)
self._fim = aux1
return UltmValNo
class Fila(ListEncad):
    # Remove from the front of the queue (dequeue)
def removerInicio(self):
if self.listVazia():
return" A fila está vazia!"
else:
PrimValNo = self._inicio.getValor()
if self._inicio is self._fim:
self._inicio = self._fim = None
else:
aux2 = self._inicio.getNovValor()
self._inicio.setNovValor(None)
aux2.setAntValor(None)
self._inicio = aux2
return PrimValNo
class Item():
def __init__(self, chave, valor):
self._chave = chave
self._valor = valor
def __str__(self):
chav = self.getChave()
valor1 = self.getValor()
chav = str(chav)
valor1 = str(valor1)
elemt = "Chave = "+ chav +". O valor = "+ valor1+ "\n"
return elemt
def getChave(self):
return self._chave
def setChave(self, chave):
self._chave = chave
def getValor(self):
return self._valor
def setValor(self, valor):
self._valor = valor
class Hash:
def __init__(self, tamanho):
self.tamanho = tamanho
self._table = [None] * tamanho
def FuncHash(self, chave):
return chave % self.tamanho
def pesquisarItem(self, chave):
x = self.FuncHash(chave)
l = self._table[x]
if l == None:
return None
h = l._inicio
while h != None:
            if h.getValor().getChave() == chave:
                return h.getValor().getValor()
h = h.getNovValor()
return None
def inserir(self, chave, valor):
valorHash = self.FuncHash(chave)
#print(valorHash)
item = Item(chave,valor)
if (self._table[valorHash] == None):
listx = ListEncad()
listx.InserirNoInicio(item)
self._table[valorHash]= listx
else:
self._table[valorHash].InserirNoInicio(item)
def delete(self, chave):
v = self.listar(chave)
if v != "Nao Existe":
g = v._inicio
while (g != None):
if g.getValor().getChave() == chave:
if v._inicio != v._fim:
if g == v._inicio:
p = v._inicio.getNovValor()
p.setAntValor(None)
v._inicio = p
elif g == v._fim:
a = v._fim.getAntValor()
a.setNovValor(None)
v._fim = a
else:
a = g.getAntValor()
p = g.getNovValor()
p.setAntValor(a)
a.setNovValor(p)
else:
v._inicio = None
v._fim = None
g = g.getNovValor()
else:
return ("Não existe esse elemento na tabela")
def listar(self, chave):
valorHash = self.FuncHash(chave)
if self._table[valorHash] != None:
return self._table[valorHash]
else:
return 0
def __str__(self):
textox = ''
for x in self._table:
if x == None:
pass
else:
textox += str(x.__str__() + "\n")
return textox
novatabelinha = Hash(5)
novatabelinha.inserir(1, 45)
novatabelinha.inserir(3, 67)
novatabelinha.inserir(5, 5)
novatabelinha.inserir(2, 44)
#print(novatabelinha)
novatabelinha.listar(5)
#novatabelinha.delete(1)
novatabelinha.pesquisarItem(2)
print(novatabelinha)
| [
"[email protected]"
] | |
b5e65556cb0df5cb435e365882c7a0da7fe6731e | ac1bbabc7c1b3149711c416dd8b5f5969a0dbd04 | /Python Advanced/comprehensions/heroes_inventory.py | 1ae5e593c8b94b15f8fb01e9809e0e0be37b5b93 | [] | no_license | AssiaHristova/SoftUni-Software-Engineering | 9e904221e50cad5b6c7953c81bc8b3b23c1e8d24 | d4910098ed5aa19770d30a7d9cdf49f9aeaea165 | refs/heads/main | 2023-07-04T04:47:00.524677 | 2021-08-08T23:31:51 | 2021-08-08T23:31:51 | 324,847,727 | 1 | 0 | null | 2021-08-08T23:31:52 | 2020-12-27T20:58:01 | Python | UTF-8 | Python | false | false | 634 | py | heroes = input().split(', ')
command = input()
heroes_inventory = {hero: [] for hero in heroes}
while not command == "End":
data = command.split('-')
name, item, cost = data
if name in heroes_inventory:
if heroes_inventory[name]:
if item not in heroes_inventory[name][0]:
heroes_inventory[name][0].append(item)
heroes_inventory[name][1].append(int(cost))
else:
heroes_inventory[name] = [[item], [int(cost)]]
command = input()
for name, [item, cost] in heroes_inventory.items():
print(f"{name} -> Items: {len(item)}, Cost: {sum(cost)}")
| [
"[email protected]"
] | |
27f120ae877d4f79cb6762956b3002c61edeb0ca | bde402f8375dc12f1a337d534e4ed217023fd1d2 | /setup.py | 18226d5ce7c5d447cabe839a450b0e052abc3db6 | [] | no_license | CONNJUR/nmrglue | 975d386a5128db6904041a57f833b34980ec9170 | 9ee6d6278d1d2be87648bb4903f3948fb6447da1 | refs/heads/master | 2020-04-20T16:52:19.143719 | 2019-02-03T17:51:44 | 2019-02-03T17:51:44 | 168,971,497 | 0 | 0 | null | 2019-02-03T17:22:07 | 2019-02-03T17:22:06 | null | UTF-8 | Python | false | false | 1,654 | py | #!/usr/bin/env python
# setup script for nmrglue
from distutils.core import setup
from codecs import open
from os import path, walk
here = path.abspath(path.dirname(__file__))
# get long description from README
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='nmrglue',
version='0.7-dev', # change this in nmrglue/__init__.py also
description='A module for working with NMR data in Python',
long_description=long_description,
url='http://www.nmrglue.com',
author='Jonathan J. Helmus',
author_email='[email protected]',
license='New BSD License',
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux'],
requires=['numpy', 'scipy'],
packages=[
'nmrglue',
'nmrglue.analysis',
'nmrglue.analysis.tests',
'nmrglue.fileio',
'nmrglue.fileio.tests',
'nmrglue.process',
'nmrglue.process.nmrtxt',
'nmrglue.util'],
package_data={'nmrglue': [
'fileio/tests/data/*.f*',
'fileio/tests/data/*.dir/*',
'fileio/tests/data/test.tab']},
)
| [
"[email protected]"
] | |
63ab12f6fb3b539ccfbf9e77397ab0cef69e7a12 | cb2c9c33b993e14fec3db34cdbaf04dabdf60ad1 | /2018/17/solve | 4d8fb57693bb81b44e1058eb285023126ead0e59 | [] | no_license | rr-/aoc | 51e95711d3eaf5de4b80bcd57c90750c1c09252d | babc68340eb46dac42981e700435bd740ff3c625 | refs/heads/master | 2020-04-10T00:26:08.388243 | 2018-12-25T15:08:06 | 2018-12-25T15:17:17 | 160,685,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,368 | #!/usr/bin/env python3
import re
import typing as T
from collections import defaultdict
from pathlib import Path
from PIL import Image
WELL_X = 500
WELL_Y = 0
AIR = "."
CLAY = "#"
WELL = "+"
STILL_WATER = "~"
RUNNING_WATER = "|"
def parse_chunk(text: str) -> T.List[int]:
num = list(map(int, re.findall(r"\d+", text)))
if len(num) == 2:
return list(range(num[0], num[1] + 1))
elif len(num) == 1:
return [num[0]]
raise AssertionError
class World:
def __init__(self, text: str) -> None:
self.cells: T.Dict[T.Tuple[int, int], str] = defaultdict(lambda: AIR)
for line in text.split("\n"):
if not line:
continue
chunk_x, chunk_y = sorted(line.split())
xs = parse_chunk(chunk_x)
ys = parse_chunk(chunk_y)
for x in xs:
for y in ys:
self.cells[x, y] = CLAY
self.x1 = min(key[0] for key in self.cells.keys()) - 1
self.x2 = max(key[0] for key in self.cells.keys()) + 1
self.y1 = min(key[1] for key in self.cells.keys())
self.y2 = max(key[1] for key in self.cells.keys())
self.w = self.x2 + 1 - self.x1
self.h = self.y2 + 1 - self.y1
self.cells[WELL_X, WELL_Y] = WELL
self.heads: T.List[T.Tuple[int, int]] = [(WELL_X, WELL_Y)]
def turn(self) -> bool:
while self.heads:
x, y = self.heads.pop(0)
if self.cells[x, y] == WELL:
self.cells[x, y + 1] = RUNNING_WATER
self.heads.append((x, y + 1))
return True
if self.cells[x, y] != RUNNING_WATER:
continue
if self.cells[x, y + 1] == AIR:
self.cells[x, y + 1] = RUNNING_WATER
if y + 1 < self.y2:
self.heads.append((x, y + 1))
return True
if self.cells[x, y + 1] in {STILL_WATER, CLAY}:
ret = False
if self.cells[x - 1, y] == AIR:
self.cells[x - 1, y] = RUNNING_WATER
self.heads.append((x - 1, y))
ret = True
if self.cells[x + 1, y] == AIR:
self.cells[x + 1, y] = RUNNING_WATER
self.heads.append((x + 1, y))
ret = True
if ret:
return True
x1 = x2 = x
while self.cells[x1 - 1, y] == RUNNING_WATER:
x1 -= 1
while self.cells[x2 + 1, y] == RUNNING_WATER:
x2 += 1
if self.cells[x1 - 1, y] == CLAY and self.cells[x2 + 1, y] == CLAY:
for x in range(x1, x2 + 1):
self.cells[x, y] = STILL_WATER
if self.cells[x, y - 1] == RUNNING_WATER:
self.heads.append((x, y - 1))
return True
return False
def save_image(self, path: Path) -> None:
img = Image.new("RGB", (self.w, self.h), "black")
pixels = img.load()
colors: T.Dict[str, T.Tuple[int, int, int]] = {
AIR: (0, 0, 0),
RUNNING_WATER: (0, 0, 255),
STILL_WATER: (0, 0, 128),
WELL: (255, 255, 0),
            CLAY: (255, 200, 0),
}
for x, y in self.cells.keys():
if self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2:
pixels[x - self.x1, y - self.y1] = colors[self.cells[x, y]]
img.save(str(path), format="png")
def part1(world: World) -> int:
total = 0
for pos, cell in world.cells.items():
x, y = pos
if y < world.y1 or y > world.y2:
continue
if cell in {STILL_WATER, RUNNING_WATER}:
total += 1
return total
def part2(world: World) -> int:
total = 0
for pos, cell in world.cells.items():
x, y = pos
if y < world.y1 or y > world.y2:
continue
if cell == STILL_WATER:
total += 1
return total
def main() -> None:
text = Path(__file__).with_name("input.txt").read_text()
world = World(text)
while True:
if not world.turn():
break
world.save_image(Path(__file__).with_name("image.png"))
print(part1(world))
print(part2(world))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | ||
540ec97d8fd8b38e391df681d5f04875976ab585 | 243335dfe75c72f4e94ff953f5b0851d2e116cb1 | /model/simple_graphs.py | dd2a0ecdd9d8c05fc516c00c757c9509420d2282 | [] | no_license | bwhub/generalizable-device-placement | f485aea87b8a297cc3212014f3674fd9bad8df49 | d9a81a9f6cb05bfc94773722a4e7ead793ca7fd1 | refs/heads/master | 2022-02-24T13:24:43.374572 | 2019-10-25T01:02:52 | 2019-10-25T01:02:52 | 298,305,058 | 1 | 0 | null | 2020-09-24T14:32:56 | 2020-09-24T14:32:55 | null | UTF-8 | Python | false | false | 1,606 | py | import networkx as nx
# d is the number of chains
def makeChainGraph(N, d=2):
G = nx.DiGraph()
def add_edge(i, j):
G.add_edge(str(i), str(j))
'''
for N = 4, d = 2
1 2 3 4
0 9
5 6 7 8
Lowest Runtime: (N+2) + l_fact* 2
'''
n = 1
for i in range(d):
add_edge(0, n)
for j in range(N-1):
add_edge(n, n+1)
n += 1
add_edge(n, N*d + 1)
n += 1
assert n == N*d + 1
cost = {}
out_size = {}
for i in G.nodes():
cost[i] = 1
out_size[i] = 1
nx.set_node_attributes(G, cost, 'cost')
nx.set_node_attributes(G, out_size, 'out_size')
G.d = d
return G
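# Small sanity check (hypothetical, not part of the original file): makeChainGraph(4, 2)
# builds N*d + 2 = 10 nodes (source '0', sink '9') and (N + 1) * d = 10 edges.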
def makeEdgeGraph(N):
G = nx.DiGraph()
for i in range(N):
G.add_edge(2*i, 2*i + 1)
cost = {}
out_size = {}
for i in G.nodes():
cost[i] = 1
out_size[i] = 1
nx.set_node_attributes(G, cost, 'cost')
nx.set_node_attributes(G, out_size, 'out_size')
return G
def makeCrownGraph(N, d=2):
G = nx.DiGraph()
def add_edge(i, j):
G.add_edge(str(i), str(j))
'''
for N = 4, d = 2
8
/ /\ \
/ / \ \
/ / \ \
4 -> 5 -> 6 -> 7
^ ^ ^ ^
| | | |
0 -> 1 -> 2 -> 3
'''
for i in range(d):
for j in range(N):
n = N*i + j
if j != (N - 1):
add_edge(n, n + 1)
if i > 0:
add_edge(N* (i-1) + j, n)
if i == d - 1:
add_edge(n, N* d)
cost = {}
out_size = {}
for i in G.nodes():
cost[i] = 1
out_size[i] = .5
nx.set_node_attributes(G, cost, 'cost')
nx.set_node_attributes(G, out_size, 'out_size')
G.d = d
return G
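# Small sanity check (hypothetical, not part of the original file): makeCrownGraph(4, 2)
# matches the diagram above: N*d + 1 = 9 nodes and 14 edges, all paths ending at apex '8'.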
| [
"[email protected]"
] | |
44db05656099ea323b7329dabe2deea43a7f61fe | 29c71deb76575eb7142f5e798745ccda8dd5d366 | /salesapp/cart.py | 91c2657add7953b609ab17ee0029f8001f8cac73 | [] | no_license | bill0812/salesSystem | c0b992949183ce8df8cd6c3a1470b17a5f6dc33b | 4ff17f52bac911959b7b7fff0c5e046d5471ed66 | refs/heads/master | 2020-03-29T10:04:57.736379 | 2020-03-01T12:04:10 | 2020-03-01T12:04:10 | 149,788,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | from firebase import firebase
import hashlib,re
url = "https://sales-system-project.firebaseio.com/"
fb = firebase.FirebaseApplication(url, None)
def fetch_cart(account):
customer_data = fb.get("/會員資料/"+account+"/購物車/客製化",None)
product_data = fb.get("/會員資料/"+account+"/購物車/產品資訊",None)
return customer_data,product_data
def upload_normal(name,count,account):
data = dict()
product_data = fb.get("/會員資料/"+account+"/購物車/產品資訊/"+name,None)
if product_data == None:
product_detail = fb.get("/產品資訊/"+name,None)
data = {
"數量" : count,
"總價格" : int(product_detail["價格"]) * int(count),
"產品資訊" : product_detail["種類"]
}
fb.put("/會員資料/"+account+"/購物車/產品資訊/", data = data , name = name)
else:
product_detail = fb.get("/產品資訊/"+name,None)
data = {
"數量" : int(product_data["數量"]) + int(count),
"總價格" : int(product_data["總價格"]) + int(product_detail["價格"]) * int(count),
"產品資訊" : product_detail["種類"]
}
fb.put("/會員資料/"+account+"/購物車/產品資訊/", data = data , name = name)
| [
"[email protected]"
] | |
1874a9fa74e68180d06cdde9266507f63280c99c | b167407960a3b69b16752590def1a62b297a4b0c | /tools/project-creator/Python2.6.6/Lib/test/test_aepack.py | 46ec71b7ce3f537a8a7011abdb870791b1e84d7a | [
"MIT"
] | permissive | xcode1986/nineck.ca | 543d1be2066e88a7db3745b483f61daedf5f378a | 637dfec24407d220bb745beacebea4a375bfd78f | refs/heads/master | 2020-04-15T14:48:08.551821 | 2019-01-15T07:36:06 | 2019-01-15T07:36:06 | 164,768,581 | 1 | 1 | MIT | 2019-01-15T08:30:27 | 2019-01-09T02:09:21 | C++ | UTF-8 | Python | false | false | 2,617 | py | # Copyright (C) 2003 Python Software Foundation
import unittest
import aepack
import aetypes
import os
from test import test_support
class TestAepack(unittest.TestCase):
OBJECTS = [
aetypes.Enum('enum'),
aetypes.Type('type'),
aetypes.Keyword('kwrd'),
aetypes.Range(1, 10),
aetypes.Comparison(1, '< ', 10),
aetypes.Logical('not ', 1),
aetypes.IntlText(0, 0, 'international text'),
aetypes.IntlWritingCode(0,0),
aetypes.QDPoint(50,100),
aetypes.QDRectangle(50,100,150,200),
aetypes.RGBColor(0x7000, 0x6000, 0x5000),
aetypes.Unknown('xxxx', 'unknown type data'),
aetypes.Character(1),
aetypes.Character(2, aetypes.Line(2)),
]
def test_roundtrip_string(self):
o = 'a string'
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o, unpacked)
def test_roundtrip_int(self):
o = 12
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o, unpacked)
def test_roundtrip_float(self):
o = 12.1
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o, unpacked)
def test_roundtrip_None(self):
o = None
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o, unpacked)
def test_roundtrip_aeobjects(self):
for o in self.OBJECTS:
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(repr(o), repr(unpacked))
def test_roundtrip_FSSpec(self):
try:
import Carbon.File
except:
return
if not hasattr(Carbon.File, "FSSpec"):
return
o = Carbon.File.FSSpec(os.curdir)
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o.as_pathname(), unpacked.as_pathname())
def test_roundtrip_Alias(self):
try:
import Carbon.File
except:
return
if not hasattr(Carbon.File, "FSSpec"):
return
o = Carbon.File.FSSpec(os.curdir).NewAliasMinimal()
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o.FSResolveAlias(None)[0].as_pathname(),
unpacked.FSResolveAlias(None)[0].as_pathname())
def test_main():
test_support.run_unittest(TestAepack)
if __name__ == '__main__':
test_main()
| [
"[email protected]"
] | |
52ecbc4b3bd62afc3d58015cf1fd71598141e57a | 0a1f8957a798006deaa53d10d09f733fab1e6b05 | /bin/Python27/Lib/site-packages/requests/sessions.py | d8b11fad250b05ad550e67d98980f3423b96c548 | [
"LicenseRef-scancode-other-permissive"
] | permissive | metamorph-inc/meta-core | a89504ccb1ed2f97cc6e792ba52e3a6df349efef | bc7a05e04c7901f477fe553c59e478a837116d92 | refs/heads/master | 2023-03-07T02:52:57.262506 | 2023-03-01T18:49:49 | 2023-03-01T18:49:49 | 40,361,476 | 25 | 15 | NOASSERTION | 2023-01-13T16:54:30 | 2015-08-07T13:21:24 | Python | UTF-8 | Python | false | false | 24,983 | py | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers, to_native_string
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .packages.urllib3._collections import RecentlyUsedContainer
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
REDIRECT_CACHE_SIZE = 1000
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""Determines appropriate setting for a given request, taking into account
the explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None. Extract keys first to avoid altering
# the dictionary during iteration.
none_keys = [k for (k, v) in merged_setting.items() if v is None]
for key in none_keys:
del merged_setting[key]
return merged_setting
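# Illustrative behaviour of merge_setting (example added for clarity, not upstream code):
# request values override session values, and a None request value removes the key, e.g.
#   merge_setting({'Accept': 'json', 'X-Key': None}, {'X-Key': 'abc', 'User-Agent': 'ua'})
#   returns {'User-Agent': 'ua', 'Accept': 'json'}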
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses."""
i = 0
hist = [] # keep track of history
while resp.is_redirect:
prepared_request = req.copy()
if i > 0:
# Update history and keep track of redirects.
hist.append(resp)
new_hist = list(hist)
resp.history = new_hist
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
# Release the connection back into the pool.
resp.close()
url = resp.headers['location']
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
# Cache the url, unless it redirects to itself.
if resp.is_permanent_redirect and req.url != prepared_request.url:
self.redirect_cache[req.url] = prepared_request.url
self.rebuild_method(prepared_request, resp)
# https://github.com/kennethreitz/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
if 'Content-Length' in prepared_request.headers:
del prepared_request.headers['Content-Length']
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
prepared_request._cookies.update(self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# Override the original request.
req = prepared_request
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
i += 1
yield resp
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
# If we get redirected to a new host, we should strip out any
# authentication headers.
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return
def rebuild_proxies(self, prepared_request, proxies):
"""This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
"""
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy() if proxies is not None else {}
if self.trust_env and not should_bypass_proxies(url):
environ_proxies = get_environ_proxies(url)
proxy = environ_proxies.get('all', environ_proxies.get(scheme))
if proxy:
new_proxies.setdefault(scheme, proxy)
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('http://httpbin.org/get')
<Response [200]>
Or as a context manager::
>>> with requests.Session() as s:
>>> s.get('http://httpbin.org/get')
<Response [200]>
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
'max_redirects',
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol or protocol and host to the URL of the proxy
#: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
#: be used on each :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL certificate default.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
#: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
#: 30.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Trust environment settings for proxy configuration, default
#: authentication and similar.
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
# Only store 1000 redirects to prevent using infinite memory
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
def request(self, method, url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects=True,
proxies=None,
hooks=None,
stream=None,
verify=None,
cert=None,
json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send
in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) whether the SSL cert will be verified.
A CA_BUNDLE path can also be provided. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
# Create the Request.
req = Request(
method = method.upper(),
url = url,
headers = headers,
files = files,
data = data or {},
json = json,
params = params or {},
auth = auth,
cookies = cookies,
hooks = hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest."""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = request.hooks
# Resolve URL in redirect cache, if available.
if allow_redirects:
checked_urls = set()
while request.url in self.redirect_cache:
checked_urls.add(request.url)
new_url = self.redirect_cache.get(request.url)
if new_url in checked_urls:
break
request.url = new_url
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = datetime.utcnow()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
r.elapsed = datetime.utcnow() - start
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""Check the environment and merge it with some settings."""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
env_proxies = get_environ_proxies(url) or {}
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
def get_adapter(self, url):
"""Returns the appropriate connection adapter for the given URL."""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by key length.
"""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
def __getstate__(self):
state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
state['redirect_cache'] = dict(self.redirect_cache)
return state
def __setstate__(self, state):
redirect_cache = state.pop('redirect_cache', {})
for attr, value in state.items():
setattr(self, attr, value)
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
for redirect, to in redirect_cache.items():
self.redirect_cache[redirect] = to
def session():
"""Returns a :class:`Session` for context-management."""
return Session()
| [
"[email protected]"
] | |
ce32fd8f2071c627a7f0902be8dfa99ab9d61d03 | bc441bb06b8948288f110af63feda4e798f30225 | /topboard_sdk/model/ops_automation/jobs_pb2.py | 9233835a9eb0089af4f4dc15c4c5b04c2d1ac53e | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 5,796 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: jobs.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from topboard_sdk.model.ops_automation import bind_resource_pb2 as topboard__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2
from topboard_sdk.model.ops_automation import mail_info_pb2 as topboard__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='jobs.proto',
package='ops_automation',
syntax='proto3',
serialized_options=_b('ZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automation'),
serialized_pb=_b('\n\njobs.proto\x12\x0eops_automation\x1a\x35topboard_sdk/model/ops_automation/bind_resource.proto\x1a\x31topboard_sdk/model/ops_automation/mail_info.proto\"\xc1\x01\n\x04Jobs\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x02 \x01(\t\x12\x0e\n\x06menuId\x18\x03 \x01(\t\x12\x32\n\x0c\x62indResource\x18\x04 \x01(\x0b\x32\x1c.ops_automation.BindResource\x12\x0c\n\x04\x64\x65sc\x18\x05 \x01(\t\x12\x13\n\x0b\x61llowModify\x18\x06 \x01(\x08\x12&\n\x04mail\x18\x07 \x01(\x0b\x32\x18.ops_automation.MailInfo\x12\n\n\x02id\x18\x08 \x01(\tBJZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automationb\x06proto3')
,
dependencies=[topboard__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2.DESCRIPTOR,])
_JOBS = _descriptor.Descriptor(
name='Jobs',
full_name='ops_automation.Jobs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='ops_automation.Jobs.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='ops_automation.Jobs.category', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='menuId', full_name='ops_automation.Jobs.menuId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bindResource', full_name='ops_automation.Jobs.bindResource', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desc', full_name='ops_automation.Jobs.desc', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowModify', full_name='ops_automation.Jobs.allowModify', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mail', full_name='ops_automation.Jobs.mail', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='ops_automation.Jobs.id', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=330,
)
_JOBS.fields_by_name['bindResource'].message_type = topboard__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2._BINDRESOURCE
_JOBS.fields_by_name['mail'].message_type = topboard__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2._MAILINFO
DESCRIPTOR.message_types_by_name['Jobs'] = _JOBS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Jobs = _reflection.GeneratedProtocolMessageType('Jobs', (_message.Message,), {
'DESCRIPTOR' : _JOBS,
'__module__' : 'jobs_pb2'
# @@protoc_insertion_point(class_scope:ops_automation.Jobs)
})
_sym_db.RegisterMessage(Jobs)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
9079956a8eb24ea4da1f2da69f8788dc1a694f27 | 6699c63e2abb727bdde2cd08bff8daffbbef7265 | /busqueda14.py | 4399c01209e1054275ef4987d9834c3f16079030 | [] | no_license | FernandotapiaCalua/t08_Tapia | fe034802086515a7b31fdf65d7b37c53a40af7a5 | d5a7e02bab8893fb85f00dedd1403a5cc9b6ed66 | refs/heads/master | 2020-10-02T04:05:06.085249 | 2019-12-12T21:33:08 | 2019-12-12T21:33:08 | 227,696,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | #busqueda
cadena="para estar en buena forma"
print(cadena.find("for"))
| [
"[email protected]"
] | |
3edc5c35853123a22c04959e95910dfd09412079 | 73a0f661f1423d63e86489d4b2673f0103698aab | /python/oneflow/test/modules/test_flatten.py | cf2ada9bbbdcad6b4174bcacb1aeef712538c9af | [
"Apache-2.0"
] | permissive | Oneflow-Inc/oneflow | 4fc3e081e45db0242a465c4330d8bcc8b21ee924 | 0aab78ea24d4b1c784c30c57d33ec69fe5605e4a | refs/heads/master | 2023-08-25T16:58:30.576596 | 2023-08-22T14:15:46 | 2023-08-22T14:15:46 | 81,634,683 | 5,495 | 786 | Apache-2.0 | 2023-09-14T09:44:31 | 2017-02-11T06:09:53 | C++ | UTF-8 | Python | false | false | 4,076 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.automated_test_util import *
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_flatten(test_case, device):
m = flow.nn.Flatten()
x = flow.Tensor(32, 2, 5, 5, device=flow.device(device))
flow.nn.init.uniform_(x)
y = m(x)
test_case.assertTrue(y.shape == flow.Size((32, 50)))
test_case.assertTrue(np.array_equal(y.numpy().flatten(), x.numpy().flatten()))
y2 = flow.flatten(x, start_dim=2)
test_case.assertTrue(y2.shape == flow.Size((32, 2, 25)))
test_case.assertTrue(np.array_equal(y2.numpy().flatten(), x.numpy().flatten()))
y3 = x.flatten(start_dim=1)
test_case.assertTrue(y3.shape == flow.Size((32, 50)))
test_case.assertTrue(np.array_equal(y3.numpy().flatten(), x.numpy().flatten()))
y4 = x.flatten(start_dim=1, end_dim=2)
test_case.assertTrue(y4.shape == flow.Size((32, 10, 5)))
test_case.assertTrue(np.array_equal(y4.numpy().flatten(), x.numpy().flatten()))
y5 = flow.flatten(x)
test_case.assertTrue(y5.shape == flow.Size((1600,)))
test_case.assertTrue(np.array_equal(y5.numpy().flatten(), x.numpy().flatten()))
def _test_flatten_backward(test_case, device):
m = flow.nn.Flatten().to(flow.device(device))
x = flow.Tensor(2, 3, 4, 5, device=flow.device(device))
x.requires_grad = True
flow.nn.init.uniform_(x)
y = m(x)
z = y.sum()
z.backward()
test_case.assertTrue(np.array_equal(np.ones(shape=(2, 3, 4, 5)), x.grad.numpy()))
@flow.unittest.skip_unless_1n1d()
class TestFlattenModule(flow.unittest.TestCase):
def test_cast(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [_test_flatten, _test_flatten_backward]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest(n=5)
def test_flatten_module_with_random_data(test_case):
m = torch.nn.Flatten(
start_dim=random(1, 6) | nothing(), end_dim=random(1, 6) | nothing()
)
m.train(random())
device = random_device()
m.to(device)
x = random_tensor().to(device)
y = m(x)
return y
@autotest(n=5)
def test_flatten_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
y = torch.flatten(
x,
start_dim=random(1, 6).to(int) | nothing(),
end_dim=random(1, 6).to(int) | nothing(),
)
return y
@autotest(n=5, auto_backward=False, check_graph=True)
def test_flatten_bool_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device=device, dtype=torch.bool)
y = torch.flatten(
x,
start_dim=random(1, 6).to(int) | nothing(),
end_dim=random(1, 6).to(int) | nothing(),
)
return y
@autotest(n=5)
def test_flatten_with_0dim_data(test_case):
device = random_device()
x = random_tensor(ndim=0).to(device)
y = torch.flatten(
x,
start_dim=random(1, 6).to(int) | nothing(),
end_dim=random(1, 6).to(int) | nothing(),
)
return y
@profile(torch.flatten)
def profile_flatten(test_case):
torch.flatten(torch.ones(1000, 1000))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
11da25680a0f632213592f939be53139bcc252db | 5a281cb78335e06c631181720546f6876005d4e5 | /senlin-7.0.0/senlin/common/exception.py | 43511227726f33e7655deceb9ef5b0b1d100af49 | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 9,329 | py | #
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Senlin exception subclasses.
"""
import sys
from oslo_log import log as logging
import six
from senlin.common.i18n import _
_FATAL_EXCEPTION_FORMAT_ERRORS = False
LOG = logging.getLogger(__name__)
class SenlinException(Exception):
"""Base Senlin Exception.
To correctly use this class, inherit from it and define a 'msg_fmt'
property. That msg_fmt will get printed with the keyword arguments
provided to the constructor.
"""
message = _("An unknown exception occurred.")
def __init__(self, **kwargs):
self.kwargs = kwargs
try:
self.message = self.msg_fmt % kwargs
# if last char is '.', wipe out redundant '.'
if self.message[-1] == '.':
self.message = self.message.rstrip('.') + '.'
except KeyError:
# exc_info = sys.exc_info()
# if kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception('Exception in string format operation')
for name, value in kwargs.items():
LOG.error("%s: %s", name, value) # noqa
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise
# raise exc_info[0], exc_info[1], exc_info[2]
def __str__(self):
return six.text_type(self.message)
def __unicode__(self):
return six.text_type(self.message)
def __deepcopy__(self, memo):
return self.__class__(**self.kwargs)
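# Illustrative usage (example added for clarity, not part of the upstream module): each
# subclass below only defines a msg_fmt; e.g. BadRequest(msg='invalid sort key') renders
# its message as "invalid sort key." through the msg_fmt interpolation in __init__ above.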
class SIGHUPInterrupt(SenlinException):
msg_fmt = _("System SIGHUP signal received.")
class NotAuthenticated(SenlinException):
msg_fmt = _("You are not authenticated.")
class Forbidden(SenlinException):
msg_fmt = _("You are not authorized to complete this operation.")
class BadRequest(SenlinException):
msg_fmt = _("%(msg)s.")
class InvalidAPIVersionString(SenlinException):
msg_fmt = _("API Version String '%(version)s' is of invalid format. It "
"must be of format 'major.minor'.")
class MethodVersionNotFound(SenlinException):
msg_fmt = _("API version '%(version)s' is not supported on this method.")
class InvalidGlobalAPIVersion(SenlinException):
msg_fmt = _("Version '%(req_ver)s' is not supported by the API. Minimum "
"is '%(min_ver)s' and maximum is '%(max_ver)s'.")
class MultipleChoices(SenlinException):
msg_fmt = _("Multiple results found matching the query criteria "
"'%(arg)s'. Please be more specific.")
class ResourceNotFound(SenlinException):
"""Generic exception for resource not found.
The resource type here can be 'cluster', 'node', 'profile',
'policy', 'receiver', 'webhook', 'profile_type', 'policy_type',
'action', 'event' and so on.
"""
msg_fmt = _("The %(type)s '%(id)s' could not be found.")
@staticmethod
def enhance_msg(enhance, ex):
enhance_msg = ex.message[:4] + enhance + ' ' + ex.message[4:]
return enhance_msg
class ResourceInUse(SenlinException):
"""Generic exception for resource in use.
The resource type here can be 'cluster', 'node', 'profile',
'policy', 'receiver', 'webhook', 'profile_type', 'policy_type',
'action', 'event' and so on.
"""
msg_fmt = _("The %(type)s '%(id)s' cannot be deleted: %(reason)s.")
class ResourceIsLocked(SenlinException):
"""Generic exception for resource in use.
The resource type here can be 'cluster', 'node'.
"""
msg_fmt = _("%(action)s for %(type)s '%(id)s' cannot be completed "
"because it is already locked.")
class ProfileNotSpecified(SenlinException):
msg_fmt = _("Profile not specified.")
class ProfileOperationFailed(SenlinException):
msg_fmt = _("%(message)s")
class ProfileOperationTimeout(SenlinException):
msg_fmt = _("%(message)s")
class PolicyNotSpecified(SenlinException):
msg_fmt = _("Policy not specified.")
class PolicyBindingNotFound(SenlinException):
msg_fmt = _("The policy '%(policy)s' is not found attached to the "
"specified cluster '%(identity)s'.")
class PolicyTypeConflict(SenlinException):
msg_fmt = _("The policy with type '%(policy_type)s' already exists.")
class InvalidSpec(SenlinException):
msg_fmt = _("%(message)s")
class FeatureNotSupported(SenlinException):
msg_fmt = _("%(feature)s is not supported.")
class Error(SenlinException):
msg_fmt = "%(message)s"
def __init__(self, msg):
super(Error, self).__init__(message=msg)
class InvalidContentType(SenlinException):
msg_fmt = _("Invalid content type %(content_type)s")
class RequestLimitExceeded(SenlinException):
msg_fmt = _('Request limit exceeded: %(message)s')
class ActionInProgress(SenlinException):
msg_fmt = _("The %(type)s '%(id)s' is in status %(status)s.")
class ActionConflict(SenlinException):
msg_fmt = _("The %(type)s action for target %(target)s conflicts with "
"the following action(s): %(actions)s")
class ActionCooldown(SenlinException):
msg_fmt = _("The %(type)s action for cluster %(cluster)s cannot be "
"processed due to Policy %(policy_id)s cooldown still in "
"progress")
class ActionImmutable(SenlinException):
msg_fmt = _("Action (%(id)s) is in status (%(actual)s) while expected "
"status must be one of (%(expected)s).")
class NodeNotOrphan(SenlinException):
msg_fmt = _("%(message)s")
class InternalError(SenlinException):
"""A base class for internal exceptions in senlin.
The internal exception classes which inherit from :class:`SenlinException`
class should be translated to a user facing exception type if they need to
be made user visible.
"""
msg_fmt = _("%(message)s")
message = _('Internal error happened')
def __init__(self, **kwargs):
self.code = kwargs.pop('code', 500)
# If a "message" is not provided, or None or blank, use the default.
self.message = kwargs.pop('message', self.message) or self.message
super(InternalError, self).__init__(
code=self.code, message=self.message, **kwargs)
class EResourceBusy(InternalError):
# Internal exception, not to be exposed to end user.
msg_fmt = _("The %(type)s '%(id)s' is busy now.")
class TrustNotFound(InternalError):
# Internal exception, not to be exposed to end user.
msg_fmt = _("The trust for trustor '%(trustor)s' could not be found.")
class EResourceCreation(InternalError):
# Used when creating resources in other services
def __init__(self, **kwargs):
self.resource_id = kwargs.pop('resource_id', None)
super(EResourceCreation, self).__init__(
resource_id=self.resource_id, **kwargs)
msg_fmt = _("Failed in creating %(type)s: %(message)s.")
class EResourceUpdate(InternalError):
# Used when updating resources from other services
msg_fmt = _("Failed in updating %(type)s '%(id)s': %(message)s.")
class EResourceDeletion(InternalError):
# Used when deleting resources from other services
msg_fmt = _("Failed in deleting %(type)s '%(id)s': %(message)s.")
class EServerNotFound(InternalError):
# Used when a server resource cannot be found in another service
msg_fmt = _("Failed in finding %(type)s '%(id)s': %(message)s.")
class EResourceOperation(InternalError):
"""Generic exception for resource fail operation.
The op here can be 'recovering','rebuilding', 'checking' and
so on. And the op 'creating', 'updating' and 'deleting' we can
use separately class `EResourceCreation`,`EResourceUpdate` and
`EResourceDeletion`.
The type here is resource's driver type.It can be 'server',
'stack', 'container' and so on.
The id is resource's id.
The message here can be message from class 'ResourceNotFound',
'ResourceInUse' and so on, or developer can specified message.
"""
# Used when operating resources from other services
msg_fmt = _("Failed in %(op)s %(type)s '%(id)s': %(message)s.")
class ESchema(InternalError):
msg_fmt = _("%(message)s")
class InvalidPlugin(InternalError):
msg_fmt = _("%(message)s")
class PolicyNotAttached(InternalError):
msg_fmt = _("The policy '%(policy)s' is not attached to the specified "
"cluster '%(cluster)s'.")
class HTTPExceptionDisguise(Exception):
"""Disguises HTTP exceptions.
The purpose is to let them be handled by the webob fault application
in the wsgi pipeline.
"""
def __init__(self, exception):
self.exc = exception
self.tb = sys.exc_info()[2]
| [
"Wayne [email protected]"
] | Wayne [email protected] |
14ba40653e5a91516f606e964c3cc7999beb2bd4 | 2ee7195d71993838829e06f26347f76a2433931b | /test_backtest/T0backtest.py | 6b49876c013292907cee777def029de94403fe24 | [
"MIT"
] | permissive | imgreenbird/QUANTAXIS | 0a056de2c3961f5d0b7d0e17782f34b25593e5fb | 88eac434135a92cd64bd035cd844b34020729747 | refs/heads/master | 2020-03-23T14:27:23.003742 | 2018-07-20T01:25:27 | 2018-07-20T01:25:27 | 141,676,903 | 3 | 0 | MIT | 2018-07-20T07:11:09 | 2018-07-20T07:11:09 | null | UTF-8 | Python | false | false | 3,821 | py |
# coding: utf-8
# In[1]:
from QUANTAXIS.QAARP.QAStrategy import QA_Strategy
from QUANTAXIS.QAARP.QAAccount import QA_Account
from QUANTAXIS.QAUtil.QAParameter import (AMOUNT_MODEL, MARKET_TYPE,
FREQUENCE, ORDER_DIRECTION,
ORDER_MODEL,RUNNING_ENVIRONMENT)
import random
# In[2]:
class MAMINT0Strategy(QA_Account):
def __init__(self, init_hold={'000001': 10000}):  # note: mutable default argument is shared across calls
super().__init__(init_hold=init_hold)
self.account_cookie = 'T0BACKTEST'
self.running_environment=RUNNING_ENVIRONMENT.TZERO
self.frequence = FREQUENCE.FIFTEEN_MIN
self.market_type = MARKET_TYPE.STOCK_CN
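# Strategy summary: on every 15-minute bar, for each code in the incoming
# market data, sell the whole available position if there is one, otherwise
# buy 100 shares at market price -- a naive T+0 (intraday round-trip) loop.
# The bare except below simply skips bars whose data cannot be processed.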
def on_bar(self, event):
try:
for item in event.market_data.code:
print('================')
print(self.sell_available)
print('================')
print(self.hold_available)
if self.sell_available.get(item, 0) > 0:
event.send_order(account_id=self.account_cookie,
amount=self.sell_available[item], amount_model=AMOUNT_MODEL.BY_AMOUNT,
time=self.current_time, code=item, price=0,
order_model=ORDER_MODEL.MARKET, towards=ORDER_DIRECTION.SELL,
market_type=self.market_type, frequence=self.frequence,
broker_name=self.broker
)
else:
event.send_order(account_id=self.account_cookie,
amount=100, amount_model=AMOUNT_MODEL.BY_AMOUNT,
time=self.current_time, code=item, price=0,
order_model=ORDER_MODEL.MARKET, towards=ORDER_DIRECTION.BUY,
market_type=self.market_type, frequence=self.frequence,
broker_name=self.broker)
except:
pass
# In[3]:
from QUANTAXIS.QAARP.QARisk import QA_Risk
from QUANTAXIS.QAARP.QAUser import QA_User
from QUANTAXIS.QABacktest.QABacktest import QA_Backtest
from QUANTAXIS.QAUtil.QALogs import QA_util_log_info
from QUANTAXIS.QAUtil.QAParameter import FREQUENCE, MARKET_TYPE
class Backtest(QA_Backtest):
'''
Backtest example in multi-threaded mode
'''
def __init__(self, market_type, frequence, start, end, code_list, commission_fee):
super().__init__(market_type, frequence, start, end, code_list, commission_fee)
self.user = QA_User()
t0strategy=MAMINT0Strategy()
# maminstrategy.reset_assets(1000)
# self.portfolio, self.account = self.user.register_account(mastrategy)
self.user = QA_User(user_cookie='user_admin')
self.portfolio = self.user.new_portfolio('folio_admin')
self.portfolio, self.account = self.user.register_account(t0strategy)
def after_success(self):
QA_util_log_info(self.account.history_table)
risk = QA_Risk(self.account, benchmark_code='000300',
benchmark_type=MARKET_TYPE.INDEX_CN)
print(risk().T)
self.account.save()
risk.save()
risk.plot_assets_curve()
print(risk.profit_construct)
# In[4]:
import QUANTAXIS as QA
backtest = Backtest(market_type=MARKET_TYPE.STOCK_CN,
frequence=FREQUENCE.FIFTEEN_MIN,
start='2017-11-01',
end='2017-12-10',
code_list=['000001'],
commission_fee=0.00015)
backtest.start_market()
backtest.run()
backtest.stop()
# In[5]:
backtest.account.history_table
| [
"[email protected]"
] | |
20a9b35189e8cc10558e34e4a5f9b23e8b6e8215 | c733e6b433914a8faba256c7853f5cf2cd39c62a | /Python/Leetcode Daily Practice/Stack/907.Sum of Subarray Minimums.py | 27776a90d32a21b05ff11eaa3a7e22e5d544f69d | [] | no_license | YaqianQi/Algorithm-and-Data-Structure | 3016bebcc1f1356b6e5f3c3e588f3d46c276a805 | 2e1751263f484709102f7f2caf18776a004c8230 | refs/heads/master | 2021-10-27T16:29:18.409235 | 2021-10-14T13:57:36 | 2021-10-14T13:57:36 | 178,946,803 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | class Solution(object):
def sumSubarrayMins(self, A):
MOD = 10**9 + 7
stack = []
dot, ans = 0, 0
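# Monotonic-stack solution: each stack entry (value, count) records that
# `value` is the minimum for `count` of the subarrays ending at the current
# element; `dot` is the sum of minimums of all subarrays ending here, and
# `ans` accumulates `dot` over every position (O(n) overall). For the traced
# example A = [1,7,5,2,4,3,9] below, the answer is 73.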
for num in A:
cnt = 1
while stack and stack[-1][0] >= num:
n, c = stack.pop(-1)
cnt += c
dot -= n * c
stack.append((num, cnt))
dot += num * cnt
ans += dot
return ans % MOD
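# The quoted block below is an O(n^2) brute-force reference; it sits after
# the return statement above, so it never executes.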
"""res = 0
for i in range(len(A)):
for j in range(i, len(A)):
res += min(A[i:j+1])
print(A[i:j+1], min(A[i:j+1]))"""
return ans
if __name__=="__main__":
A = [1,7,5,2,4,3,9]
# - - - -
# 1 1 1 1 1 1 1 : 7
# 7 5 2 2 2 2 : 20
# 5 2 2 2 2 : 13
# 2 2 2 2 : 8
# 4 3 3 : 10
# 3 3 : 6
# 9 : 9
# 73
# print(sum(B))
sol = Solution()
print(sol.sumSubarrayMins(A))
# Input:
A = [3,1,2,4]
# 3 1 1 1
# 1 1 1
# 2 2
# 4
# Output: 17
# Explanation: Subarrays are [3], [1], [2], [4], [3,1], [1,2], [2,4], [3,1,2], [1,2,4], [3,1,2,4].
# Minimums are 3, 1, 2, 4, 1, 1, 2, 1, 1, 1. Sum is 17.
sol = Solution()
# print(sol.sumSubarrayMins(A))
| [
"[email protected]"
] | |
5d54aa954b5a801f72d85eef577ed6856517acbf | cccd1ede83f9391238893f3862e7beff999647e1 | /rw_and_plot/15_1_cubes.py | efe10175eca18c8f2ef07cef6f815f60667a78fa | [] | no_license | SMS-NED16/pcc-data-vis | c7c136e32921619af52b46cdbf12f6debaa8a690 | f9750ee947163335c351a6df453f5d2dab87d855 | refs/heads/master | 2020-03-09T02:32:19.494589 | 2018-04-04T12:11:40 | 2018-04-04T12:11:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | import matplotlib.pyplot as plt
x = list(range(1, 6))
five_cubes = [val ** 3 for val in x]
thousand_cubes = [ val ** 3 for val in range(1, 5001)]
plt.subplot(1, 2, 1)
plt.scatter(x, five_cubes, s=40)
plt.title("Graph of first five cubes", fontsize=14)
plt.xlabel("Values", fontsize=12)
plt.ylabel("Cube of Value", fontsize=12)
plt.tick_params(labelsize=12, axis='both',which='major')
plt.subplot(1, 2, 2)
x = list(range(1, 5001))
plt.scatter(x, thousand_cubes,
c = thousand_cubes, cmap = plt.cm.Blues, edgecolor='none',)
plt.title("Graph of five thousand cubes", fontsize=14)
plt.xlabel("Values", fontsize=12)
plt.ylabel("Cube of Values", fontsize=12)
plt.tick_params(labelsize=12, axis='both',which='major')
plt.show() | [
"[email protected]"
] | |
824d5eea34380df61a582e52a5a070cac3dff314 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.5_rd=0.8_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=48/sched.py | afb4c0412030f6f941ad2606c1757d3218fe3c14 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | -X FMLP -Q 0 -L 1 132 400
-X FMLP -Q 0 -L 1 125 400
-X FMLP -Q 0 -L 1 65 250
-X FMLP -Q 0 -L 1 48 175
-X FMLP -Q 1 -L 1 45 250
-X FMLP -Q 1 -L 1 40 125
-X FMLP -Q 1 -L 1 36 250
-X FMLP -Q 1 -L 1 34 150
-X FMLP -Q 2 -L 1 26 175
-X FMLP -Q 2 -L 1 25 125
-X FMLP -Q 2 -L 1 22 150
-X FMLP -Q 2 -L 1 22 100
-X FMLP -Q 3 -L 1 20 200
-X FMLP -Q 3 -L 1 19 150
-X FMLP -Q 3 -L 1 18 150
16 150
16 100
13 300
10 125
| [
"[email protected]"
] | |
7d2398a97349b50e741469065939c3bdc7116573 | 5d45174eef86562b6c90a4bc07c86258df249486 | /hyak/launcher.py | 7b06e8c0e6a491706816db94253c46ac6fdff997 | [] | no_license | bmorris3/shampoo | a70dd3b1896c7a4f2e88413c13cae96d80f21c71 | 853c6668efef3e7b69727ea45ff9eff419e9a70b | refs/heads/master | 2023-05-27T04:30:02.756962 | 2018-03-28T14:38:09 | 2018-03-28T14:38:09 | 41,105,695 | 22 | 12 | null | 2021-04-27T11:06:07 | 2015-08-20T16:08:58 | Python | UTF-8 | Python | false | false | 2,342 | py | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from glob import glob
import numpy as np
python_path = '/usr/lusers/bmmorris/miniconda2/bin/python'
data_dir = '/gscratch/stf/bmmorris/shamu/2015.12.15_17-47'
output_dir = '/gscratch/stf/bmmorris/shamu/outputs'
python_script = '/usr/lusers/bmmorris/git/shampoo/hyak/hyak_jobs.py'
raw_hologram_paths = sorted(glob(os.path.join(data_dir, '*_holo.tif')))
submit_template = open('submit_template.sh', 'r').read()
walltime = '01:00:00'
email = '[email protected]'
# Divide holograms into batches of n_jobs_per_node * n_repeats_per_node (16 * 2 = 32) per node at a time
n_jobs_per_node = 16
n_repeats_per_node = 2
all_hologram_indices = np.arange(len(raw_hologram_paths))
hologram_index_groups = np.array_split(all_hologram_indices,
len(all_hologram_indices) //
(n_jobs_per_node*n_repeats_per_node) + 1)
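# Each batch therefore holds at most n_jobs_per_node * n_repeats_per_node
# (16 * 2 = 32) holograms; e.g. 100 holograms would be split into
# 100 // 32 + 1 = 4 groups of 25 (illustrative count -- the real number
# depends on what the glob above finds).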
for i, split_hologram_indices in enumerate(hologram_index_groups):
hologram_paths = [raw_hologram_paths[j] for j in split_hologram_indices]
# Create input jobs to pipe to "parallel" command:
command_list_path = os.path.join(output_dir,
'command_list_{0:02d}.txt'.format(i))
with open(command_list_path, 'w') as command_file:
for holo_path in hologram_paths:
line = "{0} {1} {2} {3}\n".format(python_path, python_script,
holo_path, output_dir)
command_file.write(line)
submit_script_name = os.path.join(output_dir,
'submit_script_{0:02d}.sh'.format(i))
submit_script = submit_template.format(job_name="shampoo_test",
run_dir=output_dir,
log_dir=output_dir,
walltime=walltime,
email=email,
command_list_path=command_list_path,
n_jobs_per_node=n_jobs_per_node)
submit_script_path = os.path.join(output_dir, submit_script_name)
with open(submit_script_path, 'w') as f:
f.write(submit_script)
os.system('qsub {0}'.format(submit_script_path))
| [
"[email protected]"
] | |
8503552be3628a6c2d0f3f38e147fe33a263a51e | 066435cd1b48955ab0039c275d706f167ccae5a2 | /lib/vhf/test/test_nr_direct_dot.py | bf79d4da8c3e38ac90997edd3731d7adfb69a343 | [
"BSD-2-Clause"
] | permissive | matk86/pyscf | ca4b5c27b9ed6c5fb4120c8471110c087c43600b | 931bf855591a68c415a9564972a6e216a12b0b36 | refs/heads/master | 2020-12-25T10:08:18.334338 | 2016-02-01T06:15:33 | 2016-02-01T06:15:33 | 51,003,165 | 1 | 0 | null | 2016-02-03T14:00:02 | 2016-02-03T14:00:01 | null | UTF-8 | Python | false | false | 16,130 | py | #!/usr/bin/env python
import os
import ctypes
import _ctypes
import unittest
import numpy
from pyscf import lib
from pyscf import scf
from pyscf import gto
from pyscf import ao2mo
libcvhf2 = lib.load_library('libcvhf')
numpy.random.seed(15)
nao = 100
i0, j0, k0, l0 = 40,30,20,10
dm = numpy.random.random((nao,nao))
def run(fname):
vj = numpy.zeros((nao,nao))
di, dj, dk, dl = range(1,5)
eri = numpy.asarray(numpy.random.random((di,dj,dk,dl)), order='F')
fn = getattr(libcvhf2, fname)
fn(eri.ctypes.data_as(ctypes.c_void_p),
dm.ctypes.data_as(ctypes.c_void_p),
vj.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(i0), ctypes.c_int(i0+di),
ctypes.c_int(j0), ctypes.c_int(j0+dj),
ctypes.c_int(k0), ctypes.c_int(k0+dk),
ctypes.c_int(l0), ctypes.c_int(l0+dl),
ctypes.c_int(nao))
return eri, vj
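# Kernel naming convention, as the einsum references below suggest (this is an
# inference from the tests themselves, not separate documentation):
# CVHFnr<sym>_<dm>_s1<out> contracts the <dm> block of the density matrix into
# the <out> block of vj, where <sym> is the permutation symmetry assumed for
# the (ij|kl) block -- s1 none, s2ij i<->j, s2kl k<->l, s4 both, and a2*/a4*
# the antisymmetrized variants. Every test builds a small random block and
# checks the C kernel against an explicit numpy.einsum reference.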
class KnowValues(unittest.TestCase):
def test_nrs1_ji_s1kl(self):
eri, vj = run('CVHFnrs1_ji_s1kl')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,ji->kl', eri, dm[j0:j0+dj,i0:i0+di])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,l0:l0+dl], ref))
def test_nrs1_lk_s1ij(self):
eri, vj = run('CVHFnrs1_lk_s1ij')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,lk->ij', eri, dm[l0:l0+dl,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[i0:i0+di,j0:j0+dj], ref))
def test_nrs1_jk_s1il(self):
eri, vj = run('CVHFnrs1_jk_s1il')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,jk->il', eri, dm[j0:j0+dj,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[i0:i0+di,l0:l0+dl], ref))
def test_nrs1_li_s1kj(self):
eri, vj = run('CVHFnrs1_li_s1kj')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,li->kj', eri, dm[l0:l0+dl,i0:i0+di])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,j0:j0+dj], ref))
def test_nrs2ij_lk_s1ij(self):
eri, vj = run('CVHFnrs2ij_lk_s1ij')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,lk->ij', eri, dm[l0:l0+dl,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[i0:i0+di,j0:j0+dj], ref))
ref = numpy.einsum('ijkl,lk->ij', eri.transpose(1,0,2,3), dm[l0:l0+dl,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,i0:i0+di], ref))
def test_nrs2ij_ji_s1kl(self):
eri, vj = run('CVHFnrs2ij_ji_s1kl')
di, dj, dk, dl = eri.shape
ref =(numpy.einsum('ijkl,ji->kl', eri, dm[j0:j0+dj,i0:i0+di])
+ numpy.einsum('ijkl,ji->kl', eri.transpose(1,0,2,3), dm[i0:i0+di,j0:j0+dj]))
self.assertTrue(numpy.allclose(vj[k0:k0+dk,l0:l0+dl], ref))
def test_nrs2ij_jk_s1il(self):
eri, vj = run('CVHFnrs2ij_jk_s1il')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,jk->il', eri, dm[j0:j0+dj,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[i0:i0+di,l0:l0+dl], ref))
ref = numpy.einsum('ijkl,jk->il', eri.transpose(1,0,2,3), dm[i0:i0+di,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,l0:l0+dl], ref))
def test_nrs2ij_li_s1kj(self):
eri, vj = run('CVHFnrs2ij_li_s1kj')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,li->kj', eri, dm[l0:l0+dl,i0:i0+di])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,j0:j0+dj], ref))
ref = numpy.einsum('ijkl,li->kj', eri.transpose(1,0,2,3), dm[l0:l0+dl,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,i0:i0+di], ref))
def test_nrs2kl_lk_s1ij(self):
eri, vj = run('CVHFnrs2kl_lk_s1ij')
di, dj, dk, dl = eri.shape
ref =(numpy.einsum('ijkl,lk->ij', eri, dm[l0:l0+dl,k0:k0+dk])
+ numpy.einsum('ijkl,lk->ij', eri.transpose(0,1,3,2), dm[k0:k0+dk,l0:l0+dl]))
self.assertTrue(numpy.allclose(vj[i0:i0+di,j0:j0+dj], ref))
def test_nrs2kl_ji_s1kl(self):
eri, vj = run('CVHFnrs2kl_ji_s1kl')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,ji->kl', eri, dm[j0:j0+dj,i0:i0+di])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,l0:l0+dl], ref))
ref = numpy.einsum('ijkl,ji->kl', eri.transpose(0,1,3,2), dm[j0:j0+dj,i0:i0+di])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,k0:k0+dk], ref))
def test_nrs2kl_jk_s1il(self):
eri, vj = run('CVHFnrs2kl_jk_s1il')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,jk->il', eri, dm[j0:j0+dj,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[i0:i0+di,l0:l0+dl], ref))
ref = numpy.einsum('ijkl,jk->il', eri.transpose(0,1,3,2), dm[j0:j0+dj,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[i0:i0+di,k0:k0+dk], ref))
def test_nrs2kl_li_s1kj(self):
eri, vj = run('CVHFnrs2kl_li_s1kj')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,li->kj', eri, dm[l0:l0+dl,i0:i0+di])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,j0:j0+dj], ref))
ref = numpy.einsum('ijkl,li->kj', eri.transpose(0,1,3,2), dm[k0:k0+dk,i0:i0+di])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,j0:j0+dj], ref))
def test_nrs4_ji_s1kl(self):
eri, vj = run('CVHFnrs4_ji_s1kl')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,ji->kl', eri, dm[j0:j0+dj,i0:i0+di])
ref+= numpy.einsum('ijkl,ji->kl', eri.transpose(1,0,2,3), dm[i0:i0+di,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,l0:l0+dl], ref))
ref = numpy.einsum('ijkl,ji->kl', eri.transpose(0,1,3,2), dm[j0:j0+dj,i0:i0+di])
ref+= numpy.einsum('ijkl,ji->kl', eri.transpose(1,0,3,2), dm[i0:i0+di,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,k0:k0+dk], ref))
def test_nrs4_lk_s1ij(self):
eri, vj = run('CVHFnrs4_lk_s1ij')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,lk->ij', eri, dm[l0:l0+dl,k0:k0+dk])
ref+= numpy.einsum('ijkl,lk->ij', eri.transpose(0,1,3,2), dm[k0:k0+dk,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[i0:i0+di,j0:j0+dj], ref))
ref = numpy.einsum('ijkl,lk->ij', eri.transpose(1,0,2,3), dm[l0:l0+dl,k0:k0+dk])
ref+= numpy.einsum('ijkl,lk->ij', eri.transpose(1,0,3,2), dm[k0:k0+dk,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,i0:i0+di], ref))
def test_nrs4_jk_s1il(self):
eri, vj = run('CVHFnrs4_jk_s1il')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,jk->il', eri, dm[j0:j0+dj,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[i0:i0+di,l0:l0+dl], ref))
ref = numpy.einsum('ijkl,jk->il', eri.transpose(0,1,3,2), dm[j0:j0+dj,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[i0:i0+di,k0:k0+dk], ref))
ref = numpy.einsum('ijkl,jk->il', eri.transpose(1,0,2,3), dm[i0:i0+di,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,l0:l0+dl], ref))
ref = numpy.einsum('ijkl,jk->il', eri.transpose(1,0,3,2), dm[i0:i0+di,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,k0:k0+dk], ref))
def test_nrs4_li_s1kj(self):
eri, vj = run('CVHFnrs4_li_s1kj')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,li->kj', eri, dm[l0:l0+dl,i0:i0+di])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,j0:j0+dj], ref))
ref = numpy.einsum('ijkl,li->kj', eri.transpose(0,1,3,2), dm[k0:k0+dk,i0:i0+di])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,j0:j0+dj], ref))
ref = numpy.einsum('ijkl,li->kj', eri.transpose(1,0,2,3), dm[l0:l0+dl,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,i0:i0+di], ref))
ref = numpy.einsum('ijkl,li->kj', eri.transpose(1,0,3,2), dm[k0:k0+dk,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,i0:i0+di], ref))
def test_nra2ij_lk_s1ij(self):
eri, vj = run('CVHFnra2ij_lk_s1ij')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,lk->ij', eri, dm[l0:l0+dl,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[i0:i0+di,j0:j0+dj], ref))
ref =-numpy.einsum('ijkl,lk->ij', eri.transpose(1,0,2,3), dm[l0:l0+dl,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,i0:i0+di], ref))
def test_nra2ij_ji_s1kl(self):
eri, vj = run('CVHFnra2ij_ji_s1kl')
di, dj, dk, dl = eri.shape
ref =(numpy.einsum('ijkl,ji->kl', eri, dm[j0:j0+dj,i0:i0+di])
- numpy.einsum('ijkl,ji->kl', eri.transpose(1,0,2,3), dm[i0:i0+di,j0:j0+dj]))
self.assertTrue(numpy.allclose(vj[k0:k0+dk,l0:l0+dl], ref))
def test_nra2ij_jk_s1il(self):
eri, vj = run('CVHFnra2ij_jk_s1il')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,jk->il', eri, dm[j0:j0+dj,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[i0:i0+di,l0:l0+dl], ref))
ref =-numpy.einsum('ijkl,jk->il', eri.transpose(1,0,2,3), dm[i0:i0+di,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,l0:l0+dl], ref))
def test_nra2ij_li_s1kj(self):
eri, vj = run('CVHFnra2ij_li_s1kj')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,li->kj', eri, dm[l0:l0+dl,i0:i0+di])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,j0:j0+dj], ref))
ref =-numpy.einsum('ijkl,li->kj', eri.transpose(1,0,2,3), dm[l0:l0+dl,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,i0:i0+di], ref))
def test_nra2kl_lk_s1ij(self):
eri, vj = run('CVHFnra2kl_lk_s1ij')
di, dj, dk, dl = eri.shape
ref =(numpy.einsum('ijkl,lk->ij', eri, dm[l0:l0+dl,k0:k0+dk])
- numpy.einsum('ijkl,lk->ij', eri.transpose(0,1,3,2), dm[k0:k0+dk,l0:l0+dl]))
self.assertTrue(numpy.allclose(vj[i0:i0+di,j0:j0+dj], ref))
def test_nra2kl_ji_s1kl(self):
eri, vj = run('CVHFnra2kl_ji_s1kl')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,ji->kl', eri, dm[j0:j0+dj,i0:i0+di])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,l0:l0+dl], ref))
ref =-numpy.einsum('ijkl,ji->kl', eri.transpose(0,1,3,2), dm[j0:j0+dj,i0:i0+di])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,k0:k0+dk], ref))
def test_nra2kl_jk_s1il(self):
eri, vj = run('CVHFnra2kl_jk_s1il')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,jk->il', eri, dm[j0:j0+dj,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[i0:i0+di,l0:l0+dl], ref))
ref =-numpy.einsum('ijkl,jk->il', eri.transpose(0,1,3,2), dm[j0:j0+dj,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[i0:i0+di,k0:k0+dk], ref))
def test_nra2kl_li_s1kj(self):
eri, vj = run('CVHFnra2kl_li_s1kj')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,li->kj', eri, dm[l0:l0+dl,i0:i0+di])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,j0:j0+dj], ref))
ref =-numpy.einsum('ijkl,li->kj', eri.transpose(0,1,3,2), dm[k0:k0+dk,i0:i0+di])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,j0:j0+dj], ref))
def test_nra4ij_ji_s1kl(self):
eri, vj = run('CVHFnra4ij_ji_s1kl')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,ji->kl', eri, dm[j0:j0+dj,i0:i0+di])
ref-= numpy.einsum('ijkl,ji->kl', eri.transpose(1,0,2,3), dm[i0:i0+di,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,l0:l0+dl], ref))
ref = numpy.einsum('ijkl,ji->kl', eri.transpose(0,1,3,2), dm[j0:j0+dj,i0:i0+di])
ref-= numpy.einsum('ijkl,ji->kl', eri.transpose(1,0,3,2), dm[i0:i0+di,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,k0:k0+dk], ref))
def test_nra4ij_lk_s1ij(self):
eri, vj = run('CVHFnra4ij_lk_s1ij')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,lk->ij', eri, dm[l0:l0+dl,k0:k0+dk])
ref+= numpy.einsum('ijkl,lk->ij', eri.transpose(0,1,3,2), dm[k0:k0+dk,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[i0:i0+di,j0:j0+dj], ref))
ref =-numpy.einsum('ijkl,lk->ij', eri.transpose(1,0,2,3), dm[l0:l0+dl,k0:k0+dk])
ref+=-numpy.einsum('ijkl,lk->ij', eri.transpose(1,0,3,2), dm[k0:k0+dk,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,i0:i0+di], ref))
def test_nra4ij_jk_s1il(self):
eri, vj = run('CVHFnra4ij_jk_s1il')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,jk->il', eri, dm[j0:j0+dj,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[i0:i0+di,l0:l0+dl], ref))
ref = numpy.einsum('ijkl,jk->il', eri.transpose(0,1,3,2), dm[j0:j0+dj,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[i0:i0+di,k0:k0+dk], ref))
ref =-numpy.einsum('ijkl,jk->il', eri.transpose(1,0,2,3), dm[i0:i0+di,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,l0:l0+dl], ref))
ref =-numpy.einsum('ijkl,jk->il', eri.transpose(1,0,3,2), dm[i0:i0+di,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,k0:k0+dk], ref))
def test_nra4ij_li_s1kj(self):
eri, vj = run('CVHFnra4ij_li_s1kj')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,li->kj', eri, dm[l0:l0+dl,i0:i0+di])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,j0:j0+dj], ref))
ref = numpy.einsum('ijkl,li->kj', eri.transpose(0,1,3,2), dm[k0:k0+dk,i0:i0+di])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,j0:j0+dj], ref))
ref =-numpy.einsum('ijkl,li->kj', eri.transpose(1,0,2,3), dm[l0:l0+dl,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,i0:i0+di], ref))
ref =-numpy.einsum('ijkl,li->kj', eri.transpose(1,0,3,2), dm[k0:k0+dk,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,i0:i0+di], ref))
def test_nra4kl_ji_s1kl(self):
eri, vj = run('CVHFnra4kl_ji_s1kl')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,ji->kl', eri, dm[j0:j0+dj,i0:i0+di])
ref+= numpy.einsum('ijkl,ji->kl', eri.transpose(1,0,2,3), dm[i0:i0+di,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,l0:l0+dl], ref))
ref =-numpy.einsum('ijkl,ji->kl', eri.transpose(0,1,3,2), dm[j0:j0+dj,i0:i0+di])
ref+=-numpy.einsum('ijkl,ji->kl', eri.transpose(1,0,3,2), dm[i0:i0+di,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,k0:k0+dk], ref))
def test_nra4kl_lk_s1ij(self):
eri, vj = run('CVHFnra4kl_lk_s1ij')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,lk->ij', eri, dm[l0:l0+dl,k0:k0+dk])
ref-= numpy.einsum('ijkl,lk->ij', eri.transpose(0,1,3,2), dm[k0:k0+dk,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[i0:i0+di,j0:j0+dj], ref))
ref = numpy.einsum('ijkl,lk->ij', eri.transpose(1,0,2,3), dm[l0:l0+dl,k0:k0+dk])
ref-= numpy.einsum('ijkl,lk->ij', eri.transpose(1,0,3,2), dm[k0:k0+dk,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,i0:i0+di], ref))
def test_nra4kl_jk_s1il(self):
eri, vj = run('CVHFnra4kl_jk_s1il')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,jk->il', eri, dm[j0:j0+dj,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[i0:i0+di,l0:l0+dl], ref))
ref =-numpy.einsum('ijkl,jk->il', eri.transpose(0,1,3,2), dm[j0:j0+dj,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[i0:i0+di,k0:k0+dk], ref))
ref = numpy.einsum('ijkl,jk->il', eri.transpose(1,0,2,3), dm[i0:i0+di,k0:k0+dk])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,l0:l0+dl], ref))
ref =-numpy.einsum('ijkl,jk->il', eri.transpose(1,0,3,2), dm[i0:i0+di,l0:l0+dl])
self.assertTrue(numpy.allclose(vj[j0:j0+dj,k0:k0+dk], ref))
def test_nra4kl_li_s1kj(self):
eri, vj = run('CVHFnra4kl_li_s1kj')
di, dj, dk, dl = eri.shape
ref = numpy.einsum('ijkl,li->kj', eri, dm[l0:l0+dl,i0:i0+di])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,j0:j0+dj], ref))
ref =-numpy.einsum('ijkl,li->kj', eri.transpose(0,1,3,2), dm[k0:k0+dk,i0:i0+di])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,j0:j0+dj], ref))
ref = numpy.einsum('ijkl,li->kj', eri.transpose(1,0,2,3), dm[l0:l0+dl,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[k0:k0+dk,i0:i0+di], ref))
ref =-numpy.einsum('ijkl,li->kj', eri.transpose(1,0,3,2), dm[k0:k0+dk,j0:j0+dj])
self.assertTrue(numpy.allclose(vj[l0:l0+dl,i0:i0+di], ref))
if __name__ == '__main__':
print('Full Tests for nrdot')
unittest.main()
| [
"[email protected]"
] | |
da1b593cc79b59596b9f76e1fa5c64cf3aed4938 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnmaccabe.py | b1fe5f5d0c2913389aa178ed89bcd9231758a8f9 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 42 | py | ii = [('CoolWHM.py', 1), ('TaylIF.py', 5)] | [
"[email protected]"
] | |
e06d7832c60e9bb8324a477b100c0e87fb4cfe26 | c3274a346ddcf09c9ec70e1402daa34ad0ac44af | /examples/dataframe/dataframe_sum.py | f8171d3e92e95fabd061003fe34051a41b89a867 | [
"BSD-2-Clause"
] | permissive | vishalbelsare/hpat | cb6b39ddeb07c319c88e132df9cee4c6adb0a415 | eb5efbad9bfec67db88b52474c4bd00238b61283 | refs/heads/master | 2023-04-06T14:59:35.723023 | 2023-03-10T16:44:51 | 2023-03-10T16:44:51 | 161,796,133 | 0 | 0 | BSD-2-Clause | 2023-03-19T09:18:25 | 2018-12-14T14:35:55 | Python | UTF-8 | Python | false | false | 1,882 | py | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
Expected result:
A 1.0
B 10.0
C inf
dtype: float64
"""
import pandas as pd
import numpy as np
from numba import njit
@njit
def dataframe_sum():
df = pd.DataFrame({"A": [.2, .0, .6, .2],
"B": [2, 0, 6, 2],
"C": [-1, np.nan, 1, np.inf]})
return df.sum()
print(dataframe_sum())
| [
"[email protected]"
] | |
d6c37570c50bc270f95d17f5efe600430681f611 | 481517a085014aefba963d29ff52b56bef6a393e | /ha.py | ad5da57e38e51a032ce98c31194269c8fb7d6e24 | [] | no_license | 27Saidou/cours_python | 6d916fe63652e0463bd995dbb9a3ec72c74f4c3d | 91820b826ced24bed98525429096e32ff4c036db | refs/heads/main | 2022-01-09T09:58:32.514032 | 2022-01-04T18:37:56 | 2022-01-04T18:37:56 | 214,328,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | import pandas as pd
calories = {"day1": 420, "day2": 380, "day3": 390}
myvar = pd.Series(calories)
print(myvar)  # print the pandas Series built from the dict | [
"[email protected]"
] | |
2d221ec37b42ee7d6d78140f67e53a4798e29806 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/appconfiguration/v20200701preview/list_configuration_store_keys.py | 1c5a925503a75002b4536769dfcd2cc6de923821 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 3,284 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListConfigurationStoreKeysResult',
'AwaitableListConfigurationStoreKeysResult',
'list_configuration_store_keys',
]
@pulumi.output_type
class ListConfigurationStoreKeysResult:
"""
The result of a request to list API keys.
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> Optional[str]:
"""
The URI that can be used to request the next set of paged results.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.ApiKeyResponseResult']]:
"""
The collection value.
"""
return pulumi.get(self, "value")
class AwaitableListConfigurationStoreKeysResult(ListConfigurationStoreKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListConfigurationStoreKeysResult(
next_link=self.next_link,
value=self.value)
def list_configuration_store_keys(config_store_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
skip_token: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListConfigurationStoreKeysResult:
"""
Use this data source to access information about an existing resource.
:param str config_store_name: The name of the configuration store.
:param str resource_group_name: The name of the resource group to which the container registry belongs.
:param str skip_token: A skip token is used to continue retrieving items after an operation returns a partial result. If a previous response contains a nextLink element, the value of the nextLink element will include a skipToken parameter that specifies a starting point to use for subsequent calls.
"""
__args__ = dict()
__args__['configStoreName'] = config_store_name
__args__['resourceGroupName'] = resource_group_name
__args__['skipToken'] = skip_token
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:appconfiguration/v20200701preview:listConfigurationStoreKeys', __args__, opts=opts, typ=ListConfigurationStoreKeysResult).value
return AwaitableListConfigurationStoreKeysResult(
next_link=__ret__.next_link,
value=__ret__.value)
| [
"[email protected]"
] | |
670ee8f2f3d080e83ece3e6e319c7b2d5c3ec218 | d8dfe2bb29965f2bf00724caaa4d5f3f02715002 | /crater/operations/expand_dims.py | c5c91efc7cd1005456f0b37dce2f1d1912a19e56 | [] | no_license | malyvsen/kth-deep-learning | 20fc0d89c0b81ea97af77b627f0ee46458310126 | 17b3140043aaa81cf86a6a9b7fed3295ee48b061 | refs/heads/main | 2023-05-05T10:02:29.764591 | 2021-05-13T08:35:25 | 2021-05-13T08:35:25 | 353,112,929 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | from typing import Union, Tuple
import numpy as np
from crater.tensor import Tensor
from crater.gradient import Gradients, Gradient
from crater.utils import tuplify
from .coalesce import coalesce
def expand_dims(tensor: Tensor, axes: Union[None, int, Tuple[int]] = None):
tensor = coalesce(tensor)
axes = () if axes is None else tuplify(axes)
return Tensor.from_numpy(
data=np.expand_dims(tensor.data, axes),
backward=lambda gradient: Gradients.accumulate(
Gradient(tensor=tensor, gradient=np.squeeze(gradient, axes))
),
)
Tensor.expand_dims = expand_dims
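# Gradient rule in words: expand_dims inserts singleton axes at `axes`, so the
# backward pass squeezes the incoming gradient over those same axes before
# accumulating it onto the source tensor. E.g. (shapes illustrative)
# expand_dims(t, axes=0) maps data of shape (3, 4) to (1, 3, 4), and a
# gradient of shape (1, 3, 4) flows back as shape (3, 4).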
| [
"[email protected]"
] | |
104f75c625a5c419f721c085bd4d90f8ac2b482c | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /third_party/blink/tools/blinkpy/w3c/monorail_unittest.py | 14a22ac6fb4b1feb36e0116e4dc30575cf05e1d7 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 4,252 | py | # -*- coding: utf-8 -*-
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from blinkpy.w3c.monorail import MonorailAPI, MonorailIssue
class MonorailIssueTest(unittest.TestCase):
def test_init_succeeds(self):
# Minimum example.
MonorailIssue('chromium', summary='test', status='Untriaged')
# All fields.
MonorailIssue(
'chromium',
summary='test',
status='Untriaged',
description='body',
cc=['[email protected]'],
labels=['Flaky'],
components=['Infra'])
def test_init_fills_project_id(self):
issue = MonorailIssue('chromium', summary='test', status='Untriaged')
self.assertEqual(issue.body['projectId'], 'chromium')
def test_unicode(self):
issue = MonorailIssue(
'chromium',
summary=u'test',
status='Untriaged',
description=u'ABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ',
cc=['[email protected]', '[email protected]'],
labels=['Flaky'],
components=['Infra'])
self.assertEqual(type(unicode(issue)), unicode)
self.assertEqual(
unicode(issue),
(u'Monorail issue in project chromium\n'
u'Summary: test\n'
u'Status: Untriaged\n'
u'CC: [email protected], [email protected]\n'
u'Components: Infra\n'
u'Labels: Flaky\n'
u'Description:\nABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ\n'))
def test_init_unknown_fields(self):
with self.assertRaises(AssertionError):
MonorailIssue('chromium', component='foo')
def test_init_missing_required_fields(self):
with self.assertRaises(AssertionError):
MonorailIssue('', summary='test', status='Untriaged')
with self.assertRaises(AssertionError):
MonorailIssue('chromium', summary='', status='Untriaged')
with self.assertRaises(AssertionError):
MonorailIssue('chromium', summary='test', status='')
def test_init_unknown_status(self):
with self.assertRaises(AssertionError):
MonorailIssue('chromium', summary='test', status='unknown')
def test_init_string_passed_for_list_fields(self):
with self.assertRaises(AssertionError):
MonorailIssue(
'chromium',
summary='test',
status='Untriaged',
cc='[email protected]')
with self.assertRaises(AssertionError):
MonorailIssue(
'chromium',
summary='test',
status='Untriaged',
components='Infra')
with self.assertRaises(AssertionError):
MonorailIssue(
'chromium', summary='test', status='Untriaged', labels='Flaky')
def test_new_chromium_issue(self):
issue = MonorailIssue.new_chromium_issue(
'test',
description='body',
cc=['[email protected]'],
components=['Infra'])
self.assertEqual(issue.project_id, 'chromium')
self.assertEqual(issue.body['summary'], 'test')
self.assertEqual(issue.body['description'], 'body')
self.assertEqual(issue.body['cc'], ['[email protected]'])
self.assertEqual(issue.body['components'], ['Infra'])
def test_crbug_link(self):
self.assertEqual(
MonorailIssue.crbug_link(12345), 'https://crbug.com/12345')
class MonorailAPITest(unittest.TestCase):
def test_fix_cc_field_in_body(self):
original_body = {
'summary': 'test bug',
'cc': ['[email protected]', '[email protected]']
}
# pylint: disable=protected-access
self.assertEqual(
MonorailAPI._fix_cc_in_body(original_body), {
'summary': 'test bug',
'cc': [{
'name': '[email protected]'
}, {
'name': '[email protected]'
}]
})
| [
"[email protected]"
] | |
420a38deaafbfe305203da3b4483510a880f60ab | 98bd2625dbcc955deb007a07129cce8b9edb3c79 | /simulate_barseq_tnseq.py | 85aaae7b4ea0ea4073de7d774c6a359a4df634fe | [] | no_license | melanieabrams/bremdata | 70d0a374ab5dff32f6d9bbe0a3959a617a90ffa8 | df7a12c72a29cca4760333445fafe55bb6e40247 | refs/heads/master | 2021-12-26T01:57:25.684288 | 2021-09-30T22:48:05 | 2021-09-30T22:48:05 | 166,273,567 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,755 | py | import regex
import numpy as np
import sys
import subprocess as sp
import random
# HELP #
if len(sys.argv) == 1:
print("USAGE: python3 simulate_barseq_tnseq out_directory fastq_file1 fastq_file2...")
exit()
# INPUT #
num_orig = 20 #number of unmodified reads to preserve. This will make sure my modified version for barseq of map-and-blat can still filter out reads w/o Tn.
num_new = 1000 # number of new reads
num_duplicate = 100 #number of new reads with duplicate barcodes
bc_length = 20 # number of nt of bc
genome_nt = 50 #number of nt of genome in simulated read
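# With these defaults each input file contributes 4 * (num_orig + num_new)
# = 4 * (20 + 1000) = 4080 fastq lines (4 lines per read); the first
# num_orig = 20 reads keep their original sequence and the rest are rebuilt
# around a synthetic barcode construct.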
# BEGIN FUNCTIONS #
def generate_bc(length=20):
'''returns a random barcode of specified length'''
random_bc = ''
nucleotides = ['A','T','G','C']
for i in range(length):
random_bc+=random.choice(nucleotides)
return random_bc
def generate_read(genome_seq,barcode ='random'):
'''returns a simulated barseq read with P5 and P7 adaptors (and Rd1 and Rd2 universal sequence primer) and a chunk of genome'''
flanking_bc_left = 'AATGATACGGCGACCACCGAGATCTACACTCTTTCCCTACACGACGCTCTTCCGATCTNNNNNNAGTATGTAACCCTGATGTCCACGAGGTCTCT'
if barcode == 'random':
barcode= generate_bc()
flanking_bc_right = 'CGTACGCTGCAGGTCGACAACGTAAAACACATGCGTCAATTTTACGCATGATTATCTTTAACGTACGTCACAATATGATTATCTTTCTAGGGTTAA'
after_genomic = 'AGATCGGAAGAGCACACGTCTGAACTCCAGTCACATCACGATCTCGTATGCCGTCTTC'
read = flanking_bc_left + barcode + flanking_bc_right + genome_seq + after_genomic
return read
def AddN20(fastq_file): ## add a random N20 plus primers to reads with transposon, so that normal Tn-Seq data looks like it was made with a barcode
wf = open(out_dir+fastq_filename+'_simulatedn20','w') # outfile for the fake-barcoded reads that will be mapped
line_count = 0
tn_count = 0
with open(fastq_file) as f:
head = [next(f) for x in range(4*(num_orig+num_new))]
for line in head:
line_count +=1
if line_count % 4 == 1:
header = line
elif line_count % 4 == 2:
read = line.strip()
elif line_count % 4 == 0:
nt_from_read =read[75:75+genome_nt]
if line_count >4*num_orig:
if line_count>4*(num_new-num_duplicate):
read = generate_read(nt_from_read,barcode='random')
else:
read = generate_read(nt_from_read,barcode='TATTGGAAAACTATAGGGAC')
wf.writelines(">simulatedBarSeq"+header)
wf.writelines(read+"\n")
#### START PROGRAM ####
out_dir = sys.argv[1]
read_files = sys.argv[2:]
for read_file in read_files:
fastq_filename = read_file.split("/")[-1]
AddN20(read_file)
| [
"[email protected]"
] | |
da29fd239650f5e1ed7b3fbb80213b271705d874 | edde333afca3ca4977bec7b38271d8c9e8448d85 | /mirage/projectstartup/django_app_create.py | 377edc7000d92c5c2d4e56add53610298c23d128 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | TrendingTechnology/mirage-django-lts | 0219450155d9ce122196b66045de2bee13fa6bfd | b9d74006c1b64f5f5b33049b5a1701de58b478b3 | refs/heads/master | 2023-05-29T20:25:45.865865 | 2021-06-16T01:20:23 | 2021-06-16T01:20:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,119 | py | # -*- coding: utf-8 -*-
"""
Copyright 2017-2020 Shota Shimazu.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from mirage import proj
from mirage.flow import Workflow
from mirage import system as mys
from mirage.template import readme_md, gitignore
from mirage.miragefile import source
class StartupWorkFlow(Workflow):
def constructor(self):
self._js_runtime = self._option
def main(self):
# Check
try:
self._check_before()
except:
return
# Input information
mys.log("Please type your new Django application information.")
# Check namespace
try:
self._project_name = mys.log("Project name", withInput = True)
self._check_namesapce(self._project_name)
except:
mys.log("Project \"{0}\" is already exists.".format(self._project_name), withError = True,
errorDetail = "Please remove duplication of Django project namespace.")
return
version = mys.log("App version", withInput = True, default = "0.0.1")
author = mys.log("Author name", withInput = True)
email = mys.log("Email", withInput = True)
git_url = mys.log("Git URL", withInput = True)
license_name = mys.log("License", withInput = True)
description = mys.log("Description", withInput = True)
copyrightor = mys.log("Copyrightor", withInput = True, default = author)
self._create_new_django_app()
# Create logging instance
logger = mys.Progress()
with proj.InDir("./" + self._project_name):
# Generate .gitignore
#log("Generating gitignore...")
logger.write("Generating gitignore...", withLazy = True)
self._create_template_git_project()
# Generate README.md
logger.update("Generating readme...", withLazy = True)
self._create_docs(description)
# Generate Miragefile
logger.update("Generating Miragefile...", withLazy = True)
self._create_miragefile(version, author, email, git_url, license_name, description, copyrightor)
# Add remote repo
logger.update("Adding remote repository...", withLazy = True)
mys.command("git remote add origin " + git_url)
# Completed
logger.update("Completed!")
def _create_new_django_app(self):
mys.command("django-admin startproject " + self._project_name)
def _create_miragefile(self, version, author, email, git_url, license_name, description, copyrightors):
with open("Miragefile", "w") as f:
f.write(source.create(self._project_name, version, author, email, git_url, license_name, description, copyrightors))
def _create_template_git_project(self):
ignorance = gitignore.src()
with open(".gitignore", "w") as f:
f.write(ignorance)
mys.command("git init")
def _create_docs(self, description):
with open("README.md", "a") as readme:
readme.write(readme_md.src(self._project_name, description))
def _check_before(self):
try:
import django
except ImportError:
mys.log("Failed to import Django!", withError = True,
errorDetail = "You have to install Django before creating a new Django project.")
raise ImportError
def _check_namesapce(self, name):
if os.path.exists(name):
raise FileExistsError
| [
"[email protected]"
] | |
4dedc840e56c94ed1dd1857f53ca4926ff01e49f | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /zelle_graphics/hello_world.py | 7b041997c7b74d4bae882c7bb8f6ac20efcc7645 | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from graphics import *
win = GraphWin("My Circle", 100, 100)
c = Circle(Point(50, 50), 10)
c.draw(win)
win.getMouse() # Pause to view result
win.close()
| [
"[email protected]"
] | |
182d05ded57370c6cfa6cbc2097c846975a841d1 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/web/v20200901/list_web_app_function_secrets_slot.py | d97937e5b7161052acdff721dd2dc7821a150932 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 2,925 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListWebAppFunctionSecretsSlotResult',
'AwaitableListWebAppFunctionSecretsSlotResult',
'list_web_app_function_secrets_slot',
]
@pulumi.output_type
class ListWebAppFunctionSecretsSlotResult:
"""
Function secrets.
"""
def __init__(__self__, key=None, trigger_url=None):
if key and not isinstance(key, str):
raise TypeError("Expected argument 'key' to be a str")
pulumi.set(__self__, "key", key)
if trigger_url and not isinstance(trigger_url, str):
raise TypeError("Expected argument 'trigger_url' to be a str")
pulumi.set(__self__, "trigger_url", trigger_url)
@property
@pulumi.getter
def key(self) -> Optional[str]:
"""
Secret key.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="triggerUrl")
def trigger_url(self) -> Optional[str]:
"""
Trigger URL.
"""
return pulumi.get(self, "trigger_url")
class AwaitableListWebAppFunctionSecretsSlotResult(ListWebAppFunctionSecretsSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListWebAppFunctionSecretsSlotResult(
key=self.key,
trigger_url=self.trigger_url)
def list_web_app_function_secrets_slot(function_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppFunctionSecretsSlotResult:
"""
Function secrets.
:param str function_name: Function name.
:param str name: Site name.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot.
"""
__args__ = dict()
__args__['functionName'] = function_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20200901:listWebAppFunctionSecretsSlot', __args__, opts=opts, typ=ListWebAppFunctionSecretsSlotResult).value
return AwaitableListWebAppFunctionSecretsSlotResult(
key=__ret__.key,
trigger_url=__ret__.trigger_url)
| [
"[email protected]"
] | |
8daeb7d575a3677e6cbd190d314c2986273f7bc5 | 6bd51065a8ecd097e7f80ee3c6acd16a083be350 | /tensorflow/contrib/framework/__init__.py | 8421ba7c0423c6ed274f92ba74930822d0171e05 | [
"Apache-2.0"
] | permissive | cglewis/tensorflow | 29b50dadbdb599bacd06af960689bc518a472de1 | 6eac524ef63728bdc10c40f95d30c94aede5f4ea | refs/heads/master | 2023-04-07T18:38:29.752739 | 2017-10-31T17:56:48 | 2017-10-31T17:56:48 | 109,033,012 | 0 | 0 | Apache-2.0 | 2023-04-04T00:37:48 | 2017-10-31T17:54:48 | C++ | UTF-8 | Python | false | false | 2,622 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework utilities.
See the @{$python/contrib.framework} guide.
@@assert_same_float_dtype
@@assert_scalar
@@assert_scalar_int
@@convert_to_tensor_or_sparse_tensor
@@get_graph_from_inputs
@@is_numeric_tensor
@@is_non_decreasing
@@is_strictly_increasing
@@is_tensor
@@reduce_sum_n
@@remove_squeezable_dimensions
@@with_shape
@@with_same_shape
@@deprecated
@@deprecated_args
@@deprecated_arg_values
@@arg_scope
@@add_arg_scope
@@current_arg_scope
@@has_arg_scope
@@arg_scoped_arguments
@@prepend_name_scope
@@strip_name_scope
@@add_model_variable
@@assert_global_step
@@assert_or_get_global_step
@@assign_from_checkpoint
@@assign_from_checkpoint_fn
@@assign_from_values
@@assign_from_values_fn
@@create_global_step
@@filter_variables
@@get_global_step
@@get_or_create_global_step
@@get_local_variables
@@get_model_variables
@@get_name_scope
@@get_trainable_variables
@@get_unique_variable
@@get_variables_by_name
@@get_variables_by_suffix
@@get_variable_full_name
@@get_variables_to_restore
@@get_variables
@@local_variable
@@model_variable
@@variable
@@VariableDeviceChooser
@@zero_initializer
@@load_checkpoint
@@list_variables
@@load_variable
@@init_from_checkpoint
@@load_and_remap_matrix_initializer
@@load_embedding_initializer
@@load_linear_multiclass_bias_initializer
@@load_variable_slot_initializer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.framework.python.framework import *
from tensorflow.contrib.framework.python.ops import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.framework.ops import prepend_name_scope
from tensorflow.python.framework.ops import strip_name_scope
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['nest']
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| [
"[email protected]"
] | |
bcc8d9ab29c19e61a07d83c51e7492b792ffa9a4 | 689a78e08c957abc02ea5f89fb657b1f78f88b6e | /det3d/core/sampler/sample_ops.py | d50746001a249a608396047a47a23836b94e4c36 | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | tianweiy/CenterPoint | 2bb9a7def8d4bf87b66af2e3b671736eae6fa275 | d3a248fa56db2601860d576d5934d00fee9916eb | refs/heads/master | 2023-08-30T23:11:49.528882 | 2022-10-24T13:09:52 | 2022-10-24T13:09:52 | 274,006,091 | 1,692 | 476 | MIT | 2023-05-06T10:30:06 | 2020-06-22T00:32:05 | Python | UTF-8 | Python | false | false | 15,185 | py | import copy
import pathlib
import pickle
import time
from functools import partial, reduce
import numpy as np
from det3d.core.bbox import box_np_ops
from det3d.core.sampler import preprocess as prep
from det3d.utils.check import shape_mergeable
class DataBaseSamplerV2:
def __init__(
self,
db_infos,
groups,
db_prepor=None,
rate=1.0,
global_rot_range=None,
logger=None,
):
for k, v in db_infos.items():
logger.info(f"load {len(v)} {k} database infos")
if db_prepor is not None:
db_infos = db_prepor(db_infos)
logger.info("After filter database:")
for k, v in db_infos.items():
logger.info(f"load {len(v)} {k} database infos")
self.db_infos = db_infos
self._rate = rate
self._groups = groups
self._group_db_infos = {}
self._group_name_to_names = []
self._sample_classes = []
self._sample_max_nums = []
self._use_group_sampling = False # slower
if any([len(g) > 1 for g in groups]):
self._use_group_sampling = True
if not self._use_group_sampling:
self._group_db_infos = self.db_infos # just use db_infos
for group_info in groups:
group_names = list(group_info.keys())
self._sample_classes += group_names
self._sample_max_nums += list(group_info.values())
else:
for group_info in groups:
group_dict = {}
group_names = list(group_info.keys())
group_name = ", ".join(group_names)
self._sample_classes += group_names
self._sample_max_nums += list(group_info.values())
self._group_name_to_names.append((group_name, group_names))
# self._group_name_to_names[group_name] = group_names
for name in group_names:
for item in db_infos[name]:
gid = item["group_id"]
if gid not in group_dict:
group_dict[gid] = [item]
else:
group_dict[gid] += [item]
if group_name in self._group_db_infos:
raise ValueError("group must be unique")
group_data = list(group_dict.values())
self._group_db_infos[group_name] = group_data
info_dict = {}
if len(group_info) > 1:
for group in group_data:
names = [item["name"] for item in group]
names = sorted(names)
group_name = ", ".join(names)
if group_name in info_dict:
info_dict[group_name] += 1
else:
info_dict[group_name] = 1
print(info_dict)
self._sampler_dict = {}
for k, v in self._group_db_infos.items():
self._sampler_dict[k] = prep.BatchSampler(v, k)
self._enable_global_rot = False
if global_rot_range is not None:
if not isinstance(global_rot_range, (list, tuple, np.ndarray)):
global_rot_range = [-global_rot_range, global_rot_range]
else:
assert shape_mergeable(global_rot_range, [2])
if np.abs(global_rot_range[0] - global_rot_range[1]) >= 1e-3:
self._enable_global_rot = True
self._global_rot_range = global_rot_range
@property
def use_group_sampling(self):
return self._use_group_sampling
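    # sample_all: for each configured class (or class group), draw up to
    # (max sample num - current number of ground-truth boxes of that class)
    # candidates from the database, drop candidates whose boxes collide with
    # the scene, load the corresponding object point clouds from disk, and
    # return the sampled names/boxes/points/masks so they can be pasted into
    # the current frame as ground-truth augmentation.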
def sample_all(
self,
root_path,
gt_boxes,
gt_names,
num_point_features,
random_crop=False,
gt_group_ids=None,
calib=None,
road_planes=None,
):
sampled_num_dict = {}
sample_num_per_class = []
for class_name, max_sample_num in zip(
self._sample_classes, self._sample_max_nums
):
sampled_num = int(
max_sample_num - np.sum([n == class_name for n in gt_names])
)
sampled_num = np.round(self._rate * sampled_num).astype(np.int64)
sampled_num_dict[class_name] = sampled_num
sample_num_per_class.append(sampled_num)
sampled_groups = self._sample_classes
if self._use_group_sampling:
assert gt_group_ids is not None
sampled_groups = []
sample_num_per_class = []
for group_name, class_names in self._group_name_to_names:
sampled_nums_group = [sampled_num_dict[n] for n in class_names]
sampled_num = np.max(sampled_nums_group)
sample_num_per_class.append(sampled_num)
sampled_groups.append(group_name)
total_group_ids = gt_group_ids
sampled = []
sampled_gt_boxes = []
avoid_coll_boxes = gt_boxes
for class_name, sampled_num in zip(sampled_groups, sample_num_per_class):
if sampled_num > 0:
if self._use_group_sampling:
sampled_cls = self.sample_group(
class_name, sampled_num, avoid_coll_boxes, total_group_ids
)
else:
sampled_cls = self.sample_class_v2(
class_name, sampled_num, avoid_coll_boxes
)
sampled += sampled_cls
if len(sampled_cls) > 0:
if len(sampled_cls) == 1:
sampled_gt_box = sampled_cls[0]["box3d_lidar"][np.newaxis, ...]
else:
sampled_gt_box = np.stack(
[s["box3d_lidar"] for s in sampled_cls], axis=0
)
sampled_gt_boxes += [sampled_gt_box]
avoid_coll_boxes = np.concatenate(
[avoid_coll_boxes, sampled_gt_box], axis=0
)
if self._use_group_sampling:
if len(sampled_cls) == 1:
sampled_group_ids = np.array(sampled_cls[0]["group_id"])[
np.newaxis, ...
]
else:
sampled_group_ids = np.stack(
[s["group_id"] for s in sampled_cls], axis=0
)
total_group_ids = np.concatenate(
[total_group_ids, sampled_group_ids], axis=0
)
if len(sampled) > 0:
sampled_gt_boxes = np.concatenate(sampled_gt_boxes, axis=0)
num_sampled = len(sampled)
s_points_list = []
for info in sampled:
try:
s_points = np.fromfile(
str(pathlib.Path(root_path) / info["path"]), dtype=np.float32
).reshape(-1, num_point_features)
if "rot_transform" in info:
rot = info["rot_transform"]
s_points[:, :3] = box_np_ops.rotation_points_single_angle(
                        s_points[:, :3], rot, axis=2
)
s_points[:, :3] += info["box3d_lidar"][:3]
s_points_list.append(s_points)
# print(pathlib.Path(info["path"]).stem)
except Exception:
print(str(pathlib.Path(root_path) / info["path"]))
continue
if random_crop:
s_points_list_new = []
assert calib is not None
rect = calib["rect"]
Trv2c = calib["Trv2c"]
P2 = calib["P2"]
gt_bboxes = box_np_ops.box3d_to_bbox(sampled_gt_boxes, rect, Trv2c, P2)
crop_frustums = prep.random_crop_frustum(gt_bboxes, rect, Trv2c, P2)
for i in range(crop_frustums.shape[0]):
s_points = s_points_list[i]
mask = prep.mask_points_in_corners(
s_points, crop_frustums[i : i + 1]
).reshape(-1)
num_remove = np.sum(mask)
if num_remove > 0 and (s_points.shape[0] - num_remove) > 15:
s_points = s_points[np.logical_not(mask)]
s_points_list_new.append(s_points)
s_points_list = s_points_list_new
ret = {
"gt_names": np.array([s["name"] for s in sampled]),
"difficulty": np.array([s["difficulty"] for s in sampled]),
"gt_boxes": sampled_gt_boxes,
"points": np.concatenate(s_points_list, axis=0),
"gt_masks": np.ones((num_sampled,), dtype=np.bool_),
}
if self._use_group_sampling:
ret["group_ids"] = np.array([s["group_id"] for s in sampled])
else:
ret["group_ids"] = np.arange(
gt_boxes.shape[0], gt_boxes.shape[0] + len(sampled)
)
else:
ret = None
return ret
def sample(self, name, num):
if self._use_group_sampling:
group_name = name
ret = self._sampler_dict[group_name].sample(num)
groups_num = [len(l) for l in ret]
return reduce(lambda x, y: x + y, ret), groups_num
else:
ret = self._sampler_dict[name].sample(num)
return ret, np.ones((len(ret),), dtype=np.int64)
def sample_v1(self, name, num):
if isinstance(name, (list, tuple)):
group_name = ", ".join(name)
ret = self._sampler_dict[group_name].sample(num)
groups_num = [len(l) for l in ret]
return reduce(lambda x, y: x + y, ret), groups_num
else:
ret = self._sampler_dict[name].sample(num)
return ret, np.ones((len(ret),), dtype=np.int64)
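    # sample_class_v2: draw `num` candidate objects of class `name`, optionally
    # perturb their position/heading when a global rotation range is enabled,
    # then run a bird's-eye-view box collision test against the ground-truth
    # boxes and the other candidates, keeping only non-colliding samples.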
def sample_class_v2(self, name, num, gt_boxes):
sampled = self._sampler_dict[name].sample(num)
sampled = copy.deepcopy(sampled)
num_gt = gt_boxes.shape[0]
num_sampled = len(sampled)
gt_boxes_bv = box_np_ops.center_to_corner_box2d(
gt_boxes[:, 0:2], gt_boxes[:, 3:5], gt_boxes[:, -1]
)
sp_boxes = np.stack([i["box3d_lidar"] for i in sampled], axis=0)
valid_mask = np.zeros([gt_boxes.shape[0]], dtype=np.bool_)
valid_mask = np.concatenate(
[valid_mask, np.ones([sp_boxes.shape[0]], dtype=np.bool_)], axis=0
)
boxes = np.concatenate([gt_boxes, sp_boxes], axis=0).copy()
if self._enable_global_rot:
# place samples to any place in a circle.
prep.noise_per_object_v3_(
boxes, None, valid_mask, 0, 0, self._global_rot_range, num_try=100
)
sp_boxes_new = boxes[gt_boxes.shape[0] :]
sp_boxes_bv = box_np_ops.center_to_corner_box2d(
sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5], sp_boxes_new[:, -1]
)
total_bv = np.concatenate([gt_boxes_bv, sp_boxes_bv], axis=0)
# coll_mat = collision_test_allbox(total_bv)
coll_mat = prep.box_collision_test(total_bv, total_bv)
diag = np.arange(total_bv.shape[0])
coll_mat[diag, diag] = False
valid_samples = []
for i in range(num_gt, num_gt + num_sampled):
if coll_mat[i].any():
coll_mat[i] = False
coll_mat[:, i] = False
else:
if self._enable_global_rot:
sampled[i - num_gt]["box3d_lidar"][:2] = boxes[i, :2]
sampled[i - num_gt]["box3d_lidar"][-1] = boxes[i, -1]
sampled[i - num_gt]["rot_transform"] = (
boxes[i, -1] - sp_boxes[i - num_gt, -1]
)
valid_samples.append(sampled[i - num_gt])
return valid_samples
def sample_group(self, name, num, gt_boxes, gt_group_ids):
sampled, group_num = self.sample(name, num)
sampled = copy.deepcopy(sampled)
# rewrite sampled group id to avoid duplicated with gt group ids
gid_map = {}
max_gt_gid = np.max(gt_group_ids)
sampled_gid = max_gt_gid + 1
for s in sampled:
gid = s["group_id"]
if gid in gid_map:
s["group_id"] = gid_map[gid]
else:
gid_map[gid] = sampled_gid
s["group_id"] = sampled_gid
sampled_gid += 1
num_gt = gt_boxes.shape[0]
gt_boxes_bv = box_np_ops.center_to_corner_box2d(
gt_boxes[:, 0:2], gt_boxes[:, 3:5], gt_boxes[:, -1]
)
sp_boxes = np.stack([i["box3d_lidar"] for i in sampled], axis=0)
sp_group_ids = np.stack([i["group_id"] for i in sampled], axis=0)
valid_mask = np.zeros([gt_boxes.shape[0]], dtype=np.bool_)
valid_mask = np.concatenate(
[valid_mask, np.ones([sp_boxes.shape[0]], dtype=np.bool_)], axis=0
)
boxes = np.concatenate([gt_boxes, sp_boxes], axis=0).copy()
group_ids = np.concatenate([gt_group_ids, sp_group_ids], axis=0)
if self._enable_global_rot:
# place samples to any place in a circle.
prep.noise_per_object_v3_(
boxes,
None,
valid_mask,
0,
0,
self._global_rot_range,
group_ids=group_ids,
num_try=100,
)
sp_boxes_new = boxes[gt_boxes.shape[0] :]
sp_boxes_bv = box_np_ops.center_to_corner_box2d(
sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5], sp_boxes_new[:, -1]
)
total_bv = np.concatenate([gt_boxes_bv, sp_boxes_bv], axis=0)
# coll_mat = collision_test_allbox(total_bv)
coll_mat = prep.box_collision_test(total_bv, total_bv)
diag = np.arange(total_bv.shape[0])
coll_mat[diag, diag] = False
valid_samples = []
idx = num_gt
for num in group_num:
if coll_mat[idx : idx + num].any():
coll_mat[idx : idx + num] = False
coll_mat[:, idx : idx + num] = False
else:
for i in range(num):
if self._enable_global_rot:
sampled[idx - num_gt + i]["box3d_lidar"][:2] = boxes[
idx + i, :2
]
sampled[idx - num_gt + i]["box3d_lidar"][-1] = boxes[
idx + i, -1
]
sampled[idx - num_gt + i]["rot_transform"] = (
boxes[idx + i, -1] - sp_boxes[idx + i - num_gt, -1]
)
valid_samples.append(sampled[idx - num_gt + i])
idx += num
return valid_samples
| [
"[email protected]"
] | |
4bb739f59e43b16c125bc8bb2a99540f52ebf7a0 | 58e4c3e1302a97e781b5657764fdde3e8dd48708 | /no_if_required.py | 0644af70c34260fd2a4fc9ef39a1b2891b776aa4 | [] | no_license | bgroveben/coursera_LTP_TF | 05ebf73991f73a98360ffbde685f24f6c68d3968 | f96bd2d19316713b496979df63d5ebec2161c722 | refs/heads/master | 2020-02-26T15:40:04.082856 | 2017-03-01T13:52:01 | 2017-03-01T13:52:01 | 70,072,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | # Booleans are your friend.
# Why do this:
def is_even(num):
""" (int) -> bool
Return whether number is even.
>>> is_even(1)
False
>>> is_even(2)
True
"""
if num % 2 == 0:
return True
else:
return False
print(is_even(1))
print(is_even(2))
# When you can do this:
def is_even_bool(num):
""" (int) -> bool
Return whether number is even.
>>> is_even_bool(1)
False
>>> is_even_bool(2)
True
"""
return num % 2 == 0
print(is_even_bool(1))
print(is_even_bool(2))
| [
"[email protected]"
] | |
d780518c2b1715e81659974794ceffcf2cb2fbbb | b144c5142226de4e6254e0044a1ca0fcd4c8bbc6 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/spbtopologylist.py | b35e5c816b1e216f2b6b410bddb5b295d4ea2ce0 | [
"MIT"
] | permissive | iwanb/ixnetwork_restpy | fa8b885ea7a4179048ef2636c37ef7d3f6692e31 | c2cb68fee9f2cc2f86660760e9e07bd06c0013c2 | refs/heads/master | 2021-01-02T17:27:37.096268 | 2020-02-11T09:28:15 | 2020-02-11T09:28:15 | 239,721,780 | 0 | 0 | NOASSERTION | 2020-02-11T09:20:22 | 2020-02-11T09:20:21 | null | UTF-8 | Python | false | false | 8,549 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class SpbTopologyList(Base):
"""ISIS SPB Topology Range Configuration
The SpbTopologyList class encapsulates a required spbTopologyList resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'spbTopologyList'
def __init__(self, parent):
super(SpbTopologyList, self).__init__(parent)
@property
def BaseVidList(self):
"""An instance of the BaseVidList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.basevidlist.BaseVidList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.basevidlist import BaseVidList
return BaseVidList(self)._select()
@property
def Active(self):
"""Activate/Deactivate Configuration
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('active')
@property
def AuxMcidConfName(self):
"""Aux MCID Config Name
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('auxMcidConfName')
@property
def AuxMcidSignature(self):
"""Aux MCID Signature
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('auxMcidSignature')
@property
def BaseVidCount(self):
"""Base VID Count(multiplier)
Returns:
number
"""
return self._get_attribute('baseVidCount')
@BaseVidCount.setter
def BaseVidCount(self, value):
self._set_attribute('baseVidCount', value)
@property
def BridgePriority(self):
"""Bridge Priority
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('bridgePriority')
@property
def CistExternalRootCost(self):
"""CIST External Root Cost
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('cistExternalRootCost')
@property
def CistRootId(self):
"""CIST Root Identifier
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('cistRootId')
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
Returns:
number
"""
return self._get_attribute('count')
@property
def DescriptiveName(self):
"""Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offers more context
Returns:
str
"""
return self._get_attribute('descriptiveName')
@property
def LinkMetric(self):
"""Link Metric
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('linkMetric')
@property
def McidConfName(self):
"""MCID Config Name
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('mcidConfName')
@property
def McidSignature(self):
"""MCID Signature
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('mcidSignature')
@property
def Name(self):
"""Name of NGPF element, guaranteed to be unique in Scenario
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def NumberOfPorts(self):
"""Number of Ports
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('numberOfPorts')
@property
def PortIdentifier(self):
"""Port Identifier
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('portIdentifier')
@property
def SpSourceId(self):
"""SP Source ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('spSourceId')
@property
def TopologyId(self):
"""Topology Id
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('topologyId')
@property
def Vbit(self):
"""Enable V Bit
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('vbit')
def update(self, BaseVidCount=None, Name=None):
"""Updates a child instance of spbTopologyList on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args:
BaseVidCount (number): Base VID Count(multiplier)
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def get_device_ids(self, PortNames=None, Active=None, AuxMcidConfName=None, AuxMcidSignature=None, BridgePriority=None, CistExternalRootCost=None, CistRootId=None, LinkMetric=None, McidConfName=None, McidSignature=None, NumberOfPorts=None, PortIdentifier=None, SpSourceId=None, TopologyId=None, Vbit=None):
"""Base class infrastructure that gets a list of spbTopologyList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args:
PortNames (str): optional regex of port names
Active (str): optional regex of active
AuxMcidConfName (str): optional regex of auxMcidConfName
AuxMcidSignature (str): optional regex of auxMcidSignature
BridgePriority (str): optional regex of bridgePriority
CistExternalRootCost (str): optional regex of cistExternalRootCost
CistRootId (str): optional regex of cistRootId
LinkMetric (str): optional regex of linkMetric
McidConfName (str): optional regex of mcidConfName
McidSignature (str): optional regex of mcidSignature
NumberOfPorts (str): optional regex of numberOfPorts
PortIdentifier (str): optional regex of portIdentifier
SpSourceId (str): optional regex of spSourceId
TopologyId (str): optional regex of topologyId
Vbit (str): optional regex of vbit
Returns:
list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
| [
"[email protected]"
] | |
45c4d7da37b08d92138205eb3bc65b14002f33fc | d6c117812a618ff34055488337aaffea8cf81ca1 | /git/Gists/GistCommit.py | 89840739856aba0fd9c60143035607162a89a352 | [] | no_license | c0ns0le/Pythonista | 44829969f28783b040dd90b46d08c36cc7a1f590 | 4caba2d48508eafa2477370923e96132947d7b24 | refs/heads/master | 2023-01-21T19:44:28.968799 | 2016-04-01T22:34:04 | 2016-04-01T22:34:04 | 55,368,932 | 3 | 0 | null | 2023-01-22T01:26:07 | 2016-04-03T21:04:40 | Python | UTF-8 | Python | false | false | 36 | py | import gistcheck
gistcheck.commit()
| [
"[email protected]"
] | |
78f2cb1b699f7ef5beaaeed03f0c6df3b2382e73 | d9a490dc36da08051b2685489a8e6af3d29fa903 | /gaussNodes.py | 396b5e58af12eae8c24a9c007102e128565535cc | [] | no_license | freephys/numeric-for-engineer | 403679c3f055164bf8b7097c360ad8bfc2cb9978 | a98d318e8cdff679cc02a575d32840fa87a4717d | refs/heads/master | 2020-04-16T01:33:43.530839 | 2009-11-28T18:42:12 | 2009-11-28T18:42:12 | 388,559 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | ## module gaussNodes
''' x,A = gaussNodes(m,tol=10e-9)
Returns nodal abscissas {x} and weights {A} of
Gauss-Legendre m-point quadrature.
'''
from math import cos,pi
from numarray import zeros,Float64
def gaussNodes(m,tol=10e-9):
def legendre(t,m):
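        # Bonnet's recurrence (k+1) P_{k+1}(t) = (2k+1) t P_k(t) - k P_{k-1}(t),
        # followed by the derivative identity
        # P'_m(t) = m*(P_{m-1}(t) - t*P_m(t))/(1 - t**2).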
p0 = 1.0; p1 = t
for k in range(1,m):
p = ((2.0*k + 1.0)*t*p1 - k*p0)/(1.0 + k )
p0 = p1; p1 = p
dp = m*(p0 - t*p1)/(1.0 - t**2)
return p,dp
A = zeros((m),type=Float64)
x = zeros((m),type=Float64)
nRoots = (m + 1)/2 # Number of non-neg. roots
for i in range(nRoots):
t = cos(pi*(i + 0.75)/(m + 0.5)) # Approx. root
for j in range(30):
p,dp = legendre(t,m) # Newton-Raphson
dt = -p/dp; t = t + dt # method
if abs(dt) < tol:
x[i] = t; x[m-i-1] = -t
A[i] = 2.0/(1.0 - t**2)/(dp**2) # Eq.(6.25)
A[m-i-1] = A[i]
break
return x,A
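# Usage sketch (illustrative, not part of the original module): approximate
# the integral of exp(u) over [0, 2] by mapping the nodes from [-1, 1] to
# [a, b]. The integrand and the interval are arbitrary example choices.
if __name__ == '__main__':
    from math import exp
    a, b = 0.0, 2.0
    x, A = gaussNodes(3)
    total = 0.0
    for i in range(len(x)):
        u = 0.5*(b + a) + 0.5*(b - a)*x[i]   # map node from [-1,1] to [a,b]
        total = total + A[i]*exp(u)
    total = total*0.5*(b - a)                # Jacobian of the mapping
    print("3-point Gauss-Legendre estimate: %f" % total)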
| [
"[email protected]"
] | |
914136d9cb630ffafb4876af0629d835fdf2852d | 7839d009f3ae0a0c1bc360b86756eba80fce284d | /build/rostime/catkin_generated/generate_cached_setup.py | 35bb758cd62d38d6d794504e7e737ab1ac541e5b | [] | no_license | abhat91/ros_osx | b5022daea0b6fdaae3489a97fdb1793b669e64f5 | 39cd8a79788d437927a24fab05a0e8ac64b3fb33 | refs/heads/master | 2021-01-10T14:43:41.047439 | 2016-03-13T23:18:59 | 2016-03-13T23:18:59 | 53,812,264 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/jade/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/jade/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/opt/ros/jade".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/site-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/Users/adityabhat/Downloads/devel/env.sh')
output_filename = '/Users/adityabhat/Downloads/build/rostime/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
382fe6b2d5bbcfdf0985153ae02dac0e9df70625 | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/python/estimator/canned/metric_keys.pyi | 10d9b385f98a4f8bc2d53253b192caa053ff2447 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | pyi | # Stubs for tensorflow.python.estimator.canned.metric_keys (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python.estimator import model_fn as model_fn
from typing import Any as Any
class MetricKeys:
LOSS: Any = ...
LOSS_MEAN: Any = ...
LOSS_REGULARIZATION: str = ...
ACCURACY: str = ...
PRECISION: str = ...
RECALL: str = ...
ACCURACY_BASELINE: str = ...
AUC: str = ...
AUC_PR: str = ...
LABEL_MEAN: str = ...
PREDICTION_MEAN: str = ...
ACCURACY_AT_THRESHOLD: str = ...
PRECISION_AT_THRESHOLD: str = ...
RECALL_AT_THRESHOLD: str = ...
PROBABILITY_MEAN_AT_CLASS: str = ...
AUC_AT_CLASS: str = ...
AUC_PR_AT_CLASS: str = ...
PROBABILITY_MEAN_AT_NAME: str = ...
AUC_AT_NAME: str = ...
AUC_PR_AT_NAME: str = ...
| [
"[email protected]"
] | |
f26f4be9786d1d6d93f78cd9342425b3d05c88fc | 99c9ca6edd44a13fd4eabee78625c827cc535ea1 | /examples/english/english_experiment.py | 911a51f8596bb8082423e0c2ed51e9a07dfd52f2 | [
"Apache-2.0"
] | permissive | adeepH/MUDES | bbcdcac41b33990545eac769d127a37ba5f4566f | f2f7413f9c683194253f7ea9286587bad3058396 | refs/heads/master | 2023-04-04T07:26:19.917166 | 2021-04-16T11:51:59 | 2021-04-16T11:51:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,320 | py | import os
import statistics
from sklearn.model_selection import train_test_split
from examples.english.transformer_configs import transformer_config, MODEL_TYPE, MODEL_NAME, LANGUAGE_FINETUNE, \
language_modeling_args, TEMP_DIRECTORY
from mudes.algo.evaluation import f1
from mudes.algo.mudes_model import MUDESModel
from mudes.algo.language_modeling import LanguageModelingModel
from mudes.algo.predict import predict_spans
from mudes.algo.preprocess import read_datafile, format_data, format_lm, read_test_datafile
import torch
if not os.path.exists(TEMP_DIRECTORY):
os.makedirs(TEMP_DIRECTORY)
train = read_datafile('examples/english/data/tsd_train.csv')
dev = read_datafile('examples//english/data/tsd_trial.csv')
test = read_test_datafile('examples//english/data/tsd_test.csv')
if LANGUAGE_FINETUNE:
train_list = format_lm(train)
dev_list = format_lm(dev)
complete_list = train_list + dev_list
lm_train = complete_list[0: int(len(complete_list)*0.8)]
lm_test = complete_list[-int(len(complete_list)*0.2):]
with open(os.path.join(TEMP_DIRECTORY, "lm_train.txt"), 'w') as f:
for item in lm_train:
f.write("%s\n" % item)
with open(os.path.join(TEMP_DIRECTORY, "lm_test.txt"), 'w') as f:
for item in lm_test:
f.write("%s\n" % item)
model = LanguageModelingModel("auto", MODEL_NAME, args=language_modeling_args, use_cuda=torch.cuda.is_available())
model.train_model(os.path.join(TEMP_DIRECTORY, "lm_train.txt"), eval_file=os.path.join(TEMP_DIRECTORY, "lm_test.txt"))
MODEL_NAME = language_modeling_args["best_model_dir"]
train_df = format_data(train)
tags = train_df['labels'].unique().tolist()
model = MUDESModel(MODEL_TYPE, MODEL_NAME, labels=tags, args=transformer_config)
if transformer_config["evaluate_during_training"]:
train_df, eval_df = train_test_split(train_df, test_size=0.1, shuffle=False)
model.train_model(train_df, eval_df=eval_df)
else:
model.train_model(train_df)
model = MUDESModel(MODEL_TYPE, transformer_config["best_model_dir"], labels=tags, args=transformer_config)
scores = []
for n, (spans, text) in enumerate(dev):
predictions = predict_spans(model, text)
score = f1(predictions, spans)
scores.append(score)
print('avg F1 %g' % statistics.mean(scores))
| [
"[email protected]"
] | |
6d325cde6dc284456409345078859f4750f3f561 | 3406886ecbbed36bb47288a38eaab001a2b30417 | /ya_glm/models/FcpLLA.py | c187d77d046477e9904fc8cc0a290839bee5c4e3 | [
"MIT"
] | permissive | thomaskeefe/ya_glm | 8d953f7444e51dfeaa28dcd92aaf946112ebc677 | e6e1bbb915d15c530d10a4776ea848b331c99c3b | refs/heads/main | 2023-06-21T15:08:33.590892 | 2021-07-30T00:57:49 | 2021-07-30T00:57:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,545 | py | from ya_glm.base.GlmFcpLLA import GlmFcpLLA, GlmFcpLLACV
from ya_glm.base.GlmCV import SinglePenSeqSetterMixin
from ya_glm.PenaltyConfig import ConcavePenalty
from ya_glm.loss.LossMixin import LossMixin
from ya_glm.models.Lasso import Lasso, LassoCV
from ya_glm.cv.RunCVMixin import RunCVGridMixin
from ya_glm.processing import check_estimator_type
from ya_glm.autoassign import autoassign
class FcpLLA(LossMixin, GlmFcpLLA):
"""
    A GLM with a folded concave penalty (FCP) fit with the local linear approximation (LLA) algorithm. This handles entrywise, group, multi-task and nuclear norm like penalties.
    The LLA algorithm is a two-stage estimator. In the first stage an initial estimate of the coefficient is fit (e.g. using LassoCV). In the second stage we attempt to solve the concave penalized problem with the LLA algorithm, starting it from the initial coefficient.
Parameters
----------
loss: str, ya_glm.LossConfig.LossConfig
The loss function. If a string is provided the loss function parameters are set to their default values. Otherwise the loss function parameters can be specified by providing a LossConfig object. See ya_glm.LossConfig for available loss functions.
fit_intercept: bool
        Whether or not to fit an intercept, which is not penalized.
pen_val: float
The penalty value for the concave penalty.
pen_func: str
The concave penalty function. See ya_glm.opt.penalty.concave_penalty.
pen_func_kws: dict
Keyword arguments for the concave penalty function e.g. 'a' for the SCAD function.
lla_n_steps: int
        Maximum number of steps the LLA algorithm should take. The LLA algorithm can have favorable statistical properties after only 1 step.
lla_kws: dict
Additional keyword arguments to the LLA algorithm solver excluding 'n_steps' and 'glm_solver'. See ya_glm.lla.LLASolver.LLASolver.
init: str, dict, estimator.
If init='default', will use LassoCV as the initializer.
        If init is a dict, will return self.init. If init is an estimator that is already fit, it will NOT be refit on the new data. If init is a dict with 'adpt_weights' then the estimator will use exactly these adaptive weights.
groups: None, list of ints
Optional groups of variables. If groups is provided then each element in the list should be a list of feature indices. Variables not in a group are not penalized.
multi_task: bool
        Use a multi-task Lasso for the coefficient matrix of a multiple response GLM. This is the L1 to L2 norm (sum of the Euclidean norms of the rows).
nuc: bool
        Use a nuclear norm penalty (sum of the singular values) for the coefficient matrix of a multiple response GLM.
ridge_pen_val: None, float
(Optional) Penalty strength for an optional ridge penalty.
    ridge_weights: None, array-like shape (n_features, )
        (Optional) Feature weights for the ridge penalty.
tikhonov: None, array-like (K, n_features)
        (Optional) Tikhonov matrix for the ridge penalty. tikhonov and ridge_weights cannot both be provided at the same time.
standardize: bool
Whether or not to perform internal standardization before fitting the data. Standardization means mean centering and scaling each column by its standard deviation. For the group lasso penalty an additional scaling is applied that scales each variable by 1 / sqrt(group size). Putting each variable on the same scale makes sense for fitting penalized models. Note the fitted coefficient/intercept is transformed to be on the original scale of the input data.
solver: str, ya_glm.GlmSolver
The solver used to solve the penalized GLM optimization problem. If this is set to 'default' we try to guess the best solver. Otherwise a custom solver can be provided by specifying a GlmSolver object.
Attributes
----------
coef_: array-like, shape (n_features, ) or (n_features, n_responses)
The fitted coefficient vector or matrix (for multiple responses).
intercept_: None, float or array-like, shape (n_features, )
The fitted intercept.
classes_: array-like, shape (n_classes, )
A list of class labels known to the classifier.
opt_data_: dict
Data output by the optimization algorithm.
References
----------
Fan, J., Xue, L. and Zou, H., 2014. Strong oracle optimality of folded concave penalized estimation. Annals of statistics, 42(3), p.819.
"""
@autoassign
def __init__(self, loss='lin_reg', fit_intercept=True,
pen_val=1,
pen_func='scad',
pen_func_kws={},
init='default',
lla_n_steps=1, lla_kws={},
groups=None, multi_task=False, nuc=False,
ridge_pen_val=None, ridge_weights=None, tikhonov=None,
standardize=False, glm_solver='default'):
pass
def _get_penalty_config(self):
"""
Gets the penalty config.
Output
------
penalty: ya_glm.PenaltyConfig.ConcavePenalty
A penalty config object.
"""
return ConcavePenalty(pen_val=self.pen_val,
pen_func=self.pen_func,
pen_func_kws=self.pen_func_kws,
groups=self.groups,
multi_task=self.multi_task,
nuc=self.nuc,
ridge_pen_val=self.ridge_pen_val,
ridge_weights=self.ridge_weights,
tikhonov=self.tikhonov
)
def _get_default_init(self):
"""
Output
-------
est: LassoCV()
The default initializer object.
"""
est = Lasso(loss=self.loss,
fit_intercept=self.fit_intercept,
groups=self.groups,
multi_task=self.multi_task,
nuc=self.nuc,
ridge_pen_val=self.ridge_pen_val,
ridge_weights=self.ridge_weights,
tikhonov=self.tikhonov,
standardize=self.standardize,
solver=self.glm_solver
)
return LassoCV(estimator=est)
class FcpLLACV(SinglePenSeqSetterMixin, RunCVGridMixin, GlmFcpLLACV):
"""
    Tunes a concave penalized GLM using cross-validation.
    Note the initializer is fit before running cross-validation so the same adaptive weights are used for each CV fold.
Parameters
----------
estimator: ya_glm.models.FcpLLA
The base FcpLLA estimator to be tuned with cross-validation. Only the pen_val parameter is tuned.
cv: int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
cv_select_metric: None, str
        Which metric to use for selecting the best tuning parameter if multiple metrics are computed.
cv_scorer: None, callable(est, X, y) -> dict or float
A function for evaluating the cross-validation fit estimators. If this returns a dict of multiple scores then cv_select_metric determines which metric is used to select the tuning parameter.
cv_n_jobs: None, int
Number of jobs to run in parallel.
cv_verbose: int
Amount of printout during cross-validation.
cv_pre_dispatch: int, or str, default=n_jobs
Controls the number of jobs that get dispatched during parallel execution
n_pen_vals: int
Number of penalty values to try for automatically generated tuning parameter sequence.
pen_vals: None, array-like
        (Optional) User provided penalty value sequence. The penalty sequence should be monotonically decreasing so the homotopy path algorithm works properly.
pen_min_mult: float
        Determines the smallest penalty value to try. The automatically generated penalty value sequence lives in the interval [pen_min_mult * pen_max_val, pen_max_val] where pen_max_val is automatically determined.
pen_spacing: str
How the penalty values are spaced. Must be one of ['log', 'lin']
for logarithmic and linear spacing respectively.
Attributes
----------
best_estimator_:
The fit estimator with the parameters selected via cross-validation.
cv_results_: dict
The cross-validation results.
best_tune_idx_: int
Index of the best tuning parameter. This index corresponds to the list returned by get_tuning_sequence().
best_tune_params_: dict
The best tuning parameters.
cv_data_: dict
Additional data about the CV fit e.g. the runtime.
References
----------
Fan, J., Xue, L. and Zou, H., 2014. Strong oracle optimality of folded concave penalized estimation. Annals of statistics, 42(3), p.819.
"""
@autoassign
def __init__(self,
estimator=FcpLLA(),
cv=None,
cv_select_rule='best',
cv_select_metric=None,
cv_scorer=None,
cv_verbose=0, cv_n_jobs=None,
cv_pre_dispatch='2*n_jobs',
n_pen_vals=100,
pen_vals=None,
pen_min_mult=1e-3,
pen_spacing='log'
):
pass
def _check_base_estimator(self):
check_estimator_type(self.estimator, FcpLLA)
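if __name__ == "__main__":
    # Minimal usage sketch on synthetic data (illustrative only; assumes numpy
    # is installed alongside ya_glm). Sizes, coefficients and settings below
    # are arbitrary example choices, not recommendations.
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(200, 20)
    coef = np.zeros(20)
    coef[:3] = [2.0, -1.5, 1.0]                 # sparse ground truth
    y = X @ coef + 0.1 * rng.randn(200)

    # Tune the SCAD penalty value by cross-validation; the initializer
    # (LassoCV by default) is fit once before the CV loop.
    est = FcpLLACV(estimator=FcpLLA(loss='lin_reg', pen_func='scad'))
    est.fit(X, y)
    print(est.best_tune_params_)
    print(est.best_estimator_.coef_[:5])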
| [
"[email protected]"
] | |
7d7114b73f7531d5ead27980f9e0b3608c42a9a3 | c64dd4b7f67d1f3c6ade8404831676a3652963e4 | /dask_drmaa/sge.py | 0ec11dacb016ef01325f45ed1814c9faf8b51b02 | [] | no_license | mrocklin/dask-drmaa | 6921a59bf29fd2c5e082dd0aad9bdf1f1e0f1806 | 71bd87c8c11a759f2495139e3b613421e7ba4986 | refs/heads/master | 2021-05-01T20:29:12.752914 | 2017-01-18T13:20:26 | 2017-01-18T13:20:26 | 79,346,877 | 3 | 0 | null | 2017-01-18T14:18:45 | 2017-01-18T14:18:45 | null | UTF-8 | Python | false | false | 1,210 | py | from .core import DRMAACluster, get_session
class SGECluster(DRMAACluster):
default_memory = None
default_memory_fraction = 0.6
def createJobTemplate(self, nativeSpecification='', cpus=1, memory=None,
memory_fraction=None):
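        # The requested memory is used two ways below: the Dask worker gets a
        # --memory-limit of memory * memory_fraction and advertises 80% of the
        # total as a 'memory' resource, while the SGE native specification
        # requests the full amount (in whole GB) as an h_vmem limit.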
memory = memory or self.default_memory
memory_fraction = memory_fraction or self.default_memory_fraction
args = self.args
ns = self.nativeSpecification
if nativeSpecification:
ns = ns + nativeSpecification
if memory:
args = args + ['--memory-limit', str(memory * memory_fraction)]
args = args + ['--resources', 'memory=%f' % (memory * 0.8)]
ns += ' -l h_vmem=%dG' % int(memory / 1e9) # / cpus
if cpus:
args = args + ['--nprocs', '1', '--nthreads', str(cpus)]
# ns += ' -l TODO=%d' % (cpu + 1)
ns += ' -l h_rt={}'.format(self.max_runtime)
wt = get_session().createJobTemplate()
wt.jobName = self.jobName
wt.remoteCommand = self.remoteCommand
wt.args = args
wt.outputPath = self.outputPath
wt.errorPath = self.errorPath
wt.nativeSpecification = ns
return wt
| [
"[email protected]"
] | |
6f4ec6813bf2211d999511545770fc79c5aad30b | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/deploymentmanager/v2beta/deploymentmanager_v2beta_messages.py | e8077885161d342ac6bee5beb274e2e03c7ef624 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 100,141 | py | """Generated message classes for deploymentmanager version v2beta.
The Deployment Manager API allows users to declaratively configure, deploy and
run complex solutions on the Google Cloud Platform.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
package = 'deploymentmanager'
class AsyncOptions(_messages.Message):
r"""Async options that determine when a resource should finish.
Fields:
methodMatch: Method regex where this policy will apply.
pollingOptions: Deployment manager will poll instances for this API
resource setting a RUNNING state, and blocking until polling conditions
tell whether the resource is completed or failed.
"""
methodMatch = _messages.StringField(1)
pollingOptions = _messages.MessageField('PollingOptions', 2)
class AuditConfig(_messages.Message):
r"""Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditLogConfig are exempted. Example Policy with
multiple AuditConfigs: { "audit_configs": [ { "service": "allServices"
"audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
"user:[email protected]" ] }, { "log_type": "DATA_WRITE", }, { "log_type":
"ADMIN_READ", } ] }, { "service": "sampleservice.googleapis.com"
"audit_log_configs": [ { "log_type": "DATA_READ", }, { "log_type":
"DATA_WRITE", "exempted_members": [ "user:[email protected]" ] } ] } ] }
For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
logging. It also exempts [email protected] from DATA_READ logging, and
[email protected] from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
exemptedMembers:
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
exemptedMembers = _messages.StringField(2, repeated=True)
service = _messages.StringField(3)
class AuditLogConfig(_messages.Message):
r"""Provides the configuration for logging a type of permissions. Example:
{ "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
"user:[email protected]" ] }, { "log_type": "DATA_WRITE", } ] } This enables
'DATA_READ' and 'DATA_WRITE' logging, while exempting [email protected] from
DATA_READ logging.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of [Binding.members][].
ignoreChildExemptions:
logType: The log type that this config enables.
"""
exemptedMembers = _messages.StringField(1, repeated=True)
ignoreChildExemptions = _messages.BooleanField(2)
logType = _messages.StringField(3)
class AuthorizationLoggingOptions(_messages.Message):
r"""Authorization-related information used by Cloud Audit Logging.
Fields:
permissionType: The type of the permission that was checked.
"""
permissionType = _messages.StringField(1)
class BaseType(_messages.Message):
r"""BaseType that describes a service-backed Type.
Fields:
collectionOverrides: Allows resource handling overrides for specific
collections
credential: Credential used when interacting with this type.
descriptorUrl: Descriptor Url for the this type.
options: Options to apply when handling any resources in this service.
"""
collectionOverrides = _messages.MessageField('CollectionOverride', 1, repeated=True)
credential = _messages.MessageField('Credential', 2)
descriptorUrl = _messages.StringField(3)
options = _messages.MessageField('Options', 4)
class BasicAuth(_messages.Message):
r"""Basic Auth used as a credential.
Fields:
password: A string attribute.
user: A string attribute.
"""
password = _messages.StringField(1)
user = _messages.StringField(2)
class Binding(_messages.Message):
r"""Associates `members` with a `role`.
Fields:
condition: The condition that is associated with this binding. NOTE: An
unsatisfied condition will not allow user access via current binding.
Different bindings, including their conditions, are examined
independently.
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet; with
or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example, `[email protected]` .
* `serviceAccount:{emailid}`: An email address that represents a service
account. For example, `[email protected]`. *
`group:{emailid}`: An email address that represents a Google group. For
example, `[email protected]`. *
`deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
identifier) representing a user that has been recently deleted. For
example, `[email protected]?uid=123456789012345678901`. If the user is
recovered, this value reverts to `user:{emailid}` and the recovered user
retains the role in the binding. *
`deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
(plus unique identifier) representing a service account that has been
recently deleted. For example, `my-other-
[email protected]?uid=123456789012345678901`. If the
service account is undeleted, this value reverts to
`serviceAccount:{emailid}` and the undeleted service account retains the
role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An
email address (plus unique identifier) representing a Google group that
has been recently deleted. For example,
`[email protected]?uid=123456789012345678901`. If the group is
recovered, this value reverts to `group:{emailid}` and the recovered
group retains the role in the binding. * `domain:{domain}`: The G
Suite domain (primary) that represents all the users of that domain. For
example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
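# Illustrative note: like the other generated classes in this module, Binding
# is a protorpclite message, so instances can be built with keyword arguments,
# e.g. Binding(role='roles/viewer', members=['user:[email protected]']).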
class CollectionOverride(_messages.Message):
r"""CollectionOverride allows resource handling overrides for specific
resources within a BaseType
Fields:
collection: The collection that identifies this resource within its
service.
options: The options to apply to this resource-level override
"""
collection = _messages.StringField(1)
options = _messages.MessageField('Options', 2)
class CompositeType(_messages.Message):
r"""Holds the composite type.
Fields:
description: An optional textual description of the resource; provided by
the client when the resource is created.
id: A string attribute.
insertTime: Output only. Creation timestamp in RFC3339 text format.
labels: Map of labels; provided by the client when the resource is created
or updated. Specifically: Label keys must be between 1 and 63 characters
long and must conform to the following regular expression:
[a-z]([-a-z0-9]*[a-z0-9])? Label values must be between 0 and 63
characters long and must conform to the regular expression
([a-z]([-a-z0-9]*[a-z0-9])?)?
name: Name of the composite type, must follow the expression:
[a-z]([-a-z0-9_.]{0,61}[a-z0-9])?.
operation: Output only. The Operation that most recently ran, or is
currently running, on this composite type.
selfLink: Output only. Server defined URL for the resource.
status: A string attribute.
templateContents: Files for the template type.
"""
description = _messages.StringField(1)
id = _messages.IntegerField(2, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(3)
labels = _messages.MessageField('CompositeTypeLabelEntry', 4, repeated=True)
name = _messages.StringField(5)
operation = _messages.MessageField('Operation', 6)
selfLink = _messages.StringField(7)
status = _messages.StringField(8)
templateContents = _messages.MessageField('TemplateContents', 9)
class CompositeTypeLabelEntry(_messages.Message):
r"""A CompositeTypeLabelEntry object.
Fields:
key: A string attribute.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
class CompositeTypesListResponse(_messages.Message):
r"""A response that returns all Composite Types supported by Deployment
Manager
Fields:
compositeTypes: Output only. A list of resource composite types supported
by Deployment Manager.
nextPageToken: A token used to continue a truncated list request.
"""
compositeTypes = _messages.MessageField('CompositeType', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class Condition(_messages.Message):
r"""A condition to be met.
Fields:
iam: Trusted attributes supplied by the IAM system.
op: An operator to apply the subject with.
svc: Trusted attributes discharged by the service.
sys: Trusted attributes supplied by any service that owns resources and
uses the IAM system for access control.
values: The objects of the condition.
"""
iam = _messages.StringField(1)
op = _messages.StringField(2)
svc = _messages.StringField(3)
sys = _messages.StringField(4)
values = _messages.StringField(5, repeated=True)
class ConfigFile(_messages.Message):
r"""ConfigFile message type.
Fields:
content: The contents of the file.
"""
content = _messages.StringField(1)
class Credential(_messages.Message):
r"""The credential used by Deployment Manager and TypeProvider. Only one of
the options is permitted.
Fields:
basicAuth: Basic Auth Credential, only used by TypeProvider.
serviceAccount: Service Account Credential, only used by Deployment.
useProjectDefault: Specify to use the project default credential, only
supported by Deployment.
"""
basicAuth = _messages.MessageField('BasicAuth', 1)
serviceAccount = _messages.MessageField('ServiceAccount', 2)
useProjectDefault = _messages.BooleanField(3)
class Deployment(_messages.Message):
r"""Deployment message type.
Fields:
description: An optional user-provided description of the deployment.
fingerprint: Provides a fingerprint to use in requests to modify a
deployment, such as update(), stop(), and cancelPreview() requests. A
fingerprint is a randomly generated value that must be provided with
update(), stop(), and cancelPreview() requests to perform optimistic
locking. This ensures optimistic concurrency so that only one request
happens at a time. The fingerprint is initially generated by Deployment
Manager and changes after every request to modify data. To get the
latest fingerprint value, perform a get() request to a deployment.
id: A string attribute.
insertTime: Output only. Creation timestamp in RFC3339 text format.
labels: Map of labels; provided by the client when the resource is created
or updated. Specifically: Label keys must be between 1 and 63 characters
long and must conform to the following regular expression:
[a-z]([-a-z0-9]*[a-z0-9])? Label values must be between 0 and 63
characters long and must conform to the regular expression
([a-z]([-a-z0-9]*[a-z0-9])?)?
manifest: Output only. URL of the manifest representing the last manifest
that was successfully deployed.
name: Name of the resource; provided by the client when the resource is
created. The name must be 1-63 characters long, and comply with RFC1035.
Specifically, the name must be 1-63 characters long and match the
regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first
character must be a lowercase letter, and all following characters must
be a dash, lowercase letter, or digit, except the last character, which
cannot be a dash.
operation: Output only. The Operation that most recently ran, or is
currently running, on this deployment.
selfLink: Output only. Server defined URL for the resource.
target: [Input Only] The parameters that define your deployment, including
the deployment configuration and relevant templates.
update: Output only. If Deployment Manager is currently updating or
previewing an update to this deployment, the updated configuration
appears here.
updateTime: Output only. Update timestamp in RFC3339 text format.
"""
description = _messages.StringField(1)
fingerprint = _messages.BytesField(2)
id = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(4)
labels = _messages.MessageField('DeploymentLabelEntry', 5, repeated=True)
manifest = _messages.StringField(6)
name = _messages.StringField(7)
operation = _messages.MessageField('Operation', 8)
selfLink = _messages.StringField(9)
target = _messages.MessageField('TargetConfiguration', 10)
update = _messages.MessageField('DeploymentUpdate', 11)
updateTime = _messages.StringField(12)
class DeploymentLabelEntry(_messages.Message):
r"""A DeploymentLabelEntry object.
Fields:
key: A string attribute.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
class DeploymentUpdate(_messages.Message):
r"""DeploymentUpdate message type.
Fields:
description: Output only. An optional user-provided description of the
deployment after the current update has been applied.
labels: Output only. Map of labels; provided by the client when the
resource is created or updated. Specifically: Label keys must be between
1 and 63 characters long and must conform to the following regular
expression: [a-z]([-a-z0-9]*[a-z0-9])? Label values must be between 0
and 63 characters long and must conform to the regular expression
([a-z]([-a-z0-9]*[a-z0-9])?)?
manifest: Output only. URL of the manifest representing the update
configuration of this deployment.
"""
description = _messages.StringField(1)
labels = _messages.MessageField('DeploymentUpdateLabelEntry', 2, repeated=True)
manifest = _messages.StringField(3)
class DeploymentUpdateLabelEntry(_messages.Message):
r"""A DeploymentUpdateLabelEntry object.
Fields:
key: A string attribute.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
class DeploymentmanagerCompositeTypesDeleteRequest(_messages.Message):
r"""A DeploymentmanagerCompositeTypesDeleteRequest object.
Fields:
compositeType: The name of the type for this request.
project: The project ID for this request.
"""
compositeType = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class DeploymentmanagerCompositeTypesGetRequest(_messages.Message):
r"""A DeploymentmanagerCompositeTypesGetRequest object.
Fields:
compositeType: The name of the composite type for this request.
project: The project ID for this request.
"""
compositeType = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class DeploymentmanagerCompositeTypesInsertRequest(_messages.Message):
r"""A DeploymentmanagerCompositeTypesInsertRequest object.
Fields:
compositeType: A CompositeType resource to be passed as the request body.
project: The project ID for this request.
"""
compositeType = _messages.MessageField('CompositeType', 1)
project = _messages.StringField(2, required=True)
class DeploymentmanagerCompositeTypesListRequest(_messages.Message):
r"""A DeploymentmanagerCompositeTypesListRequest object.
Fields:
filter: A filter expression that filters resources listed in the response.
The expression must specify the field name, a comparison operator, and
the value that you want to use for filtering. The value must be a
string, a number, or a boolean. The comparison operator must be either
=, !=, >, or <. For example, if you are filtering Compute Engine
instances, you can exclude instances named example-instance by
specifying name != example-instance. You can also filter nested fields.
For example, you could specify scheduling.automaticRestart = false to
include instances only if they are not scheduled for automatic restarts.
You can use filtering on nested fields to filter based on resource
labels. To filter on multiple expressions, provide each separate
expression within parentheses. For example, (scheduling.automaticRestart
= true) (cpuPlatform = "Intel Skylake"). By default, each expression is
an AND expression. However, you can include AND and OR expressions
explicitly. For example, (cpuPlatform = "Intel Skylake") OR (cpuPlatform
= "Intel Broadwell") AND (scheduling.automaticRestart = true).
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
class DeploymentmanagerCompositeTypesPatchRequest(_messages.Message):
r"""A DeploymentmanagerCompositeTypesPatchRequest object.
Fields:
compositeType: The name of the composite type for this request.
compositeTypeResource: A CompositeType resource to be passed as the
request body.
project: The project ID for this request.
"""
compositeType = _messages.StringField(1, required=True)
compositeTypeResource = _messages.MessageField('CompositeType', 2)
project = _messages.StringField(3, required=True)
class DeploymentmanagerCompositeTypesUpdateRequest(_messages.Message):
r"""A DeploymentmanagerCompositeTypesUpdateRequest object.
Fields:
compositeType: The name of the composite type for this request.
compositeTypeResource: A CompositeType resource to be passed as the
request body.
project: The project ID for this request.
"""
compositeType = _messages.StringField(1, required=True)
compositeTypeResource = _messages.MessageField('CompositeType', 2)
project = _messages.StringField(3, required=True)
class DeploymentmanagerDeploymentsCancelPreviewRequest(_messages.Message):
r"""A DeploymentmanagerDeploymentsCancelPreviewRequest object.
Fields:
deployment: The name of the deployment for this request.
deploymentsCancelPreviewRequest: A DeploymentsCancelPreviewRequest
resource to be passed as the request body.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
deploymentsCancelPreviewRequest = _messages.MessageField('DeploymentsCancelPreviewRequest', 2)
project = _messages.StringField(3, required=True)
class DeploymentmanagerDeploymentsDeleteRequest(_messages.Message):
r"""A DeploymentmanagerDeploymentsDeleteRequest object.
Enums:
DeletePolicyValueValuesEnum: Sets the policy to use for deleting
resources.
Fields:
deletePolicy: Sets the policy to use for deleting resources.
deployment: The name of the deployment for this request.
project: The project ID for this request.
"""
class DeletePolicyValueValuesEnum(_messages.Enum):
r"""Sets the policy to use for deleting resources.
Values:
ABANDON: <no description>
DELETE: <no description>
"""
ABANDON = 0
DELETE = 1
deletePolicy = _messages.EnumField('DeletePolicyValueValuesEnum', 1, default=u'DELETE')
deployment = _messages.StringField(2, required=True)
project = _messages.StringField(3, required=True)
class DeploymentmanagerDeploymentsGetIamPolicyRequest(_messages.Message):
r"""A DeploymentmanagerDeploymentsGetIamPolicyRequest object.
Fields:
project: Project ID for this request.
resource: Name or id of the resource for this request.
"""
project = _messages.StringField(1, required=True)
resource = _messages.StringField(2, required=True)
class DeploymentmanagerDeploymentsGetRequest(_messages.Message):
r"""A DeploymentmanagerDeploymentsGetRequest object.
Fields:
deployment: The name of the deployment for this request.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class DeploymentmanagerDeploymentsInsertRequest(_messages.Message):
r"""A DeploymentmanagerDeploymentsInsertRequest object.
Enums:
CreatePolicyValueValuesEnum: Sets the policy to use for creating new
resources.
Fields:
createPolicy: Sets the policy to use for creating new resources.
deployment: A Deployment resource to be passed as the request body.
preview: If set to true, creates a deployment and creates "shell"
resources but does not actually instantiate these resources. This allows
you to preview what your deployment looks like. After previewing a
deployment, you can deploy your resources by making a request with the
update() method or you can use the cancelPreview() method to cancel the
preview altogether. Note that the deployment will still exist after you
cancel the preview and you must separately delete this deployment if you
want to remove it.
project: The project ID for this request.
"""
class CreatePolicyValueValuesEnum(_messages.Enum):
r"""Sets the policy to use for creating new resources.
Values:
ACQUIRE: <no description>
CREATE: <no description>
CREATE_OR_ACQUIRE: <no description>
"""
ACQUIRE = 0
CREATE = 1
CREATE_OR_ACQUIRE = 2
createPolicy = _messages.EnumField('CreatePolicyValueValuesEnum', 1, default=u'CREATE_OR_ACQUIRE')
deployment = _messages.MessageField('Deployment', 2)
preview = _messages.BooleanField(3)
project = _messages.StringField(4, required=True)
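
# --- Illustrative sketch (editor addition, not part of the generated API
# surface): one plausible way to assemble a preview-only insert request, as
# described in the docstring above. The project id and deployment name are
# placeholders; a real deployment would normally also carry a target
# configuration, omitted here for brevity.
def _example_build_deployments_insert_request(project_id, deployment_name):
  """Builds a preview-only DeploymentmanagerDeploymentsInsertRequest."""
  return DeploymentmanagerDeploymentsInsertRequest(
      project=project_id,
      deployment=Deployment(name=deployment_name),
      # preview=True creates "shell" resources only; call update() later to
      # instantiate them, or cancelPreview() to discard the preview.
      preview=True)
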
class DeploymentmanagerDeploymentsListRequest(_messages.Message):
r"""A DeploymentmanagerDeploymentsListRequest object.
Fields:
filter: A filter expression that filters resources listed in the response.
The expression must specify the field name, a comparison operator, and
the value that you want to use for filtering. The value must be a
string, a number, or a boolean. The comparison operator must be either
=, !=, >, or <. For example, if you are filtering Compute Engine
instances, you can exclude instances named example-instance by
specifying name != example-instance. You can also filter nested fields.
For example, you could specify scheduling.automaticRestart = false to
include instances only if they are not scheduled for automatic restarts.
You can use filtering on nested fields to filter based on resource
labels. To filter on multiple expressions, provide each separate
expression within parentheses. For example, (scheduling.automaticRestart
= true) (cpuPlatform = "Intel Skylake"). By default, each expression is
an AND expression. However, you can include AND and OR expressions
explicitly. For example, (cpuPlatform = "Intel Skylake") OR (cpuPlatform
= "Intel Broadwell") AND (scheduling.automaticRestart = true).
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
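
# --- Illustrative sketch (editor addition): builds a list request using the
# filter syntax documented above (excluding a deployment named
# "example-instance") and a smaller page size. The project id is a
# placeholder.
def _example_build_deployments_list_request(project_id):
  """Builds a DeploymentmanagerDeploymentsListRequest with a filter."""
  return DeploymentmanagerDeploymentsListRequest(
      project=project_id,
      filter='name != example-instance',
      maxResults=100)
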
class DeploymentmanagerDeploymentsPatchRequest(_messages.Message):
r"""A DeploymentmanagerDeploymentsPatchRequest object.
Enums:
CreatePolicyValueValuesEnum: Sets the policy to use for creating new
resources.
DeletePolicyValueValuesEnum: Sets the policy to use for deleting
resources.
Fields:
createPolicy: Sets the policy to use for creating new resources.
deletePolicy: Sets the policy to use for deleting resources.
deployment: The name of the deployment for this request.
deploymentResource: A Deployment resource to be passed as the request
body.
preview: If set to true, updates the deployment and creates and updates
the "shell" resources but does not actually alter or instantiate these
resources. This allows you to preview what your deployment will look
like. You can use this intent to preview how an update would affect your
deployment. You must provide a target.config with a configuration if
this is set to true. After previewing a deployment, you can deploy your
resources by making a request with the update() or you can
cancelPreview() to remove the preview altogether. Note that the
deployment will still exist after you cancel the preview and you must
separately delete this deployment if you want to remove it.
project: The project ID for this request.
"""
class CreatePolicyValueValuesEnum(_messages.Enum):
r"""Sets the policy to use for creating new resources.
Values:
ACQUIRE: <no description>
CREATE: <no description>
CREATE_OR_ACQUIRE: <no description>
"""
ACQUIRE = 0
CREATE = 1
CREATE_OR_ACQUIRE = 2
class DeletePolicyValueValuesEnum(_messages.Enum):
r"""Sets the policy to use for deleting resources.
Values:
ABANDON: <no description>
DELETE: <no description>
"""
ABANDON = 0
DELETE = 1
createPolicy = _messages.EnumField('CreatePolicyValueValuesEnum', 1, default=u'CREATE_OR_ACQUIRE')
deletePolicy = _messages.EnumField('DeletePolicyValueValuesEnum', 2, default=u'DELETE')
deployment = _messages.StringField(3, required=True)
deploymentResource = _messages.MessageField('Deployment', 4)
preview = _messages.BooleanField(5, default=False)
project = _messages.StringField(6, required=True)
class DeploymentmanagerDeploymentsSetIamPolicyRequest(_messages.Message):
r"""A DeploymentmanagerDeploymentsSetIamPolicyRequest object.
Fields:
globalSetPolicyRequest: A GlobalSetPolicyRequest resource to be passed as
the request body.
project: Project ID for this request.
resource: Name or id of the resource for this request.
"""
globalSetPolicyRequest = _messages.MessageField('GlobalSetPolicyRequest', 1)
project = _messages.StringField(2, required=True)
resource = _messages.StringField(3, required=True)
class DeploymentmanagerDeploymentsStopRequest(_messages.Message):
r"""A DeploymentmanagerDeploymentsStopRequest object.
Fields:
deployment: The name of the deployment for this request.
deploymentsStopRequest: A DeploymentsStopRequest resource to be passed as
the request body.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
deploymentsStopRequest = _messages.MessageField('DeploymentsStopRequest', 2)
project = _messages.StringField(3, required=True)
class DeploymentmanagerDeploymentsTestIamPermissionsRequest(_messages.Message):
r"""A DeploymentmanagerDeploymentsTestIamPermissionsRequest object.
Fields:
project: Project ID for this request.
resource: Name or id of the resource for this request.
testPermissionsRequest: A TestPermissionsRequest resource to be passed as
the request body.
"""
project = _messages.StringField(1, required=True)
resource = _messages.StringField(2, required=True)
testPermissionsRequest = _messages.MessageField('TestPermissionsRequest', 3)
class DeploymentmanagerDeploymentsUpdateRequest(_messages.Message):
r"""A DeploymentmanagerDeploymentsUpdateRequest object.
Enums:
CreatePolicyValueValuesEnum: Sets the policy to use for creating new
resources.
DeletePolicyValueValuesEnum: Sets the policy to use for deleting
resources.
Fields:
createPolicy: Sets the policy to use for creating new resources.
deletePolicy: Sets the policy to use for deleting resources.
deployment: The name of the deployment for this request.
deploymentResource: A Deployment resource to be passed as the request
body.
preview: If set to true, updates the deployment and creates and updates
the "shell" resources but does not actually alter or instantiate these
resources. This allows you to preview what your deployment will look
like. You can use this intent to preview how an update would affect your
deployment. You must provide a target.config with a configuration if
this is set to true. After previewing a deployment, you can deploy your
resources by making a request with the update() or you can
cancelPreview() to remove the preview altogether. Note that the
deployment will still exist after you cancel the preview and you must
separately delete this deployment if you want to remove it.
project: The project ID for this request.
"""
class CreatePolicyValueValuesEnum(_messages.Enum):
r"""Sets the policy to use for creating new resources.
Values:
ACQUIRE: <no description>
CREATE: <no description>
CREATE_OR_ACQUIRE: <no description>
"""
ACQUIRE = 0
CREATE = 1
CREATE_OR_ACQUIRE = 2
class DeletePolicyValueValuesEnum(_messages.Enum):
r"""Sets the policy to use for deleting resources.
Values:
ABANDON: <no description>
DELETE: <no description>
"""
ABANDON = 0
DELETE = 1
createPolicy = _messages.EnumField('CreatePolicyValueValuesEnum', 1, default=u'CREATE_OR_ACQUIRE')
deletePolicy = _messages.EnumField('DeletePolicyValueValuesEnum', 2, default=u'DELETE')
deployment = _messages.StringField(3, required=True)
deploymentResource = _messages.MessageField('Deployment', 4)
preview = _messages.BooleanField(5, default=False)
project = _messages.StringField(6, required=True)
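
# --- Illustrative sketch (editor addition): shows how the enum-typed policies
# above can be set explicitly while previewing an update. The non-default
# policy values are chosen purely for illustration.
def _example_build_deployments_update_request(project_id, deployment_name):
  """Builds a preview DeploymentmanagerDeploymentsUpdateRequest."""
  request_type = DeploymentmanagerDeploymentsUpdateRequest
  return request_type(
      project=project_id,
      deployment=deployment_name,
      deploymentResource=Deployment(name=deployment_name),
      # Acquire matching existing resources instead of creating new ones, and
      # abandon (rather than delete) resources removed from the configuration.
      createPolicy=request_type.CreatePolicyValueValuesEnum.ACQUIRE,
      deletePolicy=request_type.DeletePolicyValueValuesEnum.ABANDON,
      preview=True)
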
class DeploymentmanagerManifestsGetRequest(_messages.Message):
r"""A DeploymentmanagerManifestsGetRequest object.
Fields:
deployment: The name of the deployment for this request.
manifest: The name of the manifest for this request.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
manifest = _messages.StringField(2, required=True)
project = _messages.StringField(3, required=True)
class DeploymentmanagerManifestsListRequest(_messages.Message):
r"""A DeploymentmanagerManifestsListRequest object.
Fields:
deployment: The name of the deployment for this request.
filter: A filter expression that filters resources listed in the response.
The expression must specify the field name, a comparison operator, and
the value that you want to use for filtering. The value must be a
string, a number, or a boolean. The comparison operator must be either
=, !=, >, or <. For example, if you are filtering Compute Engine
instances, you can exclude instances named example-instance by
specifying name != example-instance. You can also filter nested fields.
For example, you could specify scheduling.automaticRestart = false to
include instances only if they are not scheduled for automatic restarts.
You can use filtering on nested fields to filter based on resource
labels. To filter on multiple expressions, provide each separate
expression within parentheses. For example, (scheduling.automaticRestart
= true) (cpuPlatform = "Intel Skylake"). By default, each expression is
an AND expression. However, you can include AND and OR expressions
explicitly. For example, (cpuPlatform = "Intel Skylake") OR (cpuPlatform
= "Intel Broadwell") AND (scheduling.automaticRestart = true).
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
filter = _messages.StringField(2)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(4)
pageToken = _messages.StringField(5)
project = _messages.StringField(6, required=True)
class DeploymentmanagerOperationsGetRequest(_messages.Message):
r"""A DeploymentmanagerOperationsGetRequest object.
Fields:
operation: The name of the operation for this request.
project: The project ID for this request.
"""
operation = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class DeploymentmanagerOperationsListRequest(_messages.Message):
r"""A DeploymentmanagerOperationsListRequest object.
Fields:
filter: A filter expression that filters resources listed in the response.
The expression must specify the field name, a comparison operator, and
the value that you want to use for filtering. The value must be a
string, a number, or a boolean. The comparison operator must be either
=, !=, >, or <. For example, if you are filtering Compute Engine
instances, you can exclude instances named example-instance by
specifying name != example-instance. You can also filter nested fields.
For example, you could specify scheduling.automaticRestart = false to
include instances only if they are not scheduled for automatic restarts.
You can use filtering on nested fields to filter based on resource
labels. To filter on multiple expressions, provide each separate
expression within parentheses. For example, (scheduling.automaticRestart
= true) (cpuPlatform = "Intel Skylake"). By default, each expression is
an AND expression. However, you can include AND and OR expressions
explicitly. For example, (cpuPlatform = "Intel Skylake") OR (cpuPlatform
= "Intel Broadwell") AND (scheduling.automaticRestart = true).
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
class DeploymentmanagerResourcesGetRequest(_messages.Message):
r"""A DeploymentmanagerResourcesGetRequest object.
Fields:
deployment: The name of the deployment for this request.
project: The project ID for this request.
resource: The name of the resource for this request.
"""
deployment = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
resource = _messages.StringField(3, required=True)
class DeploymentmanagerResourcesListRequest(_messages.Message):
r"""A DeploymentmanagerResourcesListRequest object.
Fields:
deployment: The name of the deployment for this request.
filter: A filter expression that filters resources listed in the response.
The expression must specify the field name, a comparison operator, and
the value that you want to use for filtering. The value must be a
string, a number, or a boolean. The comparison operator must be either
=, !=, >, or <. For example, if you are filtering Compute Engine
instances, you can exclude instances named example-instance by
specifying name != example-instance. You can also filter nested fields.
For example, you could specify scheduling.automaticRestart = false to
include instances only if they are not scheduled for automatic restarts.
You can use filtering on nested fields to filter based on resource
labels. To filter on multiple expressions, provide each separate
expression within parentheses. For example, (scheduling.automaticRestart
= true) (cpuPlatform = "Intel Skylake"). By default, each expression is
an AND expression. However, you can include AND and OR expressions
explicitly. For example, (cpuPlatform = "Intel Skylake") OR (cpuPlatform
= "Intel Broadwell") AND (scheduling.automaticRestart = true).
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
deployment = _messages.StringField(1, required=True)
filter = _messages.StringField(2)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(4)
pageToken = _messages.StringField(5)
project = _messages.StringField(6, required=True)
class DeploymentmanagerTypeProvidersDeleteRequest(_messages.Message):
r"""A DeploymentmanagerTypeProvidersDeleteRequest object.
Fields:
project: The project ID for this request.
typeProvider: The name of the type provider for this request.
"""
project = _messages.StringField(1, required=True)
typeProvider = _messages.StringField(2, required=True)
class DeploymentmanagerTypeProvidersGetRequest(_messages.Message):
r"""A DeploymentmanagerTypeProvidersGetRequest object.
Fields:
project: The project ID for this request.
typeProvider: The name of the type provider for this request.
"""
project = _messages.StringField(1, required=True)
typeProvider = _messages.StringField(2, required=True)
class DeploymentmanagerTypeProvidersGetTypeRequest(_messages.Message):
r"""A DeploymentmanagerTypeProvidersGetTypeRequest object.
Fields:
project: The project ID for this request.
type: The name of the type provider type for this request.
typeProvider: The name of the type provider for this request.
"""
project = _messages.StringField(1, required=True)
type = _messages.StringField(2, required=True)
typeProvider = _messages.StringField(3, required=True)
class DeploymentmanagerTypeProvidersInsertRequest(_messages.Message):
r"""A DeploymentmanagerTypeProvidersInsertRequest object.
Fields:
project: The project ID for this request.
typeProvider: A TypeProvider resource to be passed as the request body.
"""
project = _messages.StringField(1, required=True)
typeProvider = _messages.MessageField('TypeProvider', 2)
class DeploymentmanagerTypeProvidersListRequest(_messages.Message):
r"""A DeploymentmanagerTypeProvidersListRequest object.
Fields:
filter: A filter expression that filters resources listed in the response.
The expression must specify the field name, a comparison operator, and
the value that you want to use for filtering. The value must be a
string, a number, or a boolean. The comparison operator must be either
=, !=, >, or <. For example, if you are filtering Compute Engine
instances, you can exclude instances named example-instance by
specifying name != example-instance. You can also filter nested fields.
For example, you could specify scheduling.automaticRestart = false to
include instances only if they are not scheduled for automatic restarts.
You can use filtering on nested fields to filter based on resource
labels. To filter on multiple expressions, provide each separate
expression within parentheses. For example, (scheduling.automaticRestart
= true) (cpuPlatform = "Intel Skylake"). By default, each expression is
an AND expression. However, you can include AND and OR expressions
explicitly. For example, (cpuPlatform = "Intel Skylake") OR (cpuPlatform
= "Intel Broadwell") AND (scheduling.automaticRestart = true).
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
class DeploymentmanagerTypeProvidersListTypesRequest(_messages.Message):
r"""A DeploymentmanagerTypeProvidersListTypesRequest object.
Fields:
filter: A filter expression that filters resources listed in the response.
The expression must specify the field name, a comparison operator, and
the value that you want to use for filtering. The value must be a
string, a number, or a boolean. The comparison operator must be either
=, !=, >, or <. For example, if you are filtering Compute Engine
instances, you can exclude instances named example-instance by
specifying name != example-instance. You can also filter nested fields.
For example, you could specify scheduling.automaticRestart = false to
include instances only if they are not scheduled for automatic restarts.
You can use filtering on nested fields to filter based on resource
labels. To filter on multiple expressions, provide each separate
expression within parentheses. For example, (scheduling.automaticRestart
= true) (cpuPlatform = "Intel Skylake"). By default, each expression is
an AND expression. However, you can include AND and OR expressions
explicitly. For example, (cpuPlatform = "Intel Skylake") OR (cpuPlatform
= "Intel Broadwell") AND (scheduling.automaticRestart = true).
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
typeProvider: The name of the type provider for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
typeProvider = _messages.StringField(6, required=True)
class DeploymentmanagerTypeProvidersPatchRequest(_messages.Message):
r"""A DeploymentmanagerTypeProvidersPatchRequest object.
Fields:
project: The project ID for this request.
typeProvider: The name of the type provider for this request.
typeProviderResource: A TypeProvider resource to be passed as the request
body.
"""
project = _messages.StringField(1, required=True)
typeProvider = _messages.StringField(2, required=True)
typeProviderResource = _messages.MessageField('TypeProvider', 3)
class DeploymentmanagerTypeProvidersUpdateRequest(_messages.Message):
r"""A DeploymentmanagerTypeProvidersUpdateRequest object.
Fields:
project: The project ID for this request.
typeProvider: The name of the type provider for this request.
typeProviderResource: A TypeProvider resource to be passed as the request
body.
"""
project = _messages.StringField(1, required=True)
typeProvider = _messages.StringField(2, required=True)
typeProviderResource = _messages.MessageField('TypeProvider', 3)
class DeploymentmanagerTypesListRequest(_messages.Message):
r"""A DeploymentmanagerTypesListRequest object.
Fields:
filter: A filter expression that filters resources listed in the response.
The expression must specify the field name, a comparison operator, and
the value that you want to use for filtering. The value must be a
string, a number, or a boolean. The comparison operator must be either
=, !=, >, or <. For example, if you are filtering Compute Engine
instances, you can exclude instances named example-instance by
specifying name != example-instance. You can also filter nested fields.
For example, you could specify scheduling.automaticRestart = false to
include instances only if they are not scheduled for automatic restarts.
You can use filtering on nested fields to filter based on resource
labels. To filter on multiple expressions, provide each separate
expression within parentheses. For example, (scheduling.automaticRestart
= true) (cpuPlatform = "Intel Skylake"). By default, each expression is
an AND expression. However, you can include AND and OR expressions
explicitly. For example, (cpuPlatform = "Intel Skylake") OR (cpuPlatform
= "Intel Broadwell") AND (scheduling.automaticRestart = true).
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
class DeploymentsCancelPreviewRequest(_messages.Message):
r"""DeploymentsCancelPreviewRequest message type.
Fields:
fingerprint: Specifies a fingerprint for cancelPreview() requests. A
fingerprint is a randomly generated value that must be provided in
cancelPreview() requests to perform optimistic locking. This ensures
optimistic concurrency so that the deployment does not have conflicting
requests (e.g. if someone attempts to make a new update request while
another user attempts to cancel a preview, this would prevent one of the
requests). The fingerprint is initially generated by Deployment Manager
and changes after every request to modify a deployment. To get the
latest fingerprint value, perform a get() request on the deployment.
"""
fingerprint = _messages.BytesField(1)
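
# --- Illustrative sketch (editor addition): the fingerprint used for
# optimistic locking comes from a prior get() on the deployment; this helper
# simply wraps those raw bytes into the request body.
def _example_build_cancel_preview_body(fingerprint_bytes):
  """Builds a DeploymentsCancelPreviewRequest from a deployment fingerprint."""
  return DeploymentsCancelPreviewRequest(fingerprint=fingerprint_bytes)
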
class DeploymentsListResponse(_messages.Message):
r"""A response containing a partial list of deployments and a page token
used to build the next request if the request has been truncated.
Fields:
deployments: Output only. The deployments contained in this response.
nextPageToken: Output only. A token used to continue a truncated list
request.
"""
deployments = _messages.MessageField('Deployment', 1, repeated=True)
nextPageToken = _messages.StringField(2)
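
# --- Illustrative sketch (editor addition): generic paging helper.
# `fetch_page` is a caller-supplied callable (an assumption, not part of this
# module) that takes a page token (or None) and returns a
# DeploymentsListResponse; the loop follows nextPageToken until exhausted.
def _example_iter_all_deployments(fetch_page):
  """Yields every Deployment across all pages of a truncated listing."""
  page_token = None
  while True:
    response = fetch_page(page_token)
    for deployment in response.deployments:
      yield deployment
    page_token = response.nextPageToken
    if not page_token:
      return
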
class DeploymentsStopRequest(_messages.Message):
r"""DeploymentsStopRequest message type.
Fields:
fingerprint: Specifies a fingerprint for stop() requests. A fingerprint is
a randomly generated value that must be provided in stop() requests to
perform optimistic locking. This ensures optimistic concurrency so that
the deployment does not have conflicting requests (e.g. if someone
attempts to make a new update request while another user attempts to
stop an ongoing update request, this would prevent a collision). The
fingerprint is initially generated by Deployment Manager and changes
after every request to modify a deployment. To get the latest
fingerprint value, perform a get() request on the deployment.
"""
fingerprint = _messages.BytesField(1)
class Diagnostic(_messages.Message):
r"""Diagnostic message type.
Fields:
field: JsonPath expression on the resource; if non-empty, it indicates
that this field needs to be extracted as a diagnostic.
level: Level to record this diagnostic.
"""
field = _messages.StringField(1)
level = _messages.StringField(2)
class Expr(_messages.Message):
r"""Represents a textual expression in the Common Expression Language (CEL)
syntax. CEL is a C-like expression language. The syntax and semantics of CEL
are documented at https://github.com/google/cel-spec. Example (Comparison):
title: "Summary size limit" description: "Determines if a summary is less
than 100 chars" expression: "document.summary.size() < 100" Example
(Equality): title: "Requestor is owner" description: "Determines if
requestor is the document owner" expression: "document.owner ==
request.auth.claims.email" Example (Logic): title: "Public documents"
description: "Determine whether the document should be publicly visible"
expression: "document.type != 'private' && document.type != 'internal'"
Example (Data Manipulation): title: "Notification string" description:
"Create a notification string with a timestamp." expression: "'New message
received at ' + string(document.create_time)" The exact variables and
functions that may be referenced within an expression are determined by the
service that evaluates it. See the service documentation for additional
information.
Fields:
description: Optional. Description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax.
location: Optional. String indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: Optional. Title for the expression, i.e. a short string describing
its purpose. This can be used e.g. in UIs which allow to enter the
expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
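
# --- Illustrative sketch (editor addition): reuses the "Summary size limit"
# example from the docstring above to show how the optional fields map onto
# the message.
def _example_build_summary_size_expr():
  """Builds the CEL expression example documented on Expr."""
  return Expr(
      title='Summary size limit',
      description='Determines if a summary is less than 100 chars',
      expression='document.summary.size() < 100')
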
class GlobalSetPolicyRequest(_messages.Message):
r"""A GlobalSetPolicyRequest object.
Fields:
bindings: Flatten Policy to create a backward compatible wire-format.
Deprecated. Use 'policy' to specify bindings.
etag: Flatten Policy to create a backward compatible wire-format.
Deprecated. Use 'policy' to specify the etag.
policy: REQUIRED: The complete policy to be applied to the 'resource'. The
size of the policy is limited to a few 10s of KB. An empty policy is in
general a valid policy but certain services (like Projects) might reject
them.
"""
bindings = _messages.MessageField('Binding', 1, repeated=True)
etag = _messages.BytesField(2)
policy = _messages.MessageField('Policy', 3)
class ImportFile(_messages.Message):
r"""ImportFile message type.
Fields:
content: The contents of the file.
name: The name of the file.
"""
content = _messages.StringField(1)
name = _messages.StringField(2)
class InputMapping(_messages.Message):
r"""InputMapping creates a 'virtual' property that will be injected into the
properties before sending the request to the underlying API.
Fields:
fieldName: The name of the field that is going to be injected.
location: The location where this mapping applies.
methodMatch: Regex to evaluate on method to decide if input applies.
value: A jsonPath expression to select an element.
"""
fieldName = _messages.StringField(1)
location = _messages.StringField(2)
methodMatch = _messages.StringField(3)
value = _messages.StringField(4)
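
# --- Illustrative sketch (editor addition): a hypothetical mapping that
# injects a virtual property into every request body. The field name,
# location, method regex and jsonPath below are placeholders, not values
# mandated by any particular underlying API.
def _example_build_input_mapping():
  """Builds an InputMapping that injects a virtual property."""
  return InputMapping(
      fieldName='projectNumber',
      location='BODY',
      methodMatch='.*',
      value='$.project.number')
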
class LogConfig(_messages.Message):
r"""Specifies what kind of log the caller must write
Fields:
cloudAudit: Cloud audit options.
counter: Counter options.
dataAccess: Data access options.
"""
cloudAudit = _messages.MessageField('LogConfigCloudAuditOptions', 1)
counter = _messages.MessageField('LogConfigCounterOptions', 2)
dataAccess = _messages.MessageField('LogConfigDataAccessOptions', 3)
class LogConfigCloudAuditOptions(_messages.Message):
r"""Write a Cloud Audit log
Fields:
authorizationLoggingOptions: Information used by the Cloud Audit Logging
pipeline.
logName: The log_name to populate in the Cloud Audit Record.
"""
authorizationLoggingOptions = _messages.MessageField('AuthorizationLoggingOptions', 1)
logName = _messages.StringField(2)
class LogConfigCounterOptions(_messages.Message):
r"""Increment a streamz counter with the specified metric and field names.
Metric names should start with a '/', generally be lowercase-only, and end
in "_count". Field names should not contain an initial slash. The actual
exported metric names will have "/iam/policy" prepended. Field names
correspond to IAM request parameters and field values are their respective
values. Supported field names: - "authority", which is "[token]" if
IAMContext.token is present, otherwise the value of
IAMContext.authority_selector if present, and otherwise a representation of
IAMContext.principal; or - "iam_principal", a representation of
IAMContext.principal even if a token or authority selector is present; or -
"" (empty string), resulting in a counter with no fields. Examples: counter
{ metric: "/debug_access_count" field: "iam_principal" } ==> increment
counter /iam/policy/debug_access_count {iam_principal=[value of
IAMContext.principal]} TODO(b/141846426): Consider supporting "authority"
and "iam_principal" fields in the same counter.
Fields:
customFields: Custom fields.
field: The field value to attribute.
metric: The metric to update.
"""
customFields = _messages.MessageField('LogConfigCounterOptionsCustomField', 1, repeated=True)
field = _messages.StringField(2)
metric = _messages.StringField(3)
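
# --- Illustrative sketch (editor addition): mirrors the counter example given
# in the docstring above, incrementing /iam/policy/debug_access_count keyed by
# the IAM principal.
def _example_build_debug_access_counter():
  """Builds the LogConfigCounterOptions example from the docstring."""
  return LogConfigCounterOptions(
      metric='/debug_access_count',
      field='iam_principal')
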
class LogConfigCounterOptionsCustomField(_messages.Message):
r"""Custom fields. These can be used to create a counter with arbitrary
field/value pairs. See: go/rpcsp-custom-fields.
Fields:
name: Name is the field name.
value: Value is the field value. It is important that in contrast to the
CounterOptions.field, the value here is a constant that is not derived
from the IAMContext.
"""
name = _messages.StringField(1)
value = _messages.StringField(2)
class LogConfigDataAccessOptions(_messages.Message):
r"""Write a Data Access (Gin) log
Fields:
logMode: Whether Gin logging should happen in a fail-closed manner at the
caller. This is relevant only in the LocalIAM implementation, for now.
"""
logMode = _messages.StringField(1)
class Manifest(_messages.Message):
r"""Manifest message type.
Fields:
config: Output only. The YAML configuration for this manifest.
expandedConfig: Output only. The fully-expanded configuration file,
including any templates and references.
id: A string attribute.
imports: Output only. The imported files for this manifest.
insertTime: Output only. Creation timestamp in RFC3339 text format.
layout: Output only. The YAML layout for this manifest.
name: Output only. The name of the manifest.
selfLink: Output only. Self link for the manifest.
"""
config = _messages.MessageField('ConfigFile', 1)
expandedConfig = _messages.StringField(2)
id = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
imports = _messages.MessageField('ImportFile', 4, repeated=True)
insertTime = _messages.StringField(5)
layout = _messages.StringField(6)
name = _messages.StringField(7)
selfLink = _messages.StringField(8)
class ManifestsListResponse(_messages.Message):
r"""A response containing a partial list of manifests and a page token used
to build the next request if the request has been truncated.
Fields:
manifests: Output only. Manifests contained in this list response.
nextPageToken: Output only. A token used to continue a truncated list
request.
"""
manifests = _messages.MessageField('Manifest', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class Operation(_messages.Message):
r"""Represents an Operation resource. Google Compute Engine has three
Operation resources: *
[Global](/compute/docs/reference/rest/latest/globalOperations) *
[Regional](/compute/docs/reference/rest/latest/regionOperations) *
[Zonal](/compute/docs/reference/rest/latest/zoneOperations) You can use an
operation resource to manage asynchronous API requests. For more
information, read Handling API responses. Operations can be global,
regional or zonal. - For global operations, use the globalOperations
resource. - For regional operations, use the regionOperations resource. -
For zonal operations, use the zonalOperations resource. For more
information, read Global, Regional, and Zonal Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
Messages:
ErrorValue: [Output Only] If errors are generated during processing of the
operation, this field will be populated.
WarningsValueListEntry: A WarningsValueListEntry object.
Fields:
clientOperationId: [Output Only] The value of `requestId` if you provided
it in the request. Not present otherwise.
creationTimestamp: [Deprecated] This field is deprecated.
description: [Output Only] A textual description of the operation, which
is set when the operation is created.
endTime: [Output Only] The time that this operation was completed. This
value is in RFC3339 text format.
error: [Output Only] If errors are generated during processing of the
operation, this field will be populated.
httpErrorMessage: [Output Only] If the operation fails, this field
contains the HTTP error message that was returned, such as NOT FOUND.
httpErrorStatusCode: [Output Only] If the operation fails, this field
contains the HTTP error status code that was returned. For example, a
404 means the resource was not found.
id: [Output Only] The unique identifier for the operation. This identifier
is defined by the server.
insertTime: [Output Only] The time that this operation was requested. This
value is in RFC3339 text format.
kind: [Output Only] Type of the resource. Always compute#operation for
Operation resources.
name: [Output Only] Name of the operation.
operationType: [Output Only] The type of operation, such as insert,
update, or delete, and so on.
progress: [Output Only] An optional progress indicator that ranges from 0
to 100. There is no requirement that this be linear or support any
granularity of operations. This should not be used to guess when the
operation will be complete. This number should monotonically increase as
the operation progresses.
region: [Output Only] The URL of the region where the operation resides.
Only applicable when performing regional operations.
selfLink: [Output Only] Server-defined URL for the resource.
startTime: [Output Only] The time that this operation was started by the
server. This value is in RFC3339 text format.
status: [Output Only] The status of the operation, which can be one of the
following: PENDING, RUNNING, or DONE.
statusMessage: [Output Only] An optional textual description of the
current status of the operation.
targetId: [Output Only] The unique target ID, which identifies a specific
incarnation of the target resource.
targetLink: [Output Only] The URL of the resource that the operation
modifies. For operations related to creating a snapshot, this points to
the persistent disk that the snapshot was created from.
user: [Output Only] User who requested the operation, for example:
[email protected].
warnings: [Output Only] If warning messages are generated during
processing of the operation, this field will be populated.
zone: [Output Only] The URL of the zone where the operation resides. Only
applicable when performing per-zone operations.
"""
class ErrorValue(_messages.Message):
r"""[Output Only] If errors are generated during processing of the
operation, this field will be populated.
Messages:
ErrorsValueListEntry: An ErrorsValueListEntry object.
Fields:
errors: [Output Only] The array of errors encountered while processing
this operation.
"""
class ErrorsValueListEntry(_messages.Message):
r"""An ErrorsValueListEntry object.
Fields:
code: [Output Only] The error type identifier for this error.
location: [Output Only] Indicates the field in the request that caused
the error. This property is optional.
message: [Output Only] An optional, human-readable error message.
"""
code = _messages.StringField(1)
location = _messages.StringField(2)
message = _messages.StringField(3)
errors = _messages.MessageField('ErrorsValueListEntry', 1, repeated=True)
class WarningsValueListEntry(_messages.Message):
r"""A WarningsValueListEntry object.
Messages:
DataValueListEntry: A DataValueListEntry object.
Fields:
code: [Output Only] A warning code, if applicable. For example, Compute
Engine returns NO_RESULTS_ON_PAGE if there are no results in the
response.
data: [Output Only] Metadata about this warning in key: value format.
For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
message: [Output Only] A human-readable description of the warning code.
"""
class DataValueListEntry(_messages.Message):
r"""A DataValueListEntry object.
Fields:
key: [Output Only] A key that provides more detail on the warning
being returned. For example, for warnings where there are no results
in a list request for a particular zone, this key might be scope and
the key value might be the zone name. Other examples might be a key
indicating a deprecated resource and a suggested replacement, or a
warning about invalid network settings (for example, if an instance
attempts to perform IP forwarding but is not enabled for IP
forwarding).
value: [Output Only] A warning data value corresponding to the key.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
code = _messages.StringField(1)
data = _messages.MessageField('DataValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
clientOperationId = _messages.StringField(1)
creationTimestamp = _messages.StringField(2)
description = _messages.StringField(3)
endTime = _messages.StringField(4)
error = _messages.MessageField('ErrorValue', 5)
httpErrorMessage = _messages.StringField(6)
httpErrorStatusCode = _messages.IntegerField(7, variant=_messages.Variant.INT32)
id = _messages.IntegerField(8, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(9)
kind = _messages.StringField(10, default=u'deploymentmanager#operation')
name = _messages.StringField(11)
operationType = _messages.StringField(12)
progress = _messages.IntegerField(13, variant=_messages.Variant.INT32)
region = _messages.StringField(14)
selfLink = _messages.StringField(15)
startTime = _messages.StringField(16)
status = _messages.StringField(17)
statusMessage = _messages.StringField(18)
targetId = _messages.IntegerField(19, variant=_messages.Variant.UINT64)
targetLink = _messages.StringField(20)
user = _messages.StringField(21)
warnings = _messages.MessageField('WarningsValueListEntry', 22, repeated=True)
zone = _messages.StringField(23)
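
# --- Illustrative sketch (editor addition): interprets a polled Operation the
# way the field docstrings above describe it: status moves through PENDING,
# RUNNING and DONE, and error is only populated on failure. How the Operation
# is fetched (global, regional or zonal operations service) is left to the
# caller.
def _example_operation_finished(operation):
  """Returns True once an Operation is DONE; raises if it finished in error."""
  if operation.status != 'DONE':
    return False
  if operation.error and operation.error.errors:
    details = [error.message for error in operation.error.errors]
    raise RuntimeError('Operation %s failed: %s' % (operation.name, details))
  return True
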
class OperationsListResponse(_messages.Message):
r"""A response containing a partial list of operations and a page token used
to build the next request if the request has been truncated.
Fields:
nextPageToken: Output only. A token used to continue a truncated list
request.
operations: Output only. Operations contained in this list response.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class Options(_messages.Message):
r"""Options allows customized resource handling by Deployment Manager.
Fields:
asyncOptions: Options regarding how to thread async requests.
inputMappings: The mappings that apply for requests.
validationOptions: Options for how to validate and process properties on a
resource.
virtualProperties: An additional properties block, described as a JSON
Schema (must be valid draft-04). These properties will never be part of
the JSON payload, but they can be consumed by InputMappings. The
properties specified here are decoupled into a different section: this
schema will be merged into the schema validation, and the properties
here will be extracted from the payload and consumed explicitly by
InputMappings. ex: field1: type: string field2: type: number
"""
asyncOptions = _messages.MessageField('AsyncOptions', 1, repeated=True)
inputMappings = _messages.MessageField('InputMapping', 2, repeated=True)
validationOptions = _messages.MessageField('ValidationOptions', 3)
virtualProperties = _messages.StringField(4)
class Policy(_messages.Message):
r"""An Identity and Access Management (IAM) policy, which specifies access
controls for Google Cloud resources. A `Policy` is a collection of
`bindings`. A `binding` binds one or more `members` to a single `role`.
Members can be user accounts, service accounts, Google groups, and domains
(such as G Suite). A `role` is a named list of permissions; each `role` can
be an IAM predefined role or a user-created custom role. Optionally, a
`binding` can specify a `condition`, which is a logical expression that
allows access to a resource only if the expression evaluates to `true`. A
condition can add constraints based on attributes of the request, the
resource, or both. **JSON example:** { "bindings": [ { "role":
"roles/resourcemanager.organizationAdmin", "members": [
"user:[email protected]", "group:[email protected]", "domain:google.com",
"serviceAccount:[email protected]" ] }, { "role":
"roles/resourcemanager.organizationViewer", "members":
["user:[email protected]"], "condition": { "title": "expirable access",
"description": "Does not grant access after Sep 2020", "expression":
"request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
"BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: -
user:[email protected] - group:[email protected] - domain:google.com -
serviceAccount:[email protected] role:
roles/resourcemanager.organizationAdmin - members: - user:[email protected]
role: roles/resourcemanager.organizationViewer condition: title: expirable
access description: Does not grant access after Sep 2020 expression:
request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= -
version: 3 For a description of IAM and its features, see the [IAM
documentation](https://cloud.google.com/iam/docs/).
Fields:
auditConfigs: Specifies cloud audit logging configuration for this policy.
bindings: Associates a list of `members` to a `role`. Optionally, may
specify a `condition` that determines how and when the `bindings` are
applied. Each of the `bindings` must contain at least one member.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. **Important:** If you use IAM Conditions, you must include the
`etag` field whenever you call `setIamPolicy`. If you omit this field,
then IAM allows you to overwrite a version `3` policy with a version `1`
policy, and all of the conditions in the version `3` policy are lost.
iamOwned:
rules: If more than one rule is specified, the rules are applied in the
following manner: - All matching LOG rules are always applied. - If any
DENY/DENY_WITH_LOG rule matches, permission is denied. Logging will be
applied if one or more matching rule requires logging. - Otherwise, if
any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted. Logging
will be applied if one or more matching rule requires logging. -
Otherwise, if no rule applies, permission is denied.
version: Specifies the format of the policy. Valid values are `0`, `1`,
and `3`. Requests that specify an invalid value are rejected. Any
operation that affects conditional role bindings must specify version
`3`. This requirement applies to the following operations: * Getting a
policy that includes a conditional role binding * Adding a conditional
role binding to a policy * Changing a conditional role binding in a
policy * Removing any role binding, with or without a condition, from a
policy that includes conditions **Important:** If you use IAM
Conditions, you must include the `etag` field whenever you call
`setIamPolicy`. If you omit this field, then IAM allows you to overwrite
a version `3` policy with a version `1` policy, and all of the
conditions in the version `3` policy are lost. If a policy does not
include any conditions, operations on that policy may specify any valid
version or leave the field unset.
"""
auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
bindings = _messages.MessageField('Binding', 2, repeated=True)
etag = _messages.BytesField(3)
iamOwned = _messages.BooleanField(4)
rules = _messages.MessageField('Rule', 5, repeated=True)
version = _messages.IntegerField(6, variant=_messages.Variant.INT32)
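
# --- Illustrative sketch (editor addition): demonstrates the read-modify-write
# cycle recommended in the etag description above. It assumes the Binding
# message defined earlier in this module exposes `role` and `members` fields;
# the member string and role are placeholders.
def _example_add_viewer_binding(existing_policy, member):
  """Appends a viewer binding to a Policy and wraps it for setIamPolicy."""
  existing_policy.bindings.append(
      Binding(role='roles/viewer', members=[member]))
  # Reusing the etag returned by getIamPolicy() lets setIamPolicy() detect
  # conflicting concurrent updates.
  return GlobalSetPolicyRequest(policy=existing_policy,
                                etag=existing_policy.etag)
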
class PollingOptions(_messages.Message):
r"""PollingOptions message type.
Fields:
diagnostics: An array of diagnostics to be collected by Deployment
Manager, these diagnostics will be displayed to the user.
failCondition: JsonPath expression that determines if the request failed.
finishCondition: JsonPath expression that determines if the request is
completed.
pollingLink: JsonPath expression that evaluates to a string; it indicates
where to poll.
targetLink: JsonPath expression that, once polling is completed, indicates
where to fetch the resource.
"""
diagnostics = _messages.MessageField('Diagnostic', 1, repeated=True)
failCondition = _messages.StringField(2)
finishCondition = _messages.StringField(3)
pollingLink = _messages.StringField(4)
targetLink = _messages.StringField(5)
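
# --- Illustrative sketch (editor addition): example polling configuration for
# a type provider. The JsonPath expressions are placeholders that show the
# intent of each field, not values required by any particular underlying API.
def _example_build_polling_options():
  """Builds PollingOptions with illustrative JsonPath expressions."""
  return PollingOptions(
      failCondition='$.error',
      finishCondition='$.status',
      pollingLink='$.selfLink',
      targetLink='$.targetLink')
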
class Resource(_messages.Message):
r"""Resource message type.
Messages:
WarningsValueListEntry: A WarningsValueListEntry object.
Fields:
accessControl: The Access Control Policy set on this resource.
finalProperties: Output only. The evaluated properties of the resource
with references expanded. Returned as serialized YAML.
id: A string attribute.
insertTime: Output only. Creation timestamp in RFC3339 text format.
manifest: Output only. URL of the manifest representing the current
configuration of this resource.
name: Output only. The name of the resource as it appears in the YAML
config.
properties: Output only. The current properties of the resource before any
references have been filled in. Returned as serialized YAML.
type: Output only. The type of the resource, for example
compute.v1.instance, or cloudfunctions.v1beta1.function.
update: Output only. If Deployment Manager is currently updating or
previewing an update to this resource, the updated configuration appears
here.
updateTime: Output only. Update timestamp in RFC3339 text format.
url: Output only. The URL of the actual resource.
warnings: Output only. If warning messages are generated during processing
of this resource, this field will be populated.
"""
class WarningsValueListEntry(_messages.Message):
r"""A WarningsValueListEntry object.
Messages:
DataValueListEntry: A DataValueListEntry object.
Fields:
code: [Output Only] A warning code, if applicable. For example, Compute
Engine returns NO_RESULTS_ON_PAGE if there are no results in the
response.
data: [Output Only] Metadata about this warning in key: value format.
For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
message: [Output Only] A human-readable description of the warning code.
"""
class DataValueListEntry(_messages.Message):
r"""A DataValueListEntry object.
Fields:
key: [Output Only] A key that provides more detail on the warning
being returned. For example, for warnings where there are no results
in a list request for a particular zone, this key might be scope and
the key value might be the zone name. Other examples might be a key
indicating a deprecated resource and a suggested replacement, or a
warning about invalid network settings (for example, if an instance
attempts to perform IP forwarding but is not enabled for IP
forwarding).
value: [Output Only] A warning data value corresponding to the key.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
code = _messages.StringField(1)
data = _messages.MessageField('DataValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
accessControl = _messages.MessageField('ResourceAccessControl', 1)
finalProperties = _messages.StringField(2)
id = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(4)
manifest = _messages.StringField(5)
name = _messages.StringField(6)
properties = _messages.StringField(7)
type = _messages.StringField(8)
update = _messages.MessageField('ResourceUpdate', 9)
updateTime = _messages.StringField(10)
url = _messages.StringField(11)
warnings = _messages.MessageField('WarningsValueListEntry', 12, repeated=True)
class ResourceAccessControl(_messages.Message):
r"""The access controls set on the resource.
Fields:
gcpIamPolicy: The GCP IAM Policy to set on the resource.
"""
gcpIamPolicy = _messages.StringField(1)
class ResourceUpdate(_messages.Message):
r"""ResourceUpdate message type.
Messages:
ErrorValue: Output only. If errors are generated during update of the
resource, this field will be populated.
WarningsValueListEntry: A WarningsValueListEntry object.
Fields:
accessControl: The Access Control Policy to set on this resource after
updating the resource itself.
error: Output only. If errors are generated during update of the resource,
this field will be populated.
finalProperties: Output only. The expanded properties of the resource with
reference values expanded. Returned as serialized YAML.
intent: Output only. The intent of the resource: PREVIEW, UPDATE, or
CANCEL.
manifest: Output only. URL of the manifest representing the update
configuration of this resource.
properties: Output only. The set of updated properties for this resource,
before references are expanded. Returned as serialized YAML.
state: Output only. The state of the resource.
warnings: Output only. If warning messages are generated during processing
of this resource, this field will be populated.
"""
class ErrorValue(_messages.Message):
r"""Output only. If errors are generated during update of the resource,
this field will be populated.
Messages:
ErrorsValueListEntry: An ErrorsValueListEntry object.
Fields:
errors: [Output Only] The array of errors encountered while processing
this operation.
"""
class ErrorsValueListEntry(_messages.Message):
r"""An ErrorsValueListEntry object.
Fields:
code: [Output Only] The error type identifier for this error.
location: [Output Only] Indicates the field in the request that caused
the error. This property is optional.
message: [Output Only] An optional, human-readable error message.
"""
code = _messages.StringField(1)
location = _messages.StringField(2)
message = _messages.StringField(3)
errors = _messages.MessageField('ErrorsValueListEntry', 1, repeated=True)
class WarningsValueListEntry(_messages.Message):
r"""A WarningsValueListEntry object.
Messages:
DataValueListEntry: A DataValueListEntry object.
Fields:
code: [Output Only] A warning code, if applicable. For example, Compute
Engine returns NO_RESULTS_ON_PAGE if there are no results in the
response.
data: [Output Only] Metadata about this warning in key: value format.
For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
message: [Output Only] A human-readable description of the warning code.
"""
class DataValueListEntry(_messages.Message):
r"""A DataValueListEntry object.
Fields:
key: [Output Only] A key that provides more detail on the warning
being returned. For example, for warnings where there are no results
in a list request for a particular zone, this key might be scope and
the key value might be the zone name. Other examples might be a key
indicating a deprecated resource and a suggested replacement, or a
warning about invalid network settings (for example, if an instance
attempts to perform IP forwarding but is not enabled for IP
forwarding).
value: [Output Only] A warning data value corresponding to the key.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
code = _messages.StringField(1)
data = _messages.MessageField('DataValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
accessControl = _messages.MessageField('ResourceAccessControl', 1)
error = _messages.MessageField('ErrorValue', 2)
finalProperties = _messages.StringField(3)
intent = _messages.StringField(4)
manifest = _messages.StringField(5)
properties = _messages.StringField(6)
state = _messages.StringField(7)
warnings = _messages.MessageField('WarningsValueListEntry', 8, repeated=True)
class ResourcesListResponse(_messages.Message):
r"""A response containing a partial list of resources and a page token used
to build the next request if the request has been truncated.
Fields:
nextPageToken: A token used to continue a truncated list request.
resources: Resources contained in this list response.
"""
nextPageToken = _messages.StringField(1)
resources = _messages.MessageField('Resource', 2, repeated=True)
class Rule(_messages.Message):
r"""A rule to be applied in a Policy.
Fields:
action: Required
conditions: Additional restrictions that must be met. All conditions must
pass for the rule to match.
description: Human-readable description of the rule.
ins: If one or more 'in' clauses are specified, the rule matches if the
PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.
logConfigs: The config returned to callers of tech.iam.IAM.CheckPolicy for
any entries that match the LOG action.
notIns: If one or more 'not_in' clauses are specified, the rule matches if
the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.
permissions: A permission is a string of form '..' (e.g.,
'storage.buckets.list'). A value of '*' matches all permissions, and a
verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.
"""
action = _messages.StringField(1)
conditions = _messages.MessageField('Condition', 2, repeated=True)
description = _messages.StringField(3)
ins = _messages.StringField(4, repeated=True)
logConfigs = _messages.MessageField('LogConfig', 5, repeated=True)
notIns = _messages.StringField(6, repeated=True)
permissions = _messages.StringField(7, repeated=True)
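# Illustrative sketch only (not part of the generated API surface): a Rule is
# normally embedded in a Policy rather than used on its own.  The field names
# below come from the definitions above; the concrete action and permission
# strings are made-up example values.
#
#   example_rule = Rule(
#       action='ALLOW',
#       description='Allow bucket listing for auditors',
#       permissions=['storage.buckets.list'],
#   )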
class ServiceAccount(_messages.Message):
r"""Service Account used as a credential.
Fields:
email: The IAM service account email address like
[email protected]
"""
email = _messages.StringField(1)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: An opaque string that represents a user for quota purposes.
Must not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
userIp: Deprecated. Please use quotaUser instead.
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
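# For illustration only: these parameters ride along on every request issued by
# the generated client, e.g.
#
#   query_params = StandardQueryParameters(fields='name,insertTime', prettyPrint=False)
#
# key and oauth_token are normally injected by the client/credentials machinery
# rather than set by hand; the values above are hypothetical.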
class TargetConfiguration(_messages.Message):
r"""TargetConfiguration message type.
Fields:
config: The configuration to use for this deployment.
imports: Specifies any files to import for this configuration. This can be
used to import templates or other files. For example, you might import a
text file in order to use the file in a template.
"""
config = _messages.MessageField('ConfigFile', 1)
imports = _messages.MessageField('ImportFile', 2, repeated=True)
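# Hypothetical usage sketch: a deployment pairs one main config with any imported
# templates.  ConfigFile and ImportFile are defined earlier in this module; the
# sketch assumes they expose `content` (and ImportFile a `name`) as in the public
# Deployment Manager API -- check those definitions before relying on this.
#
#   target = TargetConfiguration(
#       config=ConfigFile(content=open('deployment.yaml').read()),
#       imports=[ImportFile(name='vm.jinja', content=open('vm.jinja').read())],
#   )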
class TemplateContents(_messages.Message):
r"""Files that make up the template contents of a template type.
Fields:
imports: Import files referenced by the main template.
interpreter: Which interpreter (python or jinja) should be used during
expansion.
mainTemplate: The filename of the mainTemplate
schema: The contents of the template schema.
template: The contents of the main template file.
"""
imports = _messages.MessageField('ImportFile', 1, repeated=True)
interpreter = _messages.StringField(2)
mainTemplate = _messages.StringField(3)
schema = _messages.StringField(4)
template = _messages.StringField(5)
class TestPermissionsRequest(_messages.Message):
r"""A TestPermissionsRequest object.
Fields:
permissions: The set of permissions to check for the 'resource'.
Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
"""
permissions = _messages.StringField(1, repeated=True)
class TestPermissionsResponse(_messages.Message):
r"""A TestPermissionsResponse object.
Fields:
permissions: A subset of `TestPermissionsRequest.permissions` that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
class Type(_messages.Message):
r"""A resource type supported by Deployment Manager.
Fields:
base: Base Type (configurable service) that backs this Type.
description: An optional textual description of the resource; provided by
the client when the resource is created.
id: A string attribute.
insertTime: Output only. Creation timestamp in RFC3339 text format.
labels: Map of labels; provided by the client when the resource is created
or updated. Specifically: Label keys must be between 1 and 63 characters
long and must conform to the following regular expression:
[a-z]([-a-z0-9]*[a-z0-9])? Label values must be between 0 and 63
characters long and must conform to the regular expression
([a-z]([-a-z0-9]*[a-z0-9])?)?
name: Name of the type.
operation: Output only. The Operation that most recently ran, or is
currently running, on this type.
selfLink: Output only. Server defined URL for the resource.
"""
base = _messages.MessageField('BaseType', 1)
description = _messages.StringField(2)
id = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(4)
labels = _messages.MessageField('TypeLabelEntry', 5, repeated=True)
name = _messages.StringField(6)
operation = _messages.MessageField('Operation', 7)
selfLink = _messages.StringField(8)
class TypeInfo(_messages.Message):
r"""Type Information. Contains detailed information about a composite type,
base type, or base type with specific collection.
Fields:
description: The description of the type.
documentationLink: For swagger 2.0 externalDocs field will be used. For
swagger 1.2 this field will be empty.
kind: Output only. Type of the output. Always deploymentManager#TypeInfo
for TypeInfo.
name: The base type or composite type name.
schema: For base types with a collection, we return a schema and
      documentation link. For template types, we return only a schema
selfLink: Output only. Self link for the type provider.
title: The title on the API descriptor URL provided.
"""
description = _messages.StringField(1)
documentationLink = _messages.StringField(2)
kind = _messages.StringField(3)
name = _messages.StringField(4)
schema = _messages.MessageField('TypeInfoSchemaInfo', 5)
selfLink = _messages.StringField(6)
title = _messages.StringField(7)
class TypeInfoSchemaInfo(_messages.Message):
r"""TypeInfoSchemaInfo message type.
Fields:
input: The properties that this composite type or base type collection
accept as input, represented as a json blob, format is: JSON Schema
Draft V4
output: The properties that this composite type or base type collection
exposes as output, these properties can be used for references,
represented as json blob, format is: JSON Schema Draft V4
"""
input = _messages.StringField(1)
output = _messages.StringField(2)
class TypeLabelEntry(_messages.Message):
r"""A TypeLabelEntry object.
Fields:
key: A string attribute.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
class TypeProvider(_messages.Message):
r"""A type provider that describes a service-backed Type.
Fields:
collectionOverrides: Allows resource handling overrides for specific
collections
credential: Credential used when interacting with this type.
customCertificateAuthorityRoots: List of up to 2 custom certificate
authority roots to use for TLS authentication when making calls on
behalf of this type provider. If set, TLS authentication will
exclusively use these roots instead of relying on publicly trusted
certificate authorities when validating TLS certificate authenticity.
The certificates must be in base64-encoded PEM format. The maximum size
of each certificate must not exceed 10KB.
description: An optional textual description of the resource; provided by
the client when the resource is created.
    descriptorUrl: Descriptor URL for this type provider.
id: Output only. Unique identifier for the resource defined by the server.
insertTime: Output only. Creation timestamp in RFC3339 text format.
labels: Map of labels; provided by the client when the resource is created
or updated. Specifically: Label keys must be between 1 and 63 characters
long and must conform to the following regular expression:
[a-z]([-a-z0-9]*[a-z0-9])? Label values must be between 0 and 63
characters long and must conform to the regular expression
([a-z]([-a-z0-9]*[a-z0-9])?)?
name: Name of the resource; provided by the client when the resource is
created. The name must be 1-63 characters long, and comply with RFC1035.
Specifically, the name must be 1-63 characters long and match the
regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first
character must be a lowercase letter, and all following characters must
be a dash, lowercase letter, or digit, except the last character, which
cannot be a dash.
operation: Output only. The Operation that most recently ran, or is
currently running, on this type provider.
options: Options to apply when handling any resources in this service.
selfLink: Output only. Self link for the type provider.
"""
collectionOverrides = _messages.MessageField('CollectionOverride', 1, repeated=True)
credential = _messages.MessageField('Credential', 2)
customCertificateAuthorityRoots = _messages.StringField(3, repeated=True)
description = _messages.StringField(4)
descriptorUrl = _messages.StringField(5)
id = _messages.IntegerField(6, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(7)
labels = _messages.MessageField('TypeProviderLabelEntry', 8, repeated=True)
name = _messages.StringField(9)
operation = _messages.MessageField('Operation', 10)
options = _messages.MessageField('Options', 11)
selfLink = _messages.StringField(12)
class TypeProviderLabelEntry(_messages.Message):
r"""A TypeProviderLabelEntry object.
Fields:
key: A string attribute.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
class TypeProvidersListResponse(_messages.Message):
r"""A response that returns all Type Providers supported by Deployment
Manager
Fields:
nextPageToken: A token used to continue a truncated list request.
typeProviders: Output only. A list of resource type providers supported by
Deployment Manager.
"""
nextPageToken = _messages.StringField(1)
typeProviders = _messages.MessageField('TypeProvider', 2, repeated=True)
class TypeProvidersListTypesResponse(_messages.Message):
r"""TypeProvidersListTypesResponse message type.
Fields:
nextPageToken: A token used to continue a truncated list request.
types: Output only. A list of resource type info.
"""
nextPageToken = _messages.StringField(1)
types = _messages.MessageField('TypeInfo', 2, repeated=True)
class TypesListResponse(_messages.Message):
r"""A response that returns all Types supported by Deployment Manager
Fields:
nextPageToken: A token used to continue a truncated list request.
types: Output only. A list of resource types supported by Deployment
Manager.
"""
nextPageToken = _messages.StringField(1)
types = _messages.MessageField('Type', 2, repeated=True)
class ValidationOptions(_messages.Message):
r"""Options for how to validate and process properties on a resource.
Fields:
schemaValidation: Customize how deployment manager will validate the
resource against schema errors.
undeclaredProperties: Specify what to do with extra properties when
executing a request.
"""
schemaValidation = _messages.StringField(1)
undeclaredProperties = _messages.StringField(2)
| [
"[email protected]"
] | |
d2c7ab03478503220cdf8c286f1feb0daed10e8a | cad5b92686d48a2e06766b5d3d671eb41083b825 | /microcosm_pubsub/tests/test_decorators.py | b0e88c4ccf382cd9d7abde003189d5cc6f2b313e | [
"Apache-2.0"
] | permissive | lior001/microcosm-pubsub | 8166b4596c04d78330f2ceca31f2827d272ec6ae | eeea8409c1f89a6c420fdf42afcc92b1d69d0e11 | refs/heads/develop | 2020-12-31T00:09:54.584041 | 2017-02-08T18:51:23 | 2017-02-08T18:51:23 | 86,563,543 | 0 | 0 | null | 2017-03-29T09:29:54 | 2017-03-29T09:29:54 | null | UTF-8 | Python | false | false | 1,016 | py | """
Decorator tests.
"""
from hamcrest import (
assert_that,
equal_to,
instance_of,
is_,
)
from marshmallow import fields, Schema
from microcosm.api import create_object_graph
from microcosm_pubsub.decorators import handles, schema
@schema
class TestSchema(Schema):
MEDIA_TYPE = "test"
test = fields.String()
@handles(TestSchema)
def noop_handler(message):
return True
class TestDecorators(object):
def setup(self):
self.graph = create_object_graph("test")
self.graph.use(
"pubsub_message_schema_registry",
"sqs_message_handler_registry",
)
def test_schema_decorators(self):
assert_that(
self.graph.pubsub_message_schema_registry[TestSchema.MEDIA_TYPE].schema,
is_(instance_of(TestSchema)),
)
def test_handles_decorators(self):
assert_that(
self.graph.sqs_message_handler_registry[TestSchema.MEDIA_TYPE],
is_(equal_to(noop_handler)),
)
| [
"[email protected]"
] | |
c0433ae54e1875d6032f2bb5e76a991006e302f1 | a867b1c9da10a93136550c767c45e0d8c98f5675 | /LC_yelp_14_Longest_Common_Prefix.py | d27e5697bb4aff657ed6b1ef3ff7795167d9a246 | [] | no_license | Omkar02/FAANG | f747aacc938bf747129b8ff35b6648fb265d95b6 | ee9b245aa83ea58aa67954ab96442561dbe68d06 | refs/heads/master | 2023-03-25T19:45:08.153403 | 2021-03-28T07:13:08 | 2021-03-28T07:13:08 | 280,783,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # import __main__ as main
# from Helper.TimerLogger import CodeTimeLogging
# fileName = main.__file__
# fileName = fileName.split('\\')[-1]
# CodeTimeLogging(Flag='F', filename=fileName, Tag='String', Difficult='Easy')
def longestCommonPrefix(strs):
    # zip(*strs) lines the words up column by column, truncated to the shortest word
    strs = list(zip(*strs))
    ans = ''
    for s in strs:
        if len(set(s)) == 1:
            ans += s[0]
        else:
            break  # stop at the first column that differs
    return ans if ans else -1  # note: LeetCode 14 itself expects "" when there is no common prefix
strs = ["flower", "flow", "flight"]
# strs = ["dog", "racecar", "car"]
print(longestCommonPrefix(strs))
| [
"[email protected]"
] | |
dcffb14f17f4fb9194a97de500c404736ef0cec9 | edfd1db2b48d4d225bc58be32fbe372a43415112 | /3. Airflow Fundamentals 3/exercises/lesson3.exercise6.py | e5c543342b90982555c56e162aa19390c2e9af9f | [] | no_license | rwidjojo/airflow-training | ed83cb9e97ca85ef06de1426f2f41014881a1f22 | ac82040d8ddc3859df5576eee08d397e824016f1 | refs/heads/main | 2023-08-12T21:01:17.672059 | 2021-01-04T09:17:48 | 2021-01-04T09:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | import airflow
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash_operator import BashOperator
owner = 'john_doe' # Replace with your short name
default_args = {
'owner': owner,
'depends_on_past': False,
'start_date': days_ago(2),
}
dag = DAG(
dag_id=f'{owner}.lesson3.excercise6',
start_date=airflow.utils.dates.days_ago(3),
schedule_interval=None,
)
print_template = BashOperator(
task_id="print_template",
bash_command='echo "execution date is {{ ts }} with year {{ execution_date.year }} and month {{ \'{:02}\'.format(execution_date.month) }}"',
dag=dag,
) | [
"[email protected]"
] | |
3e4ac973d2c8a00ad85ba4f40d23f66a548805d7 | e89f44632effe9ba82b940c7721cad19a32b8a94 | /text2shorthand/shorthand/svsd/nakatta.py | 23f660a4c9c066fff1b7352e466d935597403401 | [] | no_license | Wyess/text2shorthand | 3bcdb708f1d7eeb17f9ae3181c4dd70c65c8986e | 5ba361c716178fc3b7e68ab1ae724a57cf3a5d0b | refs/heads/master | 2020-05-17T14:52:11.369058 | 2019-08-20T12:50:00 | 2019-08-20T12:50:00 | 183,776,467 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | from ..svsd.char import SvsdChar
from text2shorthand.common.point import Point as P, PPoint as PP
import pyx
from pyx.metapost.path import (
beginknot,
knot,
endknot,
smoothknot,
tensioncurve,
controlcurve,
curve)
class CharNakatta(SvsdChar):
def __init__(self, name='なかった', kana='nakatta',
model='NER10SWL5UNR2', head_type='NER', tail_type='SER', soundmark=''):
super().__init__(name, kana, model, head_type, tail_type, soundmark)
self.head_ligature = {}
#self.tail_ligature = {}
@classmethod
def path_NERSWLUNR(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRe(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRer(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRel(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRne(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRner(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRnel(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRs(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRsl(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRsr(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRse(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRser(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRsel(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRsw(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRswr(cls, ta=None, **kwargs):
pass
@classmethod
def path_NERSWLUNRswl(cls, ta=None, **kwargs):
pass
| [
"[email protected]"
] | |
c1d36397e7e64ebf831efb5633fa13e307f25556 | f883b2ccb4bf6d527f31fca1f1748e8aa5f17f3a | /web/app/social_auth/urls.py | 2a6e7402dd2d88391b4a3b7b449e536511f5e311 | [] | no_license | nikolaykhodov/liketools | a710faa7fe31cd72df8299829bcc89d16a8d2721 | 65b4a046c3180eec3af0fa709f23bb12975dfe1c | refs/heads/master | 2021-01-10T18:35:08.586890 | 2014-03-31T12:41:50 | 2014-03-31T12:41:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from social_auth.views import VkAuthView, LoginView, LogoutView, KvSetView
urlpatterns = patterns('',
url(r'^login/$', LoginView.as_view(), name='social_auth_login'),
url(r'^logout/$', LogoutView.as_view(), name='social_auth_logout'),
url(r'^vk/$', VkAuthView.as_view(), name='social_auth_vk'),
url(r'^keyvalue_set/$', KvSetView.as_view(), name='keyvalue_set'),
)
| [
"[email protected]"
] | |
65c5dc7ecc967754e6eb46de86e6f915461f2ea1 | d0af9f544b76e1df4f8ffb6c65a3da1fe13c5871 | /setup.py | c10007500e179356be10008723c3f04774009beb | [
"MIT"
] | permissive | vanadium23/doc484 | 472a90ad08352891aa3ed9526375aebad71f3d16 | ff8058f07e6cba8f26e7ce48ef4dd42203dc065a | refs/heads/master | 2020-03-29T20:54:44.503542 | 2018-09-25T22:55:08 | 2018-09-25T22:55:08 | 150,338,332 | 0 | 0 | MIT | 2018-09-25T22:48:16 | 2018-09-25T22:48:16 | null | UTF-8 | Python | false | false | 1,730 | py | from setuptools import setup, find_packages
import os.path
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
with open(os.path.join(HERE, *parts)) as f:
return f.read()
setup(
name="doc484",
version="0.2.0",
author="Chad Dombrova",
description="Generate PEP 484 type comments from docstrings",
long_description=read("README.rst"),
license="MIT",
keywords=["mypy", "typing", "pep484", "docstrings", "annotations"],
url="https://github.com/chadrik/doc484",
packages=find_packages(),
entry_points={
'console_scripts': ['doc484=doc484.__main__:main'],
},
install_requires=[
"docutils", # only required for rest format
],
extras_require={
"tests": [
"coverage",
"pytest==3.6.2",
"tox==2.7.0",
],
},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
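# Once the package is installed (e.g. `pip install doc484` or `pip install .`), the
# console_scripts entry point above exposes a `doc484` command that simply calls
# doc484.__main__:main; the available flags live in that module and are not listed here.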
| [
"[email protected]"
] | |
fad7b7f80e12f72ca4a0827c794f7b6a156be69f | 7eb606a7957e5500f163c93dc4b19418cf9cf335 | /examples/lbfgs/model.py | d68967e5e2c64b3f598e36263ab1e7edc2e6d907 | [
"Apache-2.0",
"MIT"
] | permissive | ludwig-ai/ludwig | 024f74da86567a57ec8e30efcb4600f0c52333a1 | e1d023e41606c9b76b35e1d231c2f13368a30eca | refs/heads/master | 2023-09-03T08:07:32.978301 | 2023-09-01T19:39:32 | 2023-09-01T19:39:32 | 163,346,054 | 2,567 | 285 | Apache-2.0 | 2023-09-14T20:34:52 | 2018-12-27T23:58:12 | Python | UTF-8 | Python | false | false | 1,033 | py | import logging
import pandas as pd
from ludwig.api import LudwigModel
from ludwig.datasets import amazon_employee_access_challenge
df = amazon_employee_access_challenge.load()
model = LudwigModel(config="config.yaml", logging_level=logging.INFO)
training_statistics, preprocessed_data, output_directory = model.train(
df,
skip_save_processed_input=True,
skip_save_log=True,
skip_save_progress=True,
skip_save_training_description=True,
skip_save_training_statistics=True,
)
# Predict on unlabeled test
config = model.config
config["preprocessing"] = {}
model.config = config
unlabeled_test = df[df.split == 2].reset_index(drop=True)
preds, _ = model.predict(unlabeled_test)
# Save predictions to csv
action = preds.ACTION_probabilities_True
submission = pd.merge(unlabeled_test.reset_index(drop=True).id.astype(int), action, left_index=True, right_index=True)
submission.rename(columns={"ACTION_probabilities_True": "Action", "id": "Id"}, inplace=True)
submission.to_csv("submission.csv", index=False)
| [
"[email protected]"
] | |
a5124967f629b267a5314e52eda661da43dc0c9a | aba1d17ddc7d7ad9f49e2d6d87600e9e0387ba14 | /mi/dataset/driver/dosta_abcdjm/cspp/dosta_abcdjm_cspp_telemetered_driver.py | 6f3ee50a7855782d946f895365a65c2d76667fb4 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | oceanobservatories/mi-instrument | 3ad880c1366b1a8461fc9085768df0e9ddeb6ef5 | bdbf01f5614e7188ce19596704794466e5683b30 | refs/heads/master | 2023-07-23T07:28:36.091223 | 2023-07-14T15:54:49 | 2023-07-14T15:54:49 | 24,165,325 | 1 | 32 | BSD-2-Clause | 2023-07-13T01:39:22 | 2014-09-17T22:53:22 | Python | UTF-8 | Python | false | false | 2,147 | py | """
@package mi.dataset.driver.dosta_abcdjm.cspp
@file mi.dataset.driver.dosta_abcdjm.cspp.dosta_abcdjm_cspp_telemetered_driver.py
@author Emily Hahn
@brief Telemetered driver for the dosta series abcdjm instrument through cspp
"""
__author__ = 'ehahn'
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.cspp_base import METADATA_PARTICLE_CLASS_KEY, DATA_PARTICLE_CLASS_KEY
from mi.dataset.parser.dosta_abcdjm_cspp import DostaAbcdjmCsppParser, \
DostaAbcdjmCsppMetadataTelemeteredDataParticle, \
DostaAbcdjmCsppInstrumentTelemeteredDataParticle
from mi.core.versioning import version
@version("15.7.1")
def parse(unused, source_file_path, particle_data_handler):
"""
This is the method called by Uframe
:param unused
:param source_file_path This is the full path and filename of the file to be parsed
:param particle_data_handler Java Object to consume the output of the parser
:return particle_data_handler
"""
with open(source_file_path, 'rU') as stream_handle:
# create and instance of the concrete driver class defined below
driver = DostaAbcdjmCsppTelemeteredDriver(unused, stream_handle, particle_data_handler)
driver.processFileStream()
return particle_data_handler
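# Rough local-testing sketch (in production uFrame supplies these arguments).  The
# ParticleDataHandler import path is an assumption based on sibling drivers in this
# package, and the file path is only a placeholder:
#
#   from mi.dataset.dataset_driver import ParticleDataHandler
#   handler = parse(None, '/tmp/example_dosta_abcdjm_cspp.txt', ParticleDataHandler())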
class DostaAbcdjmCsppTelemeteredDriver(SimpleDatasetDriver):
"""
This class just needs to create the _build_parser method of the SimpleDatasetDriver
"""
def _build_parser(self, stream_handle):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dosta_abcdjm_cspp',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: DostaAbcdjmCsppMetadataTelemeteredDataParticle,
DATA_PARTICLE_CLASS_KEY: DostaAbcdjmCsppInstrumentTelemeteredDataParticle,
}
}
return DostaAbcdjmCsppParser(parser_config, stream_handle, self._exception_callback)
| [
"[email protected]"
] | |
007bd03242c94c601ec7e96b1cc0870c218cafc8 | a72cb4d00528fb3d2d47f99a1ccca1b8b9b41ff7 | /scripts/addons_extern/mesh_selection_topokit.py | d5f60d87eccf6eb335981176cc0aa8f9e909eaf3 | [] | no_license | talocan/blenderpython | b05204881183ff901ec189916a3bcc1d3e9d3e20 | 056ac37e76a1b410696c9efe4fe0ea09fdc68c0e | refs/heads/master | 2021-01-18T05:16:47.221786 | 2014-07-11T17:01:53 | 2014-07-11T17:01:53 | 21,749,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,180 | py | bl_info = {
"name": "Topokit 2",
"author": "dustractor",
"version": (2,0),
"blender": (2,6,0),
"api": 41935,
"location": "edit mesh vertices/edges/faces menus",
"description": "",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Mesh"}
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 - 1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
# In between calls, this stores any data that is expensive or static,
# matched to the size of the mesh and the id of the operator that created it
cachedata = dict()
# and the object keeps the key to the cachedata
bpy.types.Object.tkkey = bpy.props.IntVectorProperty(size=4)
# just a mix-in for the operators...
class meshpoller:
@classmethod
def poll(self,context):
try:
assert context.active_object.type == "MESH"
except:
return False
finally:
return True
#BEGIN VERTICES SECTION
# This one works similarly to normal 'grow' (ctrl + NUMPAD_PLUS),
# except the original selection is not part of the result,
#
# 0--0--0 0--1--0
# | | | | | |
# 0--1--0 --> 1--0--1
# | | | | | |
# 0--0--0 0--1--0
#
class MESH_OT_vneighbors_edgewise(meshpoller,bpy.types.Operator):
bl_idname = "mesh.v2v_by_edge"
bl_label = "Neighbors by Edge"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
global cachedata
bpy.ops.object.mode_set(mode="OBJECT")
obj = context.active_object
mesh = obj.data
meshkey = (len(mesh.vertices),len(mesh.edges),len(mesh.faces),id(self))
next_state = bytearray(meshkey[0])
if (meshkey == obj.tkkey) and (meshkey in cachedata):
vert_to_vert_map,prev_state = cachedata[meshkey]
else:
vert_to_vert_map = {i:{} for i in range(meshkey[0])}
for a,b in mesh.edge_keys:
vert_to_vert_map[a][b] = 1
vert_to_vert_map[b][a] = 1
obj.tkkey = meshkey
prev_state = None
if not prev_state:
selected_vert_indices = filter(lambda _:mesh.vertices[_].select,range(len(mesh.vertices)))
else:
selected_vert_indices = filter(lambda _:mesh.vertices[_].select and not prev_state[_],range(len(mesh.vertices)))
for v in selected_vert_indices:
for neighbor_index in vert_to_vert_map[v]:
next_state[neighbor_index] = True
mesh.vertices.foreach_set("select",next_state)
cachedata[meshkey] = (vert_to_vert_map,next_state)
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
# This one is an alternate / counterpart to the previous.
# Think: diagonal opposite corners of a quad
# NOTE: does not apply to a triangle, since verts have no 'opposite'
#
# 0--0--0 1--0--1
# | | | | | |
# 0--1--0 --> 0--0--0
# | | | | | |
# 0--0--0 1--0--1
#
class MESH_OT_vneighbors_facewise(meshpoller,bpy.types.Operator):
bl_idname = "mesh.v2v_facewise"
bl_label = "Neighbors by Face - Edge"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
global cachedata
bpy.ops.object.mode_set(mode="OBJECT")
obj = context.active_object
mesh = obj.data
meshkey = (len(mesh.vertices),len(mesh.edges),len(mesh.faces),id(self))
next_state = bytearray(meshkey[0])
if (meshkey == obj.tkkey) and (meshkey in cachedata):
vert_to_vert_map = cachedata[meshkey]
else:
vert_to_vert_map = {i:{} for i in range(meshkey[0])}
for a,b in mesh.edge_keys:
vert_to_vert_map[a][b] = 1
vert_to_vert_map[b][a] = 1
obj.tkkey = meshkey
faces = filter(lambda face:(len(face.vertices)==4) and (face.select == False),mesh.faces)
for f in faces:
has = False
t = set()
for v in f.vertices:
if mesh.vertices[v].select:
has = True
t.update(vert_to_vert_map[v])
if has:
for v in f.vertices:
if not mesh.vertices[v].select:
if v not in t:
next_state[v]=1
mesh.vertices.foreach_set("select",next_state)
cachedata[meshkey] = vert_to_vert_map
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
def vvmenuitem(self,context):
self.layout.operator(MESH_OT_vneighbors_edgewise.bl_idname)
self.layout.operator(MESH_OT_vneighbors_facewise.bl_idname)
#for the sake of completeness, yes there is one alg missing - one for both...
#END VERTICES SECTION
#BEGIN EDGES SECTION
# +--0--+--0--+--0--+ +--0--+--0--+--0--+
# | | | | | | | |
# 0 0 0 0 0 1 1 0
# | | | | | | | |
# +--0--+--1--+--0--+ ---> +--0--+--0--+--0--+
# | | | | | | | |
# 0 0 0 0 0 1 1 0
# | | | | | | | |
# +--0--+--0--+--0--+ +--0--+--0--+--0--+
class MESH_OT_eneighbors_shared_v_f(meshpoller,bpy.types.Operator):
bl_idname = "mesh.e2e_evfe"
bl_label = "Neighbors by Vert+Face"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
global cachedata
bpy.ops.object.mode_set(mode="OBJECT")
obj = context.active_object
mesh = obj.data
meshkey = (len(mesh.vertices),len(mesh.edges),len(mesh.faces),id(self))
state_mask = bytearray(meshkey[1])
if (meshkey == obj.tkkey) and (meshkey in cachedata):
            edge_to_edges_dict = cachedata[meshkey]
else:
edge_key_to_index = {k:i for i,k in enumerate(mesh.edge_keys)}
edge_to_edges_dict = {i:set() for i in range(len(mesh.edges))}
for f in mesh.faces:
fed=[edge_key_to_index[k] for k in f.edge_keys]
for k in f.edge_keys:
edge_to_edges_dict[edge_key_to_index[k]].update(fed)
obj.tkkey = meshkey
for e in filter(lambda _:mesh.edges[_].select,edge_to_edges_dict):
k1 = set(mesh.edges[e].key)
for n in edge_to_edges_dict[e]:
k2 = set(mesh.edges[n].key)
if not k1.isdisjoint(k2):
state_mask[n] = True
for e in mesh.edges:
e.select ^= state_mask[e.index]
        cachedata[meshkey] = edge_to_edges_dict
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
# +--0--+--0--+--0--+ +--0--+--0--+--0--+
# | | | | | | | |
# 0 0 0 0 0 1 1 0
# | | | | | | | |
# +--0--+--1--+--0--+ ---> +--1--+--0--+--1--+
# | | | | | | | |
# 0 0 0 0 0 1 1 0
# | | | | | | | |
# +--0--+--0--+--0--+ +--0--+--0--+--0--+
class MESH_OT_eneighbors_shared_v(meshpoller,bpy.types.Operator):
bl_idname = "mesh.e2e_eve"
bl_label = "Neighbors by Vert"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
bpy.ops.object.mode_set(mode="OBJECT")
mesh = context.active_object.data
state_mask = bytearray(len(mesh.edges))
for e in mesh.edges:
state_mask[e.index] = mesh.vertices[e.vertices[0]].select ^ mesh.vertices[e.vertices[1]].select
mesh.edges.foreach_set('select',state_mask)
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
# +--0--+--0--+--0--+ +--0--+--1--+--0--+
# | | | | | | | |
# 0 0 0 0 0 1 1 0
# | | | | | | | |
# +--0--+--1--+--0--+ ---> +--0--+--0--+--0--+
# | | | | | | | |
# 0 0 0 0 0 1 1 0
# | | | | | | | |
# +--0--+--0--+--0--+ +--0--+--1--+--0--+
class MESH_OT_eneighbors_shared_f(meshpoller,bpy.types.Operator):
bl_idname = "mesh.e2e_efe"
bl_label = "Neighbors by Face"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
global cachedata
bpy.ops.object.mode_set(mode="OBJECT")
obj = context.active_object
mesh = obj.data
meshkey = (len(mesh.vertices),len(mesh.edges),len(mesh.faces),id(self))
if (meshkey == obj.tkkey) and (meshkey in cachedata):
            edge_to_edges_dict = cachedata[meshkey]
else:
edge_key_to_index = {k:i for i,k in enumerate(mesh.edge_keys)}
edge_to_edges_dict = {i:set() for i in range(len(mesh.edges))}
for f in mesh.faces:
fed=[edge_key_to_index[k] for k in f.edge_keys]
for k in f.edge_keys:
edge_to_edges_dict[edge_key_to_index[k]].update(fed)
obj.tkkey = meshkey
state_mask,esel = (bytearray(meshkey[1]),bytearray(meshkey[1]))
mesh.edges.foreach_get('select',esel)
for e in filter(lambda _:mesh.edges[_].select,range(meshkey[1])):
for n in edge_to_edges_dict[e]:
state_mask[n] = 1
for e in range(meshkey[1]):
esel[e] ^= state_mask[e]
mesh.edges.foreach_set('select',esel)
cachedata[meshkey] = edge_to_edges_dict
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
# notice that on these next two, the original selection stays
# +--0--+--0--+--0--+ +--0--+--1--+--0--+
# | | | | | | | |
# 0 0 0 0 0 0 0 0
# | | | | | | | |
# +--0--+--1--+--0--+ ---> +--0--+--1--+--0--+
# | | | | | | | |
# 0 0 0 0 0 0 0 0
# | | | | | | | |
# +--0--+--0--+--0--+ +--0--+--1--+--0--+
class MESH_OT_eneighbors_shared_f_notv(meshpoller,bpy.types.Operator):
bl_idname = "mesh.e2e_efnve"
bl_label = "Lateral Neighbors"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
global cachedata
bpy.ops.object.mode_set(mode="OBJECT")
obj = context.active_object
mesh = obj.data
meshkey = (len(mesh.vertices),len(mesh.edges),len(mesh.faces),id(self))
state_mask = bytearray(meshkey[1])
if (meshkey == obj.tkkey) and (meshkey in cachedata):
edge_to_face_map,edge_key_to_index = cachedata[meshkey]
else:
edge_key_to_index = {}
edge_to_face_map = {i:set() for i in range(meshkey[1])}
for i,k in enumerate(mesh.edge_keys):
edge_key_to_index[k] = i
for f in mesh.faces:
for k in f.edge_keys:
edge_to_face_map[edge_key_to_index[k]].add(f.index)
obj.tkkey = meshkey
selected_edge_indices = filter(lambda _:mesh.edges[_].select,range(meshkey[1]))
for e in selected_edge_indices:
for f in edge_to_face_map[e]:
for k in mesh.faces[f].edge_keys:
hasv_in = False
for v in mesh.edges[e].key:
if v in k:
hasv_in = True
if hasv_in:
continue
else:
state_mask[edge_key_to_index[k]] = True
for e in filter(lambda _:state_mask[_],range(meshkey[1])):
mesh.edges[e].select |= state_mask[e]
cachedata[meshkey] = (edge_to_face_map,edge_key_to_index)
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
# +--0--+--0--+--0--+ +--0--+--0--+--0--+
# | | | | | | | |
# 0 0 0 0 0 0 0 0
# | | | | | | | |
# +--0--+--1--+--0--+ ---> +--1--+--1--+--1--+
# | | | | | | | |
# 0 0 0 0 0 0 0 0
# | | | | | | | |
# +--0--+--0--+--0--+ +--0--+--0--+--0--+
class MESH_OT_eneighbors_shared_v_notf(meshpoller,bpy.types.Operator):
bl_idname = "mesh.e2e_evnfe"
bl_label = "Longitudinal Edges"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
global cachedata
bpy.ops.object.mode_set(mode="OBJECT")
obj = context.active_object
mesh = obj.data
meshkey = (len(mesh.vertices),len(mesh.edges),len(mesh.faces),id(self))
state_mask = bytearray(meshkey[1])
vstate = bytearray(meshkey[0])
mesh.vertices.foreach_get('select',vstate)
if (meshkey == obj.tkkey) and (meshkey in cachedata):
edge_to_face_map,vert_to_vert_map,edge_key_to_index = cachedata[meshkey]
else:
edge_key_to_index = {}
vert_to_vert_map = {i:set() for i in range(meshkey[0])}
edge_to_face_map = {i:set() for i in range(meshkey[1])}
for i,k in enumerate(mesh.edge_keys):
edge_key_to_index[k] = i
vert_to_vert_map[k[0]].add(k[1])
vert_to_vert_map[k[1]].add(k[0])
for f in mesh.faces:
for k in f.edge_keys:
edge_to_face_map[edge_key_to_index[k]].add(f.index)
obj.tkkey = meshkey
selected_edge_indices = filter(lambda _:mesh.edges[_].select,range(meshkey[1]))
for e in selected_edge_indices:
for v in mesh.edges[e].key:
state_mask[v] ^=1
for f in edge_to_face_map[e]:
for v in mesh.faces[f].vertices_raw:
vstate[v] = 1
for v in filter(lambda _:state_mask[_],range(meshkey[1])):
for n in vert_to_vert_map[v]:
if not vstate[n] and (n != v):
mesh.edges[edge_key_to_index[(min(v,n),max(v,n))]].select = True
cachedata[meshkey] = (edge_to_face_map,vert_to_vert_map,edge_key_to_index)
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
#deselects faces, leaving only edges selected
class MESH_OT_just_the_edges(meshpoller,bpy.types.Operator):
bl_idname = "mesh.je"
bl_label = "Just the Edge Selection"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
global cachedata
bpy.ops.object.mode_set(mode="OBJECT")
obj = context.active_object
mesh = obj.data
meshkey = (len(mesh.vertices),len(mesh.edges),len(mesh.faces),id(self))
state_mask = bytearray(meshkey[1])
if (meshkey == obj.tkkey) and (meshkey in cachedata):
edge_key_to_index = cachedata[meshkey]
else:
edge_key_to_index = {k:i for i,k in enumerate(mesh.edge_keys)}
obj.tkkey = meshkey
for f in filter(lambda _:mesh.faces[_].select,range(meshkey[2])):
for k in mesh.faces[f].edge_keys:
state_mask[edge_key_to_index[k]] = 1
for e in range(meshkey[1]):
mesh.edges[e].select ^= state_mask[e]
cachedata[meshkey] = edge_key_to_index
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
# deselects edges which are at the edge of a face-selection,
# causing selection to 'shrink in'
class MESH_OT_inner_edges(meshpoller,bpy.types.Operator):
bl_idname = "mesh.ie"
bl_label = "Inner Edge Selection"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
global cachedata
bpy.ops.object.mode_set(mode="OBJECT")
obj = context.active_object
mesh = obj.data
meshkey = (len(mesh.vertices),len(mesh.edges),len(mesh.faces),id(self))
state_mask = bytearray(meshkey[1])
if (meshkey == obj.tkkey) and (meshkey in cachedata):
edge_to_face_map = cachedata[meshkey]
else:
edge_key_to_index = {k:i for i,k in enumerate(mesh.edge_keys)}
edge_to_face_map = {i:set() for i in range(meshkey[1])}
for f in mesh.faces:
for k in f.edge_keys:
edge_to_face_map[edge_key_to_index[k]].add(f.index)
obj.tkkey = meshkey
for e in filter(lambda _:mesh.edges[_].select,range(meshkey[1])):
for f in edge_to_face_map[e]:
if mesh.faces[f].select:
state_mask[e] ^=1
for e in range(meshkey[1]):
mesh.edges[e].select ^= state_mask[e]
cachedata[meshkey] = edge_to_face_map
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
def eemenuitem(self,context):
self.layout.operator(MESH_OT_eneighbors_shared_v_f.bl_idname)
self.layout.operator(MESH_OT_eneighbors_shared_v.bl_idname)
self.layout.operator(MESH_OT_eneighbors_shared_f.bl_idname)
self.layout.operator(MESH_OT_eneighbors_shared_f_notv.bl_idname)
self.layout.operator(MESH_OT_eneighbors_shared_v_notf.bl_idname)
self.layout.operator(MESH_OT_just_the_edges.bl_idname)
self.layout.operator(MESH_OT_inner_edges.bl_idname)
#END EDGES SECTION
#BEGIN FACES SECTION
# here is another one which functions very similarly to the ctrl+NUMPAD_PLUS 'growth'
# but it deselects the original selection, of course.
# This would be your checkerboard-type growth.
# [0][0][0] [0][1][0]
# [0][1][0] ---> [1][0][1]
# [0][0][0] [0][1][0]
class MESH_OT_fneighbors_shared_e(meshpoller,bpy.types.Operator):
bl_idname = "mesh.f2f_fef"
bl_label = "Neighbors by Edge"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
global cachedata
bpy.ops.object.mode_set(mode="OBJECT")
obj = context.active_object
mesh = obj.data
meshkey = (len(mesh.vertices),len(mesh.edges),len(mesh.faces),id(self))
if (meshkey == obj.tkkey) and (meshkey in cachedata):
face_to_face_map = cachedata[meshkey]
else:
edge_key_to_index = {k:i for i,k in enumerate(mesh.edge_keys)}
edge_to_face_map = {i:set() for i in range(meshkey[1])}
for f in mesh.faces:
for k in f.edge_keys:
edge_to_face_map[edge_key_to_index[k]].add(f.index)
face_to_face_map = {i:set() for i in range(meshkey[2])}
for f in mesh.faces:
for k in f.edge_keys:
face_to_face_map[f.index].update(edge_to_face_map[edge_key_to_index[k]])
obj.tkkey = meshkey
mask_state = bytearray(meshkey[2])
for f in filter(lambda _:mesh.faces[_].select,range(meshkey[2])):
for n in face_to_face_map[f]:
mask_state[n] = True
for f in range(meshkey[2]):
mesh.faces[f].select ^= mask_state[f]
cachedata[meshkey] = face_to_face_map
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
# [0][0][0] [1][0][1]
# [0][1][0] ---> [0][0][0]
# [0][0][0] [1][0][1]
class MESH_OT_fneighbors_shared_v_note(meshpoller,bpy.types.Operator):
bl_idname = "mesh.f2f_fvnef"
bl_label = "Neighbors by Vert not Edge"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
global cachedata
bpy.ops.object.mode_set(mode="OBJECT")
obj = context.active_object
mesh = obj.data
meshkey = (len(mesh.vertices),len(mesh.edges),len(mesh.faces),id(self))
if (meshkey == obj.tkkey) and (meshkey in cachedata):
edge_key_to_index = cachedata[meshkey]
else:
edge_key_to_index = {k:i for i,k in enumerate(mesh.edge_keys)}
obj.tkkey = meshkey
state_mask = bytearray(meshkey[2])
face_verts = set()
for f in filter(lambda _:mesh.faces[_].select,range(meshkey[2])):
face_verts.update(mesh.faces[f].vertices_raw)
for f in filter(lambda _:not mesh.faces[_].select,range(meshkey[2])):
ct = 0
for v in mesh.faces[f].vertices:
ct += (v in face_verts)
if ct == 1:
state_mask[f] = 1
mesh.faces.foreach_set('select',state_mask)
cachedata[meshkey] = edge_key_to_index
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
# http://en.wikipedia.org/wiki/Conway's_Game_of_Life
class MESH_OT_conway(meshpoller,bpy.types.Operator):
bl_idname = "mesh.conway"
bl_label = "Conway"
bl_options = {"REGISTER","UNDO"}
def execute(self,context):
global cachedata
bpy.ops.object.mode_set(mode="OBJECT")
obj = context.active_object
mesh = obj.data
meshkey = (len(mesh.vertices),len(mesh.edges),len(mesh.faces),id(self))
if (meshkey == obj.tkkey) and (meshkey in cachedata):
vert_to_face_map = cachedata[meshkey]
else:
vert_to_face_map = {i:set() for i in range(meshkey[0])}
for f in mesh.faces:
for v in f.vertices_raw:
vert_to_face_map[v].add(f.index)
obj.tkkey = meshkey
sel = set()
uns = set()
F = {i:set() for i in range(meshkey[2])}
for f in range(meshkey[2]):
for v in mesh.faces[f].vertices_raw:
for n in filter(lambda _: mesh.faces[_].select and (_ != f),vert_to_face_map[v]):
F[f].add(n)
for f in F:
if len(F[f]) == 3:
sel.add(f)
elif len(F[f]) != 2:
uns.add(f)
for f in range(meshkey[2]):
if f in sel:
mesh.faces[f].select = True
if f in uns:
mesh.faces[f].select = False
cachedata[meshkey] = vert_to_face_map
bpy.ops.object.mode_set(mode="EDIT")
return {"FINISHED"}
def ffmenuitem(self,context):
self.layout.operator(MESH_OT_fneighbors_shared_e.bl_idname)
self.layout.operator(MESH_OT_fneighbors_shared_v_note.bl_idname)
self.layout.operator(MESH_OT_conway.bl_idname)
def register():
bpy.utils.register_module(__name__)
bpy.types.VIEW3D_MT_edit_mesh_vertices.append(vvmenuitem)
bpy.types.VIEW3D_MT_edit_mesh_edges.append(eemenuitem)
bpy.types.VIEW3D_MT_edit_mesh_faces.append(ffmenuitem)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.VIEW3D_MT_edit_mesh_vertices.remove(vvmenuitem)
bpy.types.VIEW3D_MT_edit_mesh_edges.remove(eemenuitem)
bpy.types.VIEW3D_MT_edit_mesh_faces.remove(ffmenuitem)
if __name__ == "__main__":
register()
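# Usage sketch (assumes the add-on is enabled, a mesh object is active and Edit Mode has
# a selection); the same operators are reachable from the Vertices/Edges/Faces menus
# extended above:
#
#   bpy.ops.mesh.v2v_by_edge()   # vertex neighbours across connecting edges
#   bpy.ops.mesh.e2e_eve()       # edge neighbours sharing a vertex
#   bpy.ops.mesh.f2f_fef()       # face neighbours sharing an edge
#   bpy.ops.mesh.conway()        # one Game-of-Life step over the face selection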
| [
"[email protected]"
] | |
df2ff2a7a3bc8e55d9c6887725227b2e593cb3e4 | d308fffe3db53b034132fb1ea6242a509f966630 | /pirates/ship/GameFSMShip.py | b0ad0a7ce663134da8aa44f91ddeeff4588297ba | [
"BSD-3-Clause"
] | permissive | rasheelprogrammer/pirates | 83caac204965b77a1b9c630426588faa01a13391 | 6ca1e7d571c670b0d976f65e608235707b5737e3 | refs/heads/master | 2020-03-18T20:03:28.687123 | 2018-05-28T18:05:25 | 2018-05-28T18:05:25 | 135,193,362 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 15,725 | py | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.ship.GameFSMShip
import random
from pandac.PandaModules import *
from direct.fsm import FSM
from direct.interval.IntervalGlobal import *
from direct.showbase.PythonUtil import report
from pirates.audio import SoundGlobals
from pirates.audio.SoundGlobals import loadSfx
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.effects.Explosion import Explosion
from pirates.effects.ShipSplintersA import ShipSplintersA
from pirates.effects.FlamingDebris import FlamingDebris
class GameFSMShip(FSM.FSM):
__module__ = __name__
def __init__(self, ship):
FSM.FSM.__init__(self, 'GameFSMShip')
self.ship = ship
self.risingIval = None
self.sinkIval = None
self.fadeIval = None
self.currentMusic = None
self.grappleSfx = None
self.targetSphereStr = 'grappleTargetSphere'
self.targets = []
self.pendingAddTarget = None
return
def cleanup(self):
if self.pendingAddTarget:
base.cr.relatedObjectMgr.abortRequest(self.pendingAddTarget)
self.pendingAddTarget = None
self.removeTargets()
if self.risingIval:
self.risingIval.finish()
self.risingIval = None
if self.sinkIval:
self.sinkIval.finish()
self.sinkIval = None
if self.fadeIval:
self.fadeIval.finish()
self.fadeIval = None
FSM.FSM.cleanup(self)
self.ship = None
return
def enterNeutral(self):
pass
def exitNeutral(self):
pass
def enterSpawn(self):
pass
def exitSpawn(self):
pass
def enterAdrift(self):
pass
def exitAdrift(self):
pass
def enterAISteering(self, avId):
self.ship.startSmooth()
self.ship.clientSteeringBegin(avId)
def exitAISteering(self):
self.ship.stopSmooth()
self.ship.clientSteeringEnd()
@report(types=['frameCount', 'deltaStamp', 'args'], dConfigParam='shipboard')
def enterClientSteering(self, avId):
self.ship.clientSteeringBegin(avId)
@report(types=['frameCount', 'deltaStamp', 'args'], dConfigParam='shipboard')
def exitClientSteering(self):
if self.ship.wheel and self.ship.wheel[1]:
if base.cr.interactionMgr.getCurrentInteractive() is self:
self.ship.wheel[1].requestExit()
else:
self.ship.wheel[1].refreshState()
self.ship.clientSteeringEnd()
def enterDocked(self):
self.ship.rollupSails()
def exitDocked(self):
pass
def enterPinned(self):
self.ship.actorNode.getPhysicsObject().setVelocity(Vec3.zero())
if self.ship.isInCrew(localAvatar.doId):
base.musicMgr.requestFadeOut(self.currentMusic)
self.currentMusic = SoundGlobals.MUSIC_AMBUSH
base.musicMgr.request(self.currentMusic, priority=1)
self.ship.rollupSails()
self.ship.disableWheelInteraction()
def exitPinned(self):
self.fadeOutMusicIfInCrew()
self.ship.enableWheelInteraction()
def enterEnsnared(self):
if self.ship.isInCrew(localAvatar.doId):
base.musicMgr.requestFadeOut(self.currentMusic)
self.currentMusic = SoundGlobals.MUSIC_SHIP_ENSNARED
base.musicMgr.request(self.currentMusic, priority=1)
if self.risingIval:
self.risingIval.finish()
self.risingIval = None
sinking = Sequence(LerpPosInterval(self.ship, 1.0, Point3(0.0, 0, -3.0)))
listing = Sequence(LerpHprInterval(self.ship, 1.0, Vec3(0, 0, 10)))
self.sinkIval = Parallel(sinking, listing)
self.sinkIval.start()
return
def exitEnsnared(self):
self.fadeOutMusicIfInCrew()
if self.sinkIval:
self.sinkIval.finish()
self.sinkIval = None
rising = Sequence(LerpPosInterval(self.ship, 1.0, Point3(0, 0, 0)))
unlisting = Sequence(LerpHprInterval(self.ship, 1.0, Vec3(0, 0, 0)))
self.riseIval = Parallel(rising, unlisting)
self.riseIval.start()
return
def enterShoveOff(self):
pass
def exitShoveOff(self):
pass
def enterFollow(self):
self.ship.startSmooth()
def exitFollow(self):
self.ship.stopSmooth()
def enterFadeOut(self):
self.ship.model.modelRoot.setTransparency(1, 100000)
self.fadeIval = LerpColorScaleInterval(self.ship.model.modelRoot, 5, Vec4(1.0, 1.0, 1.0, 0.0))
self.fadeIval.start()
def exitFadeOut(self):
if self.fadeIval:
self.fadeIval.finish()
self.fadeIval = None
return
def enterSinking(self):
actorNode = self.ship.getActorNode()
if actorNode:
actorNode.getPhysicsObject().setVelocity(Vec3.zero())
self.ship.registerMainBuiltFunction(self.ship.sinkingBegin)
if self.ship.isInCrew(localAvatar.doId):
base.musicMgr.requestFadeOut(self.currentMusic)
self.currentMusic = SoundGlobals.MUSIC_DEATH
base.musicMgr.request(self.currentMusic, priority=2, looping=0)
def exitSinking(self):
self.ship.sinkingEnd()
self.fadeOutMusicIfInCrew()
def enterSunk(self):
pass
def enterRecoverFromSunk(self):
self.ship.recoverFromSunk()
def enterInBoardingPosition(self):
pass
def exitInBoardingPosition(self):
pass
def enterPathFollow(self):
self.ship.startSmooth()
def exitPathFollow(self):
self.ship.stopSmooth()
def enterCannonDefenseFollowPath(self):
self.ship.startSmooth()
def exitCannonDefenseFollowPath(self):
self.ship.stopSmooth()
def enterPatrol(self):
self.ship.startSmooth()
def exitPatrol(self):
self.ship.stopSmooth()
def enterAttackChase(self):
self.ship.startSmooth()
def exitAttackChase(self):
self.ship.stopSmooth()
def enterOff(self):
self.ship.stopAutoSailing()
def exitOff(self):
messenger.send('shipStateOn-%s' % self.ship.doId, [self.ship])
def enterPutAway(self):
self.ship.stopAutoSailing()
def exitPutAway(self):
pass
def enterScriptedMovement(self):
self.ship.startSmooth()
def exitScriptedMovement(self):
self.ship.stopSmooth()
def initAudio(self):
base.ambientMgr.requestFadeIn(SoundGlobals.AMBIENT_SHIP)
self.currentMusic = random.choice((SoundGlobals.MUSIC_SAILING_A, SoundGlobals.MUSIC_SAILING_B, SoundGlobals.MUSIC_SAILING_C))
base.musicMgr.request(self.currentMusic, priority=0, volume=0.6)
def clearAudio(self):
base.ambientMgr.requestFadeOut(SoundGlobals.AMBIENT_SHIP)
base.musicMgr.requestFadeOut(self.currentMusic)
def stopCurrentMusic(self):
if self.currentMusic:
base.musicMgr.requestFadeOut(self.currentMusic)
self.currentMusic = None
return
def startCurrentMusic(self, music=None):
if music and self.currentMusic != music:
self.currentMusic = music
if self.currentMusic:
base.musicMgr.request(self.currentMusic)
def fadeOutMusicIfInCrew(self):
try:
if self.ship.isInCrew(localAvatar.doId):
self.stopCurrentMusic()
except NameError:
self.stopCurrentMusic()
def createGrappleProximitySphere(self):
self.grappleProximityStr = self.ship.uniqueName('grappleProximity')
collSphere = CollisionSphere(0, 0, 0, 200)
collSphere.setTangible(0)
collSphereNode = CollisionNode(self.grappleProximityStr)
collSphereNode.addSolid(collSphere)
collSphereNode.setCollideMask(PiratesGlobals.ShipCollideBitmask)
collSphereNodePath = self.ship.attachNewNode(collSphereNode)
self.grappleProximityCollision = collSphereNodePath
self.stashGrappleProximitySphere()
def stashGrappleProximitySphere(self):
self.grappleProximityCollision.stash()
def unstashGrappleProximitySphere(self):
self.grappleProximityCollision.unstash()
def enterWaitingForGrapple(self):
self.notify.debug('enterWaitingForGrapple')
self.ship.removeWake()
if self.ship.boardableShipId == None:
return
self.unstashGrappleProximitySphere()
self.removeTargets()
self.pendingAddTarget = base.cr.relatedObjectMgr.requestObjects([self.ship.boardableShipId], eachCallback=self.addTargets)
if localAvatar.ship and localAvatar.ship.doId == self.ship.boardableShipId:
localAvatar.guiMgr.messageStack.addTextMessage(PLocalizer.FlagshipWaitingForGrappleInstructions)
return
def exitWaitingForGrapple(self):
self.ship.removeTarget()
def addTargets(self, boardableShip):
if localAvatar.ship != boardableShip:
return
attackX = boardableShip.getX(self.ship)
gStr = '**/grapple_right_*'
xOffset = -5.0
if attackX < 0:
gStr = '**/grapple_left_*'
xOffset = 5.0
locators = self.ship.findLocators(gStr + ';+s')
for locator in locators:
target = loader.loadModel('models/effects/selectionCursor')
target.setColorScale(0, 1, 0, 1)
self.ship.addGrappleTarget(target, locator, xOffset)
target.setTwoSided(1)
target.setBillboardPointEye()
target.setFogOff()
scaleA, scaleB = (10, 16)
target.setScale(scaleA)
t = 0.5
ival = Sequence(LerpScaleInterval(target, 2 * t, Vec3(scaleB, scaleB, scaleB), blendType='easeInOut'), LerpScaleInterval(target, t, Vec3(scaleA, scaleA, scaleA), blendType='easeInOut'))
ival.loop()
collSphere = CollisionSphere(0, 0, 0, 10)
collSphere.setTangible(1)
collSphereNode = CollisionNode('grappleTargetSphere')
collSphereNode.addSolid(collSphere)
collSphereNode.setTag('objType', str(PiratesGlobals.COLL_GRAPPLE_TARGET))
collSphereNode.setTag('shipId', str(self.ship.doId))
collSphereNode.setTag('targetId', locator.getName())
collSphereNode.setCollideMask(PiratesGlobals.TargetBitmask)
collSphereNodePath = self.ship.getModelRoot().attachNewNode(collSphereNode)
collSphereNodePath.setPos(target.getPos())
collSphereNodePath.setTag('targetIndex', str(len(self.targets)))
self.targets.append([target, ival, collSphereNodePath])
self.accept('enterGrappleTargetSphere', self.handleTargetHit)
def removeTargets(self):
for target, ival, csnp in self.targets:
target.removeNode()
csnp.removeNode()
if ival:
ival.pause()
del ival
self.targets = []
self.ignore('entergrappleTargetSphere')
def handleTargetHit(self, collEntry):
print '**********HANDLE TARGET HIT*****************'
def enterGrappleLerping(self):
self.notify.debug('enterGrappleLerping')
self.ship.startSmooth()
self.grappleSfx = loadSfx(SoundGlobals.SFX_SHIP_GRAPPLE)
base.playSfx(self.grappleSfx, looping=1)
grappler = base.cr.doId2do.get(self.ship.boardableShipId)
if grappler:
grappler.grappledShip(self.ship)
def exitGrappleLerping(self):
self.ship.stopSmooth()
self.ship.removeTarget()
if self.grappleSfx:
self.grappleSfx.stop()
self.grappleSfx = None
return
def enterInPosition(self):
self.notify.debug('enterInPosition')
self.removeTargets()
myShip = localAvatar.getShip()
if myShip and myShip.doId == self.ship.boardableShipId:
if myShip.isCaptain(localAvatar.doId):
localAvatar.guiMgr.messageStack.addTextMessage(PLocalizer.FlagshipInPositionInstructionsCaptain)
myShip.showBoardingChoice(self.ship)
else:
localAvatar.guiMgr.messageStack.addTextMessage(PLocalizer.FlagshipInPositionInstructionsCrew)
def exitInPosition(self):
self.ship.removeTarget()
myShip = localAvatar.getShip()
if myShip and myShip.doId == self.ship.boardableShipId:
if myShip.isCaptain(localAvatar.doId):
myShip.removeBoardingChoice()
def enterBoarded(self):
self.ship.disableOnDeckInteractions()
def exitBoarded(self):
pass
def enterDefeated(self):
self.explosionIval = None
if self.ship:
self.notify.debug('%s enterDefeated' % self.ship.doId)
self.ship.removeTarget()
if self.ship.getModelRoot():
pos = self.ship.getClosestBoardingPos()
if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
effectsIval = Parallel()
explosionEffect = Explosion.getEffect()
if explosionEffect:
explosionEffect.reparentTo(self.ship.getModelRoot())
explosionEffect.setPos(self.ship.getModelRoot(), pos)
explosionEffect.setEffectScale(1.0)
effectsIval.append(Func(explosionEffect.play))
shipSplintersAEffect = ShipSplintersA.getEffect()
if shipSplintersAEffect:
shipSplintersAEffect.wrtReparentTo(self.ship.getModelRoot())
shipSplintersAEffect.setPos(self.ship.getModelRoot(), pos)
effectsIval.append(Func(shipSplintersAEffect.play))
effect1 = FlamingDebris.getEffect()
if effect1:
effect1.wrtReparentTo(self.ship.getModelRoot())
effect1.setPos(self.ship.getModelRoot(), pos)
effect1.velocityX = 25
effect1.velocityY = 0
effectsIval.append(Func(effect1.play))
effect2 = FlamingDebris.getEffect()
if effect2:
effect2.wrtReparentTo(self.ship.getModelRoot())
effect2.setPos(self.ship.getModelRoot(), pos)
effect2.velocityX = 0
effect2.velocityY = 25
effectsIval.append(Func(effect2.play))
self.explosionIval = Sequence(Wait(4.0), effectsIval)
self.explosionIval.start()
return
def enterKrakenPinned(self):
if self.ship.model:
self.ship.model.modelRoot.setR(10)
def exitKrakenPinned(self):
if self.ship.model:
self.ship.model.modelRoot.setR(0)
def exitDefeated(self):
self.notify.debug('%s exitDefeated' % self.ship.doId)
if self.explosionIval:
self.explosionIval.pause()
self.explosionIval = None
return
def enterInactive(self):
pass
def exitInactive(self):
pass
def enterCaptured(self):
if self.ship:
self.notify.debug('%s enterCaptured' % self.ship.doId)
self.ship.removeTarget()
def exitCaptured(self):
if self.ship:
self.notify.debug('%s exitCaptured' % self.ship.doId) | [
"[email protected]"
] | |
102a8f4cfe3f00d5c68a2cbea906f0b545205b92 | f7630fd6c829cb306e72472296e3a513844d99af | /lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_vpn_ssl_settings.py | 9f00dbacc824308a2aa22c269afe4e8f958bf281 | [] | no_license | baltah666/automation | 6eccce20c83dbe0d5aa9a82a27937886e3131d32 | 140eb81fe9bacb9a3ed1f1eafe86edeb8a8d0d52 | refs/heads/master | 2023-03-07T10:53:21.187020 | 2023-02-10T08:39:38 | 2023-02-10T08:39:38 | 272,007,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58,665 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_vpn_ssl_settings
short_description: no description
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "1.0.0"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
    - Running in workspace locking mode is supported in this FortiManager module; the top
      level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
      the conditions to fail or succeed with parameters rc_failed and rc_succeeded.
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: |
          only set to True when the module schema differs from the FortiManager API structure;
           the module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: |
the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
        description: the rc codes list with which the conditions to fail will be overridden
type: list
required: false
device:
description: the parameter (device) in requested url
type: str
required: true
vdom:
description: the parameter (vdom) in requested url
type: str
required: true
vpn_ssl_settings:
description: the top level parameters set
required: false
type: dict
suboptions:
algorithm:
type: str
description: no description
choices:
- 'default'
- 'high'
- 'low'
- 'medium'
auth-session-check-source-ip:
type: str
description: no description
choices:
- 'disable'
- 'enable'
auth-timeout:
type: int
description: no description
authentication-rule:
description: description
type: list
suboptions:
auth:
type: str
description: no description
choices:
- 'any'
- 'local'
- 'radius'
- 'ldap'
- 'tacacs+'
cipher:
type: str
description: no description
choices:
- 'any'
- 'high'
- 'medium'
client-cert:
type: str
description: no description
choices:
- 'disable'
- 'enable'
groups:
type: str
description: no description
id:
type: int
description: no description
portal:
type: str
description: no description
realm:
type: str
description: no description
source-address:
type: str
description: no description
source-address-negate:
type: str
description: no description
choices:
- 'disable'
- 'enable'
source-address6:
type: str
description: no description
source-address6-negate:
type: str
description: no description
choices:
- 'disable'
- 'enable'
source-interface:
type: str
description: no description
user-peer:
type: str
description: no description
users:
type: str
description: no description
auto-tunnel-static-route:
type: str
description: no description
choices:
- 'disable'
- 'enable'
banned-cipher:
description: description
type: list
choices:
- RSA
- DH
- DHE
- ECDH
- ECDHE
- DSS
- ECDSA
- AES
- AESGCM
- CAMELLIA
- 3DES
- SHA1
- SHA256
- SHA384
- STATIC
check-referer:
type: str
description: no description
choices:
- 'disable'
- 'enable'
default-portal:
type: str
description: no description
deflate-compression-level:
type: int
description: no description
deflate-min-data-size:
type: int
description: no description
dns-server1:
type: str
description: no description
dns-server2:
type: str
description: no description
dns-suffix:
type: str
description: no description
dtls-hello-timeout:
type: int
description: no description
dtls-max-proto-ver:
type: str
description: no description
choices:
- 'dtls1-0'
- 'dtls1-2'
dtls-min-proto-ver:
type: str
description: no description
choices:
- 'dtls1-0'
- 'dtls1-2'
dtls-tunnel:
type: str
description: no description
choices:
- 'disable'
- 'enable'
encode-2f-sequence:
type: str
description: no description
choices:
- 'disable'
- 'enable'
encrypt-and-store-password:
type: str
description: no description
choices:
- 'disable'
- 'enable'
force-two-factor-auth:
type: str
description: no description
choices:
- 'disable'
- 'enable'
header-x-forwarded-for:
type: str
description: no description
choices:
- 'pass'
- 'add'
- 'remove'
hsts-include-subdomains:
type: str
description: no description
choices:
- 'disable'
- 'enable'
http-compression:
type: str
description: no description
choices:
- 'disable'
- 'enable'
http-only-cookie:
type: str
description: no description
choices:
- 'disable'
- 'enable'
http-request-body-timeout:
type: int
description: no description
http-request-header-timeout:
type: int
description: no description
https-redirect:
type: str
description: no description
choices:
- 'disable'
- 'enable'
idle-timeout:
type: int
description: no description
ipv6-dns-server1:
type: str
description: no description
ipv6-dns-server2:
type: str
description: no description
ipv6-wins-server1:
type: str
description: no description
ipv6-wins-server2:
type: str
description: no description
login-attempt-limit:
type: int
description: no description
login-block-time:
type: int
description: no description
login-timeout:
type: int
description: no description
port:
type: int
description: no description
port-precedence:
type: str
description: no description
choices:
- 'disable'
- 'enable'
reqclientcert:
type: str
description: no description
choices:
- 'disable'
- 'enable'
route-source-interface:
type: str
description: no description
choices:
- 'disable'
- 'enable'
servercert:
type: str
description: no description
source-address:
type: str
description: no description
source-address-negate:
type: str
description: no description
choices:
- 'disable'
- 'enable'
source-address6:
type: str
description: no description
source-address6-negate:
type: str
description: no description
choices:
- 'disable'
- 'enable'
source-interface:
type: str
description: no description
ssl-client-renegotiation:
type: str
description: no description
choices:
- 'disable'
- 'enable'
ssl-insert-empty-fragment:
type: str
description: no description
choices:
- 'disable'
- 'enable'
ssl-max-proto-ver:
type: str
description: no description
choices:
- 'tls1-0'
- 'tls1-1'
- 'tls1-2'
- 'tls1-3'
ssl-min-proto-ver:
type: str
description: no description
choices:
- 'tls1-0'
- 'tls1-1'
- 'tls1-2'
- 'tls1-3'
tlsv1-0:
type: str
description: no description
choices:
- 'disable'
- 'enable'
tlsv1-1:
type: str
description: no description
choices:
- 'disable'
- 'enable'
tlsv1-2:
type: str
description: no description
choices:
- 'disable'
- 'enable'
tlsv1-3:
type: str
description: no description
choices:
- 'disable'
- 'enable'
transform-backward-slashes:
type: str
description: no description
choices:
- 'disable'
- 'enable'
tunnel-connect-without-reauth:
type: str
description: no description
choices:
- 'disable'
- 'enable'
tunnel-ip-pools:
type: str
description: no description
tunnel-ipv6-pools:
type: str
description: no description
tunnel-user-session-timeout:
type: int
description: no description
unsafe-legacy-renegotiation:
type: str
description: no description
choices:
- 'disable'
- 'enable'
url-obscuration:
type: str
description: no description
choices:
- 'disable'
- 'enable'
user-peer:
type: str
description: no description
wins-server1:
type: str
description: no description
wins-server2:
type: str
description: no description
x-content-type-options:
type: str
description: no description
choices:
- 'disable'
- 'enable'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: no description
fmgr_vpn_ssl_settings:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
device: <your own value>
vdom: <your own value>
vpn_ssl_settings:
algorithm: <value in [default, high, low, ...]>
auth-session-check-source-ip: <value in [disable, enable]>
auth-timeout: <value of integer>
authentication-rule:
-
auth: <value in [any, local, radius, ...]>
cipher: <value in [any, high, medium]>
client-cert: <value in [disable, enable]>
groups: <value of string>
id: <value of integer>
portal: <value of string>
realm: <value of string>
source-address: <value of string>
source-address-negate: <value in [disable, enable]>
source-address6: <value of string>
source-address6-negate: <value in [disable, enable]>
source-interface: <value of string>
user-peer: <value of string>
users: <value of string>
auto-tunnel-static-route: <value in [disable, enable]>
banned-cipher:
- RSA
- DH
- DHE
- ECDH
- ECDHE
- DSS
- ECDSA
- AES
- AESGCM
- CAMELLIA
- 3DES
- SHA1
- SHA256
- SHA384
- STATIC
check-referer: <value in [disable, enable]>
default-portal: <value of string>
deflate-compression-level: <value of integer>
deflate-min-data-size: <value of integer>
dns-server1: <value of string>
dns-server2: <value of string>
dns-suffix: <value of string>
dtls-hello-timeout: <value of integer>
dtls-max-proto-ver: <value in [dtls1-0, dtls1-2]>
dtls-min-proto-ver: <value in [dtls1-0, dtls1-2]>
dtls-tunnel: <value in [disable, enable]>
encode-2f-sequence: <value in [disable, enable]>
encrypt-and-store-password: <value in [disable, enable]>
force-two-factor-auth: <value in [disable, enable]>
header-x-forwarded-for: <value in [pass, add, remove]>
hsts-include-subdomains: <value in [disable, enable]>
http-compression: <value in [disable, enable]>
http-only-cookie: <value in [disable, enable]>
http-request-body-timeout: <value of integer>
http-request-header-timeout: <value of integer>
https-redirect: <value in [disable, enable]>
idle-timeout: <value of integer>
ipv6-dns-server1: <value of string>
ipv6-dns-server2: <value of string>
ipv6-wins-server1: <value of string>
ipv6-wins-server2: <value of string>
login-attempt-limit: <value of integer>
login-block-time: <value of integer>
login-timeout: <value of integer>
port: <value of integer>
port-precedence: <value in [disable, enable]>
reqclientcert: <value in [disable, enable]>
route-source-interface: <value in [disable, enable]>
servercert: <value of string>
source-address: <value of string>
source-address-negate: <value in [disable, enable]>
source-address6: <value of string>
source-address6-negate: <value in [disable, enable]>
source-interface: <value of string>
ssl-client-renegotiation: <value in [disable, enable]>
ssl-insert-empty-fragment: <value in [disable, enable]>
ssl-max-proto-ver: <value in [tls1-0, tls1-1, tls1-2, ...]>
ssl-min-proto-ver: <value in [tls1-0, tls1-1, tls1-2, ...]>
tlsv1-0: <value in [disable, enable]>
tlsv1-1: <value in [disable, enable]>
tlsv1-2: <value in [disable, enable]>
tlsv1-3: <value in [disable, enable]>
transform-backward-slashes: <value in [disable, enable]>
tunnel-connect-without-reauth: <value in [disable, enable]>
tunnel-ip-pools: <value of string>
tunnel-ipv6-pools: <value of string>
tunnel-user-session-timeout: <value of integer>
unsafe-legacy-renegotiation: <value in [disable, enable]>
url-obscuration: <value in [disable, enable]>
user-peer: <value of string>
wins-server1: <value of string>
wins-server2: <value of string>
x-content-type-options: <value in [disable, enable]>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/device/{device}/vdom/{vdom}/vpn/ssl/settings'
]
perobject_jrpc_urls = [
'/pm/config/device/{device}/vdom/{vdom}/vpn/ssl/settings/{settings}'
]
url_params = ['device', 'vdom']
module_primary_key = None
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'device': {
'required': True,
'type': 'str'
},
'vdom': {
'required': True,
'type': 'str'
},
'vpn_ssl_settings': {
'required': False,
'type': 'dict',
'revision': {
'6.4.2': True
},
'options': {
'algorithm': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'default',
'high',
'low',
'medium'
],
'type': 'str'
},
'auth-session-check-source-ip': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'auth-timeout': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'authentication-rule': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'list',
'options': {
'auth': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'any',
'local',
'radius',
'ldap',
'tacacs+'
],
'type': 'str'
},
'cipher': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'any',
'high',
'medium'
],
'type': 'str'
},
'client-cert': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'groups': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'id': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'portal': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'realm': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'source-address': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'source-address-negate': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'source-address6': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'source-address6-negate': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'source-interface': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'user-peer': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'users': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
}
}
},
'auto-tunnel-static-route': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'banned-cipher': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'list',
'choices': [
'RSA',
'DH',
'DHE',
'ECDH',
'ECDHE',
'DSS',
'ECDSA',
'AES',
'AESGCM',
'CAMELLIA',
'3DES',
'SHA1',
'SHA256',
'SHA384',
'STATIC'
]
},
'check-referer': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'default-portal': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'deflate-compression-level': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'deflate-min-data-size': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'dns-server1': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'dns-server2': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'dns-suffix': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'dtls-hello-timeout': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'dtls-max-proto-ver': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'dtls1-0',
'dtls1-2'
],
'type': 'str'
},
'dtls-min-proto-ver': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'dtls1-0',
'dtls1-2'
],
'type': 'str'
},
'dtls-tunnel': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'encode-2f-sequence': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'encrypt-and-store-password': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'force-two-factor-auth': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'header-x-forwarded-for': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'pass',
'add',
'remove'
],
'type': 'str'
},
'hsts-include-subdomains': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'http-compression': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'http-only-cookie': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'http-request-body-timeout': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'http-request-header-timeout': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'https-redirect': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'idle-timeout': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'ipv6-dns-server1': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'ipv6-dns-server2': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'ipv6-wins-server1': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'ipv6-wins-server2': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'login-attempt-limit': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'login-block-time': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'login-timeout': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'port': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'port-precedence': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'reqclientcert': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'route-source-interface': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'servercert': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'source-address': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'source-address-negate': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'source-address6': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'source-address6-negate': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'source-interface': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'ssl-client-renegotiation': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssl-insert-empty-fragment': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssl-max-proto-ver': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'tls1-0',
'tls1-1',
'tls1-2',
'tls1-3'
],
'type': 'str'
},
'ssl-min-proto-ver': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'tls1-0',
'tls1-1',
'tls1-2',
'tls1-3'
],
'type': 'str'
},
'tlsv1-0': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'tlsv1-1': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'tlsv1-2': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'tlsv1-3': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'transform-backward-slashes': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'tunnel-connect-without-reauth': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'tunnel-ip-pools': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'tunnel-ipv6-pools': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'tunnel-user-session-timeout': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'int'
},
'unsafe-legacy-renegotiation': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'url-obscuration': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'user-peer': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'wins-server1': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'wins-server2': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'type': 'str'
},
'x-content-type-options': {
'required': False,
'revision': {
'6.4.2': True,
'6.4.5': False,
'7.0.0': False,
'7.2.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'vpn_ssl_settings'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_partial_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
b9f78075d182ca9e57ef766de03b49e5e67b83e3 | 5ae3bc1920fafc33693cdfa3928a48158aa6f725 | /339/339.py | 3ae587496c391a69889871704c55b71a5fa45463 | [] | no_license | sjzyjc/leetcode | 2d0764aec6681d567bffd8ff9a8cc482c44336c2 | 5e09a5d36ac55d782628a888ad57d48e234b61ac | refs/heads/master | 2021-04-03T08:26:38.232218 | 2019-08-15T21:54:59 | 2019-08-15T21:54:59 | 124,685,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,912 | py | # """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger:
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution:
def depthSum(self, nestedList):
"""
:type nestedList: List[NestedInteger]
:rtype: int
"""
if not nestedList:
return 0
ans = 0
for item in nestedList:
ans += self.dfs(item, 1)
return ans
def dfs(self, nestedList, depth):
if nestedList.isInteger():
return nestedList.getInteger() * depth
ans = 0
for item in nestedList.getList():
ans += self.dfs(item, depth + 1)
return ans
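# A minimal, hypothetical stand-in for the NestedInteger interface described in
# the comments above, added only so Solution.depthSum can be exercised locally;
# the real class is supplied by the judge and may differ in implementation.
class _NestedInteger:
    def __init__(self, value=None):
        # Holds either a single integer or a list of _NestedInteger objects.
        self._integer = value
        self._list = [] if value is None else None
    def isInteger(self):
        return self._integer is not None
    def add(self, elem):
        self._list.append(elem)
    def getInteger(self):
        return self._integer
    def getList(self):
        return self._list
if __name__ == '__main__':
    # Example: [[1,1], 2, [1,1]] -> the four 1s sit at depth 2 and the 2 at
    # depth 1, so the weighted sum is 4 * 2 + 2 * 1 = 10.
    inner = _NestedInteger()
    inner.add(_NestedInteger(1))
    inner.add(_NestedInteger(1))
    print(Solution().depthSum([inner, _NestedInteger(2), inner]))  # expected 10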
| [
"[email protected]"
] | |
264486e6a67a5bd97d1e2a4f5fe5a9d2793e581e | 60ca69e2a4c6b05e6df44007fd9e4a4ed4425f14 | /beginner_contest/072/C.py | 080a37ff02695427c7499db8c51223bd96de0bd5 | [
"MIT"
] | permissive | FGtatsuro/myatcoder | 12a9daafc88efbb60fc0cd8840e594500fc3ee55 | 25a3123be6a6311e7d1c25394987de3e35575ff4 | refs/heads/master | 2021-06-13T15:24:07.906742 | 2021-05-16T11:47:09 | 2021-05-16T11:47:09 | 195,441,531 | 0 | 0 | MIT | 2021-05-16T11:47:10 | 2019-07-05T16:47:58 | Python | UTF-8 | Python | false | false | 320 | py | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n = int(input())
a = list(map(int, input().split()))
t = [0] * (max(a) + 1)
for v in a:
t[v] += 1
if max(a) <= 1:
print(sum(t))
sys.exit(0)
ans = 0
for i in range(0, max(a) - 1):
ans = max(ans, t[i] + t[i+1] + t[i+2])
print(ans)
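# Worked example (assumed input, not an official sample): a = [3, 1, 4, 1, 5]
#   counts: t[1] = 2, t[3] = 1, t[4] = 1, t[5] = 1, all other t[v] = 0
#   window sums t[i] + t[i+1] + t[i+2]: i=0 -> 2, i=1 -> 3, i=2 -> 2, i=3 -> 3
#   so the best choice covers 3 of the elements and the program prints 3.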
| [
"[email protected]"
] | |
0c52af3d9af2fa22e731c5bf98e9226c2a7b2245 | b2b79cc61101ddf54959b15cf7d0887d114fb4e5 | /web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_get_script_sql.py | bfd9c1ad7926ca39adec0ae5ed46e473f8192970 | [
"PostgreSQL"
] | permissive | 99Percent/pgadmin4 | 8afe737eb2ec1400ab034ad1d8a4f7c4ba4c35c8 | 5e0c113c7bc4ffefbec569e7ca5416d9acf9dd8a | refs/heads/master | 2021-10-10T20:08:48.321551 | 2021-09-30T12:51:43 | 2021-09-30T12:51:43 | 165,702,958 | 0 | 0 | NOASSERTION | 2019-01-14T17:18:40 | 2019-01-14T17:18:39 | null | UTF-8 | Python | false | false | 4,072 | py | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2021, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
from unittest.mock import patch
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as tables_utils
class TableGetScriptSqlTestCase(BaseTestGenerator):
"""This class will add new collation under schema node."""
url = '/browser/table/'
# Generates scenarios
scenarios = utils.generate_scenarios("table_get_script_sql",
tables_utils.test_cases)
def setUp(self):
# Load test data
self.data = self.test_data
# Update url
self.url = self.url + self.add_to_url
# Create db connection
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database to add a table.")
# Create schema
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema to add a table.")
# Create table
self.table_name = "test_table_get_%s" % (str(uuid.uuid4())[1:8])
if "query" in self.inventory_data:
custom_query = self.inventory_data["query"]
self.table_id = tables_utils.create_table(self.server,
self.db_name,
self.schema_name,
self.table_name,
custom_query)
else:
self.table_id = tables_utils.create_table(self.server,
self.db_name,
self.schema_name,
self.table_name)
def runTest(self):
"""This function will delete added table under schema node."""
if self.is_positive_test:
response = tables_utils.api_get(self)
# Assert response
utils.assert_status_code(self, response)
else:
if self.mocking_required:
with patch(self.mock_data["function_name"],
side_effect=eval(self.mock_data["return_value"])):
if self.is_list:
response = tables_utils.api_get(self, "")
else:
response = tables_utils.api_get(self)
else:
if 'table_id' in self.data:
self.table_id = self.data['table_id']
response = tables_utils.api_get(self)
# Assert response
utils.assert_status_code(self, response)
utils.assert_error_message(self, response)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
| [
"[email protected]"
] | |
ece365a265c581bb6a442e96dcae94df889f9d30 | 5a6f88e6671a6a8bd81b0062a687949ee56741e8 | /src/variants_analysis.py | 1d78860594ad68285baafbc69fac17d74389dbbf | [
"MIT"
] | permissive | yaosichao0915/DeepImmuno | dad4e256515688d8efe622fab55b8f7f40518acc | a2a7832f6cded9296735475c2e8fa5c9b62b3f8d | refs/heads/main | 2023-06-17T01:09:51.851248 | 2021-07-14T17:21:44 | 2021-07-14T17:21:44 | 386,014,588 | 0 | 0 | MIT | 2021-07-14T17:01:18 | 2021-07-14T17:01:17 | null | UTF-8 | Python | false | false | 6,762 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
with open('/Users/ligk2e/Desktop/github/DeepImmuno/files/covid/ORF2-spike.fa','r') as f:
spike = f.readlines()
spike = ''.join([item.rstrip('\n') for item in spike[1:]])
record = 'MFVFLVLLPLVSSQCVNLTTRTQLPPAYTNSFTRGVYYPDKVFRSSVLHSTQDLFLPFFSNVTWFHAIHVSGTNGTKRFDNPVLPFNDGVYFASTEKSNIIRGWIFGTTLDSKTQSLLIVNNATNVVIKVCEFQFCNDPFLGVYYHKNNKSWMESEFRVYSSANNCTFEYVSQPFLMDLEGKQGNFKNLREFVFKNIDGYFKIYSKHTPINLVRDLPQGFSALEPLVDLPIGINITRFQTLLALHRSYLTPGDSSSGWTAGAAAYYVGYLQPRTFLLKYNENGTITDAVDCALDPLSETKCTLKSFTVEKGIYQTSNFRVQPTESIVRFPNITNLCPFGEVFNATRFASVYAWNRKRISNCVADYSVLYNSASFSTFKCYGVSPTKLNDLCFTNVYADSFVIRGDEVRQIAPGQTGKIADYNYKLPDDFTGCVIAWNSNNLDSKVGGNYNYLYRLFRKSNLKPFERDISTEIYQAGSTPCNGVEGFNCYFPLQSYGFQPTNGVGYQPYRVVVLSFELLHAPATVCGPKKSTNLVKNKCVNFNFNGLTGTGVLTESNKKFLPFQQFGRDIADTTDAVRDPQTLEILDITPCSFGGVSVITPGTNTSNQVAVLYQDVNCTEVPVAIHADQLTPTWRVYSTGSNVFQTRAGCLIGAEHVNNSYECDIPIGAGICASYQTQTNSPRRARSVASQSIIAYTMSLGAENSVAYSNNSIAIPTNFTISVTTEILPVSMTKTSVDCTMYICGDSTECSNLLLQYGSFCTQLNRALTGIAVEQDKNTQEVFAQVKQIYKTPPIKDFGGFNFSQILPDPSKPSKRSFIEDLLFNKVTLADAGFIKQYGDCLGDIAARDLICAQKFNGLTVLPPLLTDEMIAQYTSALLAGTITSGWTFGAGAALQIPFAMQMAYRFNGIGVTQNVLYENQKLIANQFNSAIGKIQDSLSSTASALGKLQDVVNQNAQALNTLVKQLSSNFGAISSVLNDILSRLDKVEAEVQIDRLITGRLQSLQTYVTQQLIRAAEIRASANLAATKMSECVLGQSKRVDFCGKGYHLMSFPQSAPHGVVFLHVTYVPAQEKNFTTAPAICHDGKAHFPREGVFVSNGTHWFVTQRNFYEPQIITTDNTFVSGNCDVVIGIVNNTVYDPLQPELDSFKEELDKYFKNHTSPDVDLGDISGINASVVNIQKEIDRLNEVAKNLNESLIDLQELGKYEQYIKWPWYIWLGFIAGLIAIVMVTIMLCCMTSCCSCLKGCCSCGSCCKFDEDDSEPVLKGVKLHYT'
# D614G
region = spike[605:622]
mer9_normal = [region[i:i+9] for i in range(0,9,1)]
mutate = region[0:8] + 'G' + region[9:]
mer9_mutate = [mutate[i:i+9] for i in range(0,9,1)]
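# Clarifying note on the slicing above: spike[605:622] is a 17-residue window
# whose middle residue (region index 8, i.e. 1-based spike position 614) is the
# D614G site, so mer9_normal and mer9_mutate are exactly the nine 9-mers that
# overlap that residue, differing only by the single D -> G substitution.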
def set_query_df(frag):
    """Pair each peptide fragment with the 10 common HLA alleles listed below
    and return the combinations as a two-column (peptide, HLA) DataFrame."""
    from itertools import product
hla = ['HLA-A*0101','HLA-A*0201','HLA-A*0301','HLA-A*1101','HLA-A*2402','HLA-B*0702','HLA-B*0801','HLA-B*1501','HLA-B*4001','HLA-C*0702']
combine = list(product(frag,hla))
col1 = [item[0] for item in combine] # peptide
col2 = [item[1] for item in combine] # hla
df = pd.DataFrame({'peptide':col1,'HLA':col2})
return df
set_query_df(mer9_normal).to_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/D614G/D614G_normal.csv',index=None,
header=None)
set_query_df(mer9_mutate).to_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/D614G/D614G_mutate.csv',index=None,
header=None)
result_normal = pd.read_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/D614G/normal_result.txt',sep='\t')
result_mutate = pd.read_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/D614G/mutate_result.txt',sep='\t')
# plot by each HLA
fig,axes = plt.subplots(nrows=5,ncols=2,figsize=(10,10),gridspec_kw={'hspace':0.5})
n = list(result_normal.groupby(by='HLA'))
m = list(result_mutate.groupby(by='HLA'))
for i,ax in enumerate(axes.flatten()):
ax.plot(np.arange(9)+1,n[i][1]['immunogenicity'][::-1],label='normal',marker='v',alpha=0.5)
ax.plot(np.arange(9)+1,m[i][1]['immunogenicity'][::-1],label='mutate',marker='o',linestyle='--')
ax.legend()
ax.set_title(n[i][0])
plt.savefig('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/D614G/lineplot.pdf',bbox_inches='tight')
# N501Y mutation
region = spike[492:509]
mer9_normal = [region[i:i+9] for i in range(0,9,1)]
mutate = region[0:8] + 'Y' + region[9:]
mer9_mutate = [mutate[i:i+9] for i in range(0,9,1)]
def set_query_df(frag):
from itertools import product
hla = ['HLA-A*0101','HLA-A*0201','HLA-A*0301','HLA-A*1101','HLA-A*2402','HLA-B*0702','HLA-B*0801','HLA-B*1501','HLA-B*4001','HLA-C*0702']
combine = list(product(frag,hla))
col1 = [item[0] for item in combine] # peptide
col2 = [item[1] for item in combine] # hla
df = pd.DataFrame({'peptide':col1,'HLA':col2})
return df
set_query_df(mer9_normal).to_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/N501Y/N501Y_normal.csv',index=None,
header=None)
set_query_df(mer9_mutate).to_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/N501Y/N501Y_mutate.csv',index=None,
header=None)
result_normal = pd.read_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/N501Y/normal_result.txt',sep='\t')
result_mutate = pd.read_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/N501Y/mutate_result.txt',sep='\t')
# plot by each HLA
fig,axes = plt.subplots(nrows=5,ncols=2,figsize=(10,10),gridspec_kw={'hspace':0.5})
n = list(result_normal.groupby(by='HLA'))
m = list(result_mutate.groupby(by='HLA'))
for i,ax in enumerate(axes.flatten()):
ax.plot(np.arange(9)+1,n[i][1]['immunogenicity'][::-1],label='normal',marker='v',alpha=0.5)
ax.plot(np.arange(9)+1,m[i][1]['immunogenicity'][::-1],label='mutate',marker='o',linestyle='--')
ax.legend()
ax.set_title(n[i][0])
plt.savefig('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/N501Y/lineplot.pdf',bbox_inches='tight')
# E484K
region = spike[475:492]
mer9_normal = [region[i:i+9] for i in range(0,9,1)]
mutate = region[0:8] + 'K' + region[9:]
mer9_mutate = [mutate[i:i+9] for i in range(0,9,1)]
def set_query_df(frag):
from itertools import product
hla = ['HLA-A*0101','HLA-A*0201','HLA-A*0301','HLA-A*1101','HLA-A*2402','HLA-B*0702','HLA-B*0801','HLA-B*1501','HLA-B*4001','HLA-C*0702']
combine = list(product(frag,hla))
col1 = [item[0] for item in combine] # peptide
col2 = [item[1] for item in combine] # hla
df = pd.DataFrame({'peptide':col1,'HLA':col2})
return df
set_query_df(mer9_normal).to_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/E484K/E484K_normal.csv',index=None,
header=None)
set_query_df(mer9_mutate).to_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/E484K/E484K_mutate.csv',index=None,
header=None)
result_normal = pd.read_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/E484K/normal_result.txt',sep='\t')
result_mutate = pd.read_csv('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/E484K/mutate_result.txt',sep='\t')
# plot by each HLA
fig,axes = plt.subplots(nrows=5,ncols=2,figsize=(10,10),gridspec_kw={'hspace':0.5})
n = list(result_normal.groupby(by='HLA'))
m = list(result_mutate.groupby(by='HLA'))
for i,ax in enumerate(axes.flatten()):
ax.plot(np.arange(9)+1,n[i][1]['immunogenicity'][::-1],label='normal',marker='v',alpha=0.5)
ax.plot(np.arange(9)+1,m[i][1]['immunogenicity'][::-1],label='mutate',marker='o',linestyle='--')
ax.legend()
ax.set_title(n[i][0])
plt.savefig('/Users/ligk2e/Desktop/github/DeepImmuno/files/variants/E484K/lineplot.pdf',bbox_inches='tight')
| [
"Frank Li"
] | Frank Li |
48fae7769117066bc7dbba45df7955795e600155 | 1677eaad65da601a3ac34bd6648c973ffd23c5a9 | /test/test_payment_intent.py | 96a16f7f0e080de4c3970d1af68af0c32eebc622 | [] | no_license | jeffkynaston/sdk-spike-python | dc557cc1557387f8a126cd8e546201d141de535e | f9c65f578abb801ffe5389b2680f9c6ed1fcebd3 | refs/heads/main | 2023-07-10T00:58:13.864373 | 2021-08-05T21:38:07 | 2021-08-05T21:38:07 | 393,175,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | """
Plastiq Public API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.payer_id import PayerId
from openapi_client.model.payment_details import PaymentDetails
from openapi_client.model.payment_intent_fees import PaymentIntentFees
from openapi_client.model.payment_method_id import PaymentMethodId
from openapi_client.model.recipient_id import RecipientId
globals()['PayerId'] = PayerId
globals()['PaymentDetails'] = PaymentDetails
globals()['PaymentIntentFees'] = PaymentIntentFees
globals()['PaymentMethodId'] = PaymentMethodId
globals()['RecipientId'] = RecipientId
from openapi_client.model.payment_intent import PaymentIntent
class TestPaymentIntent(unittest.TestCase):
"""PaymentIntent unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPaymentIntent(self):
"""Test PaymentIntent"""
# FIXME: construct object with mandatory attributes with example values
# model = PaymentIntent() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
c4b8382cde9c3442aad8576c605f4e3165a4187c | cb3583cc1322d38b1ee05cb1c081e0867ddb2220 | /home/0024_auto_20210409_1103.py | 6dfdc8917586768f05e72645b5ee96b68f7f94cc | [
"MIT"
] | permissive | iamgaddiel/codeupblood | 9e897ff23dedf5299cb59fd6c44d9bd8a645e9c6 | a0aa1725e5776d80e083b6d4e9e67476bb97e983 | refs/heads/main | 2023-05-07T23:34:27.475043 | 2021-04-24T20:49:08 | 2021-04-24T20:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,718 | py | # Generated by Django 3.1.6 on 2021-04-09 18:03
from django.db import migrations, models
import tagulous.models.fields
import tagulous.models.models
class Migration(migrations.Migration):
dependencies = [
('home', '0023_auto_20210409_0900'),
]
operations = [
migrations.AlterField(
model_name='partner',
name='class_id',
field=models.CharField(default='pMNit', max_length=150),
),
migrations.AlterField(
model_name='sponsor',
name='class_id',
field=models.CharField(default='eLZfH', max_length=150),
),
migrations.CreateModel(
name='Tagulous_Blog_tags',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('slug', models.SlugField()),
('count', models.IntegerField(default=0, help_text='Internal counter of how many times this tag is in use')),
('protected', models.BooleanField(default=False, help_text='Will not be deleted when the count reaches 0')),
],
options={
'ordering': ('name',),
'abstract': False,
'unique_together': {('slug',)},
},
bases=(tagulous.models.models.BaseTagModel, models.Model),
),
migrations.AlterField(
model_name='blog',
name='tags',
field=tagulous.models.fields.TagField(_set_tag_meta=True, help_text='Enter a comma-separated tag string', to='home.Tagulous_Blog_tags'),
),
]
| [
"[email protected]"
] | |
05a2b261a7a89318b11bc6be9fb3d2227a7bfd93 | 92e3a6424326bf0b83e4823c3abc2c9d1190cf5e | /scripts/icehouse/opt/stack/cinder/cinder/volume/drivers/ibm/gpfs.py | e2ddea245b600e2f95568f348349f9d0e75ba915 | [
"Apache-2.0"
] | permissive | AnthonyEzeigbo/OpenStackInAction | d6c21cf972ce2b1f58a93a29973534ded965d1ea | ff28cc4ee3c1a8d3bbe477d9d6104d2c6e71bf2e | refs/heads/master | 2023-07-28T05:38:06.120723 | 2020-07-25T15:19:21 | 2020-07-25T15:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,988 | py | # Copyright IBM Corp. 2013 All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
GPFS Volume Driver.
"""
import math
import os
import re
import shutil
from oslo.config import cfg
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import units
from cinder import utils
from cinder.volume import driver
GPFS_CLONE_MIN_RELEASE = 1200
LOG = logging.getLogger(__name__)
gpfs_opts = [
cfg.StrOpt('gpfs_mount_point_base',
default=None,
help='Specifies the path of the GPFS directory where Block '
'Storage volume and snapshot files are stored.'),
cfg.StrOpt('gpfs_images_dir',
default=None,
help='Specifies the path of the Image service repository in '
'GPFS. Leave undefined if not storing images in GPFS.'),
cfg.StrOpt('gpfs_images_share_mode',
default=None,
help='Specifies the type of image copy to be used. Set this '
'when the Image service repository also uses GPFS so '
'that image files can be transferred efficiently from '
'the Image service to the Block Storage service. There '
'are two valid values: "copy" specifies that a full copy '
'of the image is made; "copy_on_write" specifies that '
'copy-on-write optimization strategy is used and '
'unmodified blocks of the image file are shared '
'efficiently.'),
cfg.IntOpt('gpfs_max_clone_depth',
default=0,
help='Specifies an upper limit on the number of indirections '
'required to reach a specific block due to snapshots or '
'clones. A lengthy chain of copy-on-write snapshots or '
'clones can have a negative impact on performance, but '
'improves space utilization. 0 indicates unlimited '
'clone depth.'),
cfg.BoolOpt('gpfs_sparse_volumes',
default=True,
help=('Specifies that volumes are created as sparse files '
'which initially consume no space. If set to False, the '
'volume is created as a fully allocated file, in which '
'case, creation may take a significantly longer time.')),
cfg.StrOpt('gpfs_storage_pool',
default=None,
help=('Specifies the storage pool that volumes are assigned '
'to. By default, the system storage pool is used.')),
]
CONF = cfg.CONF
CONF.register_opts(gpfs_opts)
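# A minimal, hypothetical cinder.conf backend section using the options defined
# above; the paths, backend name and pool value are illustrative placeholders,
# not shipped defaults:
#   [gpfs_backend]
#   volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver
#   gpfs_mount_point_base = /gpfs0/cinder/volumes
#   gpfs_images_dir = /gpfs0/glance/images
#   gpfs_images_share_mode = copy_on_write
#   gpfs_max_clone_depth = 3
#   gpfs_sparse_volumes = True
#   gpfs_storage_pool = system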
def _different(difference_tuple):
"""Return true if two elements of a tuple are different."""
if difference_tuple:
member1, member2 = difference_tuple
return member1 != member2
else:
return False
def _same_filesystem(path1, path2):
"""Return true if the two paths are in the same GPFS file system."""
return os.lstat(path1).st_dev == os.lstat(path2).st_dev
def _sizestr(size_in_g):
"""Convert the specified size into a string value."""
if int(size_in_g) == 0:
# return 100M size on zero input for testing
return '100M'
return '%sG' % size_in_g
class GPFSDriver(driver.VolumeDriver):
"""Implements volume functions using GPFS primitives.
Version history:
1.0.0 - Initial driver
1.1.0 - Add volume retype, refactor volume migration
"""
VERSION = "1.1.0"
def __init__(self, *args, **kwargs):
super(GPFSDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(gpfs_opts)
def _get_gpfs_state(self):
"""Return GPFS state information."""
try:
(out, err) = self._execute('mmgetstate', '-Y', run_as_root=True)
return out
except processutils.ProcessExecutionError as exc:
LOG.error(_('Failed to issue mmgetstate command, error: %s.') %
exc.stderr)
raise exception.VolumeBackendAPIException(data=exc.stderr)
def _check_gpfs_state(self):
"""Raise VolumeBackendAPIException if GPFS is not active."""
out = self._get_gpfs_state()
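        # Assumed (illustrative, not verbatim) shape of the colon-delimited
        # `mmgetstate -Y` output parsed below -- a header line followed by one
        # data line, e.g.:
        #   mmgetstate::HEADER:...:nodeName:nodeNumber:state:quorum:...
        #   mmgetstate::0:1:...:node1:1:active:1:...
        # The position of the 'state' column is looked up from the header
        # rather than hard-coded.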
lines = out.splitlines()
state_token = lines[0].split(':').index('state')
gpfs_state = lines[1].split(':')[state_token]
if gpfs_state != 'active':
LOG.error(_('GPFS is not active. Detailed output: %s.') % out)
exception_message = (_('GPFS is not running, state: %s.') %
gpfs_state)
raise exception.VolumeBackendAPIException(data=exception_message)
def _get_filesystem_from_path(self, path):
"""Return filesystem for specified path."""
try:
(out, err) = self._execute('df', path, run_as_root=True)
lines = out.splitlines()
filesystem = lines[1].split()[0]
return filesystem
except processutils.ProcessExecutionError as exc:
LOG.error(_('Failed to issue df command for path %(path)s, '
'error: %(error)s.') %
{'path': path,
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
def _get_gpfs_cluster_id(self):
"""Return the id for GPFS cluster being used."""
try:
(out, err) = self._execute('mmlsconfig', 'clusterId', '-Y',
run_as_root=True)
lines = out.splitlines()
value_token = lines[0].split(':').index('value')
cluster_id = lines[1].split(':')[value_token]
return cluster_id
except processutils.ProcessExecutionError as exc:
LOG.error(_('Failed to issue mmlsconfig command, error: %s.') %
exc.stderr)
raise exception.VolumeBackendAPIException(data=exc.stderr)
def _get_fileset_from_path(self, path):
"""Return the GPFS fileset for specified path."""
fs_regex = re.compile(r'.*fileset.name:\s+(?P<fileset>\w+)', re.S)
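        # The regex above targets the "fileset name:" line of `mmlsattr -L`
        # output (assumed format), e.g. "fileset name:        root".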
try:
(out, err) = self._execute('mmlsattr', '-L', path,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(_('Failed to issue mmlsattr command on path %(path)s, '
'error: %(error)s') %
{'path': path,
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
try:
fileset = fs_regex.match(out).group('fileset')
return fileset
except AttributeError as exc:
msg = (_('Failed to find fileset for path %(path)s, command '
'output: %(cmdout)s.') %
{'path': path,
'cmdout': out})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _verify_gpfs_pool(self, storage_pool):
"""Return true if the specified pool is a valid GPFS storage pool."""
try:
self._execute('mmlspool', self._gpfs_device, storage_pool,
run_as_root=True)
return True
except processutils.ProcessExecutionError:
return False
def _update_volume_storage_pool(self, local_path, new_pool):
"""Set the storage pool for a volume to the specified value."""
if new_pool is None:
new_pool = 'system'
if not self._verify_gpfs_pool(new_pool):
msg = (_('Invalid storage pool %s requested. Retype failed.') %
new_pool)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
try:
self._execute('mmchattr', '-P', new_pool, local_path,
run_as_root=True)
LOG.debug('Updated storage pool with mmchattr to %s.' % new_pool)
return True
except processutils.ProcessExecutionError as exc:
LOG.info('Could not update storage pool with mmchattr to '
'%(pool)s, error: %(error)s' %
{'pool': new_pool,
'error': exc.stderr})
return False
def _get_gpfs_fs_release_level(self, path):
"""Return the GPFS version of the specified file system.
The file system is specified by any valid path it contains.
"""
filesystem = self._get_filesystem_from_path(path)
try:
(out, err) = self._execute('mmlsfs', filesystem, '-V', '-Y',
run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(_('Failed to issue mmlsfs command for path %(path)s, '
'error: %(error)s.') %
{'path': path,
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
lines = out.splitlines()
value_token = lines[0].split(':').index('data')
fs_release_level_str = lines[1].split(':')[value_token]
# at this point, release string looks like "13.23 (3.5.0.7)"
# extract first token and convert to whole number value
fs_release_level = int(float(fs_release_level_str.split()[0]) * 100)
return filesystem, fs_release_level
def _get_gpfs_cluster_release_level(self):
"""Return the GPFS version of current cluster."""
try:
(out, err) = self._execute('mmlsconfig', 'minreleaseLeveldaemon',
'-Y', run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(_('Failed to issue mmlsconfig command, error: %s.') %
exc.stderr)
raise exception.VolumeBackendAPIException(data=exc.stderr)
lines = out.splitlines()
value_token = lines[0].split(':').index('value')
min_release_level = lines[1].split(':')[value_token]
return int(min_release_level)
def _is_gpfs_path(self, directory):
"""Determine if the specified path is in a gpfs file system.
If not part of a gpfs file system, raise ProcessExecutionError.
"""
try:
self._execute('mmlsattr', directory, run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(_('Failed to issue mmlsattr command for path %(path)s, '
'error: %(error)s.') %
{'path': directory,
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
def _is_same_fileset(self, path1, path2):
"""Return true if the two paths are in the same GPFS fileset."""
if self._get_fileset_from_path(path1) == \
self._get_fileset_from_path(path2):
return True
return False
def _same_cluster(self, host):
"""Return true if the host is a member of the same GPFS cluster."""
dest_location = host['capabilities'].get('location_info')
if self._stats['location_info'] == dest_location:
return True
return False
def _set_rw_permission(self, path, modebits='660'):
"""Set permission bits for the path."""
self._execute('chmod', modebits, path, run_as_root=True)
def _can_migrate_locally(self, host):
"""Return true if the host can migrate a volume locally."""
if 'location_info' not in host['capabilities']:
LOG.debug('Evaluate migration: no location info, '
'cannot migrate locally.')
return None
info = host['capabilities']['location_info']
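        # location_info is expected to be a colon-separated triple of driver
        # name, GPFS cluster id and mount path, e.g. (hypothetical value)
        # 'GPFSDriver:12402633042498054694:/gpfs0/cinder/volumes'; anything
        # else means the volume cannot be migrated locally by this driver.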
try:
(dest_type, dest_id, dest_path) = info.split(':')
except ValueError:
LOG.debug('Evaluate migration: unexpected location info, '
'cannot migrate locally: %s.' % info)
return None
if dest_type != 'GPFSDriver' or dest_id != self._cluster_id:
LOG.debug('Evaluate migration: different destination driver or '
'cluster id in location info: %s.' % info)
return None
LOG.debug('Evaluate migration: use local migration.')
return dest_path
def do_setup(self, ctxt):
"""Determine storage back end capabilities."""
try:
self._cluster_id = self._get_gpfs_cluster_id()
except Exception as setup_exception:
msg = (_('Could not find GPFS cluster id: %s.') %
setup_exception)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
try:
gpfs_base = self.configuration.gpfs_mount_point_base
self._gpfs_device = self._get_filesystem_from_path(gpfs_base)
except Exception as setup_exception:
msg = (_('Could not find GPFS file system device: %s.') %
setup_exception)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
pool = self.configuration.safe_get('gpfs_storage_pool')
self._storage_pool = pool or 'system'
if not self._verify_gpfs_pool(self._storage_pool):
msg = (_('Invalid storage pool %s specified.') %
self._storage_pool)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
self._check_gpfs_state()
if self.configuration.gpfs_mount_point_base is None:
msg = _('Option gpfs_mount_point_base is not set correctly.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if (self.configuration.gpfs_images_share_mode and
self.configuration.gpfs_images_share_mode not in ['copy_on_write',
'copy']):
msg = _('Option gpfs_images_share_mode is not set correctly.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if(self.configuration.gpfs_images_share_mode and
self.configuration.gpfs_images_dir is None):
msg = _('Option gpfs_images_dir is not set correctly.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and
not _same_filesystem(self.configuration.gpfs_mount_point_base,
self.configuration.gpfs_images_dir)):
msg = (_('gpfs_images_share_mode is set to copy_on_write, but '
'%(vol)s and %(img)s belong to different file '
'systems.') %
{'vol': self.configuration.gpfs_mount_point_base,
'img': self.configuration.gpfs_images_dir})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and
not self._is_same_fileset(self.configuration.gpfs_mount_point_base,
self.configuration.gpfs_images_dir)):
msg = (_('gpfs_images_share_mode is set to copy_on_write, but '
'%(vol)s and %(img)s belong to different filesets.') %
{'vol': self.configuration.gpfs_mount_point_base,
'img': self.configuration.gpfs_images_dir})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
_gpfs_cluster_release_level = self._get_gpfs_cluster_release_level()
if not _gpfs_cluster_release_level >= GPFS_CLONE_MIN_RELEASE:
msg = (_('Downlevel GPFS Cluster Detected. GPFS Clone feature '
'not enabled in cluster daemon level %(cur)s - must '
'be at least at level %(min)s.') %
{'cur': _gpfs_cluster_release_level,
'min': GPFS_CLONE_MIN_RELEASE})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
for directory in [self.configuration.gpfs_mount_point_base,
self.configuration.gpfs_images_dir]:
if directory is None:
continue
if not directory.startswith('/'):
msg = (_('%s must be an absolute path.') % directory)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if not os.path.isdir(directory):
msg = (_('%s is not a directory.') % directory)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Check if GPFS is mounted
self._verify_gpfs_path_state(directory)
filesystem, fslevel = \
self._get_gpfs_fs_release_level(directory)
if not fslevel >= GPFS_CLONE_MIN_RELEASE:
msg = (_('The GPFS filesystem %(fs)s is not at the required '
'release level. Current level is %(cur)s, must be '
'at least %(min)s.') %
{'fs': filesystem,
'cur': fslevel,
'min': GPFS_CLONE_MIN_RELEASE})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _create_sparse_file(self, path, size):
"""Creates file with 0 disk usage."""
sizestr = _sizestr(size)
self._execute('truncate', '-s', sizestr, path, run_as_root=True)
def _allocate_file_blocks(self, path, size):
"""Preallocate file blocks by writing zeros."""
block_size_mb = 1
block_count = size * units.GiB / (block_size_mb * units.MiB)
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
'count=%d' % block_count,
run_as_root=True)
def _gpfs_change_attributes(self, options, path):
"""Update GPFS attributes on the specified file."""
cmd = ['mmchattr']
cmd.extend(options)
cmd.append(path)
LOG.debug('Update volume attributes with mmchattr to %s.' % options)
self._execute(*cmd, run_as_root=True)
def _set_volume_attributes(self, path, metadata):
"""Set various GPFS attributes for this volume."""
set_pool = False
options = []
for item in metadata:
if item['key'] == 'data_pool_name':
options.extend(['-P', item['value']])
set_pool = True
elif item['key'] == 'replicas':
options.extend(['-r', item['value'], '-m', item['value']])
elif item['key'] == 'dio':
options.extend(['-D', item['value']])
elif item['key'] == 'write_affinity_depth':
options.extend(['--write-affinity-depth', item['value']])
elif item['key'] == 'block_group_factor':
options.extend(['--block-group-factor', item['value']])
elif item['key'] == 'write_affinity_failure_group':
options.extend(['--write-affinity-failure-group',
item['value']])
# metadata value has precedence over value set in volume type
if self.configuration.gpfs_storage_pool and not set_pool:
options.extend(['-P', self.configuration.gpfs_storage_pool])
if options:
self._gpfs_change_attributes(options, path)
def create_volume(self, volume):
"""Creates a GPFS volume."""
# Check if GPFS is mounted
self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)
volume_path = self.local_path(volume)
volume_size = volume['size']
# Create a sparse file first; allocate blocks later if requested
self._create_sparse_file(volume_path, volume_size)
self._set_rw_permission(volume_path)
# Set the attributes prior to allocating any blocks so that
# they are allocated according to the policy
v_metadata = volume.get('volume_metadata')
self._set_volume_attributes(volume_path, v_metadata)
if not self.configuration.gpfs_sparse_volumes:
self._allocate_file_blocks(volume_path, volume_size)
fstype = None
fslabel = None
for item in v_metadata:
if item['key'] == 'fstype':
fstype = item['value']
elif item['key'] == 'fslabel':
fslabel = item['value']
if fstype:
self._mkfs(volume, fstype, fslabel)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a GPFS volume from a snapshot."""
volume_path = self.local_path(volume)
snapshot_path = self.local_path(snapshot)
self._create_gpfs_copy(src=snapshot_path, dest=volume_path)
self._set_rw_permission(volume_path)
self._gpfs_redirect(volume_path)
virt_size = self._resize_volume_file(volume, volume['size'])
return {'size': math.ceil(virt_size / units.GiB)}
def create_cloned_volume(self, volume, src_vref):
"""Create a GPFS volume from another volume."""
src = self.local_path(src_vref)
dest = self.local_path(volume)
self._create_gpfs_clone(src, dest)
self._set_rw_permission(dest)
virt_size = self._resize_volume_file(volume, volume['size'])
return {'size': math.ceil(virt_size / units.GiB)}
def _delete_gpfs_file(self, fchild):
"""Delete a GPFS file and cleanup clone children."""
if not os.path.exists(fchild):
return
(out, err) = self._execute('mmclone', 'show', fchild, run_as_root=True)
fparent = None
inode_regex = re.compile(
r'.*\s+(?:yes|no)\s+\d+\s+(?P<inode>\d+)', re.M | re.S)
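# the regex captures the parent inode reported by 'mmclone show';
# 'find -maxdepth 1 -inum' below maps that inode back to the parent file
# path so clone parents can be cleaned up recursively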
match = inode_regex.match(out)
if match:
inode = match.group('inode')
path = os.path.dirname(fchild)
(out, err) = self._execute('find', path, '-maxdepth', '1',
'-inum', inode, run_as_root=True)
if out:
fparent = out.split('\n', 1)[0]
self._execute(
'rm', '-f', fchild, check_exit_code=False, run_as_root=True)
# There is no need to check for volume references on this snapshot
# because 'rm -f' itself serves as a simple and implicit check. If the
# parent is referenced by another volume, GPFS doesn't allow deleting
# it. 'rm -f' silently fails and the subsequent check on the path
# indicates whether there are any volumes derived from that snapshot.
# If there are such volumes, we quit recursion and let the other
# volumes delete the snapshot later. If there are no references, rm
# would succeed and the snapshot is deleted.
if not os.path.exists(fchild) and fparent:
fpbase = os.path.basename(fparent)
if fpbase.endswith('.snap') or fpbase.endswith('.ts'):
self._delete_gpfs_file(fparent)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# Check if GPFS is mounted
self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)
volume_path = self.local_path(volume)
self._delete_gpfs_file(volume_path)
def _gpfs_redirect(self, src):
"""Removes the copy_on_write dependency between src and parent.
Remove the copy_on_write dependency between the src file and its
immediate parent such that the length of dependency chain is reduced
by 1.
"""
max_depth = self.configuration.gpfs_max_clone_depth
if max_depth == 0:
return False
(out, err) = self._execute('mmclone', 'show', src, run_as_root=True)
depth_regex = re.compile(r'.*\s+no\s+(?P<depth>\d+)', re.M | re.S)
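# parse the clone depth of src from 'mmclone show'; once the chain grows
# past gpfs_max_clone_depth, 'mmclone redirect' detaches src from its
# parent so the dependency chain stops growing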
match = depth_regex.match(out)
if match:
depth = int(match.group('depth'))
if depth > max_depth:
self._execute('mmclone', 'redirect', src, run_as_root=True)
return True
return False
def _create_gpfs_clone(self, src, dest):
"""Create a GPFS file clone parent for the specified file."""
snap = dest + ".snap"
self._create_gpfs_snap(src, snap)
self._create_gpfs_copy(snap, dest)
if self._gpfs_redirect(src) and self._gpfs_redirect(dest):
self._execute('rm', '-f', snap, run_as_root=True)
def _create_gpfs_copy(self, src, dest):
"""Create a GPFS file clone copy for the specified file."""
self._execute('mmclone', 'copy', src, dest, run_as_root=True)
def _create_gpfs_snap(self, src, dest=None):
"""Create a GPFS file clone snapshot for the specified file."""
if dest is None:
self._execute('mmclone', 'snap', src, run_as_root=True)
else:
self._execute('mmclone', 'snap', src, dest, run_as_root=True)
def _is_gpfs_parent_file(self, gpfs_file):
"""Return true if the specified file is a gpfs clone parent."""
out, err = self._execute('mmclone', 'show', gpfs_file,
run_as_root=True)
ptoken = out.splitlines().pop().split()[0]
return ptoken == 'yes'
def create_snapshot(self, snapshot):
"""Creates a GPFS snapshot."""
snapshot_path = self.local_path(snapshot)
volume_path = os.path.join(self.configuration.gpfs_mount_point_base,
snapshot['volume_name'])
self._create_gpfs_snap(src=volume_path, dest=snapshot_path)
self._set_rw_permission(snapshot_path, modebits='640')
self._gpfs_redirect(volume_path)
def delete_snapshot(self, snapshot):
"""Deletes a GPFS snapshot."""
# Rename the deleted snapshot to indicate it no longer exists in
# cinder db. Attempt to delete the snapshot. If the snapshot has
# clone children, the delete will fail silently. When volumes that
# are clone children are deleted in the future, the remaining ts
# snapshots will also be deleted.
snapshot_path = self.local_path(snapshot)
snapshot_ts_path = '%s.ts' % snapshot_path
self._execute('mv', snapshot_path, snapshot_ts_path, run_as_root=True)
self._execute('rm', '-f', snapshot_ts_path,
check_exit_code=False, run_as_root=True)
def local_path(self, volume):
"""Return the local path for the specified volume."""
return os.path.join(self.configuration.gpfs_mount_point_base,
volume['name'])
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
pass
def create_export(self, context, volume):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
pass
def initialize_connection(self, volume, connector):
return {
'driver_volume_type': 'local',
'data': {
'name': volume['name'],
'device_path': self.local_path(volume),
}
}
def terminate_connection(self, volume, connector, **kwargs):
pass
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, or the stats have never been collected, update
the stats first.
"""
if not self._stats or refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats.")
gpfs_base = self.configuration.gpfs_mount_point_base
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or 'GPFS'
data["vendor_name"] = 'IBM'
data["driver_version"] = self.VERSION
data["storage_protocol"] = 'file'
free, capacity = self._get_available_capacity(self.configuration.
gpfs_mount_point_base)
data['total_capacity_gb'] = math.ceil(capacity / units.GiB)
data['free_capacity_gb'] = math.ceil(free / units.GiB)
data['reserved_percentage'] = 0
data['QoS_support'] = False
data['storage_pool'] = self._storage_pool
data['location_info'] = ('GPFSDriver:%(cluster_id)s:%(root_path)s' %
{'cluster_id': self._cluster_id,
'root_path': gpfs_base})
data['reserved_percentage'] = 0
self._stats = data
def clone_image(self, volume, image_location, image_id, image_meta):
"""Create a volume from the specified image."""
return self._clone_image(volume, image_location, image_id)
def _is_cloneable(self, image_id):
"""Return true if the specified image can be cloned by GPFS."""
if not((self.configuration.gpfs_images_dir and
self.configuration.gpfs_images_share_mode)):
reason = 'glance repository not configured to use GPFS'
return False, reason, None
image_path = os.path.join(self.configuration.gpfs_images_dir, image_id)
try:
self._is_gpfs_path(image_path)
except processutils.ProcessExecutionError:
reason = 'image file not in GPFS'
return False, reason, None
return True, None, image_path
def _clone_image(self, volume, image_location, image_id):
"""Attempt to create a volume by efficiently copying image to volume.
If both source and target are backed by gpfs storage and the source
image is in raw format, create the volume using either a gpfs clone
operation or a file copy. If the image format is not raw, convert it
to raw at the volume path.
"""
# Check if GPFS is mounted
self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)
cloneable_image, reason, image_path = self._is_cloneable(image_id)
if not cloneable_image:
LOG.debug('Image %(img)s not cloneable: %(reas)s.' %
{'img': image_id, 'reas': reason})
return (None, False)
vol_path = self.local_path(volume)
# if the image is not already a GPFS snap file make it so
if not self._is_gpfs_parent_file(image_path):
self._create_gpfs_snap(image_path)
data = image_utils.qemu_img_info(image_path)
# if image format is already raw either clone it or
# copy it depending on config file settings
if data.file_format == 'raw':
if (self.configuration.gpfs_images_share_mode ==
'copy_on_write'):
LOG.debug('Clone image to vol %s using mmclone.' %
volume['id'])
self._create_gpfs_copy(image_path, vol_path)
elif self.configuration.gpfs_images_share_mode == 'copy':
LOG.debug('Clone image to vol %s using copyfile.' %
volume['id'])
shutil.copyfile(image_path, vol_path)
# if image is not raw convert it to raw into vol_path destination
else:
LOG.debug('Clone image to vol %s using qemu convert.' %
volume['id'])
image_utils.convert_image(image_path, vol_path, 'raw')
self._set_rw_permission(vol_path)
self._resize_volume_file(volume, volume['size'])
return {'provider_location': None}, True
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume.
Note that cinder.volume.flows.create_volume will attempt to use
clone_image to efficiently create volume from image when both
source and target are backed by gpfs storage. If that is not the
case, this function is invoked and uses fetch_to_raw to create the
volume.
"""
# Check if GPFS is mounted
self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)
LOG.debug('Copy image to vol %s using image_utils fetch_to_raw.' %
volume['id'])
image_utils.fetch_to_raw(context, image_service, image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
self._resize_volume_file(volume, volume['size'])
def _resize_volume_file(self, volume, new_size):
"""Resize volume file to new size."""
vol_path = self.local_path(volume)
try:
image_utils.resize_image(vol_path, new_size, run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(_("Failed to resize volume "
"%(volume_id)s, error: %(error)s.") %
{'volume_id': volume['id'],
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
data = image_utils.qemu_img_info(vol_path)
return data.virtual_size
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
self._resize_volume_file(volume, new_size)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
volume_path = self.local_path(volume)
LOG.debug(_('Begin backup of volume %s.') % volume['name'])
# create a snapshot that will be used as the backup source
backup_path = '%s_%s' % (volume_path, backup['id'])
self._create_gpfs_clone(volume_path, backup_path)
self._gpfs_redirect(volume_path)
try:
with utils.temporary_chown(backup_path):
with fileutils.file_open(backup_path) as backup_file:
backup_service.backup(backup, backup_file)
finally:
# clean up snapshot file. If it is a clone parent, delete
# will fail silently, but be cleaned up when volume is
# eventually removed. This ensures we do not accumulate
# more than gpfs_max_clone_depth snap files.
self._delete_gpfs_file(backup_path)
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
LOG.debug(_('Begin restore of backup %s.') % backup['id'])
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def _migrate_volume(self, volume, host):
"""Migrate vol if source and dest are managed by same GPFS cluster."""
LOG.debug('Migrate volume request %(vol)s to %(host)s.' %
{'vol': volume['name'],
'host': host['host']})
dest_path = self._can_migrate_locally(host)
if dest_path is None:
LOG.debug('Cannot migrate volume locally, use generic migration.')
return (False, None)
if dest_path == self.configuration.gpfs_mount_point_base:
LOG.debug('Migration target is same cluster and path, '
'no work needed.')
return (True, None)
LOG.debug('Migration target is same cluster but different path, '
'move the volume file.')
local_path = self.local_path(volume)
new_path = os.path.join(dest_path, volume['name'])
try:
self._execute('mv', local_path, new_path, run_as_root=True)
return (True, None)
except processutils.ProcessExecutionError as exc:
LOG.error(_('Driver-based migration of volume %(vol)s failed. '
'Move from %(src)s to %(dst)s failed with error: '
'%(error)s.') %
{'vol': volume['name'],
'src': local_path,
'dst': new_path,
'error': exc.stderr})
return (False, None)
def migrate_volume(self, context, volume, host):
"""Attempt to migrate a volume to specified host."""
return self._migrate_volume(volume, host)
def retype(self, context, volume, new_type, diff, host):
"""Modify volume to be of new type."""
LOG.debug('Retype volume request %(vol)s to be %(type)s '
'(host: %(host)s), diff %(diff)s.' %
{'vol': volume['name'],
'type': new_type,
'host': host,
'diff': diff})
retyped = False
migrated = False
pools = diff['extra_specs'].get('capabilities:storage_pool')
backends = diff['extra_specs'].get('volume_backend_name')
hosts = (volume['host'], host['host'])
# if different backends let migration create a new volume and copy
# data because the volume is considered to be substantially different
if _different(backends):
LOG.debug('Retype request is for different backends, '
'use migration: %s %s.' % backends)
return False
if _different(pools):
old, new = pools
LOG.debug('Retype pool attribute from %s to %s.' % pools)
retyped = self._update_volume_storage_pool(self.local_path(volume),
new)
if _different(hosts):
LOG.debug('Retype hosts migrate from: %s to %s.' % hosts)
migrated, mdl_update = self._migrate_volume(volume, host)
if migrated:
updates = {'host': host['host']}
self.db.volume_update(context, volume['id'], updates)
return retyped or migrated
def _mkfs(self, volume, filesystem, label=None):
"""Initialize volume to be specified filesystem type."""
if filesystem == 'swap':
cmd = ['mkswap']
else:
cmd = ['mkfs', '-t', filesystem]
if filesystem in ('ext3', 'ext4'):
cmd.append('-F')
if label:
if filesystem in ('msdos', 'vfat'):
label_opt = '-n'
else:
label_opt = '-L'
cmd.extend([label_opt, label])
path = self.local_path(volume)
cmd.append(path)
try:
self._execute(*cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
exception_message = (_("mkfs failed on volume %(vol)s, "
"error message was: %(err)s.")
% {'vol': volume['name'], 'err': exc.stderr})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
def _get_available_capacity(self, path):
"""Calculate available space on path."""
# Check if GPFS is mounted
try:
self._verify_gpfs_path_state(path)
mounted = True
except exception.VolumeBackendAPIException:
mounted = False
# If GPFS is not mounted, return zero capacity. So that the volume
# request can be scheduled to another volume service.
if not mounted:
return 0, 0
out, err = self._execute('df', '-P', '-B', '1', path,
run_as_root=True)
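# 'df -P -B 1' reports sizes in bytes; the second output line holds this
# filesystem's stats, with the total size in field 1 and the available
# space in field 3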
out = out.splitlines()[1]
size = int(out.split()[1])
available = int(out.split()[3])
return available, size
def _verify_gpfs_path_state(self, path):
"""Examine if GPFS is active and file system is mounted or not."""
try:
self._is_gpfs_path(path)
except processutils.ProcessExecutionError:
msg = (_('%s cannot be accessed. Verify that GPFS is active and '
'file system is mounted.') % path)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
| [
"[email protected]"
] | |
8b99d4542e0c838b433c0a0a5d58782212c4ca07 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano3876.py | 3a979b641b2f383e7391e1c9a00800ebf0b3ce64 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/270000/CB198DE2-7241-F649-96D7-88C84D962C43.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest3876.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"[email protected]"
] | |
3785d41263e4b9234ce9fa12094e5c58c4066148 | 14567e2f77d2bf697bb18c3c1e3d6744c11f41c8 | /kfpt/ftp/old/yewubiangeng.py | b6326593df20c37274bbb06c1dd6d1d4c6865c11 | [] | no_license | yanislong/junnan | 268e64c288e18456da621d5485e04bf8eb8f5322 | fc35f32a29a7b6da2a8ea334d0e53a21a81d97f3 | refs/heads/master | 2021-01-01T20:08:05.825407 | 2017-09-08T02:24:40 | 2017-09-08T02:24:40 | 98,772,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,854 | py | #!/usr/bin/python
# -*- coding=utf-8 -*-
import threading
import requests
import chardet
import suds
import sys
sys.path.append('/root/git_20170730/kfpt')
import jiami
import config
import jiemi
reload(sys)
sys.setdefaultencoding('utf-8')
def DI():
num = "1005891"
num2 = "1000213"
token4 = "MTAwNDAwMFQwM19PVF8wMDY5OTkyMDE3LTA4"
token3 = "MTAwM2FiY1QwM19PVF8wMDZ4eXoyMDE3LTA4"
XML1 = "<PACKET><HEAD><SYS_COMPANY>1004</SYS_COMPANY><SERVICE_CODE>T03_OT_006</SERVICE_CODE><FILE_TYPE>YWBG</FILE_TYPE><REQUEST_TIME>2017-07-05 09:00:00</REQUEST_TIME><ACCESS_TOKEN>"+ token4 +"</ACCESS_TOKEN><HANDLE_TYPE>1</HANDLE_TYPE><CUST_COMPANY>1002</CUST_COMPANY><ACCOUNT_PERIOD>201704</ACCOUNT_PERIOD><PROVINCE_ID>370000</PROVINCE_ID><CITY_ID>370100</CITY_ID><FLOW_ID></FLOW_ID><STATUS>1</STATUS></HEAD></PACKET>"
XML2 = "<PACKET><HEAD><SYS_COMPANY>1004</SYS_COMPANY><SERVICE_CODE>T03_OT_006</SERVICE_CODE><FILE_TYPE>YWBG</FILE_TYPE><REQUEST_TIME>2017-07-05 09:00:00</REQUEST_TIME><ACCESS_TOKEN>"+ token3 +"</ACCESS_TOKEN><HANDLE_TYPE>2</HANDLE_TYPE><CUST_COMPANY>1003</CUST_COMPANY><ACCOUNT_PERIOD>201704</ACCOUNT_PERIOD><PROVINCE_ID>370000</PROVINCE_ID><CITY_ID>370100</CITY_ID><FLOW_ID>"+ num +"</FLOW_ID><STATUS>1</STATUS></HEAD></PACKET>"
XML3 = "<PACKET><HEAD><SYS_COMPANY>1004</SYS_COMPANY><SERVICE_CODE>T03_OT_006</SERVICE_CODE><FILE_TYPE>YWBG</FILE_TYPE><REQUEST_TIME>2017-07-05 09:00:00</REQUEST_TIME><ACCESS_TOKEN>"+ token4 +"</ACCESS_TOKEN><HANDLE_TYPE>3</HANDLE_TYPE><CUST_COMPANY>1003</CUST_COMPANY><ACCOUNT_PERIOD>201704</ACCOUNT_PERIOD><PROVINCE_ID>370000</PROVINCE_ID><CITY_ID>370100</CITY_ID><FLOW_ID>" + num + "</FLOW_ID><STATUS>1</STATUS></HEAD></PACKET>"
XML4 = "<PACKET><HEAD><SYS_COMPANY>1004</SYS_COMPANY><SERVICE_CODE>T03_OT_006</SERVICE_CODE><FILE_TYPE>YWBG</FILE_TYPE><REQUEST_TIME>2017-07-05 09:00:00</REQUEST_TIME><ACCESS_TOKEN>"+ token3 +"</ACCESS_TOKEN><HANDLE_TYPE>4</HANDLE_TYPE><CUST_COMPANY>1003</CUST_COMPANY><ACCOUNT_PERIOD>201704</ACCOUNT_PERIOD><PROVINCE_ID>370000</PROVINCE_ID><CITY_ID>370100</CITY_ID><FLOW_ID>"+ num2 +"</FLOW_ID><STATUS>1</STATUS></HEAD></PACKET>"
XML = XML2
print "请求报文明文:\n", XML
r0 = requests.post(config.encode, data={'requestXml':XML})
endata = r0.content.replace(r"\n","")
# print "请求报文密文:\n", endata
print u">> 业务变更确认信息同步接口"
print "*************"
en = endata[1:-1]
u = config.url + "/services/filesMutual?wsdl"
cc = suds.client.Client(u).service.ftpFilesMutual(encReqXml=en)
# print "请求返回的加密报文:\n", cc
print jiemi.jiemi(cc.replace(r"\n",""))
return cc
if __name__ == "__main__":
for i in range(1):
for i in range(1):
t = threading.Thread(target=DI, args=())
t.start()
t.join()
print ">> program run end"
| [
"[email protected]"
] | |
efb56f8d52f1fa5f6f1068625bd3f62c292a2263 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/refactoring/move/class/before/src/a.py | 3672729542fb5dc6e6b22bfe999a85b4377e35a4 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 329 | py | from lib1 import URLOpener
import lib1
class C(object):
def __init__(self):
self.opener = lib1.URLOpener(None)
def f(self, x):
o = URLOpener(x)
return o.urlopen()
def g(self, x):
return 'f({0!r}) = {1!r}'.format(URLOpener(x), lib1.URLOpener(x))
def main():
c = C()
print c | [
"[email protected]"
] | |
6d70e863685d27bec6df010891b179cfa58dc4b2 | 75678ff3f7fb3af16b36d5ef952ce90c089336e1 | /legacy_folder/deep_action_network.py | d542fe6fced91191d8af9ad6908157fffa3b55e6 | [] | no_license | jeonggwanlee/LSTD-mu | 71cc530b7072aab9e515609fa61f0fefe1d53903 | b2d890ddba587ac01b722a4a6b21575b354dec9d | refs/heads/master | 2020-04-12T16:06:25.249480 | 2019-03-07T16:58:00 | 2019-03-07T16:58:00 | 162,602,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,518 | py | """ Jeonggwan Lee([email protected])
"""
import pickle
import ipdb
import gym
import numpy as np
import copy
import os
import tensorflow as tf
import random
from record import get_test_record_title
import tf_utils
TRANSITION = 15000
#EPISODE = 20
#MEMORY_SIZE = TRANSITION + 1000
NUM_ACTION_ITER = 10000
NUM_EVALUATION = 100
NUM_EPISODES = 300
MAX_STEPS = 300
EPOCH_SIZE = 100
BATCH_SIZE = 100
def generate_trajectories_from_expert_policy(env, n_trajectories=100):
trajectories = []
rewards_list = []
for _ in range(n_trajectories):
state = env.reset()
trajectory = []
rewards = 0
for _ in range(TRANSITION):
if state[2] < 0: # pole angle is minus(left)
if state[3] < 0: # pole velocity is minus(left) => bad situation.
action = 0 # go left
else: # pole velocity is plus(right) => good situation.
action = env.action_space.sample()
else: # pole angle is plus(right)
if state[3] < 0: # pole velocity is minus(left) => good situation.
action = env.action_space.sample()
else:
action = 1 # go right
next_state, reward, done, info = env.step(action)
trajectory.append([state, action, reward, next_state, done])
state = next_state
rewards += 1
if done:
rewards_list.append(rewards)
break
# for j
trajectories.append(trajectory)
# for i
print("expert policy average reward : {}".format(sum(rewards_list)/n_trajectories))
return trajectories
class DeepActionNetwork:
""" Deep Action(Q) Network
        predicts the expert's action from the state
        loss : softmax cross-entropy between the predicted action
            distribution and the expert action (one-hot)
"""
def __init__(self,
feature_op,
state_size=4,
action_size=2,
n_h1=20,
n_h2=9,
learning_rate=0.05,
scope="deep_action"
):
self.sess = tf.Session()
self.feature_op = feature_op
assert self.feature_op in ["h1", "h2", "pred"]
self.learning_rate = learning_rate
self.state_size = state_size
self.action_size = action_size
self.n_h1 = n_h1
self.n_h2 = n_h2
self.scope = scope
self.meta_name = "dan_cartpole_Nh1{}_Nh2{}.meta".format(n_h1, n_h2)
print("meta_name : {}".format(self.meta_name))
if self.isRestore():
self.saver = tf.train.import_meta_graph(self.meta_name)
self.saver.restore(self.sess, self.meta_name[:-5])
self._load_network()
else:
theta = self._build_network()
init_new_vars_op = tf.variables_initializer(theta)
self.sess.run(init_new_vars_op)
#self.sess.run(tf.global_variable_initializer())
def _build_network(self):
with tf.variable_scope(self.scope):
self.state_input = tf.placeholder(tf.float32, [None, self.state_size], name="state_input")
self.action = tf.placeholder(tf.int32, [None], name="action")
self.fc1 = tf_utils.fc(self.state_input, self.n_h1, scope="fc1",
activation_fn=tf.nn.relu,
initializer=tf.contrib.layers.variance_scaling_initializer(mode="FAN_IN"))
self.fc1_softmax = tf.nn.softmax(self.fc1, name="fc1_softmax")
self.fc2 = tf_utils.fc(self.fc1, self.n_h2, scope="fc2",
activation_fn=tf.nn.relu,
initializer=tf.contrib.layers.variance_scaling_initializer(mode="FAN_IN"))
self.fc2_softmax = tf.nn.softmax(self.fc2, name="fc2_softmax")
self.q_value = tf_utils.fc(self.fc2, self.action_size, scope="q_value", activation_fn=None)
self.action_pred = tf.nn.softmax(self.q_value, name="action_prediction")
self.action_target = tf.one_hot(self.action, self.action_size, on_value=1.0, off_value=0.0,
name="action_target")
self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.action_target,
logits=self.action_pred, name="loss")
#self.loss = tf.reduce_mean(tf.square(tf.subtract(self.action_pred, self.action_target)))
self.optimizer = tf.train.AdamOptimizer(self.learning_rate, name="optimizer")
self.train_op = self.optimizer.minimize(self.loss, global_step=tf.train.get_global_step(),
name="train_op")
new_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope)
return new_variables
def _load_network(self):
graph = tf.get_default_graph()
nodes = graph.as_graph_def().node
#for node in nodes:
# print(node.name)
#ops = graph.get_operations()
#for op in ops:
# print(op.name)
self.state_input = graph.get_tensor_by_name("deep_action/state_input:0")
self.action = graph.get_tensor_by_name("deep_action/action:0")
self.fc1 = graph.get_tensor_by_name("deep_action/fc1/Relu:0")
self.fc1_softmax = graph.get_tensor_by_name("deep_action/fc1_softmax:0")
self.fc2 = graph.get_tensor_by_name("deep_action/fc2/Relu:0")
self.fc2_softmax = graph.get_tensor_by_name("deep_action/fc2_softmax:0")
self.q_value = graph.get_tensor_by_name("deep_action/q_value/Add:0")
self.action_pred = graph.get_tensor_by_name("deep_action/action_prediction:0")
self.action_target = graph.get_tensor_by_name("deep_action/action_target:0")
self.loss = graph.get_tensor_by_name("deep_action/loss:0")
#self.optimizer = graph.get_tensor_by_name("deep_action/optimizer:0")
self.train_op = graph.get_operation_by_name("deep_action/train_op")
def isRestore(self):
if False:
#if os.path.exists(self.meta_name):
return True
else:
return False
def _num_basis(self):
return self.n_h2
def learn(self, expert_trajectories=None):
""" training from expert_trajectories """
if expert_trajectories is None:
env = gym.make("CartPole-v0")
expert_trajectories = generate_trajectories_from_expert_policy(env, n_trajectories=100)
expert_trajs_flat = []
for i in range(len(expert_trajectories)):
for j in range(len(expert_trajectories[i])):
expert_trajs_flat.append(expert_trajectories[i][j])
random.shuffle(expert_trajs_flat)
batch_end = 0
for i in range(NUM_ACTION_ITER):
if batch_end + BATCH_SIZE > len(expert_trajs_flat):
batch_end = 0
random.shuffle(expert_trajs_flat)
batch_expert_trajs = expert_trajs_flat[batch_end:batch_end+BATCH_SIZE]
cur_state_batch = [s[0] for s in batch_expert_trajs]
cur_action_batch = [s[1] for s in batch_expert_trajs]
loss, _ = self.sess.run([self.loss, self.train_op], feed_dict={self.state_input:cur_state_batch,
self.action:cur_action_batch})
batch_end += BATCH_SIZE
if i % 10 == 0:
if type(loss) == np.float32:
print("Deep Action Network Training iteration {}, {}".format(i, loss))
else:
print("Deep Action Network Training iteration {}, {}".format(i, sum(loss)/BATCH_SIZE))
print("saveing our trained weights!!")
self.saver = tf.train.Saver()
self.saver.save(self.sess, "./" + self.meta_name[:-5])
def get_optimal_action(self, state):
actions = self.sess.run(self.action_pred, feed_dict={self.state_input: [state]})
return actions.argmax()
def get_q_value(self, state):
q_value = self.sess.run(self.q_value, feed_dict={self.state_input: [state]})
return q_value
def get_action_pred(self, state):
action_pred = self.sess.run(self.action_pred, feed_dict={self.state_input: [state]})
#q_value = self.sess.run(self.q_value, feed_dict={self.state_input: [state]})
return action_pred
def get_features(self, state):
if self.feature_op == 'pred':
features = self.sess.run(self.action_pred, feed_dict={self.state_input: [state]})
elif self.feature_op == 'h2':
features = self.sess.run(self.fc2_softmax, feed_dict={self.state_input: [state]})
elif self.feature_op == 'h1':
features = self.sess.run(self.fc1_softmax, feed_dict={self.state_input: [state]})
return features
def get_feature_dim(self):
if self.feature_op == 'pred':
return self.action_size
elif self.feature_op == 'h2':
return self.n_h2
elif self.feature_op == 'h1':
return self.n_h1
def evaluate_multi_states(self, state):
""" get features's multiple version
"""
if self.feature_op == 'pred':
features = self.sess.run(self.action_pred, feed_dict={self.state_input: state})
elif self.feature_op == 'h2':
features = self.sess.run(self.fc2_softmax, feed_dict={self.state_input: state})
elif self.feature_op == 'h1':
features = self.sess.run(self.fc1_softmax, feed_dict={self.state_input: state})
return features
def test(self, env, isRender=True, num_test=100):
print("Testing Deep Action Network... {} times".format(num_test))
timesteps = []
for i in range(num_test):
cur_state = env.reset()
done = False
t = 0
while not done:
t = t + 1
if isRender:
env.render()
action = self.get_optimal_action(cur_state)
next_state, reward, done, _ = env.step(action)
cur_state = next_state
if done:
print("Test DAN {} : {} timesteps".format(i, t))
timesteps.append(t)
break
print("DAN average test results : {}".format(sum(timesteps)/num_test))
#end while
#end for i
| [
"[email protected]"
] | |
776c5ce3437a6a7358d8726241ec13c72d739770 | f7346cf6969fb68d157147e91b90584a881ab814 | /tutorials/EI networks/STEP3_ExcInhNet_Brunel2000_brian2.py | e04b4f6bf78e4d5d25d75b7bb94d075b5d40eaae | [
"MIT"
] | permissive | h-mayorquin/camp_india_2016 | d423f330523fafd4320dbce0429ac4eaafc32e3d | a8bf8db7778c39c7ca959a7f876c1aa85f2cae8b | refs/heads/master | 2021-01-20T20:32:50.036961 | 2016-07-15T17:50:34 | 2016-07-15T17:50:34 | 62,492,660 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,316 | py | from brian2 import *
from data_utils import *
set_device('cpp_standalone', build_on_run=False)
# neuronal parameters
N = 12500 # total number of neurons
NE = 10000 # number of excitatory neurons
vth = 20*mV # threshold potential
vr = 10*mV # reset potential
tau = 20*ms # membrane time constant
eqs_neurons='''
inp : volt
dv/dt = (-v + inp)/tau : volt
'''
P=NeuronGroup(N=N,model=eqs_neurons,\
threshold='v>=vth',reset='v=vr',\
refractory=2*ms,method='euler')
P.v = uniform(size=12500)*vth
# synaptic parameters
g = 5 # ratio of inh to exc
J = 0.1*mV # synaptic weight
p = 0.1 # connection probability
delay = 1.5*ms # synaptic delay
# delta-function synapses
con = Synapses(P,P,'w:volt (constant)',on_pre='v_post+=w',method='euler')
#con.connect(condition='i!=j',p=p)
print 'computing connection matrix'
CE = int(p*NE)
CI = int(p*(N-NE))
C = CE+CI
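# fixed in-degree connectivity as in Brunel (2000): every post-synaptic
# neuron receives exactly CE excitatory and CI inhibitory inputs, drawn at
# random while excluding self-connections (autapses)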
conn_i = np.zeros(C*N,dtype=int)
preneuronsE = arange(0,NE,dtype=int)
preneuronsI = arange(NE,N,dtype=int)
for j in range(N): # loop over post-synaptic neurons
# draw CE number of neuron indices out of NE neurons, no autapses
if j<NE: preneurons = np.delete(preneuronsE,j)
else: preneurons = preneuronsE
conn_i[j*C:j*C+CE] = np.random.permutation(preneurons)[:CE]
# draw CI number of neuron indices out of inhibitory neurons, no autapses
    if j>=NE: preneurons = np.delete(preneuronsI,j-NE) # j>=NE: post-neuron is inhibitory, exclude it (no autapses)
else: preneurons = preneuronsI
conn_i[j*C+CE:(j+1)*C] = np.random.permutation(preneurons)[:CI]
conn_j = np.repeat(range(N),C)
print 'connecting network'
con.connect(i=conn_i,j=conn_j)
con.delay = delay
con.w['i<NE'] = J
con.w['i>=NE'] = -g*J
# input parameters
inpfactor = 2
nu_theta = vth/(p*NE*J*tau)
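# nu_theta is the external rate that just brings the membrane to threshold
# on average: nu_thr = vth/(C_E*J*tau) with C_E = p*NE (Brunel 2000);
# inpfactor sets the ratio nu_ext/nu_thr (2 here, i.e. supra-threshold drive)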
Pinp = PoissonGroup(N=N,rates=inpfactor*nu_theta)
con_ext = Synapses(Pinp, P, on_pre='v += J')
con_ext.connect(True, p=p*NE/float(N))
con_ext.delay = delay
sm = SpikeMonitor(P)
sr = PopulationRateMonitor(P)
sm_vm = StateMonitor(P,'v',record=range(5))
print 'compiling/running'
run(0.25*second, report='text')
device.build(directory='output', compile=True, run=True, debug=False);
print "mean activity (Hz) =",mean(sr.rate/Hz)
figure()
plot(sm.t/ms,sm.i,'.')
#ylim([1350,1400])
figure()
plot(sr.t/ms,sr.rate/Hz,',-')
#figure()
#hist(CV_spiketrains(array(sm.t),array(sm.i),0.,range(N)),bins=100)
show()
| [
"[email protected]"
] | |
4240dcc89fea3086e9ad8bb2404e025a5801990a | b0b566dc3d3df8b60b8ce26d151991700341667f | /cms/custom_settings/models.py | fd4b05b7a5f1c8fae3e729c316734ea79e048e5e | [] | no_license | lorne-luo/hawkeye | cf2f7cbe2494aea92e0fc333217f86d00cf1ecba | 82b633dfc1278ab8f2d25ec699d6034b26c791e2 | refs/heads/master | 2023-01-06T17:28:28.489494 | 2020-01-22T23:53:10 | 2020-01-22T23:53:24 | 185,289,740 | 0 | 0 | null | 2023-01-04T14:03:49 | 2019-05-07T00:13:40 | Python | UTF-8 | Python | false | false | 2,026 | py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail.contrib.settings.models import BaseSetting
from wagtail.contrib.settings.registry import register_setting
from core.constants import AU_CITY_CHOICES, AU_STATE_CHOICES
from wagtail.snippets.models import register_snippet
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
@register_setting(icon='form')
class ContactUs(BaseSetting):
name = models.CharField(_('name'), max_length=255, blank=True, help_text='contactor name')
address1 = models.CharField(_('address1'), max_length=255, blank=True, help_text='address1')
address2 = models.CharField(_('address2'), max_length=255, blank=True, help_text='address2')
city = models.CharField(_('city'), choices=AU_CITY_CHOICES, max_length=255, blank=True, help_text='city')
state = models.CharField(_('state'), choices=AU_STATE_CHOICES, max_length=255, blank=True, help_text='state')
postcode = models.CharField(_('postcode'), max_length=32, blank=True, help_text='postcode')
phone = models.CharField(_('phone'), max_length=32, blank=True, help_text='phone')
email = models.EmailField(_('email'), max_length=255, blank=True, help_text='email')
class Meta:
verbose_name = 'contact us'
@register_snippet
class Link(models.Model):
name = models.CharField(_('name'), max_length=255, blank=False, help_text='Partner name')
link = models.URLField(_('link'), blank=True, help_text='Partner link')
logo = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
class_name = models.CharField(_('styling class name'), max_length=64, blank=True, help_text='styling class name')
panels = [
FieldPanel('name'),
ImageChooserPanel('logo'),
FieldPanel('link'),
FieldPanel('class_name'),
]
def __str__(self):
return self.name
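# Usage sketch (assumptions: Wagtail's settings context processor is enabled
# and this app's label is 'custom_settings'):
#   {{ settings.custom_settings.ContactUs.phone }}   -- in a template
#   Link.objects.all()                               -- list partner link snippets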
| [
"[email protected]"
] | |
67e607b1a677e995c621101f6d3f9332324b08f5 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/billing/azure-mgmt-billing/azure/mgmt/billing/operations/_customers_operations.py | 4f17e31856174d92a6b40d4a905e37c304eae880 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 17,554 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_billing_profile_request(
billing_account_name: str,
billing_profile_name: str,
*,
search: Optional[str] = None,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/customers",
) # pylint: disable=line-too-long
path_format_arguments = {
"billingAccountName": _SERIALIZER.url("billing_account_name", billing_account_name, "str"),
"billingProfileName": _SERIALIZER.url("billing_profile_name", billing_profile_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
if search is not None:
_params["$search"] = _SERIALIZER.query("search", search, "str")
if filter is not None:
_params["$filter"] = _SERIALIZER.query("filter", filter, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_billing_account_request(
billing_account_name: str, *, search: Optional[str] = None, filter: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/customers")
path_format_arguments = {
"billingAccountName": _SERIALIZER.url("billing_account_name", billing_account_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
if search is not None:
_params["$search"] = _SERIALIZER.query("search", search, "str")
if filter is not None:
_params["$filter"] = _SERIALIZER.query("filter", filter, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
billing_account_name: str, customer_name: str, *, expand: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url", "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/customers/{customerName}"
) # pylint: disable=line-too-long
path_format_arguments = {
"billingAccountName": _SERIALIZER.url("billing_account_name", billing_account_name, "str"),
"customerName": _SERIALIZER.url("customer_name", customer_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
if expand is not None:
_params["$expand"] = _SERIALIZER.query("expand", expand, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class CustomersOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.billing.BillingManagementClient`'s
:attr:`customers` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_billing_profile(
self,
billing_account_name: str,
billing_profile_name: str,
search: Optional[str] = None,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.Customer"]:
"""Lists the customers that are billed to a billing profile. The operation is supported only for
billing accounts with agreement type Microsoft Partner Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param billing_profile_name: The ID that uniquely identifies a billing profile. Required.
:type billing_profile_name: str
:param search: Used for searching customers by their name. Any customer with name containing
the search text will be included in the response. Default value is None.
:type search: str
:param filter: May be used to filter the list of customers. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Customer or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.billing.models.Customer]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.CustomerListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_billing_profile_request(
billing_account_name=billing_account_name,
billing_profile_name=billing_profile_name,
search=search,
filter=filter,
api_version=api_version,
template_url=self.list_by_billing_profile.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CustomerListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_billing_profile.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/customers"} # type: ignore
@distributed_trace
def list_by_billing_account(
self, billing_account_name: str, search: Optional[str] = None, filter: Optional[str] = None, **kwargs: Any
) -> Iterable["_models.Customer"]:
"""Lists the customers that are billed to a billing account. The operation is supported only for
billing accounts with agreement type Microsoft Partner Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param search: Used for searching customers by their name. Any customer with name containing
the search text will be included in the response. Default value is None.
:type search: str
:param filter: May be used to filter the list of customers. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Customer or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.billing.models.Customer]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.CustomerListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_billing_account_request(
billing_account_name=billing_account_name,
search=search,
filter=filter,
api_version=api_version,
template_url=self.list_by_billing_account.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CustomerListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_billing_account.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/customers"} # type: ignore
@distributed_trace
def get(
self, billing_account_name: str, customer_name: str, expand: Optional[str] = None, **kwargs: Any
) -> _models.Customer:
"""Gets a customer by its ID. The operation is supported only for billing accounts with agreement
type Microsoft Partner Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param customer_name: The ID that uniquely identifies a customer. Required.
:type customer_name: str
:param expand: May be used to expand enabledAzurePlans and resellers. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Customer or the result of cls(response)
:rtype: ~azure.mgmt.billing.models.Customer
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Customer]
request = build_get_request(
billing_account_name=billing_account_name,
customer_name=customer_name,
expand=expand,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Customer", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/customers/{customerName}"} # type: ignore
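    # Illustrative sketch (the client variable name and expand value are assumptions):
    # a point read returns a single deserialized Customer model instead of a pager.
    #
    #   customer = billing_client.customers.get(
    #       billing_account_name="{billingAccountName}",
    #       customer_name="{customerName}",
    #       expand="enabledAzurePlans,resellers",
    #   )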
| [
"[email protected]"
] | |
720b8b5ad105cefcdea842e54d46b83fb5563320 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/parcoords/_labelangle.py | 13d8fff892a81eaefbdfe73a406fc69782ba9295 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 406 | py | import _plotly_utils.basevalidators
class LabelangleValidator(_plotly_utils.basevalidators.AngleValidator):
def __init__(self, plotly_name="labelangle", parent_name="parcoords", **kwargs):
super(LabelangleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
| [
"[email protected]"
] | |
3ac09f865d0bb02c5ab3497d9bf59377dc8c0d7e | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnessex.py | 8c1f5ab339803004f3a0e038993da95c03c88730 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 628 | py | ii = [('CookGHP3.py', 2), ('MarrFDI.py', 1), ('CoolWHM2.py', 2), ('CookGHP.py', 37), ('ShawHDE.py', 5), ('WilbRLW5.py', 1), ('ClarGE2.py', 16), ('SeniNSP.py', 2), ('CookGHP2.py', 1), ('CrokTPS.py', 1), ('ClarGE.py', 22), ('LandWPA.py', 1), ('LyelCPG.py', 4), ('DaltJMA.py', 5), ('DibdTRL2.py', 2), ('WadeJEB.py', 8), ('TalfTIT.py', 1), ('GodwWLN.py', 6), ('MereHHB3.py', 7), ('HowiWRL2.py', 4), ('MereHHB.py', 10), ('HogaGMM.py', 4), ('WestJIT.py', 1), ('BabbCEM.py', 1), ('WilbRLW3.py', 2), ('MereHHB2.py', 6), ('ClarGE3.py', 23), ('DibdTRL.py', 3), ('MartHSI.py', 1), ('DwigTHH.py', 1), ('SadlMLP2.py', 4), ('LyelCPG3.py', 5)] | [
"[email protected]"
] | |
8c90dde967a168d19294d7de308bc868f12d2843 | 1719920a92f7194766624474b98d59ef8d6eddaf | /models/workbook_chart.py | e1ff1c8523996a870b6b7be3ab6088101b52548a | [
"MIT"
] | permissive | MIchaelMainer/msgraph-v10-models-python | cfa5e3a65ba675383975a99779763211ed9fa0a9 | adad66363ebe151be2332f3ef74a664584385748 | refs/heads/master | 2020-03-19T12:51:06.370673 | 2018-06-08T00:16:12 | 2018-06-08T00:16:12 | 136,544,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,259 | py | # -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.workbook_chart_axes import WorkbookChartAxes
from ..model.workbook_chart_data_labels import WorkbookChartDataLabels
from ..model.workbook_chart_area_format import WorkbookChartAreaFormat
from ..model.workbook_chart_legend import WorkbookChartLegend
from ..model.workbook_chart_series import WorkbookChartSeries
from ..model.workbook_chart_title import WorkbookChartTitle
from ..model.workbook_worksheet import WorkbookWorksheet
from ..one_drive_object_base import OneDriveObjectBase
class WorkbookChart(OneDriveObjectBase):
    def __init__(self, prop_dict=None):
        # Use a fresh dict per instance instead of a shared mutable default argument.
        self._prop_dict = prop_dict if prop_dict is not None else {}
@property
def height(self):
"""
Gets and sets the height
Returns:
float:
The height
"""
if "height" in self._prop_dict:
return self._prop_dict["height"]
else:
return None
@height.setter
def height(self, val):
self._prop_dict["height"] = val
@property
def left(self):
"""
Gets and sets the left
Returns:
float:
The left
"""
if "left" in self._prop_dict:
return self._prop_dict["left"]
else:
return None
@left.setter
def left(self, val):
self._prop_dict["left"] = val
@property
def name(self):
"""
Gets and sets the name
Returns:
str:
The name
"""
if "name" in self._prop_dict:
return self._prop_dict["name"]
else:
return None
@name.setter
def name(self, val):
self._prop_dict["name"] = val
@property
def top(self):
"""
Gets and sets the top
Returns:
float:
The top
"""
if "top" in self._prop_dict:
return self._prop_dict["top"]
else:
return None
@top.setter
def top(self, val):
self._prop_dict["top"] = val
@property
def width(self):
"""
Gets and sets the width
Returns:
float:
The width
"""
if "width" in self._prop_dict:
return self._prop_dict["width"]
else:
return None
@width.setter
def width(self, val):
self._prop_dict["width"] = val
@property
def axes(self):
"""
Gets and sets the axes
Returns:
:class:`WorkbookChartAxes<onedrivesdk.model.workbook_chart_axes.WorkbookChartAxes>`:
The axes
"""
if "axes" in self._prop_dict:
if isinstance(self._prop_dict["axes"], OneDriveObjectBase):
return self._prop_dict["axes"]
else :
self._prop_dict["axes"] = WorkbookChartAxes(self._prop_dict["axes"])
return self._prop_dict["axes"]
return None
@axes.setter
def axes(self, val):
self._prop_dict["axes"] = val
@property
def data_labels(self):
"""
Gets and sets the dataLabels
Returns:
:class:`WorkbookChartDataLabels<onedrivesdk.model.workbook_chart_data_labels.WorkbookChartDataLabels>`:
The dataLabels
"""
if "dataLabels" in self._prop_dict:
if isinstance(self._prop_dict["dataLabels"], OneDriveObjectBase):
return self._prop_dict["dataLabels"]
else :
self._prop_dict["dataLabels"] = WorkbookChartDataLabels(self._prop_dict["dataLabels"])
return self._prop_dict["dataLabels"]
return None
@data_labels.setter
def data_labels(self, val):
self._prop_dict["dataLabels"] = val
@property
def format(self):
"""
Gets and sets the format
Returns:
:class:`WorkbookChartAreaFormat<onedrivesdk.model.workbook_chart_area_format.WorkbookChartAreaFormat>`:
The format
"""
if "format" in self._prop_dict:
if isinstance(self._prop_dict["format"], OneDriveObjectBase):
return self._prop_dict["format"]
else :
self._prop_dict["format"] = WorkbookChartAreaFormat(self._prop_dict["format"])
return self._prop_dict["format"]
return None
@format.setter
def format(self, val):
self._prop_dict["format"] = val
@property
def legend(self):
"""
Gets and sets the legend
Returns:
:class:`WorkbookChartLegend<onedrivesdk.model.workbook_chart_legend.WorkbookChartLegend>`:
The legend
"""
if "legend" in self._prop_dict:
if isinstance(self._prop_dict["legend"], OneDriveObjectBase):
return self._prop_dict["legend"]
else :
self._prop_dict["legend"] = WorkbookChartLegend(self._prop_dict["legend"])
return self._prop_dict["legend"]
return None
@legend.setter
def legend(self, val):
self._prop_dict["legend"] = val
@property
def series(self):
"""Gets and sets the series
Returns:
:class:`SeriesCollectionPage<onedrivesdk.request.series_collection.SeriesCollectionPage>`:
The series
"""
if "series" in self._prop_dict:
return SeriesCollectionPage(self._prop_dict["series"])
else:
return None
@property
def title(self):
"""
Gets and sets the title
Returns:
:class:`WorkbookChartTitle<onedrivesdk.model.workbook_chart_title.WorkbookChartTitle>`:
The title
"""
if "title" in self._prop_dict:
if isinstance(self._prop_dict["title"], OneDriveObjectBase):
return self._prop_dict["title"]
else :
self._prop_dict["title"] = WorkbookChartTitle(self._prop_dict["title"])
return self._prop_dict["title"]
return None
@title.setter
def title(self, val):
self._prop_dict["title"] = val
@property
def worksheet(self):
"""
Gets and sets the worksheet
Returns:
:class:`WorkbookWorksheet<onedrivesdk.model.workbook_worksheet.WorkbookWorksheet>`:
The worksheet
"""
if "worksheet" in self._prop_dict:
if isinstance(self._prop_dict["worksheet"], OneDriveObjectBase):
return self._prop_dict["worksheet"]
else :
self._prop_dict["worksheet"] = WorkbookWorksheet(self._prop_dict["worksheet"])
return self._prop_dict["worksheet"]
return None
@worksheet.setter
def worksheet(self, val):
self._prop_dict["worksheet"] = val
| [
"[email protected]"
] | |
57da4fcb912fee8e7c21d8f1cbf7a1539e0aaf81 | df4ecb12fe9d20cb9fb92014736045425bf57c0d | /setup.py | 19fe4c62ffb2598aeb7ade8be207c6859e5fe45b | [
"MIT"
] | permissive | gecko-robotics/pygecko | a277c717d516de6d836ccfd47ac5b1a6e7dd09ef | a809593a894d8e591e992455a01aa73d8f7b7981 | refs/heads/master | 2022-07-09T09:28:44.500735 | 2019-10-26T23:07:51 | 2019-10-26T23:07:51 | 70,022,547 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | from __future__ import print_function
from setuptools import setup
from build_utils import BuildCommand
from build_utils import PublishCommand
from build_utils import BinaryDistribution
from build_utils import SetGitTag
from build_utils import get_pkg_version
# ver = {}
# with open("pygecko/version.py") as fp:
# exec(fp.read(), ver)
# VERSION = ver['__version__']
VERSION = get_pkg_version('pygecko/__init__.py')
PACKAGE_NAME = 'pygecko'
BuildCommand.pkg = PACKAGE_NAME
BuildCommand.py2 = False # not supporting python2 anymore
# BuildCommand.test = False # don't do tests
PublishCommand.pkg = PACKAGE_NAME
PublishCommand.version = VERSION
SetGitTag.version = VERSION
README = open('readme.md').read()
setup(
name=PACKAGE_NAME,
version=VERSION,
author="Kevin Walchko",
keywords=['framework', 'robotic', 'robot', 'vision', 'ros', 'distributed'],
author_email="[email protected]",
description="A python robotic framework and tools",
license="MIT",
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: Unix',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Software Development :: Libraries :: Python Modules'
],
install_requires=[
'pyyaml', # config files
'psutil',
'simplejson', # config files
'msgpack', # efficient message serialization through zmq
'pyzmq', # connecting to different processes and computers
# 'bjoern', # http server, multiple connections
# 'the_collector', # saving data
'colorama', # log messages
'numpy', # basic image stuff ... remove/optional?
'build_utils' # installing and building the library
],
url="https://github.com/MomsFriendlyRobotCompany/{}".format(PACKAGE_NAME),
long_description=README,
long_description_content_type='text/markdown',
packages=[PACKAGE_NAME],
cmdclass={
'publish': PublishCommand,
'make': BuildCommand,
'tag': SetGitTag
},
scripts=[
'bin/geckocore.py',
'bin/pycore.py',
'bin/geckolaunch.py',
# 'bin/mjpeg_server.py', # why? use opencvutils instead
# 'bin/bag_play.py',
# 'bin/bag_record.py',
# 'bin/camera_calibrate.py',
# 'bin/image_view.py',
# 'bin/service.py', # fix
'bin/gecko_log_display.py',
'bin/geckotopic.py',
'bin/twist_keyboard.py'
# 'bin/video.py',
# 'bin/webserver.py'
]
)
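# Illustrative usage of the custom commands wired in through cmdclass above (a sketch;
# the actual behaviour is implemented in build_utils):
#   python setup.py make     # build and test the package
#   python setup.py publish  # build and upload the release
#   python setup.py tag      # push the git tag for VERSION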
| [
"[email protected]"
] | |
25e27632e69d5de8c7019925cc7cc57531e3916a | 2ffdd45472fc20497123bffc3c9b94d9fe8c9bc8 | /venv/Lib/site-packages/setuptools/_vendor/pyparsing.py | d322ce2aaa240d25e0f0d73d8bffe221325e8e25 | [] | no_license | mbea-int/expense-tracker-app | fca02a45623e24ed20d201f69c9a892161141e0c | 47db2c98ed93efcac5330ced2b98d2ca365e6017 | refs/heads/master | 2023-05-10T14:29:04.935218 | 2021-06-04T15:10:00 | 2021-06-04T15:10:00 | 373,816,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230,976 | py | # module pyparsing.py
#
# Copyright (c) 2003-2018 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
- construct character word-group expressions using the L{Word} class
- see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
- use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
- associate names with your parsed results using L{ParserElement.setResultsName}
- find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
- find more useful common expressions in the L{pyparsing_common} namespace class
"""
__version__ = "2.2.1"
__versionTime__ = "18 Sep 2018 00:49 UTC"
__author__ = "Paul McGuire <[email protected]>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
# Python 3
from collections.abc import Iterable
from collections.abc import MutableMapping
except ImportError:
# Python 2.7
from collections import Iterable
from collections import MutableMapping
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
# ~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
"And",
"CaselessKeyword",
"CaselessLiteral",
"CharsNotIn",
"Combine",
"Dict",
"Each",
"Empty",
"FollowedBy",
"Forward",
"GoToColumn",
"Group",
"Keyword",
"LineEnd",
"LineStart",
"Literal",
"MatchFirst",
"NoMatch",
"NotAny",
"OneOrMore",
"OnlyOnce",
"Optional",
"Or",
"ParseBaseException",
"ParseElementEnhance",
"ParseException",
"ParseExpression",
"ParseFatalException",
"ParseResults",
"ParseSyntaxException",
"ParserElement",
"QuotedString",
"RecursiveGrammarException",
"Regex",
"SkipTo",
"StringEnd",
"StringStart",
"Suppress",
"Token",
"TokenConverter",
"White",
"Word",
"WordEnd",
"WordStart",
"ZeroOrMore",
"alphanums",
"alphas",
"alphas8bit",
"anyCloseTag",
"anyOpenTag",
"cStyleComment",
"col",
"commaSeparatedList",
"commonHTMLEntity",
"countedArray",
"cppStyleComment",
"dblQuotedString",
"dblSlashComment",
"delimitedList",
"dictOf",
"downcaseTokens",
"empty",
"hexnums",
"htmlComment",
"javaStyleComment",
"line",
"lineEnd",
"lineStart",
"lineno",
"makeHTMLTags",
"makeXMLTags",
"matchOnlyAtCol",
"matchPreviousExpr",
"matchPreviousLiteral",
"nestedExpr",
"nullDebugAction",
"nums",
"oneOf",
"opAssoc",
"operatorPrecedence",
"printables",
"punc8bit",
"pythonStyleComment",
"quotedString",
"removeQuotes",
"replaceHTMLEntity",
"replaceWith",
"restOfLine",
"sglQuotedString",
"srange",
"stringEnd",
"stringStart",
"traceParseAction",
"unicodeString",
"upcaseTokens",
"withAttribute",
"indentedBlock",
"originalTextFor",
"ungroup",
"infixNotation",
"locatedExpr",
"withClass",
"CloseMatch",
"tokenMap",
"pyparsing_common",
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [
sum,
len,
sorted,
reversed,
list,
tuple,
set,
any,
all,
min,
max,
]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj, unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), "xmlcharrefreplace")
xmlcharref = Regex(r"&#\d+;")
xmlcharref.setParseAction(lambda t: "\\u" + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__, fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = "&><\"'"
to_symbols = ("&" + s + ";" for s in "amp gt lt quot apos".split())
for from_, to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
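# Ampersand is replaced first so the '&' characters introduced by the later entity
# substitutions are not themselves re-escaped. For example:
#   _xml_escape('a < b & "c"') -> 'a < b & "c"'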
class _Constants(object):
pass
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(self, pstr, loc=0, msg=None, elem=None):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
self.args = (pstr, loc, msg)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
def __getattr__(self, aname):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if aname == "lineno":
return lineno(self.loc, self.pstr)
elif aname in ("col", "column"):
return col(self.loc, self.pstr)
elif aname == "line":
return line(self.loc, self.pstr)
else:
raise AttributeError(aname)
def __str__(self):
return "%s (at char %d), (line:%d, col:%d)" % (
self.msg,
self.loc,
self.lineno,
self.column,
)
def __repr__(self):
return _ustr(self)
def markInputline(self, markerString=">!<"):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join(
(line_str[:line_column], markerString, line_str[line_column:])
)
return line_str.strip()
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""
    Exception thrown when a parse expression doesn't match the input;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
Example::
try:
Word(nums).setName("integer").parseString("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.col))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like L{ParseFatalException}, but thrown internally when an
L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
immediately because an unbacktrackable syntax error has been found"""
pass
# ~ class ReparseException(ParseBaseException):
# ~ """Experimental class - parse actions can raise this exception to cause
# ~ pyparsing to reparse the input string:
# ~ - with a modified input string, and/or
# ~ - with a modified start location
# ~ Set the values of the ReparseException in the constructor, and raise the
# ~ exception in a parse action to cause pyparsing to use the new string/location.
# ~ Setting the values as None causes no change to be made.
# ~ """
# ~ def __init_( self, newstring, restartLoc ):
# ~ self.newParseText = newstring
# ~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
def __init__(self, parseElementList):
self.parseElementTrace = parseElementList
def __str__(self):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self, p1, p2):
self.tup = (p1, p2)
def __getitem__(self, i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self, i):
self.tup = (self.tup[0], i)
class ParseResults(object):
"""
Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(
self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name, int):
name = _ustr(
name
) # will always return a str, but use _ustr for consistency
self.__name = name
if not (
isinstance(toklist, (type(None), basestring, list))
and toklist in (None, "", [])
):
if isinstance(toklist, basestring):
toklist = [toklist]
if asList:
if isinstance(toklist, ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(), 0)
else:
self[name] = _ParseResultsWithOffset(
ParseResults(toklist[0]), 0
)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError, TypeError, IndexError):
self[name] = toklist
def __getitem__(self, i):
if isinstance(i, (int, slice)):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([v[0] for v in self.__tokdict[i]])
def __setitem__(self, k, v, isinstance=isinstance):
if isinstance(v, _ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k, list()) + [v]
sub = v[0]
elif isinstance(k, (int, slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k, list()) + [
_ParseResultsWithOffset(v, 0)
]
sub = v
if isinstance(sub, ParseResults):
sub.__parent = wkref(self)
def __delitem__(self, i):
if isinstance(i, (int, slice)):
mylen = len(self.__toklist)
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i + 1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name, occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(
value, position - (position > j)
)
else:
del self.__tokdict[i]
def __contains__(self, k):
return k in self.__tokdict
def __len__(self):
return len(self.__toklist)
def __bool__(self):
return not not self.__toklist
__nonzero__ = __bool__
def __iter__(self):
return iter(self.__toklist)
def __reversed__(self):
return iter(self.__toklist[::-1])
def _iterkeys(self):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues(self):
return (self[k] for k in self._iterkeys())
def _iteritems(self):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys (Python 3.x only)."""
values = _itervalues
"""Returns an iterator of all named result values (Python 3.x only)."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 3.x only)."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys(self):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values(self):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items(self):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys(self):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop(self, *args, **kwargs):
"""
Removes and returns item at specified index (default=C{last}).
Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
argument or an integer argument, it will use C{list} semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use C{dict}
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in C{dict.pop()}.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k, v in kwargs.items():
if k == "default":
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified.
Similar to C{dict.get()}.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert(self, index, insStr):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to C{list.insert()}.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name, occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(
value, position + (position > index)
)
def append(self, item):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend(self, itemseq):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear(self):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
def __getattr__(self, name):
try:
return self[name]
except KeyError:
return ""
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([v[0] for v in self.__tokdict[name]])
else:
return ""
def __add__(self, other):
ret = self.copy()
ret += other
return ret
def __iadd__(self, other):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a < 0 else a + offset
otheritems = other.__tokdict.items()
otherdictitems = [
(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
for (k, vlist) in otheritems
for v in vlist
]
for k, v in otherdictitems:
self[k] = v
if isinstance(v[0], ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update(other.__accumNames)
return self
def __radd__(self, other):
if isinstance(other, int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__(self):
return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict))
def __str__(self):
return (
"["
+ ", ".join(
_ustr(i) if isinstance(i, ParseResults) else repr(i)
for i in self.__toklist
)
+ "]"
)
def _asStringList(self, sep=""):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance(item, ParseResults):
out += item._asStringList()
else:
out.append(_ustr(item))
return out
def asList(self):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [
res.asList() if isinstance(res, ParseResults) else res
for res in self.__toklist
]
def asDict(self):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometime you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k, toItem(v)) for k, v in item_fn())
def copy(self):
"""
Returns a new copy of a C{ParseResults} object.
"""
ret = ParseResults(self.__toklist)
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update(self.__accumNames)
ret.__name = self.__name
return ret
def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict(
(v[1], k) for (k, vlist) in self.__tokdict.items() for v in vlist
)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [nl, indent, "<", selfTag, ">"]
for i, res in enumerate(self.__toklist):
if isinstance(res, ParseResults):
if i in namedItems:
out += [
res.asXML(
namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted,
)
]
else:
out += [
res.asXML(
None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted,
)
]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [
nl,
nextLevelIndent,
"<",
resTag,
">",
xmlBodyText,
"</",
resTag,
">",
]
out += [nl, indent, "</", selfTag, ">"]
return "".join(out)
def __lookup(self, sub):
for k, vlist in self.__tokdict.items():
for v, loc in vlist:
if sub is v:
return k
return None
def getName(self):
r"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (
len(self) == 1
and len(self.__tokdict) == 1
and next(iter(self.__tokdict.values()))[0][1] in (0, -1)
):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent="", depth=0, full=True):
"""
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = "\n"
out.append(indent + _ustr(self.asList()))
if full:
if self.haskeys():
items = sorted((str(k), v) for k, v in self.items())
for k, v in items:
if out:
out.append(NL)
out.append("%s%s- %s: " % (indent, (" " * depth), k))
if isinstance(v, ParseResults):
if v:
out.append(v.dump(indent, depth + 1))
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv, ParseResults) for vv in self):
v = self
for i, vv in enumerate(v):
if isinstance(vv, ParseResults):
out.append(
"\n%s%s[%d]:\n%s%s%s"
% (
indent,
(" " * (depth)),
i,
indent,
(" " * (depth + 1)),
vv.dump(indent, depth + 1),
)
)
else:
out.append(
"\n%s%s[%d]:\n%s%s%s"
% (
indent,
(" " * (depth)),
i,
indent,
(" " * (depth + 1)),
_ustr(vv),
)
)
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return (
self.__toklist,
(
self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name,
),
)
def __setstate__(self, state):
self.__toklist = state[0]
(self.__tokdict, par, inAccumNames, self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return dir(type(self)) + list(self.keys())
MutableMapping.register(ParseResults)
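# Registering ParseResults as a virtual subclass makes isinstance(results, MutableMapping)
# checks succeed even though ParseResults does not inherit from the ABC.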
def col(loc, strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
s = strg
return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
def lineno(loc, strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n", 0, loc) + 1
def line(loc, strg):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR + 1 : nextCR]
else:
return strg[lastCR + 1 :]
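# Worked example for the three location helpers above (loc is a 0-based index into strg):
#   s = "abc\ndef"; loc = 5   # points at the 'e'
#   col(loc, s)    -> 2
#   lineno(loc, s) -> 2
#   line(loc, s)   -> 'def'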
def _defaultStartDebugAction(instring, loc, expr):
print(
(
"Match "
+ _ustr(expr)
+ " at loc "
+ _ustr(loc)
+ "(%d,%d)" % (lineno(loc, instring), col(loc, instring))
)
)
def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
print("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction(instring, loc, expr, exc):
print("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
# ~ 'decorator to trim function calls to match the arity of the target'
# ~ def _trim_arity(func, maxargs=3):
# ~ if func in singleArgBuiltins:
# ~ return lambda s,l,t: func(t)
# ~ limit = 0
# ~ foundArity = False
# ~ def wrapper(*args):
# ~ nonlocal limit,foundArity
# ~ while 1:
# ~ try:
# ~ ret = func(*args[limit:])
# ~ foundArity = True
# ~ return ret
# ~ except TypeError:
# ~ if limit == maxargs or foundArity:
# ~ raise
# ~ limit += 1
# ~ continue
# ~ return wrapper
# this version is Python 2.x-3.x cross-compatible
"decorator to trim function calls to match the arity of the target"
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s, l, t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3, 5):
def extract_stack(limit=0):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3, 5, 0) else -2
frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset]
return [frame_summary[:2]]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [frame_summary[:2]]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0] :])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
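# Net effect: parse actions may be written with 0, 1, 2 or 3 parameters; the wrapper
# retries the call while dropping leading arguments, so a one-argument action fn(toks)
# is still invoked correctly when pyparsing calls it as wrapper(s, loc, toks).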
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars(chars):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.setDefaultWhitespaceChars(" \t")
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
def __init__(self, savelist=False):
self.parseAction = list()
self.failAction = None
# ~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = (
True
) # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = (
True
) # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = (None, None, None) # custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy(self):
"""
Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy(self)
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName(self, name):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self, "exception"):
self.exception.msg = self.errmsg
return self
def setResultsName(self, name, listAllMatches=False):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches = True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self, breakFlag=True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod(instring, loc, doActions, callPreParse)
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse, "_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
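    # Illustrative sketch: drop into pdb just before this expression attempts a match.
    #   integer = Word(nums).setBreak()   # breakpoint fires on every match attempt
    #   integer.setBreak(False)           # restores the original parse method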
def setParseAction(self, *fns, **kwargs):
"""
Define one or more actions to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Optional keyword arguments:
- callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction(self, *fns, **kwargs):
"""
Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
See examples in L{I{copy}<copy>}.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
functions passed to C{addCondition} need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
for fn in fns:
            def pa(s, l, t, fn=fn):  # bind fn at definition time so each condition keeps its own function
if not bool(_trim_arity(fn)(s, l, t)):
raise exc_type(s, l, msg)
self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction(self, fn):
"""Define action to perform if parsing fails at this expression.
        Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
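    # Illustrative sketch of a fail action with the signature described above
    # (function and expression names are arbitrary):
    #   def report_failure(s, loc, expr, err):
    #       print("no match for %s at loc %d" % (expr, loc))
    #   integer = Word(nums).setFailAction(report_failure)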
def _skipIgnorables(self, instring, loc):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc, dummy = e._parse(instring, loc)
exprsFound = True
except ParseException:
pass
return loc
def preParse(self, instring, loc):
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
return loc, []
def postParse(self, instring, loc, tokenlist):
return tokenlist
# ~ @profile
def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
debugging = self.debug # and doActions )
if debugging or self.failAction:
# ~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if self.debugActions[0]:
self.debugActions[0](instring, loc, self)
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
try:
try:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
except ParseBaseException as err:
# ~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
if self.failAction:
self.failAction(instring, tokensStart, self, err)
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or preloc >= len(instring):
try:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, preloc, doActions)
tokens = self.postParse(instring, loc, tokens)
retTokens = ParseResults(
tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults
)
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if tokens is not None:
retTokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
except ParseBaseException as err:
# ~ print "Exception raised in user parse action:", err
if self.debugActions[2]:
self.debugActions[2](instring, tokensStart, self, err)
raise
else:
for fn in self.parseAction:
tokens = fn(instring, tokensStart, retTokens)
if tokens is not None:
retTokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
if debugging:
# ~ print ("Matched",self,"->",retTokens.asList())
if self.debugActions[1]:
self.debugActions[1](instring, tokensStart, loc, self, retTokens)
return loc, retTokens
def tryParse(self, instring, loc):
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
raise ParseException(instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
if _OrderedDict is not None:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = _OrderedDict()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
while len(cache) > size:
try:
cache.popitem(False)
except KeyError:
pass
def clear(self):
cache.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
else:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = {}
key_fifo = collections.deque([], size)
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
while len(key_fifo) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
def clear(self):
cache.clear()
key_fifo.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
packrat_cache = (
{}
) # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
HIT, MISS = 0, 1
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return (value[0], value[1].copy())
_parse = _parseNoCache
@staticmethod
def resetCache():
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(
ParserElement.packrat_cache_stats
)
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default=C{128}) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parseString(self, instring, parseAll=False):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
          - explicitly expand the tabs in your input string before calling
C{parseString}
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
# ~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse(instring, 0)
if parseAll:
loc = self.preParse(instring, loc)
se = Empty() + StringEnd()
se._parse(instring, loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc + 1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn(instring, loc)
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc + 1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def transformString(self, instring):
"""
Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
Prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t, s, e in self.scanString(instring):
out.append(instring[lastE:s])
if t:
if isinstance(t, ParseResults):
out += t.asList()
elif isinstance(t, list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr, _flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def searchString(self, instring, maxMatches=_MAX_INT):
"""
Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
try:
return ParseResults(
[t for t, s, e in self.scanString(instring, maxMatches)]
)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional C{maxsplit} argument, to limit the number of splits;
and the optional C{includeSeparators} argument (default=C{False}), if the separating
matching text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t, s, e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other):
"""
Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
converts them to L{Literal}s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
Prints::
Hello, World! -> ['Hello', ',', 'World', '!']
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return And([self, other])
def __radd__(self, other):
"""
Implementation of + operator when left operand is not a C{L{ParserElement}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns C{L{And}} with error stop
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return self + And._ErrorStop() + other
def __rsub__(self, other):
"""
Implementation of - operator when left operand is not a C{L{ParserElement}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other - self
def __mul__(self, other):
"""
Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other, int):
minElements, optElements = other, 0
elif isinstance(other, tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0], int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self * other[0] + ZeroOrMore(self)
elif isinstance(other[0], int) and isinstance(other[1], int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError(
"cannot multiply 'ParserElement' and ('%s','%s') objects",
type(other[0]),
type(other[1]),
)
else:
raise TypeError(
"cannot multiply 'ParserElement' and '%s' objects", type(other)
)
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError(
"second tuple value must be greater or equal to first tuple value"
)
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if optElements:
def makeOptionalList(n):
if n > 1:
return Optional(self + makeOptionalList(n - 1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self] * minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self] * minElements)
return ret
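    # Illustrative sketch of the multiplication forms described above
    # (names are arbitrary):
    #   ab = Literal("ab")
    #   ab * 3          # same as ab + ab + ab
    #   ab * (1, 3)     # 1 to 3 occurrences of "ab"
    #   ab * (2, None)  # 2 or more occurrences of "ab"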
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other):
"""
Implementation of | operator - returns C{L{MatchFirst}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return MatchFirst([self, other])
def __ror__(self, other):
"""
Implementation of | operator when left operand is not a C{L{ParserElement}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other | self
def __xor__(self, other):
"""
Implementation of ^ operator - returns C{L{Or}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return Or([self, other])
def __rxor__(self, other):
"""
Implementation of ^ operator when left operand is not a C{L{ParserElement}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other ^ self
def __and__(self, other):
"""
Implementation of & operator - returns C{L{Each}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return Each([self, other])
def __rand__(self, other):
"""
Implementation of & operator when left operand is not a C{L{ParserElement}}
"""
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other & self
def __invert__(self):
"""
Implementation of ~ operator - returns C{L{NotAny}}
"""
return NotAny(self)
def __call__(self, name=None):
"""
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress(self):
"""
Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress(self)
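    # Illustrative sketch (names are arbitrary): drop grouping punctuation from
    # the returned tokens while still requiring it in the input:
    #   LPAR = Literal("(").suppress()
    #   RPAR = Literal(")").suppress()
    #   group = LPAR + Word(alphas) + RPAR   # "(abc)" -> ['abc']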
def leaveWhitespace(self):
"""
Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars(self, chars):
"""
Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
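    # Illustrative sketch (names are arbitrary): skip only spaces and tabs so
    # that newlines remain significant for this expression:
    #   word = Word(alphas).setWhitespaceChars(" \t")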
def parseWithTabs(self):
"""
Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters.
"""
self.keepTabs = True
return self
def ignore(self, other):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append(Suppress(other.copy()))
return self
def setDebugActions(self, startAction, successAction, exceptionAction):
"""
Enable display of debugging messages while doing pattern matching.
"""
self.debugActions = (
startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction,
)
self.debug = True
return self
def setDebug(self, flag=True):
"""
Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using L{setDebugActions}. Prior to attempting
to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
"""
if flag:
self.setDebugActions(
_defaultStartDebugAction,
_defaultSuccessDebugAction,
_defaultExceptionDebugAction,
)
else:
self.debug = False
return self
def __str__(self):
return self.name
def __repr__(self):
return _ustr(self)
def streamline(self):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion(self, parseElementList):
pass
def validate(self, validateTrace=[]):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion([])
def parseFile(self, file_or_filename, parseAll=False):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self, other):
if isinstance(other, ParserElement):
return self is other or vars(self) == vars(other)
elif isinstance(other, basestring):
return self.matches(other)
else:
return super(ParserElement, self) == other
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self, other):
return self == other
def __rne__(self, other):
return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
        inline microtests of sub expressions while building up a larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(
self,
tests,
parseAll=True,
comment="#",
fullDump=True,
printResults=True,
failureTests=False,
):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- comment - (default=C{'#'}) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default=C{True}) prints test output to stdout
- failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if C{failureTests} is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
allResults = []
comments = []
success = True
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(t)
continue
if not t:
continue
out = ["\n".join(comments), t]
comments = []
try:
t = t.replace(r"\n", "\n")
result = self.parseString(t, parseAll=parseAll)
out.append(result.dump(full=fullDump))
success = success and not failureTests
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if "\n" in t:
out.append(line(pe.loc, t))
out.append(" " * (col(pe.loc, t) - 1) + "^" + fatal)
else:
out.append(" " * pe.loc + "^" + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
if printResults:
if fullDump:
out.append("")
print("\n".join(out))
allResults.append((t, result))
return success, allResults
class Token(ParserElement):
"""
Abstract C{ParserElement} subclass, for defining atomic matching patterns.
"""
def __init__(self):
super(Token, self).__init__(savelist=False)
class Empty(Token):
"""
An empty token, will always match.
"""
def __init__(self):
super(Empty, self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""
A token that will never match.
"""
def __init__(self):
super(NoMatch, self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl(self, instring, loc, doActions=True):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""
Token to exactly match a specified string.
Example::
Literal('blah').parseString('blah') # -> ['blah']
Literal('blah').parseString('blahfooblah') # -> ['blah']
Literal('blah').parseString('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use L{CaselessLiteral}.
For keyword matching (force word break before and after the matched string),
use L{Keyword} or L{CaselessKeyword}.
"""
def __init__(self, matchString):
super(Literal, self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn(
"null string passed to Literal; use Empty() instead",
SyntaxWarning,
stacklevel=2,
)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
# ~ @profile
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar and (
self.matchLen == 1 or instring.startswith(self.match, loc)
):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""
Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}:
- C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
- C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
- C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"
- C{caseless} allows case-insensitive matching, default is C{False}.
Example::
Keyword("start").parseString("start") # -> ['start']
Keyword("start").parseString("starting") # -> Exception
For case-insensitive matching, use L{CaselessKeyword}.
"""
DEFAULT_KEYWORD_CHARS = alphanums + "_$"
def __init__(self, matchString, identChars=None, caseless=False):
super(Keyword, self).__init__()
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn(
"null string passed to Keyword; use Empty() instead",
SyntaxWarning,
stacklevel=2,
)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl(self, instring, loc, doActions=True):
if self.caseless:
if (
(instring[loc : loc + self.matchLen].upper() == self.caselessmatch)
and (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen].upper() not in self.identChars
)
and (loc == 0 or instring[loc - 1].upper() not in self.identChars)
):
return loc + self.matchLen, self.match
else:
if (
instring[loc] == self.firstMatchChar
and (self.matchLen == 1 or instring.startswith(self.match, loc))
and (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen] not in self.identChars
)
and (loc == 0 or instring[loc - 1] not in self.identChars)
):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword, self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars(chars):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
(Contrast with example for L{CaselessKeyword}.)
"""
def __init__(self, matchString):
super(CaselessLiteral, self).__init__(matchString.upper())
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl(self, instring, loc, doActions=True):
if instring[loc : loc + self.matchLen].upper() == self.match:
return loc + self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of L{Keyword}.
Example::
OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
(Contrast with example for L{CaselessLiteral}.)
"""
def __init__(self, matchString, identChars=None):
super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)
def parseImpl(self, instring, loc, doActions=True):
if (instring[loc : loc + self.matchLen].upper() == self.caselessmatch) and (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen].upper() not in self.identChars
):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class CloseMatch(Token):
"""
A variation on L{Literal} which matches "close" matches, that is,
strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
- C{match_string} - string to be matched
- C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
The results from a successful parse will contain the matched text from the input string and the following named results:
- C{mismatches} - a list of the positions within the match_string where mismatches were found
- C{original} - the original match_string used to compare against the input string
If C{mismatches} is an empty list, then the match was an exact match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(self, match_string, maxMismatches=1):
super(CloseMatch, self).__init__()
self.name = match_string
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected %r (with up to %d mismatches)" % (
self.match_string,
self.maxMismatches,
)
self.mayIndexError = False
self.mayReturnEmpty = False
def parseImpl(self, instring, loc, doActions=True):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc, s_m in enumerate(
zip(instring[loc:maxloc], self.match_string)
):
src, mat = s_m
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = match_stringloc + 1
results = ParseResults([instring[start:loc]])
results["original"] = self.match_string
results["mismatches"] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""
Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{excludeChars} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
L{srange} is useful for defining custom character set strings for defining
C{Word} expressions, using range notation from regular expression character sets.
A common mistake is to use C{Word} to match a specific literal string, as in
C{Word("Address")}. Remember that C{Word} uses the string argument to define
I{sets} of matchable characters. This expression would match "Add", "AAA",
"dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
To match an exact literal string, use L{Literal} or L{Keyword}.
pyparsing includes helper strings for building Words:
- L{alphas}
- L{nums}
- L{alphanums}
- L{hexnums}
- L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
- L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- L{printables} (any non-whitespace character)
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums+'-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, excludeChars=",")
"""
def __init__(
self,
initChars,
bodyChars=None,
min=1,
max=0,
exact=0,
asKeyword=False,
excludeChars=None,
):
super(Word, self).__init__()
if excludeChars:
initChars = "".join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = "".join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars:
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if " " not in self.initCharsOrig + self.bodyCharsOrig and (
min == 1 and max == 0 and exact == 0
):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % (
re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),
)
else:
self.reString = "[%s][%s]*" % (
_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),
)
if self.asKeyword:
self.reString = r"\b" + self.reString + r"\b"
try:
self.re = re.compile(self.reString)
except Exception:
self.re = None
def parseImpl(self, instring, loc, doActions=True):
if self.re:
result = self.re.match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
if not (instring[loc] in self.initChars):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min(maxloc, instrlen)
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start > 0 and instring[start - 1] in bodychars) or (
loc < instrlen and instring[loc] in bodychars
):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__(self):
try:
return super(Word, self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s) > 4:
return s[:4] + "..."
else:
return s
if self.initCharsOrig != self.bodyCharsOrig:
self.strRepr = "W:(%s,%s)" % (
charsAsStr(self.initCharsOrig),
charsAsStr(self.bodyCharsOrig),
)
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Regex(Token):
r"""
Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
named parse results.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__(self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex, self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn(
"null string passed to Regex; use Empty() instead",
SyntaxWarning,
stacklevel=2,
)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn(
"invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning,
stacklevel=2,
)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = self.reString = str(pattern)
self.flags = flags
else:
raise ValueError(
"Regex may only be constructed with a string or a compiled RE object"
)
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
result = self.re.match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc, ret
def __str__(self):
try:
return super(Regex, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=C{None})
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
Example::
qs = QuotedString('"')
print(qs.searchString('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
def __init__(
self,
quoteChar,
escChar=None,
escQuote=None,
multiline=False,
unquoteResults=True,
endQuoteChar=None,
convertWhitespaceEscapes=True,
):
super(QuotedString, self).__init__()
        # remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn(
"quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2
)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn(
"endQuoteChar cannot be the empty string",
SyntaxWarning,
stacklevel=2,
)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r"%s(?:[^%s%s]" % (
re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or ""),
)
else:
self.flags = 0
self.pattern = r"%s(?:[^%s\n\r%s]" % (
re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or ""),
)
if len(self.endQuoteChar) > 1:
self.pattern += (
"|(?:"
+ ")|(?:".join(
"%s[^%s]"
% (
re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]),
)
for i in range(len(self.endQuoteChar) - 1, 0, -1)
)
+ ")"
)
if escQuote:
self.pattern += r"|(?:%s)" % re.escape(escQuote)
if escChar:
self.pattern += r"|(?:%s.)" % re.escape(escChar)
self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
self.pattern += r")*%s" % re.escape(self.endQuoteChar)
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn(
"invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning,
stacklevel=2,
)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
result = (
instring[loc] == self.firstQuoteChar
and self.re.match(instring, loc)
or None
)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen : -self.endQuoteCharLen]
if isinstance(ret, basestring):
# replace escaped whitespace
if "\\" in ret and self.convertWhitespaceEscapes:
ws_map = {r"\t": "\t", r"\n": "\n", r"\f": "\f", r"\r": "\r"}
for wslit, wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__(self):
try:
return super(QuotedString, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (
self.quoteChar,
self.endQuoteChar,
)
return self.strRepr
class CharsNotIn(Token):
"""
Token for matching words composed of characters I{not} in a given set (will
include whitespace in matched characters if not listed in the provided exclusion set - see example).
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__(self, notChars, min=1, max=0, exact=0):
super(CharsNotIn, self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = self.minLen == 0
self.mayIndexError = False
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min(start + self.maxLen, len(instring))
while loc < maxlen and (instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__(self):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""
Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class.
"""
whiteStrs = {" ": "<SPC>", "\t": "<TAB>", "\n": "<LF>", "\r": "<CR>", "\f": "<FF>"}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White, self).__init__()
self.matchWhite = ws
self.setWhitespaceChars(
"".join(c for c in self.whiteChars if c not in self.matchWhite)
)
# ~ self.leaveWhitespace()
self.name = "".join(White.whiteStrs[c] for c in self.matchWhite)
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl(self, instring, loc, doActions=True):
if not (instring[loc] in self.matchWhite):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min(maxloc, len(instring))
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
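# Illustrative sketch for White (names are arbitrary): capture a run of spaces
# and tabs as an explicit token instead of silently skipping it:
#   indent = White(" \t")
#   line = indent + Word(alphas)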
class _PositionToken(Token):
def __init__(self):
super(_PositionToken, self).__init__()
self.name = self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""
Token to advance to a specific column of input text; useful for tabular report scraping.
"""
def __init__(self, colno):
super(GoToColumn, self).__init__()
self.col = colno
def preParse(self, instring, loc):
if col(loc, instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
while (
loc < instrlen
and instring[loc].isspace()
and col(loc, instring) != self.col
):
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
thiscol = col(loc, instring)
if thiscol > self.col:
raise ParseException(instring, loc, "Text not in expected column", self)
newloc = loc + self.col - thiscol
ret = instring[loc:newloc]
return newloc, ret
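# Illustrative sketch for GoToColumn (column number and names are arbitrary):
# pick up the field that starts in a fixed column of each report line:
#   field = GoToColumn(20) + Word(alphas)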
class LineStart(_PositionToken):
"""
Matches if current position is at the beginning of a line within the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
print(t)
Prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__(self):
super(LineStart, self).__init__()
self.errmsg = "Expected start of line"
def parseImpl(self, instring, loc, doActions=True):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(_PositionToken):
"""
Matches if current position is at the end of a line within the parse string
"""
def __init__(self):
super(LineEnd, self).__init__()
self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", ""))
self.errmsg = "Expected end of line"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
if instring[loc] == "\n":
return loc + 1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
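# Illustrative sketch for LineEnd (names are arbitrary): accept a value only
# when it is the last token on its line:
#   eol_value = Word(nums) + LineEnd().suppress()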
class StringStart(_PositionToken):
"""
Matches if current position is at the beginning of the parse string
"""
def __init__(self):
super(StringStart, self).__init__()
self.errmsg = "Expected start of text"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse(instring, 0):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""
Matches if current position is at the end of the parse string
"""
def __init__(self):
super(StringEnd, self).__init__()
self.errmsg = "Expected end of text"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
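# Illustrative sketch (assumption): StringEnd is typically appended to a grammar to
# require that the whole input is consumed, e.g.
#   complete = some_expr + StringEnd()
# where some_expr stands for any previously defined parser expression.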
class WordStart(_PositionToken):
"""
Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars=printables):
super(WordStart, self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
if (
instring[loc - 1] in self.wordChars
or instring[loc] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""
Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars=printables):
super(WordEnd, self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True):
instrlen = len(instring)
if instrlen > 0 and loc < instrlen:
if (
instring[loc] in self.wordChars
or instring[loc - 1] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
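# Illustrative sketch (assumption): WordStart/WordEnd emulate regex-style \b word
# boundaries, e.g. to match 'cat' only as a whole word:
#   whole_cat = WordStart(alphanums) + Literal('cat') + WordEnd(alphanums)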
class ParseExpression(ParserElement):
"""
Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
"""
def __init__(self, exprs, savelist=False):
super(ParseExpression, self).__init__(savelist)
if isinstance(exprs, _generatorType):
exprs = list(exprs)
if isinstance(exprs, basestring):
self.exprs = [ParserElement._literalStringClass(exprs)]
elif isinstance(exprs, Iterable):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(ParserElement._literalStringClass, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list(exprs)
except TypeError:
self.exprs = [exprs]
self.callPreparse = False
def __getitem__(self, i):
return self.exprs[i]
def append(self, other):
self.exprs.append(other)
self.strRepr = None
return self
def leaveWhitespace(self):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore(self, other):
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super(ParseExpression, self).ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
else:
super(ParseExpression, self).ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
return self
def __str__(self):
try:
return super(ParseExpression, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs))
return self.strRepr
def streamline(self):
super(ParseExpression, self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if len(self.exprs) == 2:
other = self.exprs[0]
if (
isinstance(other, self.__class__)
and not (other.parseAction)
and other.resultsName is None
and not other.debug
):
self.exprs = other.exprs[:] + [self.exprs[1]]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if (
isinstance(other, self.__class__)
and not (other.parseAction)
and other.resultsName is None
and not other.debug
):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def setResultsName(self, name, listAllMatches=False):
ret = super(ParseExpression, self).setResultsName(name, listAllMatches)
return ret
def validate(self, validateTrace=[]):
tmp = validateTrace[:] + [self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion([])
def copy(self):
ret = super(ParseExpression, self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
May also be constructed using the C{'-'} operator, which will suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"),name_expr("name"),integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop, self).__init__(*args, **kwargs)
self.name = "-"
self.leaveWhitespace()
def __init__(self, exprs, savelist=True):
super(And, self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars(self.exprs[0].whiteChars)
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl(self, instring, loc, doActions=True):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse(
instring, loc, doActions, callPreParse=False
)
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse(instring, loc, doActions)
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(
instring, len(instring), self.errmsg, self
)
else:
loc, exprtokens = e._parse(instring, loc, doActions)
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
return self.append(other) # And( [ self, other ] )
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
if not e.mayReturnEmpty:
break
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs, savelist=False):
super(Or, self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse(instring, loc)
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
matches.sort(key=lambda x: -x[0])
for _, e in matches:
try:
return e._parse(instring, loc, doActions)
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ixor__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
return self.append(other) # Or( [ self, other ] )
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
class MatchFirst(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs, savelist=False):
super(MatchFirst, self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse(instring, loc, doActions)
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ior__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
return self.append(other) # MatchFirst( [ self, other ] )
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
class Each(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
Example::
color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
shape_spec.runTests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__(self, exprs, savelist=True):
super(Each, self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl(self, instring, loc, doActions=True):
if self.initExprGroups:
self.opt1map = dict(
(id(e.expr), e) for e in self.exprs if isinstance(e, Optional)
)
opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)]
opt2 = [
e
for e in self.exprs
if e.mayReturnEmpty and not isinstance(e, Optional)
]
self.optionals = opt1 + opt2
self.multioptionals = [
e.expr for e in self.exprs if isinstance(e, ZeroOrMore)
]
self.multirequired = [
e.expr for e in self.exprs if isinstance(e, OneOrMore)
]
self.required = [
e
for e in self.exprs
if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))
]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse(instring, tmpLoc)
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e), e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(
instring, loc, "Missing one or more required elements (%s)" % missing
)
# add any unmatched Optionals, in case they have default values defined
matchOrder += [
e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt
]
resultlist = []
for e in matchOrder:
loc, results = e._parse(instring, loc, doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return loc, finalResults
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
class ParseElementEnhance(ParserElement):
"""
Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
"""
def __init__(self, expr, savelist=False):
super(ParseElementEnhance, self).__init__(savelist)
if isinstance(expr, basestring):
if issubclass(ParserElement._literalStringClass, Token):
expr = ParserElement._literalStringClass(expr)
else:
expr = ParserElement._literalStringClass(Literal(expr))
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars(expr.whiteChars)
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl(self, instring, loc, doActions=True):
if self.expr is not None:
return self.expr._parse(instring, loc, doActions, callPreParse=False)
else:
raise ParseException("", loc, self.errmsg, self)
def leaveWhitespace(self):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore(self, other):
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super(ParseElementEnhance, self).ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
else:
super(ParseElementEnhance, self).ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
return self
def streamline(self):
super(ParseElementEnhance, self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion(self, parseElementList):
if self in parseElementList:
raise RecursiveGrammarException(parseElementList + [self])
subRecCheckList = parseElementList[:] + [self]
if self.expr is not None:
self.expr.checkRecursion(subRecCheckList)
def validate(self, validateTrace=[]):
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__(self):
try:
return super(ParseElementEnhance, self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr))
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""
Lookahead matching of the given parse expression. C{FollowedBy}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__(self, expr):
super(FollowedBy, self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
self.expr.tryParse(instring, loc)
return loc, []
class NotAny(ParseElementEnhance):
"""
Lookahead to disallow matching with the given parse expression. C{NotAny}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression does I{not} match at the current
position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator.
    Example::
        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
        # illustrative usage: use NotAny (via the '~' operator) to keep keywords
        # from matching as identifiers
        ident = ~(AND | OR | NOT) + Word(alphas)
    """
def __init__(self, expr):
super(NotAny, self).__init__(expr)
# ~ self.leaveWhitespace()
        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(); don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, " + _ustr(self.expr)
def parseImpl(self, instring, loc, doActions=True):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
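# _MultipleMatch factors out the repetition logic shared by OneOrMore and ZeroOrMore,
# including support for an optional stopOn sentinel that terminates the repetition.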
class _MultipleMatch(ParseElementEnhance):
def __init__(self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = ParserElement._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
def parseImpl(self, instring, loc, doActions=True):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False)
try:
            hasIgnoreExprs = bool(self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables(instring, loc)
else:
preloc = loc
loc, tmptokens = self_expr_parse(instring, preloc, doActions)
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException, IndexError):
pass
return loc, tokens
class OneOrMore(_MultipleMatch):
"""
Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stopOn attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parseString(text).pprint()
"""
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
class ZeroOrMore(_MultipleMatch):
"""
Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example: similar to L{OneOrMore}
"""
def __init__(self, expr, stopOn=None):
super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException, IndexError):
return loc, []
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
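# _NullToken is a falsy, empty-string placeholder; the _optionalNotMatched singleton
# below marks "no default value supplied" for Optional.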
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""
Optional matching of the given expression.
Parameters:
     - expr - expression that may match at most once
- default (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
zip.runTests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
def __init__(self, expr, default=_optionalNotMatched):
super(Optional, self).__init__(expr, savelist=False)
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)
except (ParseException, IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([self.defaultValue])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [self.defaultValue]
else:
tokens = []
return loc, tokens
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""
Token for skipping over all undefined text until the matched expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default=C{False}) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default=C{None}) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default=C{None}) define expressions that are not allowed to be
          included in the skipped text; if found before the target expression is found,
the SkipTo is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quotedString)
string_data.setParseAction(tokenMap(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.searchString(report):
            print(tkt.dump())
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__(self, other, include=False, ignore=None, failOn=None):
super(SkipTo, self).__init__(other)
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if isinstance(failOn, basestring):
self.failOn = ParserElement._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for " + _ustr(self.expr)
def parseImpl(self, instring, loc, doActions=True):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = (
self.failOn.canParseNext if self.failOn is not None else None
)
self_ignoreExpr_tryParse = (
self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
)
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring, loc, doActions, callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
See L{ParseResults.pprint} for an example of a recursive parser created using
C{Forward}.
"""
def __init__(self, other=None):
super(Forward, self).__init__(other, savelist=False)
def __lshift__(self, other):
if isinstance(other, basestring):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars(self.expr.whiteChars)
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace(self):
self.skipWhitespace = False
return self
def streamline(self):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate(self, validateTrace=[]):
if self not in validateTrace:
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__(self):
if hasattr(self, "name"):
return self.name
return self.__class__.__name__ + ": ..."
# stubbed out for now - creates awful memory and perf issues
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward, self).copy()
else:
ret = Forward()
ret <<= self
return ret
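# _ForwardNoRecurse is a stand-in class whose __str__ simply returns "...", used to
# break the recursion when rendering a self-referential Forward (see the stubbed-out
# code in Forward.__str__ above).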
class _ForwardNoRecurse(Forward):
def __str__(self):
return "..."
class TokenConverter(ParseElementEnhance):
"""
Abstract subclass of C{ParseExpression}, for converting parsed results.
"""
def __init__(self, expr, savelist=False):
super(TokenConverter, self).__init__(expr) # , savelist )
self.saveAsList = False
class Combine(TokenConverter):
"""
Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parseString('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parseString('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__(self, expr, joinString="", adjacent=True):
super(Combine, self).__init__(expr)
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore(self, other):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super(Combine, self).ignore(other)
return self
def postParse(self, instring, loc, tokenlist):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults(
["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
)
if self.resultsName and retToks.haskeys():
return [retToks]
else:
return retToks
class Group(TokenConverter):
"""
Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Optional(delimitedList(term))
print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
func = ident + Group(Optional(delimitedList(term)))
print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
"""
def __init__(self, expr):
super(Group, self).__init__(expr)
self.saveAsList = True
def postParse(self, instring, loc, tokenlist):
return [tokenlist]
class Dict(TokenConverter):
"""
Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parseString(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.asDict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at L{ParseResults} of accessing fields by results name.
"""
def __init__(self, expr):
super(Dict, self).__init__(expr)
self.saveAsList = True
def postParse(self, instring, loc, tokenlist):
for i, tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey, int):
ikey = _ustr(tok[0]).strip()
if len(tok) == 1:
tokenlist[ikey] = _ParseResultsWithOffset("", i)
elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
else:
dictvalue = tok.copy() # ParseResults(i)
del dictvalue[0]
if len(dictvalue) != 1 or (
isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
if self.resultsName:
return [tokenlist]
else:
return tokenlist
class Suppress(TokenConverter):
"""
Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parseString(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parseString(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
(See also L{delimitedList}.)
"""
def postParse(self, instring, loc, tokenlist):
return []
def suppress(self):
return self
class OnlyOnce(object):
"""
Wrapper for parse actions, to ensure they are only called once.
"""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self, s, l, t):
if not self.called:
results = self.callable(s, l, t)
self.called = True
return results
raise ParseException(s, l, "")
def reset(self):
self.called = False
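# Illustrative sketch (assumption): wrap a parse action in OnlyOnce when it should
# fire a single time; call reset() to re-arm it, e.g.
#   expr.setParseAction(OnlyOnce(my_action))   # my_action is a hypothetical callable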
def traceParseAction(f):
"""
Decorator for debugging parse actions.
When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s, l, t = paArgs[-3:]
if len(paArgs) > 3:
thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
sys.stderr.write(
">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t)
)
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc))
raise
sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret))
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList(expr, delim=",", combine=False):
"""
Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace, and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..."
if combine:
return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)
else:
return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)
def countedArray(expr, intExpr=None):
"""
Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
    The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
def countFieldParseAction(s, l, t):
n = t[0]
arrayExpr << (n and Group(And([expr] * n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t: int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return (intExpr + arrayExpr).setName("(len) " + _ustr(expr) + "...")
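# _flatten recursively flattens arbitrarily nested lists into a single flat list;
# it is used by the matchPrevious* helpers below.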
def _flatten(L):
ret = []
for i in L:
if isinstance(i, list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s, l, t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName("(prev) " + _ustr(expr))
return rep
def matchPreviousExpr(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s, l, t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s, l, t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("", 0, "")
rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName("(prev) " + _ustr(expr))
return rep
def _escapeRegexRangeChars(s):
# ~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c, _bslash + c)
s = s.replace("\n", r"\n")
s = s.replace("\t", r"\t")
return _ustr(s)
def oneOf(strs, caseless=False, useRegex=True):
"""
Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of string literals
- caseless - (default=C{False}) - treat all literals as caseless
- useRegex - (default=C{True}) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if caseless:
isequal = lambda a, b: a.upper() == b.upper()
masks = lambda a, b: b.upper().startswith(a.upper())
parseElementClass = CaselessLiteral
else:
isequal = lambda a, b: a == b
masks = lambda a, b: b.startswith(a)
parseElementClass = Literal
symbols = []
if isinstance(strs, basestring):
symbols = strs.split()
elif isinstance(strs, Iterable):
symbols = list(strs)
else:
warnings.warn(
"Invalid argument to oneOf, expected string or iterable",
SyntaxWarning,
stacklevel=2,
)
if not symbols:
return NoMatch()
i = 0
while i < len(symbols) - 1:
cur = symbols[i]
for j, other in enumerate(symbols[i + 1 :]):
if isequal(other, cur):
del symbols[i + j + 1]
break
elif masks(cur, other):
del symbols[i + j + 1]
symbols.insert(i, other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
# ~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols) == len("".join(symbols)):
return Regex(
"[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)
).setName(" | ".join(symbols))
else:
return Regex("|".join(re.escape(sym) for sym in symbols)).setName(
" | ".join(symbols)
)
except Exception:
warnings.warn(
"Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning,
stacklevel=2,
)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(
" | ".join(symbols)
)
def dictOf(key, value):
"""
Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict(ZeroOrMore(Group(key + value)))
def originalTextFor(expr, asString=True):
"""
Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns a string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s, loc, t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s, l, t: s[t._original_start : t._original_end]
else:
def extractText(s, l, t):
t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""
Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty.
"""
return TokenConverter(expr).setParseAction(lambda t: t[0])
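# Illustrative sketch (assumption): ungroup unwraps a single grouped result, e.g.
#   ungroup(Group(delimitedList(Word(alphas))))
# yields the inner token list instead of a one-element nested list.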
def locatedExpr(expr):
"""
Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
Be careful if the input text contains C{<TAB>} characters, you may want to call
C{L{ParserElement.parseWithTabs}}
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s, l, t: l)
return Group(
locator("locn_start")
+ expr("value")
+ locator.copy().leaveWhitespace()("locn_end")
)
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
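# internal expressions used by srange() below to parse regex-like '[...]'
# character-range specifications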
_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(
lambda s, l, t: t[0][1]
)
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(
lambda s, l, t: unichr(int(t[0].lstrip(r"\0x"), 16))
)
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(
lambda s, l, t: unichr(int(t[0][1:], 8))
)
_singleChar = (
_escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r"\]", exact=1)
)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = (
Literal("[")
+ Optional("^").setResultsName("negate")
+ Group(OneOrMore(_charRange | _singleChar)).setResultsName("body")
+ "]"
)
def srange(s):
r"""
Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as C{\-} or C{\]})
- an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
(C{\0x##} is also supported for backwards compatibility)
- an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
- a range of any of the above, separated by a dash (C{'a-z'}, etc.)
- any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
"""
_expanded = (
lambda p: p
if not isinstance(p, ParseResults)
else "".join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
)
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except Exception:
return ""
def matchOnlyAtCol(n):
"""
Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg, locn, toks):
if col(locn, strg) != n:
raise ParseException(strg, locn, "matched token not at column %d" % n)
return verifyCol
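# Illustrative sketch (assumption): attach matchOnlyAtCol as a parse action to require
# that a token starts in a given column, e.g.
#   first_col_word = Word(alphas).setParseAction(matchOnlyAtCol(1))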
def replaceWith(replStr):
"""
Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
Example::
num = Word(nums).setParseAction(lambda toks: int(toks[0]))
na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
term = na | num
OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s, l, t: [replStr]
def removeQuotes(s, l, t):
"""
Helper parse action for removing quotation marks from parsed quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def tokenMap(func, *args):
"""
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
args are passed, they are forwarded to the given function as additional arguments after
the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
parsed data to an integer using base 16.
    Example (compare the last example to the one in L{ParserElement.transformString})::
hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
hex_ints.runTests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).setParseAction(tokenMap(str.upper))
OneOrMore(upperword).runTests('''
my kingdom for a horse
''')
wd = Word(alphas).setParseAction(tokenMap(str.title))
OneOrMore(wd).setParseAction(' '.join).runTests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s, l, t):
return [func(tokn, *args) for tokn in t]
try:
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
except Exception:
func_name = str(func)
pa.__name__ = func_name
return pa
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr, basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas, alphanums + "_-:")
if xml:
tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
openTag = (
Suppress("<")
+ tagStr("tag")
+ Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
+ Optional("/", default=[False])
.setResultsName("empty")
.setParseAction(lambda s, l, t: t[0] == "/")
+ Suppress(">")
)
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(
printablesLessRAbrack
)
openTag = (
Suppress("<")
+ tagStr("tag")
+ Dict(
ZeroOrMore(
Group(
tagAttrName.setParseAction(downcaseTokens)
+ Optional(Suppress("=") + tagAttrValue)
)
)
)
+ Optional("/", default=[False])
.setResultsName("empty")
.setParseAction(lambda s, l, t: t[0] == "/")
+ Suppress(">")
)
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName(
"start" + "".join(resname.replace(":", " ").title().split())
).setName("<%s>" % resname)
closeTag = closeTag.setResultsName(
"end" + "".join(resname.replace(":", " ").title().split())
).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
a,a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> http://pyparsing.wikispaces.com
"""
return _makeTags(tagStr, False)
def makeXMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
tags only in the given upper/lower case.
Example: similar to L{makeHTMLTags}
"""
return _makeTags(tagStr, True)
def withAttribute(*args, **attrDict):
"""
Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
of filter attributes names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k, v) for k, v in attrs]
def pa(s, l, tokens):
for attrName, attrValue in attrs:
if attrName not in tokens:
raise ParseException(s, l, "no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(
s,
l,
"attribute '%s' has value '%s', must be '%s'"
% (attrName, tokens[attrName], attrValue),
)
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=""):
"""
Simplified version of C{L{withAttribute}} when matching on a div class - made
difficult because C{class} is a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr: classname})
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation(baseExpr, opList, lpar=Suppress("("), rpar=Suppress(")")):
"""
Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions. The generated parser will also recognize the use
of parentheses to override operator precedences (see example below).
Note: if you define a deep operator list, you may see performance issues
when using infixNotation. See L{ParserElement.enablePackrat} for a
mechanism to potentially improve your parser performance.
Parameters:
    - baseExpr - expression representing the most basic element for the nested expression grammar
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted); if the parse action
is passed a tuple or list of functions, this is equivalent to
calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
- lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- rpar - expression for matching right-parentheses (default=C{Suppress(')')})
Example::
# simple example of four-function arithmetic with ints and variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
ret = Forward()
lastExpr = baseExpr | (lpar + ret + rpar)
for i, operDef in enumerate(opList):
opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError(
"if numterms=3, opExpr must be a tuple or list of two expressions"
)
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group(
lastExpr + OneOrMore(opExpr)
)
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group(
lastExpr + OneOrMore(opExpr + lastExpr)
)
else:
matchExpr = FollowedBy(lastExpr + lastExpr) + Group(
lastExpr + OneOrMore(lastExpr)
)
elif arity == 3:
matchExpr = FollowedBy(
lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
) + Group(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr)
else:
raise ValueError(
"operator must be unary (1), binary (2), or ternary (3)"
)
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group(
opExpr + thisExpr
)
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group(
lastExpr + OneOrMore(opExpr + thisExpr)
)
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group(
lastExpr + OneOrMore(thisExpr)
)
elif arity == 3:
matchExpr = FollowedBy(
lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
else:
raise ValueError(
"operator must be unary (1), binary (2), or ternary (3)"
)
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
if isinstance(pa, (tuple, list)):
matchExpr.setParseAction(*pa)
else:
matchExpr.setParseAction(pa)
thisExpr <<= matchExpr.setName(termName) | lastExpr
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
dblQuotedString = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
).setName("string enclosed in double quotes")
sglQuotedString = Combine(
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).setName("string enclosed in single quotes")
quotedString = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
| Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).setName("quotedString using single or double quotes")
unicodeString = Combine(_L("u") + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""
Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
- closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
- content - expression for items within the nested lists (default=C{None})
- ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR,RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener, basestring) and isinstance(closer, basestring):
if len(opener) == 1 and len(closer) == 1:
if ignoreExpr is not None:
content = Combine(
OneOrMore(
~ignoreExpr
+ CharsNotIn(
opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
exact=1,
)
)
).setParseAction(lambda t: t[0].strip())
else:
content = empty.copy() + CharsNotIn(
opener + closer + ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t: t[0].strip())
else:
if ignoreExpr is not None:
content = Combine(
OneOrMore(
~ignoreExpr
+ ~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
)
).setParseAction(lambda t: t[0].strip())
else:
content = Combine(
OneOrMore(
~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
)
).setParseAction(lambda t: t[0].strip())
else:
raise ValueError(
"opening and closing arguments must be strings if no content expression is given"
)
ret = Forward()
if ignoreExpr is not None:
ret <<= Group(
Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
)
else:
ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
ret.setName("nested %s%s expression" % (opener, closer))
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""
Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond the
      current level; set to False for block of left-most statements
(default=C{True})
A valid block must contain at least one C{blockStatement}.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group( funcDecl + func_body )
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << ( funcDef | assignment | identifier )
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
def checkPeerIndent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s, l, "illegal nesting")
raise ParseException(s, l, "not a peer entry")
def checkSubIndent(s, l, t):
curCol = col(l, s)
if curCol > indentStack[-1]:
indentStack.append(curCol)
else:
raise ParseException(s, l, "not a subentry")
def checkUnindent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if not (indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s, l, "not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName("INDENT")
PEER = Empty().setParseAction(checkPeerIndent).setName("")
UNDENT = Empty().setParseAction(checkUnindent).setName("UNINDENT")
if indent:
smExpr = Group(
Optional(NL)
+
# ~ FollowedBy(blockStatementExpr) +
INDENT
+ (OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL)))
+ UNDENT
)
else:
smExpr = Group(
Optional(NL) + (OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL)))
)
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName("indented block")
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag, anyCloseTag = makeHTMLTags(
Word(alphas, alphanums + "_:").setName("any tag")
)
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), "><& \"'"))
commonHTMLEntity = Regex(
"&(?P<entity>" + "|".join(_htmlEntityMap.keys()) + ");"
).setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
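# Illustrative sketch (assumed usage, not in the original source): replaceHTMLEntity is
# typically attached to commonHTMLEntity as a parse action, e.g.
#   unescaper = commonHTMLEntity.copy().setParseAction(replaceHTMLEntity)
#   print(unescaper.transformString("5 > 3 & 2 < 4"))  # -> 5 > 3 & 2 < 4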
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").setName(
"C style comment"
)
"Comment of the form C{/* ... */}"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"
cppStyleComment = Combine(
Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dblSlashComment
).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"
_commasepitem = (
Combine(
OneOrMore(
Word(printables, excludeChars=",")
+ Optional(Word(" \t") + ~Literal(",") + ~LineEnd())
)
)
.streamline()
.setName("commaItem")
)
commaSeparatedList = delimitedList(
Optional(quotedString.copy() | _commasepitem, default="")
).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""
Here are some common low-level expressions that may be useful in jump-starting parser development:
- numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
- common L{programming identifiers<identifier>}
- network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
- ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
- L{UUID<uuid>}
- L{comma-separated list<comma_separated_list>}
Parse actions:
- C{L{convertToInteger}}
- C{L{convertToFloat}}
- C{L{convertToDate}}
- C{L{convertToDatetime}}
- C{L{stripHTMLTags}}
- C{L{upcaseTokens}}
- C{L{downcaseTokens}}
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convertToInteger = tokenMap(int)
"""
Parse action for converting parsed integers to Python int
"""
convertToFloat = tokenMap(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16))
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = (
Regex(r"[+-]?\d+").setName("signed integer").setParseAction(convertToInteger)
)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (
signed_integer().setParseAction(convertToFloat)
+ "/"
+ signed_integer().setParseAction(convertToFloat)
).setName("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.addParseAction(lambda t: t[0] / t[-1])
mixed_integer = (
fraction | signed_integer + Optional(Optional("-").suppress() + fraction)
).setName("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.addParseAction(sum)
real = Regex(r"[+-]?\d+\.\d*").setName("real number").setParseAction(convertToFloat)
"""expression that parses a floating point number and returns a float"""
sci_real = (
Regex(r"[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)")
.setName("real number with scientific notation")
.setParseAction(convertToFloat)
)
"""expression that parses a floating point number with optional scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = (
Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
.setName("fnumber")
.setParseAction(convertToFloat)
)
"""any int or real number, returned as float"""
identifier = Word(alphas + "_", alphanums + "_").setName("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(
r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
).setName("IPv4 address")
"IPv4 address (C{0.0.0.0 - 255.255.255.255})"
_ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").setName("hex_integer")
_full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).setName(
"full IPv6 address"
)
_short_ipv6_address = (
Optional(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+ "::"
+ Optional(_ipv6_part + (":" + _ipv6_part) * (0, 6))
).setName("short IPv6 address")
_short_ipv6_address.addCondition(
lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
ipv6_address = Combine(
(_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName(
"IPv6 address"
)
).setName("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(
r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
).setName("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convertToDate(fmt="%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(s, l, t):
try:
return datetime.strptime(t[0], fmt).date()
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
@staticmethod
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
"""
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s, l, t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(
r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
).setName("ISO8601 date")
"ISO8601 date (C{yyyy-mm-dd})"
iso8601_datetime = Regex(
r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
).setName("ISO8601 datetime")
"ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").setName("UUID")
"UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
_html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
@staticmethod
def stripHTMLTags(s, l, tokens):
"""
Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
td,td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
"""
return pyparsing_common._html_stripper.transformString(tokens[0])
_commasepitem = (
Combine(
OneOrMore(
~Literal(",")
+ ~LineEnd()
+ Word(printables, excludeChars=",")
+ Optional(White(" \t"))
)
)
.streamline()
.setName("commaItem")
)
comma_separated_list = delimitedList(
Optional(quotedString.copy() | _commasepitem, default="")
).setName("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
"""Parse action to convert tokens to upper case."""
downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
"""Parse action to convert tokens to lower case."""
if __name__ == "__main__":
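    # Build a small SELECT-statement grammar to exercise the helpers defined above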
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = "*" | columnNameList
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = (
selectToken("command")
+ columnSpec("columns")
+ fromToken
+ tableNameList("tables")
)
# demo runTests method, including embedded comments in test string
simpleSQL.runTests(
"""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
"""
)
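    # any int or real number, returned as the appropriate type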
pyparsing_common.number.runTests(
"""
100
-100
+100
3.14159
6.02e23
1e-12
"""
)
# any int or real number, returned as float
pyparsing_common.fnumber.runTests(
"""
100
-100
+100
3.14159
6.02e23
1e-12
"""
)
pyparsing_common.hex_integer.runTests(
"""
100
FF
"""
)
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests(
"""
12345678-1234-5678-1234-567812345678
"""
)
| [
"[email protected]"
] | |
5d00565d21a9ad4f8942b1f3c6dd71b679e404a9 | 722386e8cb2be70e3a59e4e4667ad2733d84cb93 | /fishc/列表/test1.py | 0634933ace66d7b1339a02e7d2431af7fe045326 | [] | no_license | yuansuixin/Python_Learning | 15720a33c5d3d4e2e3b2f5214fdbfb4c3d1ed92e | 40aa8d0d034599f448f9125b34897648e87c8f37 | refs/heads/master | 2021-03-31T01:04:10.830905 | 2018-03-11T09:55:49 | 2018-03-11T09:55:49 | 124,743,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | member=['米兔',88,'夜',90,'小甲鱼',87,'意境',56,'求无斜阳',99]
# for i in member:
# print(i)
# temp=0
# for i in member :
# temp+=1
# print(i,end=" ")
# if temp%2==0:
# print()
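# Print each name together with the score that follows it (pairs at even indices)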
for i in range(len(member)):
if i%2==0:
print(member[i],member[i+1])
| [
"[email protected]"
] | |
53d2d54f181fbd6d3e304ad89a0d8e9ba7676558 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-592.py | 4617a0f91c5b38f9b018301b8a830d082707ec39 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,750 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
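# Illustrative usage sketch (not part of the benchmark): a Vector grows on demand,
# e.g. after v = Vector(); v.append(7); v.append(9), v.get(1) returns 9 and
# v.length() returns 2.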
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
    def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
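# (each pass removes every later element divisible by the current one, so only primes
#  remain when the vector is initialized with vrange(2, n))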
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
a605ac5c7c09241b10bfc550d7dcc7f39dea9c94 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nngravel.py | ffff4b8fe8373362d4a531935536a8b9d9e1aa36 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 876 | py | ii = [('FerrSDO3.py', 2), ('RennJIT.py', 2), ('ShawHDE.py', 1), ('MartHSI2.py', 1), ('LeakWTI2.py', 2), ('KembFJ1.py', 2), ('WilbRLW5.py', 1), ('PeckJNG.py', 1), ('KnowJMM.py', 1), ('AdamWEP.py', 4), ('ClarGE2.py', 1), ('WilkJMC2.py', 1), ('KiddJAE.py', 22), ('CrokTPS.py', 3), ('ClarGE.py', 1), ('LandWPA.py', 1), ('BuckWGM.py', 10), ('LyelCPG.py', 30), ('GilmCRS.py', 1), ('DaltJMA.py', 1), ('WestJIT2.py', 1), ('AinsWRR.py', 2), ('MedwTAI.py', 4), ('BackGNE.py', 5), ('LeakWTI4.py', 1), ('LeakWTI.py', 1), ('MedwTAI2.py', 1), ('SoutRD.py', 1), ('MartHRW.py', 1), ('BabbCEM.py', 2), ('FitzRNS4.py', 39), ('CoolWHM3.py', 2), ('FitzRNS.py', 3), ('ThomGLG.py', 1), ('KembFJ2.py', 2), ('WilbRLW3.py', 1), ('JacoWHI.py', 1), ('DibdTRL.py', 1), ('FitzRNS2.py', 2), ('MartHSI.py', 2), ('LyelCPG3.py', 75), ('BeckWRE.py', 1), ('ChalTPW.py', 1), ('KirbWPW.py', 4), ('ClarGE4.py', 3)] | [
"[email protected]"
] | |
3dd03b2ae089b7517063f6fc0366db46c85811ea | 4cd0631100e099e9b154b12b234715ddee0711d3 | /model/FastSCNNX10.py | 8a64c7ac424bd7adb6d5b991f892e91adb8962f6 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | Ethan-ye/Efficient-Segmentation-Networks | d6dd029c76cb46b89ac00ee2f6a49d9ddcd99a3a | 27272e43126a507a6d93b21cd2372f5432f61237 | refs/heads/master | 2023-04-22T11:10:23.256349 | 2021-05-07T05:04:40 | 2021-05-07T05:12:38 | 281,823,847 | 0 | 0 | MIT | 2020-07-23T01:50:42 | 2020-07-23T01:50:41 | null | UTF-8 | Python | false | false | 48,630 | py | ##################################################################################
# Fast-SCNN: Fast Semantic Segmentation Network
# Paper-Link: https://arxiv.org/pdf/1902.04502.pdf
##################################################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
__all__ = ["FastSCNNX10"]
class _ConvBNReLU(nn.Module):
"""Conv-BN-ReLU"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, **kwargs):
super(_ConvBNReLU, self).__init__()
if stride ==2:
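            # In this X10 variant a stride-2 conv is split into a 2x2 stride-1 conv
            # followed by a 3x3 stride-2 conv, each with BatchNorm + ReLU.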
self.conv = nn.Sequential(
nn.Conv2d(in_channels, in_channels, 2, 1, 0, bias=False),
nn.BatchNorm2d(in_channels),
nn.ReLU(True),
nn.Conv2d(in_channels, out_channels, 3, 2, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
else:
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class _DSConv(nn.Module):
"""Depthwise Separable Convolutions"""
def __init__(self, dw_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(_DSConv, self).__init__()
if stride == 2:
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, dw_channels, 2, 1, 0, groups=dw_channels, bias=False),
nn.BatchNorm2d(dw_channels),
nn.ReLU(True),
nn.Conv2d(dw_channels, dw_channels, 1, bias=False),
nn.BatchNorm2d(dw_channels),
nn.ReLU(True),
nn.Conv2d(dw_channels, dw_channels, 3, 2, 1, groups=dw_channels, bias=False),
nn.BatchNorm2d(dw_channels),
nn.ReLU(True),
nn.Conv2d(dw_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
else:
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, dw_channels, kernel_size, stride, padding, groups=dw_channels, bias=False),
nn.BatchNorm2d(dw_channels),
nn.ReLU(True),
nn.Conv2d(dw_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class _DWConv(nn.Module):
"""Depthwise Convolutions"""
def __init__(self, dw_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(_DWConv, self).__init__()
if stride ==2:
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, dw_channels, 2, 1, 0, groups=dw_channels, bias=False),
nn.BatchNorm2d(dw_channels),
nn.ReLU(True),
nn.Conv2d(dw_channels, out_channels, 3, 2, 1, groups=dw_channels, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
else:
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, out_channels, kernel_size, stride, padding, groups=dw_channels, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class LinearBottleneck(nn.Module):
"""LinearBottleneck used in MobileNetV2"""
def __init__(self, in_channels, out_channels, t=6, kernel_size=3, stride=1, padding=1):
super(LinearBottleneck, self).__init__()
self.use_shortcut = stride == 1 and in_channels == out_channels
self.block = nn.Sequential(
# pw
_ConvBNReLU(in_channels, in_channels * t, 1),
# dw
_DWConv(in_channels * t, in_channels * t, kernel_size, stride, padding),
# pw-linear
nn.Conv2d(in_channels * t, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
out = self.block(x)
if self.use_shortcut:
out = x + out
return out
class PyramidPooling(nn.Module):
"""Pyramid pooling module"""
def __init__(self, in_channels, out_channels, **kwargs):
super(PyramidPooling, self).__init__()
inter_channels = int(in_channels / 4)
self.conv1 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv2 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv3 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv4 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.out = _ConvBNReLU(in_channels * 2, out_channels, 1)
def pool(self, x, size):
avgpool = nn.AdaptiveAvgPool2d(size)
return avgpool(x)
def upsample(self, x, size):
return F.interpolate(x, size, mode='bilinear', align_corners=True)
def forward(self, x):
size = x.size()[2:]
feat1 = self.upsample(self.conv1(self.pool(x, 1)), size)
feat2 = self.upsample(self.conv2(self.pool(x, 2)), size)
feat3 = self.upsample(self.conv3(self.pool(x, 3)), size)
feat4 = self.upsample(self.conv4(self.pool(x, 6)), size)
x = torch.cat([x, feat1, feat2, feat3, feat4], dim=1)
x = self.out(x)
return x
class LearningToDownsample(nn.Module):
"""Learning to downsample module"""
def __init__(self, dw_channels1=32, dw_channels2=48, out_channels=64, **kwargs):
super(LearningToDownsample, self).__init__()
self.conv = _ConvBNReLU(3, dw_channels1, 3, 2, 0)
self.dsconv1 = _DSConv(dw_channels1, dw_channels2, 3, 2, 0)
self.dsconv2 = _DSConv(dw_channels2, out_channels, 3, 2, 0)
def forward(self, x):
x = self.conv(x)
x = self.dsconv1(x)
x = self.dsconv2(x)
return x
class GlobalFeatureExtractor(nn.Module):
"""Global feature extractor module"""
def __init__(self, in_channels=64, block_channels=(64, 96, 128),
out_channels=128, t=6, num_blocks=(3, 3, 3), **kwargs):
super(GlobalFeatureExtractor, self).__init__()
self.bottleneck1 = self._make_layer(LinearBottleneck, in_channels, block_channels[0], num_blocks[0], t, 3, 2, 0)
self.bottleneck2 = self._make_layer(LinearBottleneck, block_channels[0], block_channels[1], num_blocks[1], t, 3,
2, 0)
self.bottleneck3 = self._make_layer(LinearBottleneck, block_channels[1], block_channels[2], num_blocks[2], t, 3,
1, 1)
self.ppm = PyramidPooling(block_channels[2], out_channels)
def _make_layer(self, block, inplanes, planes, blocks, t=6, kernel_size=3, stride=1, padding=1):
layers = []
layers.append(block(inplanes, planes, t, kernel_size, stride, padding))
for i in range(1, blocks):
layers.append(block(planes, planes, t, 3, 1, 1))
return nn.Sequential(*layers)
def forward(self, x):
x = self.bottleneck1(x)
x = self.bottleneck2(x)
x = self.bottleneck3(x)
x = self.ppm(x)
return x
class FeatureFusionModule(nn.Module):
"""Feature fusion module"""
def __init__(self, highter_in_channels, lower_in_channels, out_channels, scale_factor=4, **kwargs):
super(FeatureFusionModule, self).__init__()
self.scale_factor = scale_factor
self.dwconv = _DWConv(lower_in_channels, out_channels)
self.conv_lower_res = nn.Sequential(
nn.Conv2d(out_channels, out_channels, 1),
nn.BatchNorm2d(out_channels)
)
self.conv_higher_res = nn.Sequential(
nn.Conv2d(highter_in_channels, out_channels, 1),
nn.BatchNorm2d(out_channels)
)
self.relu = nn.ReLU(True)
def forward(self, higher_res_feature, lower_res_feature):
_, _, h, w = higher_res_feature.size()
lower_res_feature = F.interpolate(lower_res_feature, size=(h, w), mode='bilinear', align_corners=True)
lower_res_feature = self.dwconv(lower_res_feature)
lower_res_feature = self.conv_lower_res(lower_res_feature)
higher_res_feature = self.conv_higher_res(higher_res_feature)
out = higher_res_feature + lower_res_feature
return self.relu(out)
class Classifer(nn.Module):
"""Classifer"""
def __init__(self, dw_channels, num_classes):
super(Classifer, self).__init__()
self.dsconv1 = _DSConv(dw_channels, dw_channels)
self.dsconv2 = _DSConv(dw_channels, dw_channels)
self.conv = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(dw_channels, num_classes, 1)
)
def forward(self, x):
x = self.dsconv1(x)
x = self.dsconv2(x)
x = self.conv(x)
return x
# This network is basically the same as the ContextNet variant; the difference is that the shallow net at the head becomes a shared stem, and a PPM is added to the deep branch.
class FastSCNNX10(nn.Module):
def __init__(self, classes, aux=False, **kwargs):
super(FastSCNNX10, self).__init__()
self.aux = aux
self.learning_to_downsample = LearningToDownsample(32, 48, 64) # similar to ContextNet's Shallow_net
self.global_feature_extractor = GlobalFeatureExtractor(64, [64, 96, 128], 128, 6,
[3, 3, 3]) # similar to ContextNet's deep net, with a PPM added
self.feature_fusion = FeatureFusionModule(64, 128, 128) # same as ContextNet
self.classifier = Classifer(128, classes) # same as ContextNet
if self.aux:
self.auxlayer = nn.Sequential(
nn.Conv2d(64, 32, 3, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.Dropout(0.1),
nn.Conv2d(32, classes, 1)
)
def forward(self, x):
size = x.size()[2:]
higher_res_features = self.learning_to_downsample(x)
x = self.global_feature_extractor(higher_res_features)
x = self.feature_fusion(higher_res_features, x)
x = self.classifier(x)
outputs = []
x = F.interpolate(x, size, mode='bilinear', align_corners=True)
outputs.append(x)
if self.aux:
auxout = self.auxlayer(higher_res_features)
auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
outputs.append(auxout)
return x
# return tuple(outputs)
"""print layers and params of network"""
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = FastSCNNX10(classes=19).to(device)
summary(model, (3, 512, 1024))
from fvcore.nn.flop_count import flop_count # https://github.com/facebookresearch/fvcore
from tools.flops_counter.ptflops import get_model_complexity_info
from thop import profile # https://github.com/Lyken17/pytorch-OpCounter
x = torch.randn(2, 3, 512, 1024).to(device)
from fvcore.nn.jit_handles import batchnorm_flop_jit
from fvcore.nn.jit_handles import generic_activation_jit
supported_ops = {
"aten::batch_norm": batchnorm_flop_jit,
}
flop_dict, _ = flop_count(model, (x,), supported_ops)
flops_count, params_count = get_model_complexity_info(model, (3, 512, 1024),
as_strings=False,
print_per_layer_stat=True)
input = x
macs, params = profile(model, inputs=(input,))
print(flop_dict)
print(flops_count, params_count)
print(macs, params)
'''
"D:\ProgramData\Anaconda3\envs\tensorflow 1\python.exe" D:/GitHub/Efficient-Segmentation-Networks/model/FastSCNNX10.py
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 3, 511, 1023] 36
BatchNorm2d-2 [-1, 3, 511, 1023] 6
ReLU-3 [-1, 3, 511, 1023] 0
Conv2d-4 [-1, 32, 256, 512] 864
BatchNorm2d-5 [-1, 32, 256, 512] 64
ReLU-6 [-1, 32, 256, 512] 0
_ConvBNReLU-7 [-1, 32, 256, 512] 0
Conv2d-8 [-1, 32, 255, 511] 128
BatchNorm2d-9 [-1, 32, 255, 511] 64
ReLU-10 [-1, 32, 255, 511] 0
Conv2d-11 [-1, 32, 255, 511] 1,024
BatchNorm2d-12 [-1, 32, 255, 511] 64
ReLU-13 [-1, 32, 255, 511] 0
Conv2d-14 [-1, 32, 128, 256] 288
BatchNorm2d-15 [-1, 32, 128, 256] 64
ReLU-16 [-1, 32, 128, 256] 0
Conv2d-17 [-1, 48, 128, 256] 1,536
BatchNorm2d-18 [-1, 48, 128, 256] 96
ReLU-19 [-1, 48, 128, 256] 0
_DSConv-20 [-1, 48, 128, 256] 0
Conv2d-21 [-1, 48, 127, 255] 192
BatchNorm2d-22 [-1, 48, 127, 255] 96
ReLU-23 [-1, 48, 127, 255] 0
Conv2d-24 [-1, 48, 127, 255] 2,304
BatchNorm2d-25 [-1, 48, 127, 255] 96
ReLU-26 [-1, 48, 127, 255] 0
Conv2d-27 [-1, 48, 64, 128] 432
BatchNorm2d-28 [-1, 48, 64, 128] 96
ReLU-29 [-1, 48, 64, 128] 0
Conv2d-30 [-1, 64, 64, 128] 3,072
BatchNorm2d-31 [-1, 64, 64, 128] 128
ReLU-32 [-1, 64, 64, 128] 0
_DSConv-33 [-1, 64, 64, 128] 0
LearningToDownsample-34 [-1, 64, 64, 128] 0
Conv2d-35 [-1, 384, 64, 128] 24,576
BatchNorm2d-36 [-1, 384, 64, 128] 768
ReLU-37 [-1, 384, 64, 128] 0
_ConvBNReLU-38 [-1, 384, 64, 128] 0
Conv2d-39 [-1, 384, 63, 127] 1,536
BatchNorm2d-40 [-1, 384, 63, 127] 768
ReLU-41 [-1, 384, 63, 127] 0
Conv2d-42 [-1, 384, 32, 64] 3,456
BatchNorm2d-43 [-1, 384, 32, 64] 768
ReLU-44 [-1, 384, 32, 64] 0
_DWConv-45 [-1, 384, 32, 64] 0
Conv2d-46 [-1, 64, 32, 64] 24,576
BatchNorm2d-47 [-1, 64, 32, 64] 128
LinearBottleneck-48 [-1, 64, 32, 64] 0
Conv2d-49 [-1, 384, 32, 64] 24,576
BatchNorm2d-50 [-1, 384, 32, 64] 768
ReLU-51 [-1, 384, 32, 64] 0
_ConvBNReLU-52 [-1, 384, 32, 64] 0
Conv2d-53 [-1, 384, 32, 64] 3,456
BatchNorm2d-54 [-1, 384, 32, 64] 768
ReLU-55 [-1, 384, 32, 64] 0
_DWConv-56 [-1, 384, 32, 64] 0
Conv2d-57 [-1, 64, 32, 64] 24,576
BatchNorm2d-58 [-1, 64, 32, 64] 128
LinearBottleneck-59 [-1, 64, 32, 64] 0
Conv2d-60 [-1, 384, 32, 64] 24,576
BatchNorm2d-61 [-1, 384, 32, 64] 768
ReLU-62 [-1, 384, 32, 64] 0
_ConvBNReLU-63 [-1, 384, 32, 64] 0
Conv2d-64 [-1, 384, 32, 64] 3,456
BatchNorm2d-65 [-1, 384, 32, 64] 768
ReLU-66 [-1, 384, 32, 64] 0
_DWConv-67 [-1, 384, 32, 64] 0
Conv2d-68 [-1, 64, 32, 64] 24,576
BatchNorm2d-69 [-1, 64, 32, 64] 128
LinearBottleneck-70 [-1, 64, 32, 64] 0
Conv2d-71 [-1, 384, 32, 64] 24,576
BatchNorm2d-72 [-1, 384, 32, 64] 768
ReLU-73 [-1, 384, 32, 64] 0
_ConvBNReLU-74 [-1, 384, 32, 64] 0
Conv2d-75 [-1, 384, 31, 63] 1,536
BatchNorm2d-76 [-1, 384, 31, 63] 768
ReLU-77 [-1, 384, 31, 63] 0
Conv2d-78 [-1, 384, 16, 32] 3,456
BatchNorm2d-79 [-1, 384, 16, 32] 768
ReLU-80 [-1, 384, 16, 32] 0
_DWConv-81 [-1, 384, 16, 32] 0
Conv2d-82 [-1, 96, 16, 32] 36,864
BatchNorm2d-83 [-1, 96, 16, 32] 192
LinearBottleneck-84 [-1, 96, 16, 32] 0
Conv2d-85 [-1, 576, 16, 32] 55,296
BatchNorm2d-86 [-1, 576, 16, 32] 1,152
ReLU-87 [-1, 576, 16, 32] 0
_ConvBNReLU-88 [-1, 576, 16, 32] 0
Conv2d-89 [-1, 576, 16, 32] 5,184
BatchNorm2d-90 [-1, 576, 16, 32] 1,152
ReLU-91 [-1, 576, 16, 32] 0
_DWConv-92 [-1, 576, 16, 32] 0
Conv2d-93 [-1, 96, 16, 32] 55,296
BatchNorm2d-94 [-1, 96, 16, 32] 192
LinearBottleneck-95 [-1, 96, 16, 32] 0
Conv2d-96 [-1, 576, 16, 32] 55,296
BatchNorm2d-97 [-1, 576, 16, 32] 1,152
ReLU-98 [-1, 576, 16, 32] 0
_ConvBNReLU-99 [-1, 576, 16, 32] 0
Conv2d-100 [-1, 576, 16, 32] 5,184
BatchNorm2d-101 [-1, 576, 16, 32] 1,152
ReLU-102 [-1, 576, 16, 32] 0
_DWConv-103 [-1, 576, 16, 32] 0
Conv2d-104 [-1, 96, 16, 32] 55,296
BatchNorm2d-105 [-1, 96, 16, 32] 192
LinearBottleneck-106 [-1, 96, 16, 32] 0
Conv2d-107 [-1, 576, 16, 32] 55,296
BatchNorm2d-108 [-1, 576, 16, 32] 1,152
ReLU-109 [-1, 576, 16, 32] 0
_ConvBNReLU-110 [-1, 576, 16, 32] 0
Conv2d-111 [-1, 576, 16, 32] 5,184
BatchNorm2d-112 [-1, 576, 16, 32] 1,152
ReLU-113 [-1, 576, 16, 32] 0
_DWConv-114 [-1, 576, 16, 32] 0
Conv2d-115 [-1, 128, 16, 32] 73,728
BatchNorm2d-116 [-1, 128, 16, 32] 256
LinearBottleneck-117 [-1, 128, 16, 32] 0
Conv2d-118 [-1, 768, 16, 32] 98,304
BatchNorm2d-119 [-1, 768, 16, 32] 1,536
ReLU-120 [-1, 768, 16, 32] 0
_ConvBNReLU-121 [-1, 768, 16, 32] 0
Conv2d-122 [-1, 768, 16, 32] 6,912
BatchNorm2d-123 [-1, 768, 16, 32] 1,536
ReLU-124 [-1, 768, 16, 32] 0
_DWConv-125 [-1, 768, 16, 32] 0
Conv2d-126 [-1, 128, 16, 32] 98,304
BatchNorm2d-127 [-1, 128, 16, 32] 256
LinearBottleneck-128 [-1, 128, 16, 32] 0
Conv2d-129 [-1, 768, 16, 32] 98,304
BatchNorm2d-130 [-1, 768, 16, 32] 1,536
ReLU-131 [-1, 768, 16, 32] 0
_ConvBNReLU-132 [-1, 768, 16, 32] 0
Conv2d-133 [-1, 768, 16, 32] 6,912
BatchNorm2d-134 [-1, 768, 16, 32] 1,536
ReLU-135 [-1, 768, 16, 32] 0
_DWConv-136 [-1, 768, 16, 32] 0
Conv2d-137 [-1, 128, 16, 32] 98,304
BatchNorm2d-138 [-1, 128, 16, 32] 256
LinearBottleneck-139 [-1, 128, 16, 32] 0
Conv2d-140 [-1, 32, 1, 1] 4,096
BatchNorm2d-141 [-1, 32, 1, 1] 64
ReLU-142 [-1, 32, 1, 1] 0
_ConvBNReLU-143 [-1, 32, 1, 1] 0
Conv2d-144 [-1, 32, 2, 2] 4,096
BatchNorm2d-145 [-1, 32, 2, 2] 64
ReLU-146 [-1, 32, 2, 2] 0
_ConvBNReLU-147 [-1, 32, 2, 2] 0
Conv2d-148 [-1, 32, 3, 3] 4,096
BatchNorm2d-149 [-1, 32, 3, 3] 64
ReLU-150 [-1, 32, 3, 3] 0
_ConvBNReLU-151 [-1, 32, 3, 3] 0
Conv2d-152 [-1, 32, 6, 6] 4,096
BatchNorm2d-153 [-1, 32, 6, 6] 64
ReLU-154 [-1, 32, 6, 6] 0
_ConvBNReLU-155 [-1, 32, 6, 6] 0
Conv2d-156 [-1, 128, 16, 32] 32,768
BatchNorm2d-157 [-1, 128, 16, 32] 256
ReLU-158 [-1, 128, 16, 32] 0
_ConvBNReLU-159 [-1, 128, 16, 32] 0
PyramidPooling-160 [-1, 128, 16, 32] 0
GlobalFeatureExtractor-161 [-1, 128, 16, 32] 0
Conv2d-162 [-1, 128, 64, 128] 1,152
BatchNorm2d-163 [-1, 128, 64, 128] 256
ReLU-164 [-1, 128, 64, 128] 0
_DWConv-165 [-1, 128, 64, 128] 0
Conv2d-166 [-1, 128, 64, 128] 16,512
BatchNorm2d-167 [-1, 128, 64, 128] 256
Conv2d-168 [-1, 128, 64, 128] 8,320
BatchNorm2d-169 [-1, 128, 64, 128] 256
ReLU-170 [-1, 128, 64, 128] 0
FeatureFusionModule-171 [-1, 128, 64, 128] 0
Conv2d-172 [-1, 128, 64, 128] 1,152
BatchNorm2d-173 [-1, 128, 64, 128] 256
ReLU-174 [-1, 128, 64, 128] 0
Conv2d-175 [-1, 128, 64, 128] 16,384
BatchNorm2d-176 [-1, 128, 64, 128] 256
ReLU-177 [-1, 128, 64, 128] 0
_DSConv-178 [-1, 128, 64, 128] 0
Conv2d-179 [-1, 128, 64, 128] 1,152
BatchNorm2d-180 [-1, 128, 64, 128] 256
ReLU-181 [-1, 128, 64, 128] 0
Conv2d-182 [-1, 128, 64, 128] 16,384
BatchNorm2d-183 [-1, 128, 64, 128] 256
ReLU-184 [-1, 128, 64, 128] 0
_DSConv-185 [-1, 128, 64, 128] 0
Dropout-186 [-1, 128, 64, 128] 0
Conv2d-187 [-1, 19, 64, 128] 2,451
Classifer-188 [-1, 19, 64, 128] 0
================================================================
Total params: 1,146,669
Trainable params: 1,146,669
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 6.00
Forward/backward pass size (MB): 1184.71
Params size (MB): 4.37
Estimated Total Size (MB): 1195.09
----------------------------------------------------------------
D:\ProgramData\Anaconda3\envs\tensorflow 1\lib\collections\__init__.py:833: RuntimeWarning: overflow encountered in long_scalars
self[elem] += count
Skipped operation aten::relu_ 41 time(s)
Skipped operation aten::add 7 time(s)
Skipped operation aten::adaptive_avg_pool2d 4 time(s)
Skipped operation aten::upsample_bilinear2d 6 time(s)
Skipped operation aten::dropout 1 time(s)
FastSCNNX10(
2.075 GMac, 100.000% MACs,
(learning_to_downsample): LearningToDownsample(
0.514 GMac, 24.753% MACs,
(conv): _ConvBNReLU(
0.149 GMac, 7.196% MACs,
(conv): Sequential(
0.149 GMac, 7.196% MACs,
(0): Conv2d(0.019 GMac, 0.907% MACs, 3, 3, kernel_size=(2, 2), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.003 GMac, 0.151% MACs, 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.002 GMac, 0.076% MACs, inplace=True)
(3): Conv2d(0.113 GMac, 5.456% MACs, 3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(4): BatchNorm2d(0.008 GMac, 0.404% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.004 GMac, 0.202% MACs, inplace=True)
)
)
(dsconv1): _DSConv(
0.243 GMac, 11.697% MACs,
(conv): Sequential(
0.243 GMac, 11.697% MACs,
(0): Conv2d(0.017 GMac, 0.804% MACs, 32, 32, kernel_size=(2, 2), stride=(1, 1), groups=32, bias=False)
(1): BatchNorm2d(0.008 GMac, 0.402% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.004 GMac, 0.201% MACs, inplace=True)
(3): Conv2d(0.133 GMac, 6.429% MACs, 32, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.008 GMac, 0.402% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.004 GMac, 0.201% MACs, inplace=True)
(6): Conv2d(0.009 GMac, 0.455% MACs, 32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False)
(7): BatchNorm2d(0.002 GMac, 0.101% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(8): ReLU(0.001 GMac, 0.051% MACs, inplace=True)
(9): Conv2d(0.05 GMac, 2.425% MACs, 32, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
(10): BatchNorm2d(0.003 GMac, 0.152% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(11): ReLU(0.002 GMac, 0.076% MACs, inplace=True)
)
)
(dsconv2): _DSConv(
0.122 GMac, 5.860% MACs,
(conv): Sequential(
0.122 GMac, 5.860% MACs,
(0): Conv2d(0.006 GMac, 0.300% MACs, 48, 48, kernel_size=(2, 2), stride=(1, 1), groups=48, bias=False)
(1): BatchNorm2d(0.003 GMac, 0.150% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.002 GMac, 0.075% MACs, inplace=True)
(3): Conv2d(0.075 GMac, 3.595% MACs, 48, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.003 GMac, 0.150% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.002 GMac, 0.075% MACs, inplace=True)
(6): Conv2d(0.004 GMac, 0.171% MACs, 48, 48, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=48, bias=False)
(7): BatchNorm2d(0.001 GMac, 0.038% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(8): ReLU(0.0 GMac, 0.019% MACs, inplace=True)
(9): Conv2d(0.025 GMac, 1.213% MACs, 48, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(10): BatchNorm2d(0.001 GMac, 0.051% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(11): ReLU(0.001 GMac, 0.025% MACs, inplace=True)
)
)
)
(global_feature_extractor): GlobalFeatureExtractor(
1.02 GMac, 49.170% MACs,
(bottleneck1): Sequential(
0.518 GMac, 24.946% MACs,
(0): LinearBottleneck(
0.292 GMac, 14.084% MACs,
(block): Sequential(
0.292 GMac, 14.084% MACs,
(0): _ConvBNReLU(
0.211 GMac, 10.155% MACs,
(conv): Sequential(
0.211 GMac, 10.155% MACs,
(0): Conv2d(0.201 GMac, 9.700% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.006 GMac, 0.303% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.003 GMac, 0.152% MACs, inplace=True)
)
)
(1): _DWConv(
0.031 GMac, 1.491% MACs,
(conv): Sequential(
0.031 GMac, 1.491% MACs,
(0): Conv2d(0.012 GMac, 0.592% MACs, 384, 384, kernel_size=(2, 2), stride=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.006 GMac, 0.296% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.003 GMac, 0.148% MACs, inplace=True)
(3): Conv2d(0.007 GMac, 0.341% MACs, 384, 384, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=384, bias=False)
(4): BatchNorm2d(0.002 GMac, 0.076% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.001 GMac, 0.038% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 2.425% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.013% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.113 GMac, 5.431% MACs,
(block): Sequential(
0.113 GMac, 5.431% MACs,
(0): _ConvBNReLU(
0.053 GMac, 2.539% MACs,
(conv): Sequential(
0.053 GMac, 2.539% MACs,
(0): Conv2d(0.05 GMac, 2.425% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.002 GMac, 0.076% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.038% MACs, inplace=True)
)
)
(1): _DWConv(
0.009 GMac, 0.455% MACs,
(conv): Sequential(
0.009 GMac, 0.455% MACs,
(0): Conv2d(0.007 GMac, 0.341% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.076% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.038% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 2.425% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.013% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(2): LinearBottleneck(
0.113 GMac, 5.431% MACs,
(block): Sequential(
0.113 GMac, 5.431% MACs,
(0): _ConvBNReLU(
0.053 GMac, 2.539% MACs,
(conv): Sequential(
0.053 GMac, 2.539% MACs,
(0): Conv2d(0.05 GMac, 2.425% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.002 GMac, 0.076% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.038% MACs, inplace=True)
)
)
(1): _DWConv(
0.009 GMac, 0.455% MACs,
(conv): Sequential(
0.009 GMac, 0.455% MACs,
(0): Conv2d(0.007 GMac, 0.341% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.076% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.038% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 2.425% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.013% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(bottleneck2): Sequential(
0.202 GMac, 9.712% MACs,
(0): LinearBottleneck(
0.079 GMac, 3.820% MACs,
(block): Sequential(
0.079 GMac, 3.820% MACs,
(0): _ConvBNReLU(
0.053 GMac, 2.539% MACs,
(conv): Sequential(
0.053 GMac, 2.539% MACs,
(0): Conv2d(0.05 GMac, 2.425% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.002 GMac, 0.076% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.038% MACs, inplace=True)
)
)
(1): _DWConv(
0.008 GMac, 0.367% MACs,
(conv): Sequential(
0.008 GMac, 0.367% MACs,
(0): Conv2d(0.003 GMac, 0.145% MACs, 384, 384, kernel_size=(2, 2), stride=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.072% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.036% MACs, inplace=True)
(3): Conv2d(0.002 GMac, 0.085% MACs, 384, 384, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=384, bias=False)
(4): BatchNorm2d(0.0 GMac, 0.019% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.0 GMac, 0.009% MACs, inplace=True)
)
)
(2): Conv2d(0.019 GMac, 0.909% MACs, 384, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.005% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.061 GMac, 2.946% MACs,
(block): Sequential(
0.061 GMac, 2.946% MACs,
(0): _ConvBNReLU(
0.029 GMac, 1.407% MACs,
(conv): Sequential(
0.029 GMac, 1.407% MACs,
(0): Conv2d(0.028 GMac, 1.364% MACs, 96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.028% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(1): _DWConv(
0.004 GMac, 0.171% MACs,
(conv): Sequential(
0.004 GMac, 0.171% MACs,
(0): Conv2d(0.003 GMac, 0.128% MACs, 576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.028% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(2): Conv2d(0.028 GMac, 1.364% MACs, 576, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.005% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(2): LinearBottleneck(
0.061 GMac, 2.946% MACs,
(block): Sequential(
0.061 GMac, 2.946% MACs,
(0): _ConvBNReLU(
0.029 GMac, 1.407% MACs,
(conv): Sequential(
0.029 GMac, 1.407% MACs,
(0): Conv2d(0.028 GMac, 1.364% MACs, 96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.028% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(1): _DWConv(
0.004 GMac, 0.171% MACs,
(conv): Sequential(
0.004 GMac, 0.171% MACs,
(0): Conv2d(0.003 GMac, 0.128% MACs, 576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.028% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(2): Conv2d(0.028 GMac, 1.364% MACs, 576, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.005% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(bottleneck3): Sequential(
0.284 GMac, 13.684% MACs,
(0): LinearBottleneck(
0.071 GMac, 3.402% MACs,
(block): Sequential(
0.071 GMac, 3.402% MACs,
(0): _ConvBNReLU(
0.029 GMac, 1.407% MACs,
(conv): Sequential(
0.029 GMac, 1.407% MACs,
(0): Conv2d(0.028 GMac, 1.364% MACs, 96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.028% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(1): _DWConv(
0.004 GMac, 0.171% MACs,
(conv): Sequential(
0.004 GMac, 0.171% MACs,
(0): Conv2d(0.003 GMac, 0.128% MACs, 576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.028% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.014% MACs, inplace=True)
)
)
(2): Conv2d(0.038 GMac, 1.819% MACs, 576, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.006% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.107 GMac, 5.141% MACs,
(block): Sequential(
0.107 GMac, 5.141% MACs,
(0): _ConvBNReLU(
0.052 GMac, 2.482% MACs,
(conv): Sequential(
0.052 GMac, 2.482% MACs,
(0): Conv2d(0.05 GMac, 2.425% MACs, 128, 768, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.038% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.019% MACs, inplace=True)
)
)
(1): _DWConv(
0.005 GMac, 0.227% MACs,
(conv): Sequential(
0.005 GMac, 0.227% MACs,
(0): Conv2d(0.004 GMac, 0.171% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.038% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.019% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 2.425% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.006% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(2): LinearBottleneck(
0.107 GMac, 5.141% MACs,
(block): Sequential(
0.107 GMac, 5.141% MACs,
(0): _ConvBNReLU(
0.052 GMac, 2.482% MACs,
(conv): Sequential(
0.052 GMac, 2.482% MACs,
(0): Conv2d(0.05 GMac, 2.425% MACs, 128, 768, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.038% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.019% MACs, inplace=True)
)
)
(1): _DWConv(
0.005 GMac, 0.227% MACs,
(conv): Sequential(
0.005 GMac, 0.227% MACs,
(0): Conv2d(0.004 GMac, 0.171% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.038% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.019% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 2.425% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.006% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(ppm): PyramidPooling(
0.017 GMac, 0.828% MACs,
(conv1): _ConvBNReLU(
0.0 GMac, 0.000% MACs,
(conv): Sequential(
0.0 GMac, 0.000% MACs,
(0): Conv2d(0.0 GMac, 0.000% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(conv2): _ConvBNReLU(
0.0 GMac, 0.001% MACs,
(conv): Sequential(
0.0 GMac, 0.001% MACs,
(0): Conv2d(0.0 GMac, 0.001% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(conv3): _ConvBNReLU(
0.0 GMac, 0.002% MACs,
(conv): Sequential(
0.0 GMac, 0.002% MACs,
(0): Conv2d(0.0 GMac, 0.002% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(conv4): _ConvBNReLU(
0.0 GMac, 0.007% MACs,
(conv): Sequential(
0.0 GMac, 0.007% MACs,
(0): Conv2d(0.0 GMac, 0.007% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(out): _ConvBNReLU(
0.017 GMac, 0.818% MACs,
(conv): Sequential(
0.017 GMac, 0.818% MACs,
(0): Conv2d(0.017 GMac, 0.808% MACs, 256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.006% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.003% MACs, inplace=True)
)
)
)
)
(feature_fusion): FeatureFusionModule(
0.221 GMac, 10.660% MACs,
(dwconv): _DWConv(
0.013 GMac, 0.606% MACs,
(conv): Sequential(
0.013 GMac, 0.606% MACs,
(0): Conv2d(0.009 GMac, 0.455% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.101% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.051% MACs, inplace=True)
)
)
(conv_lower_res): Sequential(
0.137 GMac, 6.619% MACs,
(0): Conv2d(0.135 GMac, 6.517% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1))
(1): BatchNorm2d(0.002 GMac, 0.101% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(conv_higher_res): Sequential(
0.07 GMac, 3.385% MACs,
(0): Conv2d(0.068 GMac, 3.284% MACs, 64, 128, kernel_size=(1, 1), stride=(1, 1))
(1): BatchNorm2d(0.002 GMac, 0.101% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(relu): ReLU(0.001 GMac, 0.051% MACs, inplace=True)
)
(classifier): Classifer(
0.32 GMac, 15.417% MACs,
(dsconv1): _DSConv(
0.15 GMac, 7.225% MACs,
(conv): Sequential(
0.15 GMac, 7.225% MACs,
(0): Conv2d(0.009 GMac, 0.455% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.101% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.051% MACs, inplace=True)
(3): Conv2d(0.134 GMac, 6.467% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.002 GMac, 0.101% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.001 GMac, 0.051% MACs, inplace=True)
)
)
(dsconv2): _DSConv(
0.15 GMac, 7.225% MACs,
(conv): Sequential(
0.15 GMac, 7.225% MACs,
(0): Conv2d(0.009 GMac, 0.455% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.101% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.051% MACs, inplace=True)
(3): Conv2d(0.134 GMac, 6.467% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.002 GMac, 0.101% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.001 GMac, 0.051% MACs, inplace=True)
)
)
(conv): Sequential(
0.02 GMac, 0.967% MACs,
(0): Dropout(0.0 GMac, 0.000% MACs, p=0.1, inplace=False)
(1): Conv2d(0.02 GMac, 0.967% MACs, 128, 19, kernel_size=(1, 1), stride=(1, 1))
)
)
)
[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.Conv2d'>.
[INFO] Register count_bn() for <class 'torch.nn.modules.batchnorm.BatchNorm2d'>.
[INFO] Register zero_ops() for <class 'torch.nn.modules.activation.ReLU'>.
[WARN] Cannot find rule for <class 'torch.nn.modules.container.Sequential'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__._ConvBNReLU'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__._DSConv'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.LearningToDownsample'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__._DWConv'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.LinearBottleneck'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.PyramidPooling'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.GlobalFeatureExtractor'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.FeatureFusionModule'>. Treat it as zero Macs and zero Params.
[INFO] Register zero_ops() for <class 'torch.nn.modules.dropout.Dropout'>.
[WARN] Cannot find rule for <class '__main__.Classifer'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.FastSCNNX10'>. Treat it as zero Macs and zero Params.
defaultdict(<class 'float'>, {'batchnorm': 0.35296028, 'conv': 1.686126592})
2075445773.0 1146669
4066223188.0 1146669.0
Process finished with exit code 0
'''
| [
"[email protected]"
] | |
33de0048d31e64df7114554ace988932e59ef82a | a61263850fe63de61ec3004519f0d9aa69f104ac | /python_Algorithm/battle16/ArraySideSpin.py | 1608d9afead08d58b4772c40d12f1b9339d25e39 | [] | no_license | Kimhyeonsuk/Programmers_Python | dd0e13ef6690cfab0c46a7c8b07a5f3b40175071 | cc5687c8db2cfa098602829dec3acbf17c5c2177 | refs/heads/master | 2023-07-16T22:30:29.457419 | 2021-09-02T10:40:56 | 2021-09-02T10:40:56 | 355,876,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | def solution(rows, columns, queries):
maplist = [[j+i*columns for j in range(1,columns+1)]for i in range(rows)]
answer=[]
# maplist = [[] for _ in range(rows)]
# for i in range(1, rows + 1):
# for j in range(1, columns + 1):
# maplist[i - 1].append((i - 1) * columns + j)
for querie in queries:
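# Walk the border of the query rectangle (down the left edge, across the
# bottom, up the right edge, back across the top), shift the collected values
# by one position (each cell takes its successor's value, i.e. one clockwise
# rotation), record the minimum, then write the values back along the same path.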
x1=querie[0]-1
y1=querie[1]-1
x2=querie[2]-1
y2=querie[3]-1
tmplist=[]
for i in range(x1,x2):
tmplist.append(maplist[i][y1])
for i in range(y1,y2):
tmplist.append(maplist[x2][i])
for i in range(x2,x1,-1):
tmplist.append(maplist[i][y2])
for i in range(y2,y1,-1):
tmplist.append(maplist[x1][i])
val=tmplist.pop(0)
tmplist.append(val)
minval=min(tmplist)
answer.append(minval)
for i in range(x1,x2):
maplist[i][y1]=tmplist.pop(0)
for i in range(y1,y2):
maplist[x2][i]=tmplist.pop(0)
for i in range(x2,x1,-1):
maplist[i][y2]=tmplist.pop(0)
for i in range(y2,y1,-1):
maplist[x1][i]=tmplist.pop(0)
return answer | [
"[email protected]"
] | |
aeea0cff7aca2caf25f9f9dd3296fa30bac15a92 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2158/60837/267537.py | f9eb16e1f46295dd50a8fa352db22017d4a5bdca | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | def atoi(string):
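    # Keep an optional leading '-' plus the consecutive digits from the start of
    # the string, stop at the first other character, and return 0 if nothing valid.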
result=[]
for i in range(len(string)):
if i==0:
if string[i]=='-' or string[i].isdigit():
result.append(string[i])
else:
break
else:
if string[i].isdigit():
result.append(string[i])
else:
break
if len(result)==0 or result==['-']:
return 0
return int(''.join(result))
a=input()
print(atoi(a)) | [
"[email protected]"
] | |
5999d541cfc1bb6b84ba5ce6029f9f926694038a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_116/ch120_2020_03_25_19_26_17_731011.py | 72962cf219a8f0a53e1c3745ee3728e79282ae89 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | from random import randint
dinheiro=100
print(dinheiro)
while dinheiro>0:
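# Keep betting while money remains: "n" bets on a single number (pays 35x),
# "p" bets on even, "i" bets on odd; entering 0 as the bet zeroes the balance
# and ends the game.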
aposta=int(input("qual o valor da aposta? "))
if aposta !=0:
opcao=input("a aposta é em um número ou paridade? ")
if opcao == "n":
numero=int(input("numero de 1 a 36: "))
roleta=randint(2,35)
if numero == roleta:
dinheiro+=(aposta*35)
print(dinheiro)
else:
dinheiro-=aposta
print(dinheiro)
elif opcao == "p":
roleta=randint(0,36)
if roleta % 2 == 0 or roleta==0:
dinheiro+=aposta
print(dinheiro)
else:
dinheiro-=aposta
print(dinheiro)
elif opcao == "i":
roleta=randint(0,36)
if roleta % 2 != 0 and roleta !=0:
dinheiro+=aposta
print(dinheiro)
else:
dinheiro-=aposta
print(dinheiro)
else:
dinheiro-=dinheiro
| [
"[email protected]"
] | |
7bca385eac35f5e7363aff05cb998eabbff0618e | 512327cb8d0bbdafd70ad532378a2206c958242e | /tensorflow_federated/python/simulation/baselines/stackoverflow/word_prediction_preprocessing_test.py | 736f0bf1b77d1e220c2c9efe9b03ef2abc5071d4 | [
"Apache-2.0"
] | permissive | j35tor/federated | 14983e3e7d040ccfef41738d12c00b276c0aee9c | d92bfa6b8e3c9ebbac51ff7a3a180c2baaa08730 | refs/heads/master | 2023-03-22T13:27:11.019501 | 2021-03-11T01:37:07 | 2021-03-11T02:17:34 | 279,016,899 | 0 | 0 | Apache-2.0 | 2020-07-12T07:45:29 | 2020-07-12T07:45:28 | null | UTF-8 | Python | false | false | 10,543 | py | # Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.simulation.baselines.stackoverflow import word_prediction_preprocessing
TEST_DATA = collections.OrderedDict(
creation_date=(['unused date']),
score=([tf.constant(0, dtype=tf.int64)]),
tags=(['unused test tag']),
title=(['unused title']),
tokens=(['one must imagine']),
type=(['unused type']),
)
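# A single synthetic Stack Overflow example; only the 'tokens' field matters to
# the to-ids mapping exercised by these preprocessing tests.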
def _compute_length_of_dataset(ds):
return ds.reduce(0, lambda x, _: x + 1)
class SplitInputTest(tf.test.TestCase):
def test_split_input_returns_expected_result(self):
tokens = tf.constant([[0, 1, 2, 3, 4]], dtype=tf.int64)
expected_input = [[0, 1, 2, 3]]
expected_target = [[1, 2, 3, 4]]
split = word_prediction_preprocessing.split_input_target(tokens)
self.assertAllEqual(self.evaluate(split[0]), expected_input)
self.assertAllEqual(self.evaluate(split[1]), expected_target)
class ToIDsFnTest(tf.test.TestCase):
def test_ids_fn_truncates_on_input_longer_than_sequence_length(self):
vocab = ['A', 'B', 'C']
max_seq_len = 1
bos = word_prediction_preprocessing.get_special_tokens(len(vocab)).bos
to_ids_fn = word_prediction_preprocessing.build_to_ids_fn(
vocab, max_seq_len)
data = {'tokens': 'A B C'}
processed = to_ids_fn(data)
self.assertAllEqual(self.evaluate(processed), [bos, 1])
def test_build_to_ids_fn_embeds_all_vocab(self):
vocab = ['A', 'B', 'C']
max_seq_len = 5
special_tokens = word_prediction_preprocessing.get_special_tokens(
len(vocab))
bos = special_tokens.bos
eos = special_tokens.eos
to_ids_fn = word_prediction_preprocessing.build_to_ids_fn(
vocab, max_seq_len)
data = {'tokens': 'A B C'}
processed = to_ids_fn(data)
self.assertAllEqual(self.evaluate(processed), [bos, 1, 2, 3, eos])
def test_pad_token_correct(self):
vocab = ['A', 'B', 'C']
max_seq_len = 5
to_ids_fn = word_prediction_preprocessing.build_to_ids_fn(
vocab, max_seq_len)
special_tokens = word_prediction_preprocessing.get_special_tokens(
len(vocab))
pad, bos, eos = special_tokens.pad, special_tokens.bos, special_tokens.eos
data = {'tokens': 'A B C'}
processed = to_ids_fn(data)
batched_ds = tf.data.Dataset.from_tensor_slices([processed]).padded_batch(
1, padded_shapes=[6])
sample_elem = next(iter(batched_ds))
self.assertAllEqual(self.evaluate(sample_elem), [[bos, 1, 2, 3, eos, pad]])
def test_oov_token_correct(self):
vocab = ['A', 'B', 'C']
max_seq_len = 5
num_oov_buckets = 2
to_ids_fn = word_prediction_preprocessing.build_to_ids_fn(
vocab, max_seq_len, num_oov_buckets=num_oov_buckets)
oov_tokens = word_prediction_preprocessing.get_special_tokens(
len(vocab), num_oov_buckets=num_oov_buckets).oov
data = {'tokens': 'A B D'}
processed = to_ids_fn(data)
self.assertLen(oov_tokens, num_oov_buckets)
self.assertIn(self.evaluate(processed)[3], oov_tokens)
class BatchAndSplitTest(tf.test.TestCase):
def test_batch_and_split_fn_returns_dataset_with_correct_type_spec(self):
token = tf.constant([[0, 1, 2, 3, 4]], dtype=tf.int64)
ds = tf.data.Dataset.from_tensor_slices(token)
padded_and_batched = word_prediction_preprocessing.batch_and_split(
ds, sequence_length=6, batch_size=1)
self.assertIsInstance(padded_and_batched, tf.data.Dataset)
self.assertEqual(padded_and_batched.element_spec, (tf.TensorSpec(
[None, 6], dtype=tf.int64), tf.TensorSpec([None, 6], dtype=tf.int64)))
def test_batch_and_split_fn_returns_dataset_yielding_expected_elements(self):
token = tf.constant([[0, 1, 2, 3, 4]], dtype=tf.int64)
ds = tf.data.Dataset.from_tensor_slices(token)
padded_and_batched = word_prediction_preprocessing.batch_and_split(
ds, sequence_length=6, batch_size=1)
num_elems = 0
for elem in padded_and_batched:
self.assertAllEqual(
self.evaluate(elem[0]),
tf.constant([[0, 1, 2, 3, 4, 0]], dtype=tf.int64))
self.assertAllEqual(
self.evaluate(elem[1]),
tf.constant([[1, 2, 3, 4, 0, 0]], dtype=tf.int64))
num_elems += 1
self.assertEqual(num_elems, 1)
class PreprocessFnTest(tf.test.TestCase, parameterized.TestCase):
def test_preprocess_fn_with_negative_epochs_raises(self):
with self.assertRaisesRegex(ValueError,
'num_epochs must be a positive integer'):
word_prediction_preprocessing.create_preprocess_fn(
num_epochs=-2, batch_size=1, vocab=['A'], sequence_length=10)
def test_preprocess_fn_with_negative_batch_raises(self):
with self.assertRaisesRegex(ValueError,
'batch_size must be a positive integer'):
word_prediction_preprocessing.create_preprocess_fn(
num_epochs=1, batch_size=-10, vocab=['A'], sequence_length=10)
def test_preprocess_fn_with_empty_vocab_raises(self):
with self.assertRaisesRegex(ValueError, 'vocab must be non-empty'):
word_prediction_preprocessing.create_preprocess_fn(
num_epochs=1, batch_size=1, vocab=[], sequence_length=10)
def test_preprocess_fn_with_negative_sequence_length(self):
with self.assertRaisesRegex(ValueError,
'sequence_length must be a positive integer'):
word_prediction_preprocessing.create_preprocess_fn(
num_epochs=1, batch_size=1, vocab=['A'], sequence_length=0)
def test_preprocess_fn_with_zero_or_less_neg1_max_elements_raises(self):
with self.assertRaisesRegex(
ValueError, 'max_elements must be a positive integer or -1'):
word_prediction_preprocessing.create_preprocess_fn(
num_epochs=1,
batch_size=1,
vocab=['A'],
sequence_length=10,
max_elements=-2)
with self.assertRaisesRegex(
ValueError, 'max_elements must be a positive integer or -1'):
word_prediction_preprocessing.create_preprocess_fn(
num_epochs=1,
batch_size=1,
vocab=['A'],
sequence_length=10,
max_elements=0)
def test_preprocess_fn_with_negative_num_oov_buckets_raises(self):
with self.assertRaisesRegex(ValueError,
'num_oov_buckets must be a positive integer'):
word_prediction_preprocessing.create_preprocess_fn(
num_epochs=1,
batch_size=1,
vocab=['A'],
sequence_length=10,
num_oov_buckets=-1)
@parameterized.named_parameters(('param1', 1, 1), ('param2', 4, 2),
('param3', 100, 3))
def test_preprocess_fn_returns_correct_dataset_element_spec(
self, sequence_length, num_oov_buckets):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_fn = word_prediction_preprocessing.create_preprocess_fn(
batch_size=32,
num_epochs=1,
sequence_length=sequence_length,
max_elements=100,
vocab=['one', 'must'],
num_oov_buckets=num_oov_buckets)
preprocessed_ds = preprocess_fn(ds)
self.assertEqual(
preprocessed_ds.element_spec,
(tf.TensorSpec(shape=[None, sequence_length], dtype=tf.int64),
tf.TensorSpec(shape=[None, sequence_length], dtype=tf.int64)))
def test_preprocess_fn_returns_correct_sequence_with_1_oov_bucket(self):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_fn = word_prediction_preprocessing.create_preprocess_fn(
batch_size=32,
num_epochs=1,
sequence_length=6,
max_elements=100,
vocab=['one', 'must'],
num_oov_buckets=1)
preprocessed_ds = preprocess_fn(ds)
element = next(iter(preprocessed_ds))
# BOS is len(vocab)+2, EOS is len(vocab)+3, pad is 0, OOV is len(vocab)+1
self.assertAllEqual(
self.evaluate(element[0]),
tf.constant([[4, 1, 2, 3, 5, 0]], dtype=tf.int64))
def test_preprocess_fn_returns_correct_sequence_with_3_oov_buckets(self):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_fn = word_prediction_preprocessing.create_preprocess_fn(
batch_size=32,
num_epochs=1,
sequence_length=6,
max_elements=100,
vocab=['one', 'must'],
num_oov_buckets=3)
preprocessed_ds = preprocess_fn(ds)
element = next(iter(preprocessed_ds))
# BOS is len(vocab)+3+1
self.assertEqual(self.evaluate(element[0])[0][0], 6)
self.assertEqual(self.evaluate(element[0])[0][1], 1)
self.assertEqual(self.evaluate(element[0])[0][2], 2)
# OOV is [len(vocab)+1, len(vocab)+2, len(vocab)+3]
self.assertIn(self.evaluate(element[0])[0][3], [3, 4, 5])
# EOS is len(vocab)+3+2
self.assertEqual(self.evaluate(element[0])[0][4], 7)
# pad is 0
self.assertEqual(self.evaluate(element[0])[0][5], 0)
@parameterized.named_parameters(
('num_epochs_1_batch_size_1', 1, 1),
('num_epochs_4_batch_size_2', 4, 2),
('num_epochs_9_batch_size_3', 9, 3),
('num_epochs_12_batch_size_1', 12, 1),
('num_epochs_3_batch_size_5', 3, 5),
('num_epochs_7_batch_size_2', 7, 2),
)
def test_ds_length_is_ceil_num_epochs_over_batch_size(self, num_epochs,
batch_size):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_fn = word_prediction_preprocessing.create_preprocess_fn(
num_epochs=num_epochs,
batch_size=batch_size,
vocab=['A'],
sequence_length=10,
shuffle_buffer_size=1)
preprocessed_ds = preprocess_fn(ds)
self.assertEqual(
_compute_length_of_dataset(preprocessed_ds),
tf.cast(tf.math.ceil(num_epochs / batch_size), tf.int32))
if __name__ == '__main__':
execution_contexts.set_local_execution_context()
tf.test.main()
| [
"[email protected]"
] | |
c517d6fa8f95cfd54f1582a7435d4da648b9952e | d77cd334b0d05dc12c620d792bf4a1b8382c9cbe | /examples/keras-iris-pipeline/run_pipeline.py | 40c6bb7a3df4745e2e5a2753b981380cba2cec26 | [
"Apache-2.0"
] | permissive | pcrete/skil-python | befc4cdbad78213e6e0221c78e960db5eea16a73 | 672a1aa9e8af020c960ab9ee280cbb6b194afc3f | refs/heads/master | 2020-05-18T17:23:30.325751 | 2019-05-16T07:34:47 | 2019-05-16T07:34:47 | 180,715,194 | 0 | 0 | Apache-2.0 | 2019-04-11T04:39:12 | 2019-04-11T04:39:11 | null | UTF-8 | Python | false | false | 480 | py | import skil
import numpy as np
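# Connect to a SKIL server, register the saved transform (iris_tp.json) and
# Keras model (iris_model.h5) under an experiment, then deploy both together as
# a single prediction pipeline and run it on the raw iris.data rows.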
skil_server = skil.Skil()
work_space = skil.WorkSpace(skil_server)
experiment = skil.Experiment(work_space)
transform = skil.Transform(transform='iris_tp.json', experiment=experiment)
model = skil.Model(model='iris_model.h5', experiment=experiment)
deployment = skil.Deployment(skil_server)
pipeline = skil.Pipeline(deployment, model, transform)
with open('iris.data', 'r') as f:
data = np.array(f.readlines())
print(pipeline.predict(data)) | [
"[email protected]"
] | |
2425a8967772fd09cd8e552026ade42063b6fbd5 | 921f5c21500eb3526d153c6b50fb73bbfe4ecef9 | /1.4 Ad Hoc/Game (Chess)/p278.py | 8a937641efa41281b9c4caa505235964a159317b | [] | no_license | alex-stephens/competitive-programming | c3c2919b1e3978e2f498f2d53837774b490c2a3c | 833363f56ef9ada91952c501829a8f430db0caf5 | refs/heads/master | 2021-09-13T18:25:22.432073 | 2018-05-03T00:46:39 | 2018-05-03T00:46:39 | 119,809,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | # Competitive Programming 3
# Problem 278
T = int(input())
for _ in range(T):
inputStr = input().split()
piece, m, n = inputStr[0], int(inputStr[1]), int(inputStr[2])
if piece == 'k':
print( (m*n + 1) // 2 )
elif piece == 'r' or piece == 'Q':
print(min(m, n))
elif piece == 'K':
print( ((n+1)//2) * ((m+1)//2)) | [
"[email protected]"
] | |
93157f7a41570e59528643a96614df362d938106 | 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | /tests/components/thread/test_dataset_store.py | 212db0de06f1b86c6f9ef94662d3b4f1f23b3731 | [
"Apache-2.0"
] | permissive | konnected-io/home-assistant | 037f12c87bb79e19220192eb918e49db1b1a8b3e | 2e65b77b2b5c17919939481f327963abdfdc53f0 | refs/heads/dev | 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 | Apache-2.0 | 2023-02-22T06:24:01 | 2017-11-08T05:27:21 | Python | UTF-8 | Python | false | false | 9,151 | py | """Test the thread dataset store."""
from typing import Any
import pytest
from python_otbr_api.tlv_parser import TLVError
from homeassistant.components.thread import dataset_store
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from . import DATASET_1, DATASET_2, DATASET_3
from tests.common import flush_store
# Same as DATASET_1, but PAN ID moved to the end
DATASET_1_REORDERED = (
"0E080000000000010000000300000F35060004001FFFE0020811111111222222220708FDAD70BF"
"E5AA15DD051000112233445566778899AABBCCDDEEFF030E4F70656E54687265616444656D6F04"
"10445F2B5CA6F2A93A55CE570A70EFEECB0C0402A0F7F801021234"
)
DATASET_1_BAD_CHANNEL = (
"0E080000000000010000000035060004001FFFE0020811111111222222220708FDAD70BF"
"E5AA15DD051000112233445566778899AABBCCDDEEFF030E4F70656E54687265616444656D6F01"
"0212340410445F2B5CA6F2A93A55CE570A70EFEECB0C0402A0F7F8"
)
DATASET_1_NO_CHANNEL = (
"0E08000000000001000035060004001FFFE0020811111111222222220708FDAD70BF"
"E5AA15DD051000112233445566778899AABBCCDDEEFF030E4F70656E54687265616444656D6F01"
"0212340410445F2B5CA6F2A93A55CE570A70EFEECB0C0402A0F7F8"
)
async def test_add_invalid_dataset(hass: HomeAssistant) -> None:
"""Test adding an invalid dataset."""
with pytest.raises(TLVError, match="unknown type 222"):
await dataset_store.async_add_dataset(hass, "source", "DEADBEEF")
store = await dataset_store.async_get_store(hass)
assert len(store.datasets) == 0
async def test_add_dataset_twice(hass: HomeAssistant) -> None:
"""Test adding dataset twice does nothing."""
await dataset_store.async_add_dataset(hass, "source", DATASET_1)
store = await dataset_store.async_get_store(hass)
assert len(store.datasets) == 1
created = list(store.datasets.values())[0].created
await dataset_store.async_add_dataset(hass, "new_source", DATASET_1)
assert len(store.datasets) == 1
assert list(store.datasets.values())[0].created == created
async def test_add_dataset_reordered(hass: HomeAssistant) -> None:
"""Test adding dataset with keys in a different order does nothing."""
await dataset_store.async_add_dataset(hass, "source", DATASET_1)
store = await dataset_store.async_get_store(hass)
assert len(store.datasets) == 1
created = list(store.datasets.values())[0].created
await dataset_store.async_add_dataset(hass, "new_source", DATASET_1_REORDERED)
assert len(store.datasets) == 1
assert list(store.datasets.values())[0].created == created
async def test_delete_dataset_twice(hass: HomeAssistant) -> None:
"""Test deleting dataset twice raises."""
await dataset_store.async_add_dataset(hass, "source", DATASET_1)
await dataset_store.async_add_dataset(hass, "source", DATASET_2)
store = await dataset_store.async_get_store(hass)
dataset_id = list(store.datasets.values())[1].id
store.async_delete(dataset_id)
assert len(store.datasets) == 1
with pytest.raises(KeyError, match=f"'{dataset_id}'"):
store.async_delete(dataset_id)
assert len(store.datasets) == 1
async def test_delete_preferred_dataset(hass: HomeAssistant) -> None:
"""Test deleting preferred dataset raises."""
await dataset_store.async_add_dataset(hass, "source", DATASET_1)
store = await dataset_store.async_get_store(hass)
dataset_id = list(store.datasets.values())[0].id
with pytest.raises(HomeAssistantError, match="attempt to remove preferred dataset"):
store.async_delete(dataset_id)
assert len(store.datasets) == 1
async def test_get_dataset(hass: HomeAssistant) -> None:
"""Test get the preferred dataset."""
assert await dataset_store.async_get_dataset(hass, "blah") is None
await dataset_store.async_add_dataset(hass, "source", DATASET_1)
store = await dataset_store.async_get_store(hass)
dataset_id = list(store.datasets.values())[0].id
assert (await dataset_store.async_get_dataset(hass, dataset_id)) == DATASET_1
async def test_get_preferred_dataset(hass: HomeAssistant) -> None:
"""Test get the preferred dataset."""
assert await dataset_store.async_get_preferred_dataset(hass) is None
await dataset_store.async_add_dataset(hass, "source", DATASET_1)
assert (await dataset_store.async_get_preferred_dataset(hass)) == DATASET_1
async def test_dataset_properties(hass: HomeAssistant) -> None:
"""Test dataset entry properties."""
datasets = [
{"source": "Google", "tlv": DATASET_1},
{"source": "Multipan", "tlv": DATASET_2},
{"source": "🎅", "tlv": DATASET_3},
{"source": "test1", "tlv": DATASET_1_BAD_CHANNEL},
{"source": "test2", "tlv": DATASET_1_NO_CHANNEL},
]
for dataset in datasets:
await dataset_store.async_add_dataset(hass, dataset["source"], dataset["tlv"])
store = await dataset_store.async_get_store(hass)
for dataset in store.datasets.values():
if dataset.source == "Google":
dataset_1 = dataset
if dataset.source == "Multipan":
dataset_2 = dataset
if dataset.source == "🎅":
dataset_3 = dataset
if dataset.source == "test1":
dataset_4 = dataset
if dataset.source == "test2":
dataset_5 = dataset
dataset = store.async_get(dataset_1.id)
assert dataset == dataset_1
assert dataset.channel == 15
assert dataset.extended_pan_id == "1111111122222222"
assert dataset.network_name == "OpenThreadDemo"
assert dataset.pan_id == "1234"
dataset = store.async_get(dataset_2.id)
assert dataset == dataset_2
assert dataset.channel == 15
assert dataset.extended_pan_id == "1111111122222222"
assert dataset.network_name == "HomeAssistant!"
assert dataset.pan_id == "1234"
dataset = store.async_get(dataset_3.id)
assert dataset == dataset_3
assert dataset.channel == 15
assert dataset.extended_pan_id == "1111111122222222"
assert dataset.network_name == "~🐣🐥🐤~"
assert dataset.pan_id == "1234"
dataset = store.async_get(dataset_4.id)
assert dataset == dataset_4
assert dataset.channel is None
dataset = store.async_get(dataset_5.id)
assert dataset == dataset_5
assert dataset.channel is None
async def test_load_datasets(hass: HomeAssistant) -> None:
"""Make sure that we can load/save data correctly."""
datasets = [
{
"source": "Google",
"tlv": DATASET_1,
},
{
"source": "Multipan",
"tlv": DATASET_2,
},
{
"source": "🎅",
"tlv": DATASET_3,
},
]
store1 = await dataset_store.async_get_store(hass)
for dataset in datasets:
store1.async_add(dataset["source"], dataset["tlv"])
assert len(store1.datasets) == 3
for dataset in store1.datasets.values():
if dataset.source == "Google":
dataset_1_store_1 = dataset
if dataset.source == "Multipan":
dataset_2_store_1 = dataset
if dataset.source == "🎅":
dataset_3_store_1 = dataset
assert store1.preferred_dataset == dataset_1_store_1.id
with pytest.raises(HomeAssistantError):
store1.async_delete(dataset_1_store_1.id)
store1.async_delete(dataset_2_store_1.id)
assert len(store1.datasets) == 2
store2 = dataset_store.DatasetStore(hass)
await flush_store(store1._store)
await store2.async_load()
assert len(store2.datasets) == 2
for dataset in store2.datasets.values():
if dataset.source == "Google":
dataset_1_store_2 = dataset
if dataset.source == "🎅":
dataset_3_store_2 = dataset
assert list(store1.datasets) == list(store2.datasets)
assert dataset_1_store_1 == dataset_1_store_2
assert dataset_3_store_1 == dataset_3_store_2
async def test_loading_datasets_from_storage(
hass: HomeAssistant, hass_storage: dict[str, Any]
) -> None:
"""Test loading stored datasets on start."""
hass_storage[dataset_store.STORAGE_KEY] = {
"version": dataset_store.STORAGE_VERSION_MAJOR,
"minor_version": dataset_store.STORAGE_VERSION_MINOR,
"data": {
"datasets": [
{
"created": "2023-02-02T09:41:13.746514+00:00",
"id": "id1",
"source": "source_1",
"tlv": "DATASET_1",
},
{
"created": "2023-02-02T09:41:13.746514+00:00",
"id": "id2",
"source": "source_2",
"tlv": "DATASET_2",
},
{
"created": "2023-02-02T09:41:13.746514+00:00",
"id": "id3",
"source": "source_3",
"tlv": "DATASET_3",
},
],
"preferred_dataset": "id1",
},
}
store = await dataset_store.async_get_store(hass)
assert len(store.datasets) == 3
assert store.preferred_dataset == "id1"
| [
"[email protected]"
] | |
9d027ba393e19bf31b550809b6ed0fc83cc038b4 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/verbal-arithmetic-puzzle.py | 0ce93235f4960cf53349882ed8ec47a385473257 | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 1,711 | py | # Time: O(10! * n * l)
# Space: O(n * l)
import collections
class Solution(object):
def isSolvable(self, words, result):
"""
:type words: List[str]
:type result: str
:rtype: bool
"""
def backtracking(words, result, i, j, carry, lookup, used):
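            # Try digit assignments column by column from the least-significant
            # digit (the strings are reversed); i walks the addend words inside
            # column j, and once all words are handled the column sum plus carry
            # fixes the digit for result[j].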
if j == len(result):
return carry == 0
if i != len(words):
if j >= len(words[i]) or words[i][j] in lookup:
return backtracking(words, result, i+1, j, carry, lookup, used)
for val in xrange(10):
if val in used or (val == 0 and j == len(words[i])-1):
continue
lookup[words[i][j]] = val
used.add(val)
if backtracking(words, result, i+1, j, carry, lookup, used):
return True
used.remove(val)
del lookup[words[i][j]]
return False
carry, val = divmod(carry + sum(lookup[w[j]] for w in words if j < len(w)), 10)
if result[j] in lookup:
return val == lookup[result[j]] and \
backtracking(words, result, 0, j+1, carry, lookup, used)
if val in used or (val == 0 and j == len(result)-1):
return False
lookup[result[j]] = val
used.add(val)
if backtracking(words, result, 0, j+1, carry, lookup, used):
return True
used.remove(val)
del lookup[result[j]]
return False
return backtracking([w[::-1] for w in words], result[::-1], 0, 0, 0, {}, set())
| [
"[email protected]"
] | |
63148046f11c2f2384d376fa158a19b4f33f4a5b | ea44a1681e276b3cc85226b53de217f6096a05d4 | /fhir/resources/STU3/documentmanifest.py | 76787d0d214eee4100a2f3e37c06bdfd35bce5fe | [
"BSD-3-Clause"
] | permissive | stephanie-howson/fhir.resources | 69d2a5a6b0fe4387b82e984255b24027b37985c4 | 126e9dc6e14541f74e69ef7c1a0b8a74aa981905 | refs/heads/master | 2020-05-04T22:24:49.826585 | 2019-06-27T15:51:26 | 2019-06-27T15:51:26 | 179,511,579 | 0 | 0 | null | 2019-04-04T14:14:53 | 2019-04-04T14:14:52 | null | UTF-8 | Python | false | false | 7,250 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.1.11917 (http://hl7.org/fhir/StructureDefinition/DocumentManifest) on 2019-01-17.
# 2019, SMART Health IT.
from . import domainresource
class DocumentManifest(domainresource.DomainResource):
""" A list that defines a set of documents.
A collection of documents compiled for a purpose together with metadata
that applies to the collection.
"""
resource_type = "DocumentManifest"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.author = None
""" Who and/or what authored the manifest.
List of `FHIRReference` items referencing `Practitioner, Organization, Device, Patient, RelatedPerson` (represented as `dict` in JSON). """
self.content = None
""" The items included.
List of `DocumentManifestContent` items (represented as `dict` in JSON). """
self.created = None
""" When this document manifest created.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Human-readable description (title).
Type `str`. """
self.identifier = None
""" Other identifiers for the manifest.
List of `Identifier` items (represented as `dict` in JSON). """
self.masterIdentifier = None
""" Unique Identifier for the set of documents.
Type `Identifier` (represented as `dict` in JSON). """
self.recipient = None
""" Intended to get notified about this set of documents.
List of `FHIRReference` items referencing `Patient, Practitioner, RelatedPerson, Organization` (represented as `dict` in JSON). """
self.related = None
""" Related things.
List of `DocumentManifestRelated` items (represented as `dict` in JSON). """
self.source = None
""" The source system/application/software.
Type `str`. """
self.status = None
""" current | superseded | entered-in-error.
Type `str`. """
self.subject = None
""" The subject of the set of documents.
Type `FHIRReference` referencing `Patient, Practitioner, Group, Device` (represented as `dict` in JSON). """
self.type = None
""" Kind of document set.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(DocumentManifest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DocumentManifest, self).elementProperties()
js.extend([
("author", "author", fhirreference.FHIRReference, True, None, False),
("content", "content", DocumentManifestContent, True, None, True),
("created", "created", fhirdate.FHIRDate, False, None, False),
("description", "description", str, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("masterIdentifier", "masterIdentifier", identifier.Identifier, False, None, False),
("recipient", "recipient", fhirreference.FHIRReference, True, None, False),
("related", "related", DocumentManifestRelated, True, None, False),
("source", "source", str, False, None, False),
("status", "status", str, False, None, True),
("subject", "subject", fhirreference.FHIRReference, False, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
])
return js
from . import backboneelement
class DocumentManifestContent(backboneelement.BackboneElement):
""" The items included.
The list of Documents included in the manifest.
"""
resource_type = "DocumentManifestContent"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.pAttachment = None
""" Contents of this set of documents.
Type `Attachment` (represented as `dict` in JSON). """
self.pReference = None
""" Contents of this set of documents.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
super(DocumentManifestContent, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DocumentManifestContent, self).elementProperties()
js.extend([
("pAttachment", "pAttachment", attachment.Attachment, False, "p", True),
("pReference", "pReference", fhirreference.FHIRReference, False, "p", True),
])
return js
class DocumentManifestRelated(backboneelement.BackboneElement):
""" Related things.
Related identifiers or resources associated with the DocumentManifest.
"""
resource_type = "DocumentManifestRelated"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.identifier = None
""" Identifiers of things that are related.
Type `Identifier` (represented as `dict` in JSON). """
self.ref = None
""" Related Resource.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
super(DocumentManifestRelated, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DocumentManifestRelated, self).elementProperties()
js.extend([
("identifier", "identifier", identifier.Identifier, False, None, False),
("ref", "ref", fhirreference.FHIRReference, False, None, False),
])
return js
import sys
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
| [
"[email protected]"
] | |
7e0bca58ce7844776a9d4810c22e4a72cfef623b | a3926c09872e1f74b57431fbb3e711918a11dc0a | /python/array/0766_toeplitz_matrix.py | 3462854c72324c5f85f5dea1c0cc69e7fd5f6b58 | [
"MIT"
] | permissive | linshaoyong/leetcode | e64297dc6afcebcee0614a153a566323bf223779 | 57080da5fbe5d62cbc0b8a34e362a8b0978d5b59 | refs/heads/main | 2022-09-15T00:05:36.476268 | 2022-08-16T14:09:11 | 2022-08-16T14:09:11 | 196,914,051 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | class Solution(object):
def isToeplitzMatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: bool
"""
m, n = len(matrix), len(matrix[0])
for i in range(0, m):
for j in range(0, n):
if i < m - 1 and j < n - 1 and \
matrix[i][j] != matrix[i + 1][j + 1]:
return False
return True
def test_is_toeplitz_matrix():
assert Solution().isToeplitzMatrix([
[1, 2, 3, 4],
[5, 1, 2, 3],
[9, 5, 1, 2]
])
assert Solution().isToeplitzMatrix([
[1, 2],
[2, 2]
]) is False
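
# Quick manual check (illustrative; pytest normally discovers the tests above):
if __name__ == "__main__":
    test_is_toeplitz_matrix()
    print("toeplitz matrix checks passed")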
| [
"[email protected]"
] | |
c5e4cd96ca5accdc01de794e2f5a0f7465d365d1 | ba694353a3cb1cfd02a6773b40f693386d0dba39 | /sdk/python/pulumi_google_native/compute/v1/get_interconnect.py | 706eebbfb0d58fdac9e9a151e8a950eb13cae2f3 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-google-native | cc57af8bd3d1d6b76f1f48333ed1f1b31d56f92b | 124d255e5b7f5440d1ef63c9a71e4cc1d661cd10 | refs/heads/master | 2023-08-25T00:18:00.300230 | 2023-07-20T04:25:48 | 2023-07-20T04:25:48 | 323,680,373 | 69 | 16 | Apache-2.0 | 2023-09-13T00:28:04 | 2020-12-22T16:39:01 | Python | UTF-8 | Python | false | false | 19,450 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetInterconnectResult',
'AwaitableGetInterconnectResult',
'get_interconnect',
'get_interconnect_output',
]
@pulumi.output_type
class GetInterconnectResult:
def __init__(__self__, admin_enabled=None, circuit_infos=None, creation_timestamp=None, customer_name=None, description=None, expected_outages=None, google_ip_address=None, google_reference_id=None, interconnect_attachments=None, interconnect_type=None, kind=None, label_fingerprint=None, labels=None, link_type=None, location=None, name=None, noc_contact_email=None, operational_status=None, peer_ip_address=None, provisioned_link_count=None, remote_location=None, requested_link_count=None, satisfies_pzs=None, self_link=None, state=None):
if admin_enabled and not isinstance(admin_enabled, bool):
raise TypeError("Expected argument 'admin_enabled' to be a bool")
pulumi.set(__self__, "admin_enabled", admin_enabled)
if circuit_infos and not isinstance(circuit_infos, list):
raise TypeError("Expected argument 'circuit_infos' to be a list")
pulumi.set(__self__, "circuit_infos", circuit_infos)
if creation_timestamp and not isinstance(creation_timestamp, str):
raise TypeError("Expected argument 'creation_timestamp' to be a str")
pulumi.set(__self__, "creation_timestamp", creation_timestamp)
if customer_name and not isinstance(customer_name, str):
raise TypeError("Expected argument 'customer_name' to be a str")
pulumi.set(__self__, "customer_name", customer_name)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if expected_outages and not isinstance(expected_outages, list):
raise TypeError("Expected argument 'expected_outages' to be a list")
pulumi.set(__self__, "expected_outages", expected_outages)
if google_ip_address and not isinstance(google_ip_address, str):
raise TypeError("Expected argument 'google_ip_address' to be a str")
pulumi.set(__self__, "google_ip_address", google_ip_address)
if google_reference_id and not isinstance(google_reference_id, str):
raise TypeError("Expected argument 'google_reference_id' to be a str")
pulumi.set(__self__, "google_reference_id", google_reference_id)
if interconnect_attachments and not isinstance(interconnect_attachments, list):
raise TypeError("Expected argument 'interconnect_attachments' to be a list")
pulumi.set(__self__, "interconnect_attachments", interconnect_attachments)
if interconnect_type and not isinstance(interconnect_type, str):
raise TypeError("Expected argument 'interconnect_type' to be a str")
pulumi.set(__self__, "interconnect_type", interconnect_type)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if label_fingerprint and not isinstance(label_fingerprint, str):
raise TypeError("Expected argument 'label_fingerprint' to be a str")
pulumi.set(__self__, "label_fingerprint", label_fingerprint)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if link_type and not isinstance(link_type, str):
raise TypeError("Expected argument 'link_type' to be a str")
pulumi.set(__self__, "link_type", link_type)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if noc_contact_email and not isinstance(noc_contact_email, str):
raise TypeError("Expected argument 'noc_contact_email' to be a str")
pulumi.set(__self__, "noc_contact_email", noc_contact_email)
if operational_status and not isinstance(operational_status, str):
raise TypeError("Expected argument 'operational_status' to be a str")
pulumi.set(__self__, "operational_status", operational_status)
if peer_ip_address and not isinstance(peer_ip_address, str):
raise TypeError("Expected argument 'peer_ip_address' to be a str")
pulumi.set(__self__, "peer_ip_address", peer_ip_address)
if provisioned_link_count and not isinstance(provisioned_link_count, int):
raise TypeError("Expected argument 'provisioned_link_count' to be a int")
pulumi.set(__self__, "provisioned_link_count", provisioned_link_count)
if remote_location and not isinstance(remote_location, str):
raise TypeError("Expected argument 'remote_location' to be a str")
pulumi.set(__self__, "remote_location", remote_location)
if requested_link_count and not isinstance(requested_link_count, int):
raise TypeError("Expected argument 'requested_link_count' to be a int")
pulumi.set(__self__, "requested_link_count", requested_link_count)
if satisfies_pzs and not isinstance(satisfies_pzs, bool):
raise TypeError("Expected argument 'satisfies_pzs' to be a bool")
pulumi.set(__self__, "satisfies_pzs", satisfies_pzs)
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
pulumi.set(__self__, "self_link", self_link)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
@property
@pulumi.getter(name="adminEnabled")
def admin_enabled(self) -> bool:
"""
Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. By default, the status is set to true.
"""
return pulumi.get(self, "admin_enabled")
@property
@pulumi.getter(name="circuitInfos")
def circuit_infos(self) -> Sequence['outputs.InterconnectCircuitInfoResponse']:
"""
A list of CircuitInfo objects, that describe the individual circuits in this LAG.
"""
return pulumi.get(self, "circuit_infos")
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> str:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter(name="customerName")
def customer_name(self) -> str:
"""
Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect.
"""
return pulumi.get(self, "customer_name")
@property
@pulumi.getter
def description(self) -> str:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="expectedOutages")
def expected_outages(self) -> Sequence['outputs.InterconnectOutageNotificationResponse']:
"""
A list of outages expected for this Interconnect.
"""
return pulumi.get(self, "expected_outages")
@property
@pulumi.getter(name="googleIpAddress")
def google_ip_address(self) -> str:
"""
IP address configured on the Google side of the Interconnect link. This can be used only for ping tests.
"""
return pulumi.get(self, "google_ip_address")
@property
@pulumi.getter(name="googleReferenceId")
def google_reference_id(self) -> str:
"""
Google reference ID to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.
"""
return pulumi.get(self, "google_reference_id")
@property
@pulumi.getter(name="interconnectAttachments")
def interconnect_attachments(self) -> Sequence[str]:
"""
A list of the URLs of all InterconnectAttachments configured to use this Interconnect.
"""
return pulumi.get(self, "interconnect_attachments")
@property
@pulumi.getter(name="interconnectType")
def interconnect_type(self) -> str:
"""
Type of interconnect, which can take one of the following values: - PARTNER: A partner-managed interconnection shared between customers though a partner. - DEDICATED: A dedicated physical interconnection with the customer. Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.
"""
return pulumi.get(self, "interconnect_type")
@property
@pulumi.getter
def kind(self) -> str:
"""
Type of the resource. Always compute#interconnect for interconnects.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="labelFingerprint")
def label_fingerprint(self) -> str:
"""
A fingerprint for the labels being applied to this Interconnect, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an Interconnect.
"""
return pulumi.get(self, "label_fingerprint")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter(name="linkType")
def link_type(self) -> str:
"""
Type of link requested, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.
"""
return pulumi.get(self, "link_type")
@property
@pulumi.getter
def location(self) -> str:
"""
URL of the InterconnectLocation object that represents where this connection is to be provisioned.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nocContactEmail")
def noc_contact_email(self) -> str:
"""
Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Cloud Monitoring logs alerting and Cloud Notifications. This field is required for users who sign up for Cloud Interconnect using workforce identity federation.
"""
return pulumi.get(self, "noc_contact_email")
@property
@pulumi.getter(name="operationalStatus")
def operational_status(self) -> str:
"""
The current status of this Interconnect's functionality, which can take one of the following values: - OS_ACTIVE: A valid Interconnect, which is turned up and is ready to use. Attachments may be provisioned on this Interconnect. - OS_UNPROVISIONED: An Interconnect that has not completed turnup. No attachments may be provisioned on this Interconnect. - OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect.
"""
return pulumi.get(self, "operational_status")
@property
@pulumi.getter(name="peerIpAddress")
def peer_ip_address(self) -> str:
"""
IP address configured on the customer side of the Interconnect link. The customer should configure this IP address during turnup when prompted by Google NOC. This can be used only for ping tests.
"""
return pulumi.get(self, "peer_ip_address")
@property
@pulumi.getter(name="provisionedLinkCount")
def provisioned_link_count(self) -> int:
"""
Number of links actually provisioned in this interconnect.
"""
return pulumi.get(self, "provisioned_link_count")
@property
@pulumi.getter(name="remoteLocation")
def remote_location(self) -> str:
"""
Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.
"""
return pulumi.get(self, "remote_location")
@property
@pulumi.getter(name="requestedLinkCount")
def requested_link_count(self) -> int:
"""
Target number of physical links in the link bundle, as requested by the customer.
"""
return pulumi.get(self, "requested_link_count")
@property
@pulumi.getter(name="satisfiesPzs")
def satisfies_pzs(self) -> bool:
"""
Reserved for future use.
"""
return pulumi.get(self, "satisfies_pzs")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> str:
"""
Server-defined URL for the resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of Interconnect functionality, which can take one of the following values: - ACTIVE: The Interconnect is valid, turned up and ready to use. Attachments may be provisioned on this Interconnect. - UNPROVISIONED: The Interconnect has not completed turnup. No attachments may be provisioned on this Interconnect. - UNDER_MAINTENANCE: The Interconnect is undergoing internal maintenance. No attachments may be provisioned or updated on this Interconnect.
"""
return pulumi.get(self, "state")
class AwaitableGetInterconnectResult(GetInterconnectResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetInterconnectResult(
admin_enabled=self.admin_enabled,
circuit_infos=self.circuit_infos,
creation_timestamp=self.creation_timestamp,
customer_name=self.customer_name,
description=self.description,
expected_outages=self.expected_outages,
google_ip_address=self.google_ip_address,
google_reference_id=self.google_reference_id,
interconnect_attachments=self.interconnect_attachments,
interconnect_type=self.interconnect_type,
kind=self.kind,
label_fingerprint=self.label_fingerprint,
labels=self.labels,
link_type=self.link_type,
location=self.location,
name=self.name,
noc_contact_email=self.noc_contact_email,
operational_status=self.operational_status,
peer_ip_address=self.peer_ip_address,
provisioned_link_count=self.provisioned_link_count,
remote_location=self.remote_location,
requested_link_count=self.requested_link_count,
satisfies_pzs=self.satisfies_pzs,
self_link=self.self_link,
state=self.state)
def get_interconnect(interconnect: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInterconnectResult:
"""
Returns the specified Interconnect. Get a list of available Interconnects by making a list() request.
"""
__args__ = dict()
__args__['interconnect'] = interconnect
__args__['project'] = project
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('google-native:compute/v1:getInterconnect', __args__, opts=opts, typ=GetInterconnectResult).value
return AwaitableGetInterconnectResult(
admin_enabled=pulumi.get(__ret__, 'admin_enabled'),
circuit_infos=pulumi.get(__ret__, 'circuit_infos'),
creation_timestamp=pulumi.get(__ret__, 'creation_timestamp'),
customer_name=pulumi.get(__ret__, 'customer_name'),
description=pulumi.get(__ret__, 'description'),
expected_outages=pulumi.get(__ret__, 'expected_outages'),
google_ip_address=pulumi.get(__ret__, 'google_ip_address'),
google_reference_id=pulumi.get(__ret__, 'google_reference_id'),
interconnect_attachments=pulumi.get(__ret__, 'interconnect_attachments'),
interconnect_type=pulumi.get(__ret__, 'interconnect_type'),
kind=pulumi.get(__ret__, 'kind'),
label_fingerprint=pulumi.get(__ret__, 'label_fingerprint'),
labels=pulumi.get(__ret__, 'labels'),
link_type=pulumi.get(__ret__, 'link_type'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
noc_contact_email=pulumi.get(__ret__, 'noc_contact_email'),
operational_status=pulumi.get(__ret__, 'operational_status'),
peer_ip_address=pulumi.get(__ret__, 'peer_ip_address'),
provisioned_link_count=pulumi.get(__ret__, 'provisioned_link_count'),
remote_location=pulumi.get(__ret__, 'remote_location'),
requested_link_count=pulumi.get(__ret__, 'requested_link_count'),
satisfies_pzs=pulumi.get(__ret__, 'satisfies_pzs'),
self_link=pulumi.get(__ret__, 'self_link'),
state=pulumi.get(__ret__, 'state'))
@_utilities.lift_output_func(get_interconnect)
def get_interconnect_output(interconnect: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInterconnectResult]:
"""
Returns the specified Interconnect. Get a list of available Interconnects by making a list() request.
"""
...
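
# Usage sketch (illustrative only; the interconnect and project names are assumptions,
# and this call belongs in a Pulumi program rather than in this generated SDK module):
#
#     example = get_interconnect_output(interconnect="my-interconnect",
#                                       project="my-project")
#     pulumi.export("interconnectState", example.state)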
| [
"[email protected]"
] | |
ebb24c87e2166bd9394bf1a84d7f4ae129ca184b | c970d6543bc17b5a546ae80dc02cbae3f8b3830a | /server/dhcpd.py | d6dba2dcdd1e75ca8d5caebafbfdb0e5f631e0c9 | [] | no_license | Studentergaarden/APsetup | fe0be854f9e74f5ccf4d469d9a448bf8ef5b21cc | b5c2015a87b3ffb904ce13c7e08f656aa839228d | refs/heads/master | 2021-01-10T14:08:54.980350 | 2017-03-06T10:31:44 | 2017-03-06T10:31:44 | 54,288,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | #!/usr/bin/env python2
from __future__ import print_function
navne = ["adm",
"cosmos","kanni","abort","dyt","ug","mg","iv","bzrk","barbar","pharis","psyko"]
vlan_id = range(10,22)
andre = ["priv wifi", "free wifi", "wire"]
andre_id = [30, 32, 40]
ap_id = range(10, 21)
file = "dhcpd.txt"
f = open(file, 'w')
for idx, val in enumerate(vlan_id):
f.write("# %s\n"%(navne[idx]))
f.write("subnet 10.42.%d.0 netmask 255.255.255.0 {\n"%(val))
f.write("\trange dynamic-bootp 10.42.%d.50 10.42.%d.250;\n"%(val, val))
f.write("\toption routers 10.42.%d.1;\n"%(val))
f.write("\toption domain-name-servers 10.42.%d.1;\n"%(val))
f.write("\tnext-server 10.42.%d.1;\n"%(val))
f.write("}\n\n")
for idx, val in enumerate(andre_id):
f.write("# %s\n"%(andre[idx]))
f.write("subnet 10.42.%d.0 netmask 255.255.254.0 {\n"%(val))
f.write("\trange dynamic-bootp 10.42.%d.50 10.42.%d.250;\n"%(val, val+1))
f.write("\toption routers 10.42.%d.1;\n"%(val))
f.write("\toption domain-name-servers 10.42.%d.1;\n"%(val))
f.write("\tnext-server 10.42.%d.1;\n"%(val))
f.write("}\n\n")
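
# Close the handle explicitly so the generated dhcpd.txt is flushed to disk.
f.close()

# For reference, the first loop above emits blocks of this shape
# (shown for vlan id 10 / "adm", following the format strings used above):
#
#   # adm
#   subnet 10.42.10.0 netmask 255.255.255.0 {
#   	range dynamic-bootp 10.42.10.50 10.42.10.250;
#   	option routers 10.42.10.1;
#   	option domain-name-servers 10.42.10.1;
#   	next-server 10.42.10.1;
#   }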
| [
"[email protected]"
] | |
ef77160c3cd3e81ca2ba7e4c01584f18b0e7ef73 | f139a99d51cfa01a7892f0ac5bbb022c0cee0664 | /Pythonlogy/ShareYourSystem/Standards/Controllers/Drawer/01_ExampleDoc.py | ffe2e7bb003b595d10f996f43eee1168929bdb9e | [
"MIT"
] | permissive | Ledoux/ShareYourSystem | 90bb2e6be3088b458348afa37ace68c93c4b6a7a | 3a2ffabf46f1f68b2c4fd80fa6edb07ae85fa3b2 | refs/heads/master | 2021-01-25T12:14:34.118295 | 2017-01-12T14:44:31 | 2017-01-12T14:44:31 | 29,198,670 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py |
#ImportModules
import ShareYourSystem as SYS
#Define
MyDrawer=SYS.DrawerClass(
).draw(
{
'|fig1':{
'-Panels':{
'|A':{
'-Axes':{
'|a':{
'-Plots':{
'|0':{
'FiguringDrawVariable':
[
(
'#plot',
{
'#liarg':[
[1,2,3],
[2,6,3]
],
'#kwarg':{
'linestyle':"",
'marker':'o'
}
}
)
]
},
'|1':{
'FiguringDrawVariable':
[
(
'#plot',
{
'#liarg':[
[0,1,2],
[2,3,4]
],
'#kwarg':{
'linestyle':"--",
'color':'r'
}
}
)
],
}
}
},
'|b':{
'FiguringDrawVariable':
[
(
'#plot',
{
'#liarg':[
[1,2,3],
[2,6,3]
],
'#kwarg':{
'linestyle':"",
'marker':'o'
}
}
)
]
}
}
},
'|B':{
'FiguringDrawVariable':
[
(
'#plot',
{
'#liarg':[
[1,2,3],
[2,6,3]
],
'#kwarg':{
'linestyle':"",
'marker':'o'
}
}
)
]
},
}
},
'|fig2':{
'FiguringDrawVariable':
[
(
'#plot',
{
'#liarg':[
[1,2,3],
[2,6,3]
],
'#kwarg':{
'linestyle':"",
'marker':'o'
}
}
)
]
}
}
)
#print
print('MyDrawer is ')
SYS._print(MyDrawer)
| [
"[email protected]"
] | |
8602ac0c0521e11e3e0a15e566e3dab96c14e7d7 | 4c9ed67e62eaa75598f949bb517ac1c175e3eec9 | /code/ch07/ch07.py | 21fb4eae64d048a09852b993fc7c0a20fd37a304 | [
"MIT"
] | permissive | mwasjos/python-machine-learning-book-2nd-edition | c8dda555aebbcd042497af5c382358a2e058ef48 | 8c54875fb2eea8f997073e9002870d3ef481c286 | refs/heads/master | 2021-06-27T15:05:34.938957 | 2017-09-15T07:26:37 | 2017-09-15T07:26:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,002 | py | # coding: utf-8
# NOTE: scipy.misc.comb was removed in SciPy 1.0; scipy.special.comb provides the
# same function, so it is imported here to keep the script runnable.
from scipy.special import comb
import math
import operator
from itertools import product

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from sklearn.externals import six
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, auc, roc_curve
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline, _name_estimators
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.tree import DecisionTreeClassifier
# *Python Machine Learning 2nd Edition* by [Sebastian Raschka](https://sebastianraschka.com), Packt Publishing Ltd. 2017
#
# Code Repository: https://github.com/rasbt/python-machine-learning-book-2nd-edition
#
# Code License: [MIT License](https://github.com/rasbt/python-machine-learning-book-2nd-edition/blob/master/LICENSE.txt)
# # Python Machine Learning - Code Examples
# # Chapter 7 - Combining Different Models for Ensemble Learning
# Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
# *The use of `watermark` is optional. You can install this IPython extension via "`pip install watermark`". For more information, please see: https://github.com/rasbt/watermark.*
# ### Overview
# - [Learning with ensembles](#Learning-with-ensembles)
# - [Combining classifiers via majority vote](#Combining-classifiers-via-majority-vote)
# - [Implementing a simple majority vote classifier](#Implementing-a-simple-majority-vote-classifier)
# - [Using the majority voting principle to make predictions](#Using-the-majority-voting-principle-to-make-predictions)
# - [Evaluating and tuning the ensemble classifier](#Evaluating-and-tuning-the-ensemble-classifier)
# - [Bagging – building an ensemble of classifiers from bootstrap samples](#Bagging----Building-an-ensemble-of-classifiers-from-bootstrap-samples)
# - [Bagging in a nutshell](#Bagging-in-a-nutshell)
# - [Applying bagging to classify samples in the Wine dataset](#Applying-bagging-to-classify-samples-in-the-Wine-dataset)
# - [Leveraging weak learners via adaptive boosting](#Leveraging-weak-learners-via-adaptive-boosting)
# - [How boosting works](#How-boosting-works)
# - [Applying AdaBoost using scikit-learn](#Applying-AdaBoost-using-scikit-learn)
# - [Summary](#Summary)
# # Learning with ensembles
def ensemble_error(n_classifier, error):
k_start = int(math.ceil(n_classifier / 2.))
probs = [comb(n_classifier, k) * error**k * (1-error)**(n_classifier - k)
for k in range(k_start, n_classifier + 1)]
return sum(probs)
ensemble_error(n_classifier=11, error=0.25)
error_range = np.arange(0.0, 1.01, 0.01)
ens_errors = [ensemble_error(n_classifier=11, error=error)
for error in error_range]
plt.plot(error_range,
ens_errors,
label='Ensemble error',
linewidth=2)
plt.plot(error_range,
error_range,
linestyle='--',
label='Base error',
linewidth=2)
plt.xlabel('Base error')
plt.ylabel('Base/Ensemble error')
plt.legend(loc='upper left')
plt.grid(alpha=0.5)
#plt.savefig('images/07_03.png', dpi=300)
plt.show()
# # Combining classifiers via majority vote
# ## Implementing a simple majority vote classifier
np.argmax(np.bincount([0, 0, 1],
weights=[0.2, 0.2, 0.6]))
ex = np.array([[0.9, 0.1],
[0.8, 0.2],
[0.4, 0.6]])
p = np.average(ex,
axis=0,
weights=[0.2, 0.2, 0.6])
p
np.argmax(p)
class MajorityVoteClassifier(BaseEstimator,
ClassifierMixin):
""" A majority vote ensemble classifier
Parameters
----------
classifiers : array-like, shape = [n_classifiers]
Different classifiers for the ensemble
vote : str, {'classlabel', 'probability'} (default='label')
If 'classlabel' the prediction is based on the argmax of
class labels. Else if 'probability', the argmax of
the sum of probabilities is used to predict the class label
(recommended for calibrated classifiers).
weights : array-like, shape = [n_classifiers], optional (default=None)
If a list of `int` or `float` values are provided, the classifiers
are weighted by importance; Uses uniform weights if `weights=None`.
"""
def __init__(self, classifiers, vote='classlabel', weights=None):
self.classifiers = classifiers
self.named_classifiers = {key: value for key, value
in _name_estimators(classifiers)}
self.vote = vote
self.weights = weights
def fit(self, X, y):
""" Fit classifiers.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Matrix of training samples.
y : array-like, shape = [n_samples]
Vector of target class labels.
Returns
-------
self : object
"""
if self.vote not in ('probability', 'classlabel'):
raise ValueError("vote must be 'probability' or 'classlabel'"
"; got (vote=%r)"
% self.vote)
if self.weights and len(self.weights) != len(self.classifiers):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d classifiers'
% (len(self.weights), len(self.classifiers)))
# Use LabelEncoder to ensure class labels start with 0, which
# is important for np.argmax call in self.predict
self.lablenc_ = LabelEncoder()
self.lablenc_.fit(y)
self.classes_ = self.lablenc_.classes_
self.classifiers_ = []
for clf in self.classifiers:
fitted_clf = clone(clf).fit(X, self.lablenc_.transform(y))
self.classifiers_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Matrix of training samples.
Returns
----------
maj_vote : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.vote == 'probability':
maj_vote = np.argmax(self.predict_proba(X), axis=1)
else: # 'classlabel' vote
# Collect results from clf.predict calls
predictions = np.asarray([clf.predict(X)
for clf in self.classifiers_]).T
maj_vote = np.apply_along_axis(
lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj_vote = self.lablenc_.inverse_transform(maj_vote)
return maj_vote
def predict_proba(self, X):
""" Predict class probabilities for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg_proba : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
probas = np.asarray([clf.predict_proba(X)
for clf in self.classifiers_])
avg_proba = np.average(probas, axis=0, weights=self.weights)
return avg_proba
def get_params(self, deep=True):
""" Get classifier parameter names for GridSearch"""
if not deep:
return super(MajorityVoteClassifier, self).get_params(deep=False)
else:
out = self.named_classifiers.copy()
for name, step in six.iteritems(self.named_classifiers):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
# ## Using the majority voting principle to make predictions
iris = datasets.load_iris()
X, y = iris.data[50:, [1, 2]], iris.target[50:]
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.5,
random_state=1,
stratify=y)
clf1 = LogisticRegression(penalty='l2',
C=0.001,
random_state=1)
clf2 = DecisionTreeClassifier(max_depth=1,
criterion='entropy',
random_state=0)
clf3 = KNeighborsClassifier(n_neighbors=1,
p=2,
metric='minkowski')
pipe1 = Pipeline([['sc', StandardScaler()],
['clf', clf1]])
pipe3 = Pipeline([['sc', StandardScaler()],
['clf', clf3]])
clf_labels = ['Logistic regression', 'Decision tree', 'KNN']
print('10-fold cross validation:\n')
for clf, label in zip([pipe1, clf2, pipe3], clf_labels):
scores = cross_val_score(estimator=clf,
X=X_train,
y=y_train,
cv=10,
scoring='roc_auc')
print("ROC AUC: %0.2f (+/- %0.2f) [%s]"
% (scores.mean(), scores.std(), label))
# Majority Rule (hard) Voting
mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])
clf_labels += ['Majority voting']
all_clf = [pipe1, clf2, pipe3, mv_clf]
for clf, label in zip(all_clf, clf_labels):
scores = cross_val_score(estimator=clf,
X=X_train,
y=y_train,
cv=10,
scoring='roc_auc')
print("ROC AUC: %0.2f (+/- %0.2f) [%s]"
% (scores.mean(), scores.std(), label))
# # Evaluating and tuning the ensemble classifier
colors = ['black', 'orange', 'blue', 'green']
linestyles = [':', '--', '-.', '-']
for clf, label, clr, ls in zip(all_clf,
clf_labels, colors, linestyles):
# assuming the label of the positive class is 1
y_pred = clf.fit(X_train,
y_train).predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_true=y_test,
y_score=y_pred)
roc_auc = auc(x=fpr, y=tpr)
plt.plot(fpr, tpr,
color=clr,
linestyle=ls,
label='%s (auc = %0.2f)' % (label, roc_auc))
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1],
linestyle='--',
color='gray',
linewidth=2)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.grid(alpha=0.5)
plt.xlabel('False positive rate (FPR)')
plt.ylabel('True positive rate (TPR)')
#plt.savefig('images/07_04', dpi=300)
plt.show()
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
all_clf = [pipe1, clf2, pipe3, mv_clf]
x_min = X_train_std[:, 0].min() - 1
x_max = X_train_std[:, 0].max() + 1
y_min = X_train_std[:, 1].min() - 1
y_max = X_train_std[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(nrows=2, ncols=2,
sharex='col',
sharey='row',
figsize=(7, 5))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
all_clf, clf_labels):
clf.fit(X_train_std, y_train)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.3)
axarr[idx[0], idx[1]].scatter(X_train_std[y_train==0, 0],
X_train_std[y_train==0, 1],
c='blue',
marker='^',
s=50)
axarr[idx[0], idx[1]].scatter(X_train_std[y_train==1, 0],
X_train_std[y_train==1, 1],
c='green',
marker='o',
s=50)
axarr[idx[0], idx[1]].set_title(tt)
plt.text(-3.5, -5.,
s='Sepal width [standardized]',
ha='center', va='center', fontsize=12)
plt.text(-12.5, 4.5,
s='Petal length [standardized]',
ha='center', va='center',
fontsize=12, rotation=90)
#plt.savefig('images/07_05', dpi=300)
plt.show()
mv_clf.get_params()
params = {'decisiontreeclassifier__max_depth': [1, 2],
'pipeline-1__clf__C': [0.001, 0.1, 100.0]}
grid = GridSearchCV(estimator=mv_clf,
param_grid=params,
cv=10,
scoring='roc_auc')
grid.fit(X_train, y_train)
for r, _ in enumerate(grid.cv_results_['mean_test_score']):
print("%0.3f +/- %0.2f %r"
% (grid.cv_results_['mean_test_score'][r],
grid.cv_results_['std_test_score'][r] / 2.0,
grid.cv_results_['params'][r]))
print('Best parameters: %s' % grid.best_params_)
print('Accuracy: %.2f' % grid.best_score_)
# **Note**
# By default, the setting for `refit` in `GridSearchCV` is `True` (i.e., `GridSearchCV(..., refit=True)`), which means that we can use the fitted `GridSearchCV` estimator to make predictions via the `predict` method, for example:
#
# grid = GridSearchCV(estimator=mv_clf,
# param_grid=params,
# cv=10,
# scoring='roc_auc')
# grid.fit(X_train, y_train)
# y_pred = grid.predict(X_test)
#
# In addition, the "best" estimator can directly be accessed via the `best_estimator_` attribute.
grid.best_estimator_.classifiers
mv_clf = grid.best_estimator_
mv_clf.set_params(**grid.best_estimator_.get_params())
mv_clf
# # Bagging -- Building an ensemble of classifiers from bootstrap samples
# ## Bagging in a nutshell
# ## Applying bagging to classify samples in the Wine dataset
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/wine/wine.data',
header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue', 'OD280/OD315 of diluted wines',
'Proline']
# if the Breast Cancer dataset is temporarily unavailable from the
# UCI machine learning repository, un-comment the following line
# of code to load the dataset from a local path:
# df_wine = pd.read_csv('wine.data', header=None)
# drop 1 class
df_wine = df_wine[df_wine['Class label'] != 1]
y = df_wine['Class label'].values
X = df_wine[['Alcohol', 'OD280/OD315 of diluted wines']].values
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2,
random_state=1,
stratify=y)
tree = DecisionTreeClassifier(criterion='entropy',
max_depth=None,
random_state=1)
bag = BaggingClassifier(base_estimator=tree,
n_estimators=500,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
n_jobs=1,
random_state=1)
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print('Decision tree train/test accuracies %.3f/%.3f'
% (tree_train, tree_test))
bag = bag.fit(X_train, y_train)
y_train_pred = bag.predict(X_train)
y_test_pred = bag.predict(X_test)
bag_train = accuracy_score(y_train, y_train_pred)
bag_test = accuracy_score(y_test, y_test_pred)
print('Bagging train/test accuracies %.3f/%.3f'
% (bag_train, bag_test))
x_min = X_train[:, 0].min() - 1
x_max = X_train[:, 0].max() + 1
y_min = X_train[:, 1].min() - 1
y_max = X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(nrows=1, ncols=2,
sharex='col',
sharey='row',
figsize=(8, 3))
for idx, clf, tt in zip([0, 1],
[tree, bag],
['Decision tree', 'Bagging']):
clf.fit(X_train, y_train)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx].contourf(xx, yy, Z, alpha=0.3)
axarr[idx].scatter(X_train[y_train == 0, 0],
X_train[y_train == 0, 1],
c='blue', marker='^')
axarr[idx].scatter(X_train[y_train == 1, 0],
X_train[y_train == 1, 1],
c='green', marker='o')
axarr[idx].set_title(tt)
axarr[0].set_ylabel('Alcohol', fontsize=12)
plt.text(10.2, -0.5,
s='OD280/OD315 of diluted wines',
ha='center', va='center', fontsize=12)
plt.tight_layout()
#plt.savefig('images/07_08.png', dpi=300, bbox_inches='tight')
plt.show()
# # Leveraging weak learners via adaptive boosting
# ## How boosting works
# ## Applying AdaBoost using scikit-learn
tree = DecisionTreeClassifier(criterion='entropy',
max_depth=1,
random_state=1)
ada = AdaBoostClassifier(base_estimator=tree,
n_estimators=500,
learning_rate=0.1,
random_state=1)
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print('Decision tree train/test accuracies %.3f/%.3f'
% (tree_train, tree_test))
ada = ada.fit(X_train, y_train)
y_train_pred = ada.predict(X_train)
y_test_pred = ada.predict(X_test)
ada_train = accuracy_score(y_train, y_train_pred)
ada_test = accuracy_score(y_test, y_test_pred)
print('AdaBoost train/test accuracies %.3f/%.3f'
% (ada_train, ada_test))
x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(1, 2, sharex='col', sharey='row', figsize=(8, 3))
for idx, clf, tt in zip([0, 1],
[tree, ada],
['Decision tree', 'AdaBoost']):
clf.fit(X_train, y_train)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx].contourf(xx, yy, Z, alpha=0.3)
axarr[idx].scatter(X_train[y_train == 0, 0],
X_train[y_train == 0, 1],
c='blue', marker='^')
axarr[idx].scatter(X_train[y_train == 1, 0],
X_train[y_train == 1, 1],
c='green', marker='o')
axarr[idx].set_title(tt)
axarr[0].set_ylabel('Alcohol', fontsize=12)
plt.text(10.2, -0.5,
s='OD280/OD315 of diluted wines',
ha='center', va='center', fontsize=12)
plt.tight_layout()
#plt.savefig('images/07_11.png', dpi=300, bbox_inches='tight')
plt.show()
# # Summary
# ...
# ---
#
# Readers may ignore the next cell.
| [
"[email protected]"
] | |
21a08b2221328a34991a9d6b79744c027a46f1f1 | 69580624a23ab1e6a9d90d957ae08ed951f96d3b | /coolapp/templates/compiled/macros/apptools.py | b846f6aeb6094c6ad51d7952513a9f9a23dc740e | [] | no_license | sgammon/apptools-sample | 6e9254e712a6e82b52247f6451d2e388db9ceb9b | 5016841ff63536d29b1eb2dd5c84d611b969da21 | refs/heads/master | 2021-01-23T07:34:17.017619 | 2013-11-21T18:49:48 | 2013-11-21T18:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,993 | py | from __future__ import division
from jinja2 import environment
from jinja2.runtime import LoopContext, TemplateReference, Macro, Markup, TemplateRuntimeError, missing, concat, escape, markup_join, unicode_join, to_string, identity, TemplateNotFound
def run(environment):
name = '/source/macros/apptools.html'
def root(context, environment=environment):
t_1 = environment.filters['json']
t_2 = environment.filters['safe']
if 0: yield None
def macro(l_page, l_transport, l_security):
t_3 = []
l_sys = context.resolve('sys')
l_handler = context.resolve('handler')
l_util = context.resolve('util')
l_api = context.resolve('api')
l_authorized = context.resolve('authorized')
l_oauth = context.resolve('oauth')
l_null = context.resolve('null')
l_channel = context.resolve('channel')
pass
t_3.append(
to_string(t_2(t_1({'platform': {'name': environment.getattr(environment.getattr(environment.getattr(l_util, 'config'), 'project'), 'name'), 'version': environment.getattr(l_sys, 'version'), 'origindc': environment.getattr(l_util, 'datacenter'), 'instance': environment.getattr(l_util, 'instance'), 'debug': (True if environment.getattr(environment.getattr(l_util, 'config'), 'debug') else False)}, 'debug': {'logging': (environment.getattr(environment.getattr(l_util, 'config'), 'debug') or context.call(environment.getattr(environment.getattr(l_api, 'users'), 'is_current_user_admin'))), 'eventlog': environment.getattr(environment.getattr(l_util, 'config'), 'debug'), 'verbose': environment.getattr(environment.getattr(l_util, 'config'), 'debug'), 'strict': False}, 'push': ({'enabled': (True if environment.getattr(l_channel, 'token') else False), 'token': (environment.getattr(l_channel, 'token') if environment.getattr(l_channel, 'token') else l_null), 'timeout': ((environment.getattr(environment.getattr(l_channel, '_TTL'), 'seconds') / 60) if l_channel else l_null)} if l_channel else {}), 'user': ({'email': l_null, 'is_user_admin': l_null, 'nickname': l_null} if environment.getattr(l_security, 'current_user') != None else False), 'session': ({'blob': context.call(environment.getattr(environment.getattr(l_handler, 'session'), '_encode_session'))} if environment.getattr(l_handler, 'session') else {}), 'media': ({'key': environment.getattr(environment.getattr(l_page, 'media'), 'key'), 'ref': environment.getattr(environment.getattr(l_page, 'media'), 'ref'), 'name': environment.getattr(environment.getattr(l_page, 'media'), 'name')} if environment.getattr(l_page, 'media') else {'ref': None}), 'oauth': (({'id': environment.getattr(l_oauth, 'fbid')} if l_authorized else {'redirect': environment.getattr(l_oauth, 'redirect'), 'mode': environment.getattr(l_oauth, 'mode')}) if l_oauth else {}), 'services': {'endpoint': context.call(environment.getattr('://', 'join'), [(('https' if environment.getattr(l_handler, 'force_https') else False) or environment.getattr(environment.getattr(l_handler, 'request'), 'scheme')), environment.getattr(environment.getattr(l_transport, 'services'), 'endpoint')]), 'consumer': environment.getattr(environment.getattr(l_transport, 'services'), 'consumer'), 'scope': environment.getattr(environment.getattr(l_transport, 'services'), 'scope'), 'apis': context.call(environment.getattr(environment.getattr(l_transport, 'services'), 'make_object'), environment.getattr(l_page, 'services'))}}))),
)
return concat(t_3)
context.exported_vars.add('build_native_page_object')
context.vars['build_native_page_object'] = l_build_native_page_object = Macro(environment, macro, 'build_native_page_object', ('page', 'transport', 'security'), (), False, False, False)
def macro(l_services, l_config, l_page):
t_4 = []
l_null = context.resolve('null')
pass
t_4.append(
u'$(document).ready(function (){\n\n\t',
)
for event in context.blocks['platform_statement'][0](context):
t_4.append(event)
t_4.append(
u'\n\n\t',
)
if l_services != l_null:
pass
t_4.append(
u'\n\t$.apptools.api.rpc.factory([',
)
l_action = l_cfg = l_opts = l_service = missing
l_util = context.resolve('util')
l_enumerate = context.resolve('enumerate')
for (l_service, l_action, l_cfg, l_opts), l_loop in LoopContext(l_services):
pass
t_4.extend((
u"{\n\t\t\t\tname: '",
to_string(l_service),
u"',\n\t\t\t\tbase_uri: '",
to_string(l_action),
u"',\n\t\t\t\tmethods: [",
))
t_5 = l_loop
l_i = l_method = missing
l_len = context.resolve('len')
for (l_i, l_method) in context.call(l_enumerate, environment.getattr(l_cfg, 'methods')):
pass
t_4.extend((
u"'",
to_string(l_method),
u"'",
))
if l_i != (context.call(l_len, environment.getattr(l_cfg, 'methods')) - 1):
pass
t_4.append(
u',',
)
l_loop = t_5
l_i = l_method = missing
t_4.extend((
u'],\n\t\t\t\tconfig: ',
to_string(t_2(context.call(environment.getattr(environment.getattr(environment.getattr(l_util, 'converters'), 'json'), 'dumps'), l_opts))),
u'\n\t\t\t}',
))
if (not environment.getattr(l_loop, 'last')):
pass
t_4.append(
u',',
)
l_action = l_cfg = l_opts = l_service = missing
t_4.append(
u']);\n\t',
)
t_4.append(
u'\n\n\t',
)
if environment.getattr(l_page, 'open_channel'):
pass
t_4.append(
u'\n\t',
)
if environment.getattr(l_page, 'channel_token'):
pass
t_4.extend((
u'\n\t\t$.apptools.push.channel.establish("',
to_string(environment.getattr(l_page, 'channel_token')),
u'").listen();\n\t',
))
t_4.append(
u'\n\t',
)
t_4.append(
u'\n\n\t',
)
for event in context.blocks['userobj'][0](context):
t_4.append(event)
t_4.append(
u"\n\n\t$.apptools.events.trigger('API_READY');\n\n});",
)
return concat(t_4)
context.exported_vars.add('build_page_object')
context.vars['build_page_object'] = l_build_page_object = Macro(environment, macro, 'build_page_object', ('services', 'config', 'page'), (), False, False, False)
def block_platform_statement(context, environment=environment):
l_util = context.resolve('util')
l_sys = context.resolve('sys')
l_api = context.resolve('api')
if 0: yield None
yield u"\n\t\t$.apptools.sys.platform = {\n\t\t\tname: '%s', version: '%s', origindc: '%s', instance: '%s'," % (
environment.getattr(environment.getattr(environment.getattr(l_util, 'config'), 'project'), 'name'),
environment.getattr(l_sys, 'version'),
environment.getattr(environment.getattr(l_util, 'appengine'), 'datacenter'),
environment.getattr(environment.getattr(l_util, 'appengine'), 'instance'),
)
if context.call(environment.getattr(environment.getattr(l_api, 'users'), 'is_current_user_admin')):
if 0: yield None
yield u'debug: '
l_off = context.resolve('off')
if 0: yield None
t_6 = context.eval_ctx.save()
context.eval_ctx.autoescape = l_off
yield (context.eval_ctx.autoescape and escape or to_string)(context.call(environment.getattr(environment.getattr(environment.getattr(l_util, 'converters'), 'json'), 'dumps'), environment.getattr(environment.getattr(l_util, 'config'), 'debug')))
context.eval_ctx.revert(t_6)
yield u'};'
if (environment.getattr(environment.getattr(l_util, 'config'), 'debug') or context.call(environment.getattr(environment.getattr(l_api, 'users'), 'is_current_user_admin'))):
if 0: yield None
yield u'$.apptools.dev.setDebug({logging: true, eventlog: true, verbose: true});'
else:
if 0: yield None
yield u'$.apptools.dev.setDebug({logging: false, eventlog: false, verbose: false});'
def block_userobj(context, environment=environment):
l_util = context.resolve('util')
l_api = context.resolve('api')
l_userapi = context.resolve('userapi')
l_userobj = context.resolve('userobj')
if 0: yield None
if l_userapi != None:
if 0: yield None
yield u'$.apptools.user.setUserInfo({'
if context.call(environment.getattr(environment.getattr(l_api, 'users'), 'get_current_user')) != None:
if 0: yield None
l_userobj = context.call(environment.getattr(environment.getattr(l_api, 'users'), 'get_current_user'))
yield u'current_user: {\n\t\t\t\t\t\tnickname: "%s",\n\t\t\t\t\t\temail: "%s"\n\t\t\t\t\t},\n\t\t\t\t\tis_user_admin: %s' % (
context.call(environment.getattr(l_userobj, 'nickname')),
context.call(environment.getattr(l_userobj, 'email')),
context.call(environment.getattr(environment.getattr(environment.getattr(l_util, 'converters'), 'json'), 'dumps'), context.call(environment.getattr(environment.getattr(l_api, 'users'), 'is_current_user_admin'))),
)
else:
if 0: yield None
yield u'current_user: null,\n\t\t\t\t\tis_user_admin: false'
yield u'});'
blocks = {'platform_statement': block_platform_statement, 'userobj': block_userobj}
debug_info = '1=11&55=23&58=28&62=35&76=40&78=48&80=52&81=54&82=60&83=76&84=79&89=91&90=96&91=100&95=109&62=118&64=124&65=129&66=136&69=139&95=146&96=152&100=155&101=157&103=159&104=160&106=161'
return locals() | [
"[email protected]"
] | |
216d04773776fc81e9f52786b3e8e6d88651197d | f9f019da9bb01be7e35887082747c4c468a73809 | /enarksh/logger/message/LogFileMessage.py | 5bb11200fb079bca1d1f5fb33a80288aceab3cb2 | [
"MIT"
] | permissive | SetBased/py-enarksh | 63df38ce8622b6b22c0115bbe28630b26de042b7 | ec0c33cdae4a0afeea37928743abd744ef291a9f | refs/heads/master | 2020-04-11T00:14:33.628135 | 2020-01-24T06:52:17 | 2020-01-24T06:52:17 | 60,259,451 | 3 | 2 | null | 2016-08-02T14:42:21 | 2016-06-02T11:43:47 | Python | UTF-8 | Python | false | false | 2,299 | py | """
Enarksh
Copyright 2013-2016 Set Based IT Consultancy
Licence MIT
"""
from enarksh.message.Message import Message
class LogFileMessage(Message):
"""
    Message type for notifying the logger that a log file is available to be stored in the database.
"""
MESSAGE_TYPE = 'logger:LogFileMessage'
"""
The message type.
:type: str
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, rnd_id, name, total_size, filename1, filename2):
"""
Object constructor.
:param int rnd_id: The ID of the run node.
        :param str name: The name of the output:
- 'out' for stdout
- 'err' for stderr
:param int total_size: The total size in bytes of the log.
:param str|None filename1: The name of the file where the first chunk of the log is stored.
:param str|None filename2: The name of the file where the last chunk of the log is stored.
"""
Message.__init__(self, LogFileMessage.MESSAGE_TYPE)
self.rnd_id = rnd_id
"""
The ID of the run node.
:type: int
"""
self.name = name
"""
        The name of the output:
- 'out' for stdout
- 'err' for stderr
:type: str
"""
self.total_size = total_size
"""
The total size in bytes of the log.
:type: int
"""
self.filename1 = filename1
"""
The name of the file where the first chunk of the log is stored.
:type: str
"""
self.filename2 = filename2
"""
The name of the file where the last chunk of the log is stored.
:type: str
"""
# ------------------------------------------------------------------------------------------------------------------
def send_message(self, end_point):
"""
Sends the message to an end point.
:param str end_point: The end point.
:rtype: None
"""
self.message_controller.send_message(end_point, self)
# ----------------------------------------------------------------------------------------------------------------------
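
# Usage sketch (illustrative; the run node id, file names and end point are assumptions,
# and a message controller must be attached before send_message() will do anything):
#
#     message = LogFileMessage(rnd_id=123,
#                              name='out',
#                              total_size=2048,
#                              filename1='/tmp/enarksh/123.out.1.log',
#                              filename2=None)
#     message.send_message('tcp://127.0.0.1:7771')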
| [
"[email protected]"
] | |
11decc9e417e1fab8528f8ed17648dc30b41d0c2 | 98be41d34ca238e040408017e1a2af8cfd71a419 | /command_line/aimless_absorption_map.py | 5bb462cc2b80aba1f78ccb44a5aee9b3ce5329e1 | [] | no_license | hainm/xia2 | 338a834fd40aa0a684e4833d34244d6d8f6e0417 | a5ae68c731577f14b8400404e883029d2147c548 | refs/heads/master | 2021-01-24T15:24:44.917551 | 2016-03-10T09:47:27 | 2016-03-10T09:47:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | # LIBTBX_SET_DISPATCHER_NAME dev.xia2.aimless_absorption_map
from __future__ import division
def main(log, png):
from xia2.Toolkit.AimlessSurface import evaluate_1degree, scrape_coefficients
evaluate_1degree(scrape_coefficients(log), png)
return
if __name__ == '__main__':
import sys
main(sys.argv[1], sys.argv[2])
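# Usage sketch (file names are illustrative): pass the aimless log to read and
# the PNG file to write, e.g.
#   dev.xia2.aimless_absorption_map aimless.log absorption_map.png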
| [
"[email protected]"
] | |
0eb6a2d29b81069f9b412a45ae5e72d5688176c9 | 154ec3de1efcf3c97d154ac2ed0c7cd1c9a25040 | /tests/h/services/delete_user_test.py | 3c3d768d91bc479bf86e39a9e1d7dd833ea1695b | [
"BSD-3-Clause",
"BSD-2-Clause-Views",
"BSD-2-Clause"
] | permissive | Manuelinux/kubeh | 98a9c5c0a98be67c3583dd222bd74046cd5ee484 | a549f0d1c09619843290f9b78bce7668ed90853a | refs/heads/master | 2023-03-16T00:51:43.318292 | 2021-09-17T03:33:14 | 2021-09-17T03:33:14 | 211,371,455 | 0 | 0 | BSD-2-Clause | 2023-03-03T07:20:50 | 2019-09-27T17:37:10 | Python | UTF-8 | Python | false | false | 3,514 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import mock
import pytest
import sqlalchemy
from h.models import Annotation, Document
from h.services.delete_user import delete_user_service_factory
from h.services.annotation_delete import AnnotationDeleteService
@pytest.mark.usefixtures("annotation_delete_service")
class TestDeleteUserService:
def test_delete_disassociate_group_memberships(self, factories, svc):
user = factories.User()
svc.delete(user)
assert user.groups == []
def test_delete_deletes_annotations(
self, factories, pyramid_request, svc, annotation_delete_service
):
user = factories.User(username="bob")
anns = [
factories.Annotation(userid=user.userid),
factories.Annotation(userid=user.userid),
]
svc.delete(user)
annotation_delete_service.delete.assert_has_calls(
[mock.call(anns[0]), mock.call(anns[1])], any_order=True
)
def test_delete_deletes_user(self, db_session, factories, pyramid_request, svc):
user = factories.User()
svc.delete(user)
assert user in db_session.deleted
def test_delete_user_removes_groups_if_no_collaborators(
self, db_session, group_with_two_users, pyramid_request, svc
):
pyramid_request.db = db_session
(group, creator, member, creator_ann, member_ann) = group_with_two_users
db_session.delete(member_ann)
svc.delete(creator)
assert sqlalchemy.inspect(group).was_deleted
def test_creator_is_none_if_groups_have_collaborators(
self, db_session, group_with_two_users, pyramid_request, svc
):
pyramid_request.db = db_session
(group, creator, member, creator_ann, member_ann) = group_with_two_users
svc.delete(creator)
assert group.creator is None
def test_delete_user_removes_only_groups_created_by_user(
self, db_session, group_with_two_users, pyramid_request, svc
):
pyramid_request.db = db_session
(group, creator, member, creator_ann, member_ann) = group_with_two_users
svc.delete(member)
assert group not in db_session.deleted
@pytest.fixture
def svc(self, db_session, pyramid_request):
pyramid_request.db = db_session
return delete_user_service_factory({}, pyramid_request)
@pytest.fixture
def pyramid_request(pyramid_request):
pyramid_request.notify_after_commit = mock.Mock()
return pyramid_request
@pytest.fixture
def group_with_two_users(db_session, factories):
"""
Create a group with two members and an annotation created by each.
"""
creator = factories.User()
member = factories.User()
group = factories.Group(
authority=creator.authority, creator=creator, members=[creator, member]
)
doc = Document(web_uri="https://example.org")
creator_ann = Annotation(userid=creator.userid, groupid=group.pubid, document=doc)
member_ann = Annotation(userid=member.userid, groupid=group.pubid, document=doc)
db_session.add(creator_ann)
db_session.add(member_ann)
db_session.flush()
return (group, creator, member, creator_ann, member_ann)
@pytest.fixture
def annotation_delete_service(pyramid_config):
service = mock.create_autospec(
AnnotationDeleteService, spec_set=True, instance=True
)
pyramid_config.register_service(service, name="annotation_delete")
return service
| [
"[email protected]"
] | |
4cb25fcb4d4a805d548a5b8cc8fd783cbdf29274 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03378/s823001968.py | 18624fdbcc9f357534760d943dbd2474726acb00 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | N, M, X = map(int, input().split())
A = list(map(int, input().split()))
l_cost = 0
g_cost = 0
for i in A:
if i > X:
l_cost += 1
else:
g_cost += 1
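# Worked example with hypothetical input "10 3 3" and "1 5 7": two values are
# greater than X=3 (l_cost=2) and one is not (g_cost=1), so 1 is printed.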
print(min(l_cost, g_cost)) | [
"[email protected]"
] | |
eb0a7405d6d4ee0c550e9d35ccfe74c20a028799 | 648796da46791794ee5de7a8004da437c840323e | /311_calls/p2.py | 4bbbe7466034c4642042a3360d44ed80983f59db | [] | no_license | YulianaGomez/ml_pp | 86530a2ee26bb2f39117ec6a458368a5c1c74104 | 3891350e1ef6fbf2fd29a792387182601f94c250 | refs/heads/master | 2020-03-07T19:09:25.958025 | 2018-05-25T22:34:28 | 2018-05-25T22:34:28 | 127,663,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,906 | py | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
import json
import requests
import sys
"""
Homework 1: Diagnostic
Looking at 311 requests from the Chicago Open Data Portal and
census APIs for analysis of Chicago communities
author: Yuliana Zamora
Date: April 3, 2018
"""
class dataCounter():
def __init__(self):
self.child15 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
self.child16 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
self.bach15 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
self.bach16 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
self.mom15 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
self.mom16 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
def main():
####--Populating demo data--####
#Number of children on govt assistant, bachelors degrees, children in single mom homes
processed15 = glob.glob("2015.json")
processed16 = glob.glob("2016.json")
if len(processed15) > 0 and len(processed16) > 0:
json_data= open("2015.json", "r")
demo_15 = json.load(json_data)
json_data.close()
json_data= open("2016.json", "r")
demo_16 = json.load(json_data)
json_data.close()
else:
for year in range(2015,2017):
url = "https://api.census.gov/data/"+str(year)+"/acs/acs5/subject?get=NAME,S0901_C01_031E,S1501_C01_012E,S0901_C04_001E&for=zip%20code%20tabulation%20area:*"
demo_data = requests.get(url,allow_redirects=True)
file_name = str(year) +".json"
open(file_name, 'wb').write(demo_data.content)
if year == 2015:
json_data= open("2015.json", "r")
demo_15 = json.load(json_data)
json_data.close()
else:
json_data= open("2016.json", "r")
demo_16 = json.load(json_data)
json_data.close()
###--setting specific regions with their corresponding zipcodes--###
#http://chicago-zone.blogspot.com/2014/03/chicago-zip-code-map-locate-chicago.html
zip_dict = {"Far North" : [60626,60645, 60659, 60660,60640,60625,60630,60631,60656], \
"Northwest" : [60618,60634, 60641,60607,60639], \
"North" : [60618, 60613,60657, 60613,60614, 60610,60647], \
"West" :[60651, 60622,60612, 60623, 60642,60639, 60644,60624,60612,60607,60608,60616], \
"Central" : [60610,60601, 60602, 60603, 60604, 60605,60606, 60607, 60661,60616], \
"South" : [60609,60616,60653,60615,60637,60649,60608,60620,60619], \
"Southwest" :[60632,60608, 60609,60629,60638,60621,60636], \
"Far Southwest" : [60652,60620,60643,60655], \
"Far Southeast" : [60619,60617,60628,60643,60633,60827,60633,60638] }
# Create object to store the counters
datacnt = dataCounter()
#Populate data for 2015
for key, val in zip_dict.items():
for i in range(1, len(demo_15)):
zipCode = int(demo_15[i][4])
if zipCode in val:
addval=[0, 0, 0]
for j in range(1,4):
if demo_15[i][j] != None:
if j==1: addval[j-1] = float(demo_15[i][j])
else: addval[j-1] = int(demo_15[i][j])
datacnt.child15[key] += addval[0]
datacnt.bach15[key] += addval[1]
datacnt.mom15[key] += addval[2]
#Populate data for 2016
for key, val in zip_dict.items():
for i in range(1, len(demo_16)):
zipCode = int(demo_16[i][4])
if zipCode in val:
addval=[0, 0, 0]
for j in range(1,4):
if demo_16[i][j] != None:
if j==1: addval[j-1] = float(demo_16[i][j])
else: addval[j-1] = int(demo_16[i][j])
datacnt.child16[key] += addval[0]
datacnt.bach16[key] += addval[1]
datacnt.mom16[key] += addval[2]
fig, ax = plt.subplots()
N = len(datacnt.child16.keys())
ind = np.arange(N)
width = 0.35
setting='mom'
if setting == 'child':
rects1 = ax.bar(ind, datacnt.child15.values(), width)
rects2 = ax.bar(ind + width, datacnt.child16.values(), width)
elif setting == 'bach':
rects1 = ax.bar(ind, datacnt.bach15.values(), width)
rects2 = ax.bar(ind + width, datacnt.bach16.values(), width)
elif setting == 'mom':
rects1 = ax.bar(ind, datacnt.mom15.values(), width)
rects2 = ax.bar(ind + width, datacnt.mom16.values(), width)
ax.set_ylabel('Frequency')
ax.set_xlabel('Chicago Communities')
ax.set_title('Number of Children in Single mom Households in City of Chicago Community (2015-2016)')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(datacnt.mom16.keys())
ax.legend((rects1[0], rects2[0]), ('2015', '2016'))
def autolabel(rects):
"""
Attach a text label above each bar displaying its height
"""
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%d' % int(height),
ha='center', va='bottom')
#autolabel(rects1)
#autolabel(rects2)
plt.show()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
7a509387928543470805a0bff90e312d0618d154 | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/contrib/seq2seq/__init__.pyi | 257610d37bf4e559a080c9544edbf012f74b2e01 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | pyi | # Stubs for tensorflow.contrib.seq2seq (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import *
from tensorflow.contrib.seq2seq.python.ops.basic_decoder import *
from tensorflow.contrib.seq2seq.python.ops.beam_search_decoder import *
from tensorflow.contrib.seq2seq.python.ops.beam_search_ops import *
from tensorflow.contrib.seq2seq.python.ops.decoder import *
from tensorflow.contrib.seq2seq.python.ops.helper import *
from tensorflow.contrib.seq2seq.python.ops.loss import *
from tensorflow.python.util.all_util import remove_undocumented as remove_undocumented
| [
"[email protected]"
] | |
0c00c9201ad849dadeaf44789e0b9752180054d1 | b62563d791382e75f65ec9cc281882c58baa1444 | /machine/urls.py | 189042b2c8fa9fb5f6bfb2e68d4fac0d7a3963f7 | [] | no_license | moses-mugoya/Farmers-Machinery-Management-System | 4fda328f1813041c9a6ae64bf618a7eb0b23d78b | 0221c33e5b5936edee3c32baa98f486c9a4726a4 | refs/heads/master | 2020-05-19T03:59:05.148558 | 2019-05-03T20:06:04 | 2019-05-03T20:06:04 | 184,814,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from django.urls import re_path
from . import views
app_name = 'machine'
urlpatterns = [
re_path(r'^$', views.product_list, name='product_list'),
re_path(r'^(?P<category_slug>[-\w]+)/$', views.product_list, name='product_list_by_category'),
re_path(r'^(?P<id>\d+)/(?P<slug>[-\w]+)/$', views.product_detail, name='product_detail'),
]
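# Illustrative resolution (the prefix this app is mounted under depends on the
# project's root urls.py): "" calls views.product_list(request), "tractors/"
# calls views.product_list(request, category_slug='tractors'), and
# "5/tractor-x/" calls views.product_detail(request, id='5', slug='tractor-x').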
| [
"[email protected]"
] | |
24b53bcc79668b3c8a1ee6b0b5ad11b327473c3d | 842ccd98867d5549884505d18ed1bc7f53a1803e | /Random-Alan-n-Akaash-Puzzles/mystery_2.py | 6f99b554e185fe4a1750db9c0a4030df16940b37 | [] | no_license | AwesomeZaidi/Problem-Solving | dd43593c2a9f5d7ce30c7aaa2575fdd9eaa2ba1d | 28d40a46f415a41b6754378a46ab26e90a094273 | refs/heads/master | 2023-01-09T14:40:10.354981 | 2019-07-10T02:18:26 | 2019-07-10T02:18:26 | 158,969,031 | 3 | 0 | null | 2023-01-03T23:53:00 | 2018-11-24T19:56:50 | Python | UTF-8 | Python | false | false | 932 | py | import string
import sys
def puzzle(binary_input, b):
"""
    Interpret `binary_input` as a number written in base `b`.
    The characters 0-9 followed by a-z stand for the digit values 0-35.
    Args:
        binary_input (str): the digit string to convert.
        b (int): the base in which to interpret the string.
    Returns:
        int: the decimal value of `binary_input`.
"""
string_values = {char: index for index, char in enumerate(string.printable[:36])}
    # string.printable[:36] is '0123456789abcdefghijklmnopqrstuvwxyz', so each
    # character maps to its digit value 0-35.
    total = 0  # avoid shadowing the built-in sum()
    for idx, num in enumerate(binary_input[::-1]):
        print('idx:', idx, 'num:', num)
        current = string_values[num] * b**idx
        total += current
        print('current:', current)
    return total
    # Equivalent one-liner:
    # return sum(string_values[d] * b**e for e, d in enumerate(binary_input[::-1]))
if __name__ == '__main__':
# print('string.printable[:36]:', string.printable[:36])
print(puzzle(sys.argv[1], int(sys.argv[2])))
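# Worked example: "python mystery_2.py 1a 16" maps 'a' -> 10 and '1' -> 1*16,
# so after the per-digit debug output it prints 26 (the base-16 value of "1a").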
| [
"[email protected]"
] | |
ebed4b81b88149665e77ef9354a5f98dd58f2dea | 2884e44c7c8b5f1dd7405fba24549e8135605ad8 | /plastiqpublicapi/http/http_response.py | 44ce71985958c313988911f98ecf3550dcaaeec5 | [
"MIT"
] | permissive | jeffkynaston/sdk-spike-python-apimatic | d44d2464ba43c12dabe3ae3ba01ef268f73c16f3 | e1ca52116aabfcdb2f36c24ebd866cf00bb5c6c9 | refs/heads/main | 2023-07-01T15:17:50.623155 | 2021-08-05T22:45:12 | 2021-08-05T22:45:12 | 393,186,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,508 | py | # -*- coding: utf-8 -*-
"""
plastiqpublicapi
This file was automatically generated by APIMATIC v3.0 (
https://www.apimatic.io ).
"""
class HttpResponse(object):
"""Information about an HTTP Response including its status code, returned
headers, and raw body
Attributes:
status_code (int): The status code response from the server that
corresponds to this response.
reason_phrase (string): The reason phrase returned by the server.
headers (dict): A dictionary of headers (key : value) that were
returned with the response
text (string): The Raw body of the HTTP Response as a string
request (HttpRequest): The request that resulted in this response.
"""
def __init__(self,
status_code,
reason_phrase,
headers,
text,
request):
"""Constructor for the HttpResponse class
Args:
status_code (int): The response status code.
reason_phrase (string): The response reason phrase.
headers (dict): The response headers.
text (string): The raw body from the server.
request (HttpRequest): The request that resulted in this response.
"""
self.status_code = status_code
self.reason_phrase = reason_phrase
self.headers = headers
self.text = text
self.request = request
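# Hypothetical usage sketch (values are illustrative; in the generated SDK this
# object is normally built by the HTTP client from a real HttpRequest):
#
#   response = HttpResponse(status_code=200,
#                           reason_phrase='OK',
#                           headers={'Content-Type': 'application/json'},
#                           text='{"id": 1}',
#                           request=some_http_request)
#   if response.status_code == 200:
#       payload = response.text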
| [
"[email protected]"
] | |
f1f3e72278974dc7469588dff418f5e9272454b0 | 5d3fd9328cf3fab1056d79cd8464df3f1719b30e | /MG5_aMC_v2_6_7/tests/unit_tests/madspin/test_madspin.py | d573a9790c9b5cf7806115ae6b05f73d6bc1310d | [] | no_license | BKailasapathy/madgraph | c8d34147146edda1f147e8259539c0e86e6209c2 | 949fcf00f111eadf8948827e2933952b7823778d | refs/heads/master | 2023-07-15T08:38:08.382422 | 2021-08-21T09:12:23 | 2021-08-21T09:12:23 | 398,511,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,966 | py | ################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""Unit test library for the spin correlated decay routines
in the madspin directory"""
import sys
import os
import string
import shutil
pjoin = os.path.join
from subprocess import Popen, PIPE, STDOUT
root_path = os.path.split(os.path.dirname(os.path.realpath( __file__ )))[0]
sys.path.insert(0, os.path.join(root_path,'..','..'))
import tests.unit_tests as unittest
import madgraph.interface.master_interface as Cmd
import madgraph.various.banner as banner
import copy
import array
import madgraph.core.base_objects as MG
import madgraph.various.misc as misc
import MadSpin.decay as madspin
import models.import_ufo as import_ufo
from madgraph import MG5DIR
#
class TestBanner(unittest.TestCase):
"""Test class for the reading of the banner"""
def test_extract_info(self):
"""Test that the banner is read properly"""
path=pjoin(MG5DIR, 'tests', 'input_files', 'tt_banner.txt')
inputfile = open(path, 'r')
mybanner = banner.Banner(inputfile)
# mybanner.ReadBannerFromFile()
process=mybanner.get("generate")
model=mybanner.get("model")
self.assertEqual(process,"p p > t t~ @1")
self.assertEqual(model,"sm")
def test_get_final_state_particle(self):
"""test that we find the final state particles correctly"""
cmd = Cmd.MasterCmd()
cmd.do_import('sm')
fct = lambda x: cmd.get_final_part(x)
#
self.assertEqual(set([11, -11]), fct('p p > e+ e-'))
self.assertEqual(set([11, 24]), fct('p p > w+ e-'))
self.assertEqual(set([11, 24]), fct('p p > W+ e-'))
self.assertEqual(set([1, 2, 3, 4, -1, 11, 21, -4, -3, -2]), fct('p p > W+ e-, w+ > j j'))
self.assertEqual(fct('p p > t t~, (t > b w+, w+ > j j) ,t~ > b~ w-'), set([1, 2, 3, 4, -1, 21, -4, -3, -2,5,-5,-24]))
self.assertEqual(fct('e+ e- > all all, all > e+ e-'), set([-11,11]))
self.assertEqual(fct('e+ e- > j w+, j > e+ e-'), set([-11,11,24]))
def test_get_proc_with_decay_LO(self):
cmd = Cmd.MasterCmd()
cmd.do_import('sm')
# Note the ; at the end of the line is important!
#1 simple case
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~', 't> w+b', cmd._curr_model)
self.assertEqual(['generate p p > t t~, t> w+b --no_warning=duplicate;'],[out])
#2 with @0
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ @0', 't> w+b', cmd._curr_model)
self.assertEqual(['generate p p > t t~ , t> w+b @0 --no_warning=duplicate;'],[out])
#3 with @0 and --no_warning=duplicate
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ @0 --no_warning=duplicate', 't> w+b', cmd._curr_model)
self.assertEqual(['generate p p > t t~ , t> w+b @0 --no_warning=duplicate;'],[out])
#4 test with already present decay chain
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~, t > w+ b @0 --no_warning=duplicate', 't~ > w+b', cmd._curr_model)
self.assertEqual(['generate p p > t t~, t~ > w+b, ( t > w+ b , t~ > w+b) @0 --no_warning=duplicate;'],[out])
#4 test with already present decay chain
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~, t > w+ b, t~ > w- b~ @0 --no_warning=duplicate', 'w > all all', cmd._curr_model)
self.assertEqual(['generate p p > t t~, w > all all, ( t > w+ b, w > all all), ( t~ > w- b~ , w > all all) @0 --no_warning=duplicate;'],[out])
#6 case with noborn=QCD
        # This is technically not yet supported by MS, but it is nice that this function supports it.
out = madspin.decay_all_events.get_proc_with_decay('generate g g > h QED=1 [noborn=QCD]', 'h > b b~', cmd._curr_model)
self.assertEqual(['add process g g > h QED=1 [sqrvirt=QCD], h > b b~ --no_warning=duplicate;'],
[out])
# simple case but failing initial implementation. Handle it now but raising a critical message [mute here]
with misc.MuteLogger(['decay'], [60]):
out = madspin.decay_all_events.get_proc_with_decay('p p > t t~', 't~ > w- b~ QCD=99, t > w+ b QCD=99', cmd._curr_model)
self.assertEqual(['add process p p > t t~, t~ > w- b~ QCD=99, t > w+ b QCD=99 --no_warning=duplicate;'],[out])
self.assertRaises(Exception, madspin.decay_all_events.get_proc_with_decay, 'generate p p > t t~, (t> w+ b, w+ > e+ ve)')
def test_get_proc_with_decay_NLO(self):
cmd = Cmd.MasterCmd()
cmd.do_import('sm')
#1 simple case
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ [QCD]', 't> w+b', cmd._curr_model)
self.assertEqual(['add process p p > t t~, t> w+b --no_warning=duplicate',
'define pert_QCD = -4 -3 -2 -1 1 2 3 4 21',
'add process p p > t t~ pert_QCD, t> w+b --no_warning=duplicate'],
out.split(';')[:-1])
#2 simple case with QED=1
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ QED=1 [QCD]', 't> w+b', cmd._curr_model)
self.assertEqual(['add process p p > t t~ QED=1, t> w+b --no_warning=duplicate',
'define pert_QCD = -4 -3 -2 -1 1 2 3 4 21',
'add process p p > t t~ pert_QCD QED=1, t> w+b --no_warning=duplicate'],
out.split(';')[:-1])
#3 simple case with options
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ QED=1 [QCD] --test', 't> w+b', cmd._curr_model)
self.assertEqual(['add process p p > t t~ QED=1, t> w+b --no_warning=duplicate --test',
'define pert_QCD = -4 -3 -2 -1 1 2 3 4 21',
'add process p p > t t~ pert_QCD QED=1, t> w+b --no_warning=duplicate --test'],
out.split(';')[:-1])
#4 case with LOonly
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ QED=1 [LOonly]', 't> w+b', cmd._curr_model)
self.assertEqual(['add process p p > t t~ QED=1, t> w+b --no_warning=duplicate'],
out.split(';')[:-1])
#5 case with LOonly=QCD
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ QED=1 [LOonly=QCD]', 't> w+b', cmd._curr_model)
self.assertEqual(['add process p p > t t~ QED=1, t> w+b --no_warning=duplicate'],
out.split(';')[:-1])
#5 case with LOonly=QCD
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ QED=1 [LOonly=QCD,QED]', 't> w+b', cmd._curr_model)
self.assertEqual(['add process p p > t t~ QED=1, t> w+b --no_warning=duplicate'],
out.split(';')[:-1])
#5 case with LOonly=QCD
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ QED=1 [LOonly=QCD QED]', 't> w+b', cmd._curr_model)
self.assertEqual(['add process p p > t t~ QED=1, t> w+b --no_warning=duplicate'],
out.split(';')[:-1])
#6 case with all=QCD
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ QED=1 [all=QCD]', 't> w+b', cmd._curr_model)
self.assertEqual(['add process p p > t t~ QED=1, t> w+b --no_warning=duplicate',
'define pert_QCD = -4 -3 -2 -1 1 2 3 4 21',
'add process p p > t t~ pert_QCD QED=1, t> w+b --no_warning=duplicate'],
out.split(';')[:-1])
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ QED=1 [ all= QCD]', 't> w+b', cmd._curr_model)
self.assertEqual(['add process p p > t t~ QED=1, t> w+b --no_warning=duplicate',
'define pert_QCD = -4 -3 -2 -1 1 2 3 4 21',
'add process p p > t t~ pert_QCD QED=1, t> w+b --no_warning=duplicate'],
out.split(';')[:-1])
#6 case with virt=QCD, technically not valid but I like that the function can do it
out = madspin.decay_all_events.get_proc_with_decay('generate p p > t t~ QED=1 [virt=QCD]', 't> w+b', cmd._curr_model)
self.assertEqual(['add process p p > t t~ QED=1 [virt=QCD], t> w+b --no_warning=duplicate'],
out.split(';')[:-1])
class TestEvent(unittest.TestCase):
"""Test class for the reading of the lhe input file"""
def test_madspin_event(self):
"""check the reading/writting of the events inside MadSpin"""
inputfile = open(pjoin(MG5DIR, 'tests', 'input_files', 'madspin_event.lhe'))
events = madspin.Event(inputfile)
# First event
event = events.get_next_event()
self.assertEqual(event, 1)
event = events
self.assertEqual(event.string_event_compact(), """21 0.0 0.0 586.83954 586.84002 0.750577236977
21 0.0 0.0 -182.0876 182.08914 0.748887294316
6 197.60403 48.424858 76.818601 277.88922 173.00000459
-6 -212.77359 -34.669345 359.45458 453.44366 172.999981581
21 15.169561 -13.755513 -31.521232 37.59628 0.749989476383
""")
self.assertEqual(event.get_tag(), (((21, 21), (-6, 6, 21)), [[21, 21], [6, -6, 21]]))
event.assign_scale_line("8 3 0.1 125 0.1 0.3")
event.change_wgt(factor=0.4)
self.assertEqual(event.string_event().split('\n'), """<event>
8 3 +4.0000000e-02 1.25000000e+02 1.00000000e-01 3.00000000e-01
21 -1 0 0 503 502 +0.00000000000e+00 +0.00000000000e+00 +5.86839540000e+02 5.86840020000e+02 7.50000000000e-01 0.0000e+00 0.0000e+00
21 -1 0 0 501 503 +0.00000000000e+00 +0.00000000000e+00 -1.82087600000e+02 1.82089140000e+02 7.50000000000e-01 0.0000e+00 0.0000e+00
6 1 1 2 504 0 +1.97604030000e+02 +4.84248580000e+01 +7.68186010000e+01 2.77889220000e+02 1.73000000000e+02 0.0000e+00 0.0000e+00
-6 1 1 2 0 502 -2.12773590000e+02 -3.46693450000e+01 +3.59454580000e+02 4.53443660000e+02 1.73000000000e+02 0.0000e+00 0.0000e+00
21 1 1 2 501 504 +1.51695610000e+01 -1.37555130000e+01 -3.15212320000e+01 3.75962800000e+01 7.50000000000e-01 0.0000e+00 0.0000e+00
#aMCatNLO 2 5 3 3 1 0.45933500E+02 0.45933500E+02 9 0 0 0.99999999E+00 0.69338413E+00 0.14872513E+01 0.00000000E+00 0.00000000E+00
<rwgt>
<wgt id='1001'> +1.2946800e+02 </wgt>
<wgt id='1002'> +1.1581600e+02 </wgt>
<wgt id='1003'> +1.4560400e+02 </wgt>
<wgt id='1004'> +1.0034800e+02 </wgt>
<wgt id='1005'> +8.9768000e+01 </wgt>
<wgt id='1006'> +1.1285600e+02 </wgt>
<wgt id='1007'> +1.7120800e+02 </wgt>
<wgt id='1008'> +1.5316000e+02 </wgt>
<wgt id='1009'> +1.9254800e+02 </wgt>
</rwgt>
</event>
""".split('\n'))
# Second event
event = events.get_next_event()
self.assertEqual(event, 1)
event =events
self.assertEqual(event.get_tag(), (((21, 21), (-6, 6, 21)), [[21, 21], [6, 21, -6]]))
self.assertEqual(event.string_event().split('\n'), """<event>
5 66 +3.2366351e+02 4.39615290e+02 7.54677160e-03 1.02860750e-01
21 -1 0 0 503 502 +0.00000000000e+00 +0.00000000000e+00 +1.20582240000e+03 1.20582260000e+03 7.50000000000e-01 0.0000e+00 0.0000e+00
21 -1 0 0 501 503 +0.00000000000e+00 +0.00000000000e+00 -5.46836110000e+01 5.46887540000e+01 7.50000000000e-01 0.0000e+00 0.0000e+00
6 1 1 2 501 0 -4.03786550000e+01 -1.41924320000e+02 +3.66089980000e+02 4.30956860000e+02 1.73000000000e+02 0.0000e+00 0.0000e+00
21 1 1 2 504 502 -2.46716450000e+01 +3.98371210000e+01 +2.49924260000e+02 2.54280130000e+02 7.50000000000e-01 0.0000e+00 0.0000e+00
-6 1 1 2 0 504 +6.50503000000e+01 +1.02087200000e+02 +5.35124510000e+02 5.75274350000e+02 1.73000000000e+02 0.0000e+00 0.0000e+00
#aMCatNLO 2 5 4 4 4 0.40498390E+02 0.40498390E+02 9 0 0 0.99999997E+00 0.68201705E+00 0.15135239E+01 0.00000000E+00 0.00000000E+00
<mgrwgt>
some information
<scale> even more infor
</mgrwgt>
<clustering>
blabla
</clustering>
<rwgt>
<wgt id='1001'> 0.32367e+03 </wgt>
<wgt id='1002'> 0.28621e+03 </wgt>
<wgt id='1003'> 0.36822e+03 </wgt>
<wgt id='1004'> 0.24963e+03 </wgt>
<wgt id='1005'> 0.22075e+03 </wgt>
<wgt id='1006'> 0.28400e+03 </wgt>
<wgt id='1007'> 0.43059e+03 </wgt>
<wgt id='1008'> 0.38076e+03 </wgt>
<wgt id='1009'> 0.48987e+03 </wgt>
</rwgt>
</event>
""".split('\n'))
# Third event ! Not existing
event = events.get_next_event()
self.assertEqual(event, "no_event")
#class Testtopo(unittest.TestCase):
# """Test the extraction of the topologies for the undecayed process"""
#
# def test_topottx(self):
#
# os.environ['GFORTRAN_UNBUFFERED_ALL']='y'
# path_for_me=pjoin(MG5DIR, 'tests','unit_tests','madspin')
# shutil.copyfile(pjoin(MG5DIR, 'tests','input_files','param_card_sm.dat'),\
# pjoin(path_for_me,'param_card.dat'))
# curr_dir=os.getcwd()
# os.chdir('/tmp')
# temp_dir=os.getcwd()
# mgcmd=Cmd.MasterCmd()
# process_prod=" g g > t t~ "
# process_full=process_prod+", ( t > b w+ , w+ > mu+ vm ), "
# process_full+="( t~ > b~ w- , w- > mu- vm~ ) "
# decay_tools=madspin.decay_misc()
# topo=decay_tools.generate_fortran_me([process_prod],"sm",0, mgcmd, path_for_me)
# decay_tools.generate_fortran_me([process_full],"sm", 1,mgcmd, path_for_me)
#
# prod_name=decay_tools.compile_fortran_me_production(path_for_me)
# decay_name = decay_tools.compile_fortran_me_full(path_for_me)
#
#
# topo_test={1: {'branchings': [{'index_propa': -1, 'type': 's',\
# 'index_d2': 3, 'index_d1': 4}], 'get_id': {}, 'get_momentum': {}, \
# 'get_mass2': {}}, 2: {'branchings': [{'index_propa': -1, 'type': 't', \
# 'index_d2': 3, 'index_d1': 1}, {'index_propa': -2, 'type': 't', 'index_d2': 4,\
# 'index_d1': -1}], 'get_id': {}, 'get_momentum': {}, 'get_mass2': {}}, \
# 3: {'branchings': [{'index_propa': -1, 'type': 't', 'index_d2': 4, \
# 'index_d1': 1}, {'index_propa': -2, 'type': 't', 'index_d2': 3, 'index_d1': -1}],\
# 'get_id': {}, 'get_momentum': {}, 'get_mass2': {}}}
#
# self.assertEqual(topo,topo_test)
#
#
# p_string='0.5000000E+03 0.0000000E+00 0.0000000E+00 0.5000000E+03 \n'
# p_string+='0.5000000E+03 0.0000000E+00 0.0000000E+00 -0.5000000E+03 \n'
# p_string+='0.5000000E+03 0.1040730E+03 0.4173556E+03 -0.1872274E+03 \n'
# p_string+='0.5000000E+03 -0.1040730E+03 -0.4173556E+03 0.1872274E+03 \n'
#
#
# os.chdir(pjoin(path_for_me,'production_me','SubProcesses',prod_name))
# executable_prod="./check"
# external = Popen(executable_prod, stdout=PIPE, stdin=PIPE, stderr=STDOUT)
#
# external.stdin.write(p_string)
#
# info = int(external.stdout.readline())
# nb_output = abs(info)+1
#
#
# prod_values = ' '.join([external.stdout.readline() for i in range(nb_output)])
#
# prod_values=prod_values.split()
# prod_values_test=['0.59366146660637686', '7.5713552297679376', '12.386583104018380', '34.882849897228873']
# self.assertEqual(prod_values,prod_values_test)
# external.terminate()
#
#
# os.chdir(temp_dir)
#
# p_string='0.5000000E+03 0.0000000E+00 0.0000000E+00 0.5000000E+03 \n'
# p_string+='0.5000000E+03 0.0000000E+00 0.0000000E+00 -0.5000000E+03 \n'
# p_string+='0.8564677E+02 -0.8220633E+01 0.3615807E+02 -0.7706033E+02 \n'
# p_string+='0.1814001E+03 -0.5785084E+02 -0.1718366E+03 -0.5610972E+01 \n'
# p_string+='0.8283621E+02 -0.6589913E+02 -0.4988733E+02 0.5513262E+01 \n'
# p_string+='0.3814391E+03 0.1901552E+03 0.2919968E+03 -0.1550888E+03 \n'
# p_string+='0.5422284E+02 -0.3112810E+02 -0.7926714E+01 0.4368438E+02\n'
# p_string+='0.2144550E+03 -0.2705652E+02 -0.9850424E+02 0.1885624E+03\n'
#
# os.chdir(pjoin(path_for_me,'full_me','SubProcesses',decay_name))
# executable_decay="./check"
# external = Popen(executable_decay, stdout=PIPE, stdin=PIPE, stderr=STDOUT)
# external.stdin.write(p_string)
#
# nb_output =1
# decay_value = ' '.join([external.stdout.readline() for i in range(nb_output)])
#
# decay_value=decay_value.split()
# decay_value_test=['3.8420345719455465E-017']
# for i in range(len(decay_value)):
# self.assertAlmostEqual(eval(decay_value[i]),eval(decay_value_test[i]))
# os.chdir(curr_dir)
# external.terminate()
# shutil.rmtree(pjoin(path_for_me,'production_me'))
# shutil.rmtree(pjoin(path_for_me,'full_me'))
# os.remove(pjoin(path_for_me,'param_card.dat'))
# os.environ['GFORTRAN_UNBUFFERED_ALL']='n'
| [
"[email protected]"
] |