| Column | Dtype | Range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | 0 to 112 items |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | 1 item |
| author_id | string | length 1 to 132 |

The preview rows below give these fields in this order, pipe-separated; the `content` field holds the raw source text and spans multiple lines.
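A minimal sketch of how a preview like this is typically consumed, assuming the table corresponds to a Hugging Face `datasets`-style corpus; the repository id below is a placeholder rather than a name taken from this page, and the field names come from the schema above.

```python
from itertools import islice
from datasets import load_dataset

# Placeholder repository id; substitute the real dataset name.
ds = load_dataset("org/python-source-corpus", split="train", streaming=True)

for record in islice(ds, 3):
    # Each record carries the repo metadata plus the raw file text.
    print(record["repo_name"], record["path"], record["length_bytes"])
    print(record["content"][:80])
```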
56aceb4af73c59684df5c5acbffce3711e16c735 | abcfd07772ce75f34e51592189c29cf84d1a3611 | /flask/lib/python3.6/site-packages/sqlparse/utils.py | a620f2d3c17fbfba4d56e6a4d8608c27420f4868 | [] | no_license | yuhaihui3435/p_mc | 66d89bcccf214e53729b26a0f80ddee8797e9e3e | 3039a5c691b649fc88e941a2553b1a7e0aac2a0a | refs/heads/master | 2021-06-28T18:52:00.111385 | 2017-09-15T00:26:02 | 2017-09-15T00:26:58 | 103,524,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,490 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, [email protected]
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
import itertools
import re
from collections import deque
from contextlib import contextmanager
from sqlparse.compat import text_type
# This regular expression replaces the home-cooked parser that was here before.
# It is much faster, but requires an extra post-processing step to get the
# desired results (that are compatible with what you would expect from the
# str.splitlines() method).
#
# It matches groups of characters: newlines, quoted strings, or unquoted text,
# and splits on that basis. The post-processing step puts those back together
# into the actual lines of SQL.
SPLIT_REGEX = re.compile(r"""
(
(?: # Start of non-capturing group
(?:\r\n|\r|\n) | # Match any single newline, or
[^\r\n'"]+ | # Match any character series without quotes or
# newlines, or
"(?:[^"\\]|\\.)*" | # Match double-quoted strings, or
'(?:[^'\\]|\\.)*' # Match single quoted strings
)
)
""", re.VERBOSE)
LINE_MATCH = re.compile(r'(\r\n|\r|\n)')
def split_unquoted_newlines(stmt):
"""Split a string on all unquoted newlines.
Unlike str.splitlines(), this will ignore CR/LF/CR+LF if the requisite
character is inside of a string."""
text = text_type(stmt)
lines = SPLIT_REGEX.split(text)
outputlines = ['']
for line in lines:
if not line:
continue
elif LINE_MATCH.match(line):
outputlines.append('')
else:
outputlines[-1] += line
return outputlines
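# Editor's illustration (not part of the original sqlparse module): the splitter
# above keeps a newline that sits inside a quoted literal but breaks on the
# unquoted one.
#
#   >>> split_unquoted_newlines("select 'a\nb' as x\nfrom t")
#   ["select 'a\nb' as x", 'from t']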
def remove_quotes(val):
"""Helper that removes surrounding quotes from strings."""
if val is None:
return
if val[0] in ('"', "'") and val[0] == val[-1]:
val = val[1:-1]
return val
def recurse(*cls):
"""Function decorator to help with recursion
:param cls: Classes to not recurse over
:return: function
"""
def wrap(f):
def wrapped_f(tlist):
for sgroup in tlist.get_sublists():
if not isinstance(sgroup, cls):
wrapped_f(sgroup)
f(tlist)
return wrapped_f
return wrap
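# Editor's illustration (not part of the original module), assuming the usual
# sqlparse.sql classes: the wrapper recurses into every sublist of the passed
# token list except Identifier groups, then applies the filter to the list
# itself.
#
#   @recurse(sql.Identifier)
#   def strip_whitespace(tlist):
#       ...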
def imt(token, i=None, m=None, t=None):
"""Helper function to simplify comparisons Instance, Match and TokenType
:param token:
:param i: Class or Tuple/List of Classes
:param m: Tuple of TokenType & Value. Can be list of Tuple for multiple
:param t: TokenType or Tuple/List of TokenTypes
:return: bool
"""
clss = i
types = [t, ] if t and not isinstance(t, list) else t
mpatterns = [m, ] if m and not isinstance(m, list) else m
if token is None:
return False
elif clss and isinstance(token, clss):
return True
elif mpatterns and any((token.match(*pattern) for pattern in mpatterns)):
return True
elif types and any([token.ttype in ttype for ttype in types]):
return True
else:
return False
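# Editor's illustration (not part of the original module): typical imt() calls,
# assuming the usual aliases from sqlparse.tokens / sqlparse.sql.
#
#   imt(tok, t=T.Keyword)             # ttype check: tok.ttype falls under T.Keyword
#   imt(tok, m=(T.Keyword, 'FROM'))   # match check: tok is the FROM keyword
#   imt(tok, i=sql.Identifier)        # instance check: tok is an Identifier group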
def consume(iterator, n):
"""Advance the iterator n-steps ahead. If n is none, consume entirely."""
deque(itertools.islice(iterator, n), maxlen=0)
@contextmanager
def offset(filter_, n=0):
filter_.offset += n
yield
filter_.offset -= n
@contextmanager
def indent(filter_, n=1):
filter_.indent += n
yield
filter_.indent -= n
| [
"[email protected]"
] | |
ca7fe2126b290de9c15044feaa402731564a284c | 846506a6c9023a21ff831c637f71cffd3b0aab62 | /Python/X_Archive/AddLabelSplits.py | 585014bdeeaa59c66a2eac711aee31123872d4e2 | [] | no_license | emonson/Weinfurt_DocFreq | b90378e57af7c17d32c72e5924a1b9af9f0f6584 | 1c9082d8ce4c0d002b6a65d446421840e24435fd | refs/heads/master | 2020-05-29T21:05:01.397101 | 2015-01-22T13:53:53 | 2015-01-22T13:53:53 | 29,681,004 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | # After changing data labels to new dataXXX scheme, automating
# some of the sentence splits based on the Datamap...xls sheet
import xlrd
book = xlrd.open_workbook('Datamap with Sentence IDs.xls')
sh = book.sheet_by_index(0)
ids = sh.col_values(0,12)
sentences = sh.col_values(1,12)
f = open('LongVer_newLabelsPrelim_rev5.html','r')
# File has some non-ascii characters in it
whole = f.read().decode('latin_1')
for ii, id in enumerate(ids):
id = str(id)
if id.endswith(('a', 'b', 'c', 'd')):
oldStr = sentences[ii].strip()
newStr = '</font>\n<font size=${s_%s} color=${c_%s}>%s' % (id, id, oldStr)
whole = whole.replace(oldStr, newStr)
# Have to manually fix 92a and 95a since the text is identical
oldStr = 'We will keep health information and research data on secure computers. </font>\n<font size=${s_data092a} color=${c_data092a}></font>\n<font size=${s_data095a} color=${c_data095a}>'
newStr = 'We will keep health information and research data on secure computers. </font>\n<font size=${s_data092a} color=${c_data092a}>'
whole = whole.replace(oldStr, newStr)
oldStr = 'We will store this list on secure computers. </font>\n<font size=${s_data092a} color=${c_data092a}></font>\n<font size=${s_data095a} color=${c_data095a}>'
newStr = 'We will store this list on secure computers. </font>\n<font size=${s_data095a} color=${c_data095a}>'
whole = whole.replace(oldStr, newStr)
fout = open('LongVer_newLabelsPrelim2_rev5.html','w')
# for some reason must re-encode before writing
fout.write(whole.encode('latin_1'))
fout.close()
f.close()
# data179a didn't split properly for some reason
# data138b is a duplicate, so it overwrote data138
# split the 200s manually for now
# manually changed (their) mistake of data138b -> 138b
| [
"[email protected]"
] | |
75f33a50056f2286b145c44e8361185d4ff87561 | 16b4229a925a4e3b0e760f401d80c4d2adb793a9 | /models/real_nvp/coupling_layer.py | d8b2581f2febe1eaab92fecde288e1f539fd103a | [
"MIT"
] | permissive | ahmadkhajehnejad/real-nvp | 4fc5a6a5f23fe076304b5652277a8de70ab4f068 | 15f51eb91388fc232334123ac11467c4d3aa3d33 | refs/heads/master | 2022-08-11T09:37:32.353605 | 2020-05-18T12:24:39 | 2020-05-18T12:24:39 | 261,586,549 | 0 | 0 | MIT | 2020-05-05T21:25:51 | 2020-05-05T21:25:50 | null | UTF-8 | Python | false | false | 3,919 | py | import torch
import torch.nn as nn
from enum import IntEnum
from models.resnet import ResNet
from util import checkerboard_mask
class MaskType(IntEnum):
CHECKERBOARD = 0
CHANNEL_WISE = 1
class CouplingLayer(nn.Module):
"""Coupling layer in RealNVP.
Args:
in_channels (int): Number of channels in the input.
mid_channels (int): Number of channels in the `s` and `t` network.
num_blocks (int): Number of residual blocks in the `s` and `t` network.
mask_type (MaskType): One of `MaskType.CHECKERBOARD` or `MaskType.CHANNEL_WISE`.
reverse_mask (bool): Whether to reverse the mask. Useful for alternating masks.
"""
def __init__(self, in_channels, mid_channels, num_blocks, mask_type, reverse_mask):
super(CouplingLayer, self).__init__()
# Save mask info
self.mask_type = mask_type
self.reverse_mask = reverse_mask
# Build scale and translate network
if self.mask_type == MaskType.CHANNEL_WISE:
in_channels //= 2
self.st_net = ResNet(in_channels, mid_channels, 2 * in_channels,
num_blocks=num_blocks, kernel_size=3, padding=1,
double_after_norm=(self.mask_type == MaskType.CHECKERBOARD))
# Learnable scale for s
self.rescale = nn.utils.weight_norm(Rescale(in_channels))
def forward(self, x, sldj=None, reverse=True):
if self.mask_type == MaskType.CHECKERBOARD:
# Checkerboard mask
b = checkerboard_mask(x.size(2), x.size(3), self.reverse_mask, device=x.device)
x_b = x * b
st = self.st_net(x_b)
s, t = st.chunk(2, dim=1)
s = self.rescale(torch.tanh(s))
s = s * (1 - b)
t = t * (1 - b)
# Scale and translate
if reverse:
inv_exp_s = s.mul(-1).exp()
if torch.isnan(inv_exp_s).any():
raise RuntimeError('Scale factor has NaN entries')
x = x * inv_exp_s - t
else:
exp_s = s.exp()
if torch.isnan(exp_s).any():
raise RuntimeError('Scale factor has NaN entries')
x = (x + t) * exp_s
# Add log-determinant of the Jacobian
sldj += s.view(s.size(0), -1).sum(-1)
else:
# Channel-wise mask
if self.reverse_mask:
x_id, x_change = x.chunk(2, dim=1)
else:
x_change, x_id = x.chunk(2, dim=1)
st = self.st_net(x_id)
s, t = st.chunk(2, dim=1)
s = self.rescale(torch.tanh(s))
# Scale and translate
if reverse:
inv_exp_s = s.mul(-1).exp()
if torch.isnan(inv_exp_s).any():
raise RuntimeError('Scale factor has NaN entries')
x_change = x_change * inv_exp_s - t
else:
exp_s = s.exp()
if torch.isnan(exp_s).any():
raise RuntimeError('Scale factor has NaN entries')
x_change = (x_change + t) * exp_s
# Add log-determinant of the Jacobian
sldj += s.view(s.size(0), -1).sum(-1)
if self.reverse_mask:
x = torch.cat((x_id, x_change), dim=1)
else:
x = torch.cat((x_change, x_id), dim=1)
return x, sldj
class Rescale(nn.Module):
"""Per-channel rescaling. Need a proper `nn.Module` so we can wrap it
with `torch.nn.utils.weight_norm`.
Args:
num_channels (int): Number of channels in the input.
"""
def __init__(self, num_channels):
super(Rescale, self).__init__()
self.weight = nn.Parameter(torch.ones(num_channels, 1, 1))
def forward(self, x):
x = self.weight * x
return x
| [
"[email protected]"
] | |
7f77fb3562993641b617a5b8f28eb60e5b4690d8 | 615f83418985b80f2a2a47200acb08dfa9418fc7 | /identities/widgets.py | f59c91e055afcef5e61baf70b159870a655016a6 | [
"MIT"
] | permissive | alejo8591/maker | a42b89ddc426da326a397765dc091db45dd50d8e | 001e85eaf489c93b565efe679eb159cfcfef4c67 | refs/heads/master | 2016-09-06T19:36:01.864526 | 2013-03-23T06:54:21 | 2013-03-23T06:54:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | # encoding: utf-8
# Copyright 2013 maker
# License
"""
Identities module widgets
"""
WIDGETS = {'widget_contact_me': {'title': 'My Contact Card',
'size': "95%"}}
def get_widgets(request):
"Returns a set of all available widgets"
return WIDGETS | [
"[email protected]"
] | |
9fa1eef441427dbc31a6b3821675d6bfcc0a7512 | da8adef15efbdacda32b19196b391f63d5026e3a | /ITMO/ML/Lab5/main.py | 5fbef8d876250cd8a74ae41ce03cc26e95b369ec | [] | no_license | rubcuadra/MachineLearning | 05da95c1f800e6acbce97f6ca825bd7a41d806a6 | aa13dd007a7954d50586cca6dd413a04db18ef77 | refs/heads/master | 2021-03-19T17:33:14.080691 | 2018-10-19T23:43:27 | 2018-10-19T23:43:27 | 100,544,903 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,440 | py | '''
You should implement the feature selection algorithm based on the utility metric (the Filter method).
Implement several utility metrics and compare their performance at classification tasks.
https://en.wikipedia.org/wiki/Feature_selection
https://machinelearningmastery.com/an-introduction-to-feature-selection/
'''
#http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.chi2.html#sklearn.feature_selection.chi2
#http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.mutual_info_classif.html#sklearn.feature_selection.mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.feature_selection import mutual_info_classif, chi2, SelectKBest
from sklearn.metrics import accuracy_score, f1_score
from scipy.stats import chi2_contingency
from glob import glob
import pandas as pd
import numpy as np
def test_model(model,x_train,y_train,x_test,y_test):
model.fit(x_train, y_train)
pred = model.predict(x_test)
print('\t',x_train.shape )
print('\tAccuracy: ', accuracy_score(y_test, pred))
print('\tF-score: ', f1_score(y_test, pred, average='macro'))
#Folder with files/structure
# *_train.data
# *_train.labels
# *_valid.data
# *_valid.labels
def loadData(path):
X,Y,x,y = [],[],[],[]
with open( glob(f"{path}/*_train.data")[0] ,"r" ) as td: X = [ [int(v) for v in line.split()] for line in td ]
with open( glob(f"{path}/*_train.labels")[0] ,"r" ) as td: Y = [ [int(v) for v in line.split()] for line in td ]
with open( glob(f"{path}/*_valid.data")[0] ,"r" ) as td: x = [ [int(v) for v in line.split()] for line in td ]
with open( glob(f"{path}/*_valid.labels")[0] ,"r" ) as td: y = [ [int(v) for v in line.split()] for line in td ]
return (np.matrix(X),np.matrix(Y).A1,np.matrix(x),np.matrix(y).A1)
class VarianceThresh():
def __init__(self, threshold=0):
self.th = threshold
def fit(self,data):
v = np.var(data,axis=0).A1 #Get variances as vector
self.ixs = np.argwhere( v <= self.th )[:,0] #Get indexes to eliminate
def transform(self,data):
newData = []
ixs = list(self.ixs.copy()) + [-1] #to finish
c = ixs.pop(0)
for i,col in enumerate(data.T):
if i == c: c = ixs.pop(0) #new index to remove
else: newData.append( col.A1 ) #add
return np.matrix(newData).T
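# Editor's illustration (not part of the original lab code): columns 0 and 2 of
# the toy matrix below have zero variance, so only column 1 survives.
#
#   >>> vt = VarianceThresh(threshold=0)
#   >>> vt.fit(np.matrix([[1, 2, 3], [1, 5, 3], [1, 8, 3]]))
#   >>> vt.transform(np.matrix([[1, 2, 3], [1, 5, 3], [1, 8, 3]]))
#   matrix([[2],
#           [5],
#           [8]])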
class ChiSquare: #Determine whether there is a significant difference between the expected frequencies and the observed frequencies in one or more categories.
def __init__(self, alpha = 0.5):
self.alpha = alpha
def fit(self,data,Y):
self.ixs = []
for i, X in enumerate(data.T):
dfObserved = pd.crosstab(Y,X.A1)
chi2, p, degrfree, expected = chi2_contingency(dfObserved.values)
# self.dfExpected = pd.DataFrame(expected, columns=self.dfObserved.columns, index = self.dfObserved.index)
            if p >= self.alpha: self.ixs.append(i)  # not significant at this level, so mark the feature for removal in transform()
def transform(self,data):
newData = []
ixs = self.ixs + [-1] #to finish
c = ixs.pop(0)
for i,col in enumerate(data.T):
if i == c: c = ixs.pop(0) #new index to remove
else: newData.append( col.A1 ) #add
return np.matrix(newData).T
if __name__ == '__main__':
Xtrain,Ytrain,Xtest,Ytest = loadData("arcene")
#VT
VT = VarianceThresh(threshold=5000) #5000
VT.fit(Xtrain)
vtX_train = VT.transform(Xtrain) #Apply Selections
vtX_test = VT.transform(Xtest) #Apply Selections
#CHI2
CHI = SelectKBest(score_func=chi2, k=550) #SelectKBest(score_func=chi2, k=550) #ChiSquare(alpha=0.05)
CHI.fit(Xtrain,Ytrain)
CHIXtrain = CHI.transform(Xtrain)
CHIXtest = CHI.transform(Xtest)
#Different ML Techniques
MLT = [LogisticRegression(),RandomForestClassifier(),DecisionTreeClassifier(),SVC(kernel='linear')]
for model in MLT:
print(model.__class__.__name__)
print("\tFULL")
test_model( model, Xtrain, Ytrain, Xtest, Ytest )
print("\tVarianceThreshold")
test_model( model, vtX_train, Ytrain, vtX_test, Ytest )
print("\tCHI^2")
test_model( model, CHIXtrain, Ytrain, CHIXtest, Ytest )
| [
"[email protected]"
] | |
be1f53665e8022b4ede91764bb78e101e375c601 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv2/lib/python3.8/site-packages/ansible/inventory/manager.py | 070e01170b61f1d5f188966caf37738bde4fb084 | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 23,674 | py | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import os
import re
import itertools
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.inventory.data import InventoryData
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins.loader import inventory_loader
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
IGNORED_ALWAYS = [br"^\.", b"^host_vars$", b"^group_vars$", b"^vars_plugins$"]
IGNORED_PATTERNS = [to_bytes(x) for x in C.INVENTORY_IGNORE_PATTERNS]
IGNORED_EXTS = [b'%s$' % to_bytes(re.escape(x)) for x in C.INVENTORY_IGNORE_EXTS]
IGNORED = re.compile(b'|'.join(IGNORED_ALWAYS + IGNORED_PATTERNS + IGNORED_EXTS))
def order_patterns(patterns):
''' takes a list of patterns and reorders them by modifier to apply them consistently '''
# FIXME: this goes away if we apply patterns incrementally or by groups
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
for p in patterns:
if p.startswith("!"):
pattern_exclude.append(p)
elif p.startswith("&"):
pattern_intersection.append(p)
elif p:
pattern_regular.append(p)
# if no regular pattern was given, hence only exclude and/or intersection
# make that magically work
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
return pattern_regular + pattern_intersection + pattern_exclude
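# Editor's illustration (not part of the original Ansible source):
#   order_patterns(['!excluded', 'webservers', '&staged'])
#   -> ['webservers', '&staged', '!excluded']
# and when only modifiers are given, 'all' is injected as the regular pattern:
#   order_patterns(['!excluded']) -> ['all', '!excluded']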
def split_host_pattern(pattern):
"""
Takes a string containing host patterns separated by commas (or a list
thereof) and returns a list of single patterns (which may not contain
commas). Whitespace is ignored.
Also accepts ':' as a separator for backwards compatibility, but it is
not recommended due to the conflict with IPv6 addresses and host ranges.
Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd']
"""
if isinstance(pattern, list):
return list(itertools.chain(*map(split_host_pattern, pattern)))
elif not isinstance(pattern, string_types):
pattern = to_native(pattern)
# If it's got commas in it, we'll treat it as a straightforward
# comma-separated list of patterns.
if ',' in pattern:
patterns = pattern.split(',')
# If it doesn't, it could still be a single pattern. This accounts for
# non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
else:
try:
(base, port) = parse_address(pattern, allow_ranges=True)
patterns = [pattern]
except Exception:
# The only other case we accept is a ':'-separated list of patterns.
# This mishandles IPv6 addresses, and is retained only for backwards
# compatibility.
patterns = re.findall(
r'''(?: # We want to match something comprising:
[^\s:\[\]] # (anything other than whitespace or ':[]'
| # ...or...
\[[^\]]*\] # a single complete bracketed expression)
)+ # occurring once or more
''', pattern, re.X
)
return [p.strip() for p in patterns]
class InventoryManager(object):
''' Creates and manages inventory '''
def __init__(self, loader, sources=None):
# base objects
self._loader = loader
self._inventory = InventoryData()
# a list of host(names) to contain current inquiries to
self._restriction = None
self._subset = None
# caches
self._hosts_patterns_cache = {} # resolved full patterns
self._pattern_cache = {} # resolved individual patterns
self._inventory_plugins = [] # for generating inventory
# the inventory dirs, files, script paths or lists of hosts
if sources is None:
self._sources = []
elif isinstance(sources, string_types):
self._sources = [sources]
else:
self._sources = sources
# get to work!
self.parse_sources(cache=True)
@property
def localhost(self):
return self._inventory.localhost
@property
def groups(self):
return self._inventory.groups
@property
def hosts(self):
return self._inventory.hosts
def get_vars(self, *args, **kwargs):
return self._inventory.get_vars(args, kwargs)
def add_host(self, host, group=None, port=None):
return self._inventory.add_host(host, group, port)
def add_group(self, group):
return self._inventory.add_group(group)
def get_groups_dict(self):
return self._inventory.get_groups_dict()
def reconcile_inventory(self):
self.clear_caches()
return self._inventory.reconcile_inventory()
def get_host(self, hostname):
return self._inventory.get_host(hostname)
def _setup_inventory_plugins(self):
''' sets up loaded inventory plugins for usage '''
display.vvvv('setting up inventory plugins')
for name in C.INVENTORY_ENABLED:
plugin = inventory_loader.get(name)
if plugin:
plugin.set_options()
self._inventory_plugins.append(plugin)
else:
display.warning('Failed to load inventory plugin, skipping %s' % name)
if not self._inventory_plugins:
raise AnsibleError("No inventory plugins available to generate inventory, make sure you have at least one whitelisted.")
def parse_sources(self, cache=False):
''' iterate over inventory sources and parse each one to populate it'''
self._setup_inventory_plugins()
parsed = False
# allow for multiple inventory parsing
for source in self._sources:
if source:
if ',' not in source:
source = unfrackpath(source, follow=False)
parse = self.parse_source(source, cache=cache)
if parse and not parsed:
parsed = True
if parsed:
# do post processing
self._inventory.reconcile_inventory()
else:
if C.INVENTORY_UNPARSED_IS_FAILED:
raise AnsibleError("No inventory was parsed, please check your configuration and options.")
else:
display.warning("No inventory was parsed, only implicit localhost is available")
self._inventory_plugins = []
def parse_source(self, source, cache=False):
''' Generate or update inventory for the source provided '''
parsed = False
display.debug(u'Examining possible inventory source: %s' % source)
b_source = to_bytes(source)
# process directories as a collection of inventories
if os.path.isdir(b_source):
display.debug(u'Searching for inventory files in directory: %s' % source)
for i in sorted(os.listdir(b_source)):
display.debug(u'Considering %s' % i)
# Skip hidden files and stuff we explicitly ignore
if IGNORED.search(i):
continue
# recursively deal with directory entries
fullpath = os.path.join(b_source, i)
parsed_this_one = self.parse_source(to_native(fullpath), cache=cache)
display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one))
if not parsed:
parsed = parsed_this_one
else:
# left with strings or files, let plugins figure it out
            # set so new hosts can use for inventory_file/dir vars
self._inventory.current_source = source
# get inventory plugins if needed, there should always be at least one generator
if not self._inventory_plugins:
self._setup_inventory_plugins()
# try source with each plugin
failures = []
for plugin in self._inventory_plugins:
plugin_name = to_native(getattr(plugin, '_load_name', getattr(plugin, '_original_path', '')))
display.debug(u'Attempting to use plugin %s (%s)' % (plugin_name, plugin._original_path))
# initialize and figure out if plugin wants to attempt parsing this file
try:
plugin_wants = bool(plugin.verify_file(source))
except Exception:
plugin_wants = False
if plugin_wants:
try:
# in case plugin fails 1/2 way we dont want partial inventory
plugin.parse(self._inventory, self._loader, source, cache=cache)
parsed = True
display.vvv('Parsed %s inventory source with %s plugin' % (to_text(source), plugin_name))
break
except AnsibleParserError as e:
display.debug('%s was not parsable by %s' % (to_text(source), plugin_name))
failures.append({'src': source, 'plugin': plugin_name, 'exc': e})
except Exception as e:
display.debug('%s failed to parse %s' % (plugin_name, to_text(source)))
failures.append({'src': source, 'plugin': plugin_name, 'exc': AnsibleError(e)})
else:
display.debug('%s did not meet %s requirements' % (to_text(source), plugin_name))
else:
if not parsed and failures:
# only if no plugin processed files should we show errors.
for fail in failures:
display.warning(u'\n* Failed to parse %s with %s plugin: %s' % (to_text(fail['src']), fail['plugin'], to_text(fail['exc'])))
if hasattr(fail['exc'], 'tb'):
display.vvv(to_text(fail['exc'].tb))
if not parsed:
display.warning("Unable to parse %s as an inventory source" % to_text(source))
# clear up, jic
self._inventory.current_source = None
return parsed
def clear_caches(self):
''' clear all caches '''
self._hosts_patterns_cache = {}
self._pattern_cache = {}
# FIXME: flush inventory cache
def refresh_inventory(self):
''' recalculate inventory '''
self.clear_caches()
self._inventory = InventoryData()
self.parse_sources(cache=False)
def _match_list(self, items, pattern_str):
# compile patterns
try:
if not pattern_str.startswith('~'):
pattern = re.compile(fnmatch.translate(pattern_str))
else:
pattern = re.compile(pattern_str[1:])
except Exception:
raise AnsibleError('Invalid host list pattern: %s' % pattern_str)
# apply patterns
results = []
for item in items:
if pattern.match(item):
results.append(item)
return results
def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False, order=None):
"""
Takes a pattern or list of patterns and returns a list of matching
inventory host names, taking into account any active restrictions
or applied subsets
"""
hosts = []
# Check if pattern already computed
if isinstance(pattern, list):
pattern_hash = u":".join(pattern)
else:
pattern_hash = pattern
if pattern_hash:
if not ignore_limits and self._subset:
pattern_hash += u":%s" % to_text(self._subset, errors='surrogate_or_strict')
if not ignore_restrictions and self._restriction:
pattern_hash += u":%s" % to_text(self._restriction, errors='surrogate_or_strict')
if pattern_hash not in self._hosts_patterns_cache:
patterns = split_host_pattern(pattern)
hosts = self._evaluate_patterns(patterns)
# mainly useful for hostvars[host] access
if not ignore_limits and self._subset:
# exclude hosts not in a subset, if defined
subset = self._evaluate_patterns(self._subset)
hosts = [h for h in hosts if h in subset]
if not ignore_restrictions and self._restriction:
# exclude hosts mentioned in any restriction (ex: failed hosts)
hosts = [h for h in hosts if h.name in self._restriction]
seen = set()
self._hosts_patterns_cache[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)]
# sort hosts list if needed (should only happen when called from strategy)
if order in ['sorted', 'reverse_sorted']:
from operator import attrgetter
hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], key=attrgetter('name'), reverse=(order == 'reverse_sorted'))
elif order == 'reverse_inventory':
hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], reverse=True)
else:
hosts = self._hosts_patterns_cache[pattern_hash][:]
if order == 'shuffle':
from random import shuffle
shuffle(hosts)
elif order not in [None, 'inventory']:
                raise AnsibleOptionsError("Invalid 'order' specified for inventory hosts: %s" % order)
return hosts
def _evaluate_patterns(self, patterns):
"""
Takes a list of patterns and returns a list of matching host names,
taking into account any negative and intersection patterns.
"""
patterns = order_patterns(patterns)
hosts = []
for p in patterns:
# avoid resolving a pattern that is a plain host
if p in self._inventory.hosts:
hosts.append(self._inventory.get_host(p))
else:
that = self._match_one_pattern(p)
if p.startswith("!"):
hosts = [h for h in hosts if h not in frozenset(that)]
elif p.startswith("&"):
hosts = [h for h in hosts if h in frozenset(that)]
else:
hosts.extend([h for h in that if h.name not in frozenset([y.name for y in hosts])])
return hosts
def _match_one_pattern(self, pattern):
"""
Takes a single pattern and returns a list of matching host names.
Ignores intersection (&) and exclusion (!) specifiers.
The pattern may be:
1. A regex starting with ~, e.g. '~[abc]*'
2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*'
3. An ordinary word that matches itself only, e.g. 'foo'
The pattern is matched using the following rules:
1. If it's 'all', it matches all hosts in all groups.
2. Otherwise, for each known group name:
(a) if it matches the group name, the results include all hosts
in the group or any of its children.
(b) otherwise, if it matches any hosts in the group, the results
include the matching hosts.
This means that 'foo*' may match one or more groups (thus including all
hosts therein) but also hosts in other groups.
The built-in groups 'all' and 'ungrouped' are special. No pattern can
match these group names (though 'all' behaves as though it matches, as
described above). The word 'ungrouped' can match a host of that name,
and patterns like 'ungr*' and 'al*' can match either hosts or groups
other than all and ungrouped.
If the pattern matches one or more group names according to these rules,
it may have an optional range suffix to select a subset of the results.
This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does
not work (the [1] is interpreted as part of the regex), but 'foo*[1]'
would work if 'foo*' matched the name of one or more groups.
Duplicate matches are always eliminated from the results.
"""
if pattern.startswith("&") or pattern.startswith("!"):
pattern = pattern[1:]
if pattern not in self._pattern_cache:
(expr, slice) = self._split_subscript(pattern)
hosts = self._enumerate_matches(expr)
try:
hosts = self._apply_subscript(hosts, slice)
except IndexError:
raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern)
self._pattern_cache[pattern] = hosts
return self._pattern_cache[pattern]
def _split_subscript(self, pattern):
"""
Takes a pattern, checks if it has a subscript, and returns the pattern
without the subscript and a (start,end) tuple representing the given
subscript (or None if there is no subscript).
Validates that the subscript is in the right syntax, but doesn't make
sure the actual indices make sense in context.
"""
# Do not parse regexes for enumeration info
if pattern.startswith('~'):
return (pattern, None)
# We want a pattern followed by an integer or range subscript.
# (We can't be more restrictive about the expression because the
# fnmatch semantics permit [\[:\]] to occur.)
pattern_with_subscript = re.compile(
r'''^
(.+) # A pattern expression ending with...
\[(?: # A [subscript] expression comprising:
(-?[0-9]+)| # A single positive or negative number
([0-9]+)([:-]) # Or an x:y or x: range.
([0-9]*)
)\]
$
''', re.X
)
subscript = None
m = pattern_with_subscript.match(pattern)
if m:
(pattern, idx, start, sep, end) = m.groups()
if idx:
subscript = (int(idx), None)
else:
if not end:
end = -1
subscript = (int(start), int(end))
if sep == '-':
display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed")
return (pattern, subscript)
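    # Editor's illustration (not part of the original Ansible source):
    #   self._split_subscript('webservers[0]')   -> ('webservers', (0, None))
    #   self._split_subscript('webservers[1:3]') -> ('webservers', (1, 3))
    #   self._split_subscript('webservers[2:]')  -> ('webservers', (2, -1))
    #   self._split_subscript('~web.*')          -> ('~web.*', None)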
def _apply_subscript(self, hosts, subscript):
"""
Takes a list of hosts and a (start,end) tuple and returns the subset of
hosts based on the subscript (which may be None to return all hosts).
"""
if not hosts or not subscript:
return hosts
(start, end) = subscript
if end:
if end == -1:
end = len(hosts) - 1
return hosts[start:end + 1]
else:
return [hosts[start]]
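    # Editor's illustration (not part of the original Ansible source), using the
    # subscripts produced above on hosts [h0, h1, h2, h3]:
    #   (1, 3)    -> [h1, h2, h3]
    #   (0, None) -> [h0]
    #   (2, -1)   -> [h2, h3]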
def _enumerate_matches(self, pattern):
"""
Returns a list of host names matching the given pattern according to the
rules explained above in _match_one_pattern.
"""
results = []
# check if pattern matches group
matching_groups = self._match_list(self._inventory.groups, pattern)
if matching_groups:
for groupname in matching_groups:
results.extend(self._inventory.groups[groupname].get_hosts())
# check hosts if no groups matched or it is a regex/glob pattern
if not matching_groups or pattern.startswith('~') or any(special in pattern for special in ('.', '?', '*', '[')):
# pattern might match host
matching_hosts = self._match_list(self._inventory.hosts, pattern)
if matching_hosts:
for hostname in matching_hosts:
results.append(self._inventory.hosts[hostname])
if not results and pattern in C.LOCALHOST:
# get_host autocreates implicit when needed
implicit = self._inventory.get_host(pattern)
if implicit:
results.append(implicit)
if not results and pattern != 'all':
display.warning("Could not match supplied host pattern, ignoring: %s" % pattern)
return results
def list_hosts(self, pattern="all"):
""" return a list of hostnames for a pattern """
# FIXME: cache?
result = [h for h in self.get_hosts(pattern)]
# allow implicit localhost if pattern matches and no other results
if len(result) == 0 and pattern in C.LOCALHOST:
result = [pattern]
return result
def list_groups(self):
# FIXME: cache?
return sorted(self._inventory.groups.keys(), key=lambda x: x)
def restrict_to_hosts(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to batch serial operations in main playbook code, don't use this for other
reasons.
"""
if restriction is None:
return
elif not isinstance(restriction, list):
restriction = [restriction]
self._restriction = [h.name for h in restriction]
def subset(self, subset_pattern):
"""
Limits inventory results to a subset of inventory that matches a given
pattern, such as to select a given geographic of numeric slice amongst
a previous 'hosts' selection that only select roles, or vice versa.
Corresponds to --limit parameter to ansible-playbook
"""
if subset_pattern is None:
self._subset = None
else:
subset_patterns = split_host_pattern(subset_pattern)
results = []
# allow Unix style @filename data
for x in subset_patterns:
if x.startswith("@"):
fd = open(x[1:])
results.extend(fd.read().split("\n"))
fd.close()
else:
results.append(x)
self._subset = results
def remove_restriction(self):
""" Do not restrict list operations """
self._restriction = None
def clear_pattern_cache(self):
self._pattern_cache = {}
| [
"[email protected]"
] | |
fc03483fd51af21d7011aefcdb15b9eaf9d4d645 | 349dadbf45b7c12a3fe41c5e0421c0488b679919 | /transformers/tests/test_modeling_longformer.py | 2d30bd3ba4bcd77d8802388c40bc455a2be55d2c | [
"Apache-2.0",
"BSD-3-Clause",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | salesforce/CodeRL | c772e408bac690527759f416ea22add4c97e5bec | 51db4ff983d5376e62b9e7eba150316a651c80d9 | refs/heads/main | 2023-08-18T18:38:02.740995 | 2022-11-18T16:14:28 | 2022-11-18T16:14:28 | 508,912,853 | 412 | 52 | BSD-3-Clause | 2023-08-31T07:51:27 | 2022-06-30T02:54:36 | Python | UTF-8 | Python | false | false | 30,157 | py | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import LongformerConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerSelfAttention,
)
class LongformerModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.attention_window = 4
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but LongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window + 1` locations
# (assuming no token with global attention, otherwise the last dimension of attentions
# is x + self.attention_window + 1, where x is the number of tokens with global attention)
self.key_length = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
self.encoder_seq_length = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
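        # Worked example (editor's note) with the values above: seq_length=7 and
        # attention_window=4 give key_length = 4 + 2 = 6 and
        # encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 7 + 1 = 8, i.e. the input is
        # padded up to the next multiple of the attention window.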
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return LongformerConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
attention_window=self.attention_window,
)
def create_and_check_attention_mask_determinism(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerModel(config=config)
model.to(torch_device)
model.eval()
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
output_with_mask = model(input_ids, attention_mask=attention_mask)["last_hidden_state"]
output_without_mask = model(input_ids)["last_hidden_state"]
self.parent.assertTrue(torch.allclose(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], atol=1e-4))
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_with_global_attention_mask(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerModel(config=config)
model.to(torch_device)
model.eval()
global_attention_mask = input_mask.clone()
global_attention_mask[:, input_mask.shape[-1] // 2] = 0
global_attention_mask = global_attention_mask.to(torch_device)
result = model(
input_ids,
attention_mask=input_mask,
global_attention_mask=global_attention_mask,
token_type_ids=token_type_ids,
)
result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attention_mask)
result = model(input_ids, global_attention_mask=global_attention_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
global_attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = LongformerForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = LongformerForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = LongformerForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
global_attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, -1] = 1
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
"global_attention_mask": global_attention_mask,
}
return config, inputs_dict
def prepare_config_and_inputs_for_question_answering(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
# Replace sep_token_id by some random id
input_ids[input_ids == config.sep_token_id] = torch.randint(0, config.vocab_size, (1,)).item()
# Make sure there are exactly three sep_token_id
input_ids[:, -3:] = config.sep_token_id
input_mask = torch.ones_like(input_ids)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@require_torch
class LongformerModelTest(ModelTesterMixin, unittest.TestCase):
test_pruning = False # pruning is not supported
test_torchscript = False
all_model_classes = (
(
LongformerModel,
LongformerForMaskedLM,
LongformerForSequenceClassification,
LongformerForQuestionAnswering,
LongformerForTokenClassification,
LongformerForMultipleChoice,
)
if is_torch_available()
else ()
)
def setUp(self):
self.model_tester = LongformerModelTester(self)
self.config_tester = ConfigTester(self, config_class=LongformerConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_attention_mask_determinism(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_attention_mask_determinism(*config_and_inputs)
def test_model_global_attention_mask(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_with_global_attention_mask(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_question_answering()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_retain_grad_hidden_states_attentions(self):
# longformer cannot keep gradients in attentions or hidden states
return
@require_torch
@require_sentencepiece
@require_tokenizers
class LongformerModelIntegrationTest(unittest.TestCase):
def _get_hidden_states(self):
return torch.tensor(
[
[
[
4.98332758e-01,
2.69175139e00,
-7.08081422e-03,
1.04915401e00,
-1.83476661e00,
7.67220476e-01,
2.98580543e-01,
2.84803992e-02,
],
[
-7.58357372e-01,
4.20635998e-01,
-4.04739919e-02,
1.59924145e-01,
2.05135748e00,
-1.15997978e00,
5.37166397e-01,
2.62873606e-01,
],
[
-1.69438001e00,
4.17574660e-01,
-1.49196962e00,
-1.76483717e00,
-1.94566312e-01,
-1.71183858e00,
7.72903565e-01,
-1.11557056e00,
],
[
5.44028163e-01,
2.05466114e-01,
-3.63045868e-01,
2.41865062e-01,
3.20348382e-01,
-9.05611176e-01,
-1.92690727e-01,
-1.19917547e00,
],
]
],
dtype=torch.float32,
device=torch_device,
)
def test_diagonalize(self):
hidden_states = self._get_hidden_states()
hidden_states = hidden_states.reshape((1, 8, 4)) # set seq length = 8, hidden dim = 4
chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)
window_overlap_size = chunked_hidden_states.shape[2]
self.assertTrue(window_overlap_size == 4)
padded_hidden_states = LongformerSelfAttention._pad_and_diagonalize(chunked_hidden_states)
self.assertTrue(padded_hidden_states.shape[-1] == chunked_hidden_states.shape[-1] + window_overlap_size - 1)
# first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000]
self.assertTrue(torch.allclose(padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], atol=1e-3))
self.assertTrue(
torch.allclose(
padded_hidden_states[0, 0, 0, 4:],
torch.zeros((3,), device=torch_device, dtype=torch.float32),
atol=1e-3,
)
)
# last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629]
self.assertTrue(torch.allclose(padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], atol=1e-3))
self.assertTrue(
torch.allclose(
padded_hidden_states[0, 0, -1, :3],
torch.zeros((3,), device=torch_device, dtype=torch.float32),
atol=1e-3,
)
)
def test_pad_and_transpose_last_two_dims(self):
hidden_states = self._get_hidden_states()
self.assertTrue(hidden_states.shape, (1, 8, 4))
padding = (0, 0, 0, 1)
padded_hidden_states = LongformerSelfAttention._pad_and_transpose_last_two_dims(hidden_states, padding)
self.assertTrue(padded_hidden_states.shape, (1, 8, 5))
expected_added_dim = torch.zeros((5,), device=torch_device, dtype=torch.float32)
self.assertTrue(torch.allclose(expected_added_dim, padded_hidden_states[0, -1, :], atol=1e-6))
self.assertTrue(torch.allclose(hidden_states[0, -1, :], padded_hidden_states.view(1, -1)[0, 24:32], atol=1e-6))
def test_chunk(self):
hidden_states = self._get_hidden_states()
batch_size = 1
seq_length = 8
hidden_size = 4
hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size))
chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)
# expected slices across chunk and seq length dim
expected_slice_along_seq_length = torch.tensor(
[0.4983, -0.7584, -1.6944], device=torch_device, dtype=torch.float32
)
expected_slice_along_chunk = torch.tensor(
[0.4983, -1.8348, -0.7584, 2.0514], device=torch_device, dtype=torch.float32
)
self.assertTrue(torch.allclose(chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, atol=1e-3))
self.assertTrue(torch.allclose(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, atol=1e-3))
self.assertTrue(chunked_hidden_states.shape, (1, 3, 4, 4))
def test_mask_invalid_locations(self):
hidden_states = self._get_hidden_states()
batch_size = 1
seq_length = 8
hidden_size = 4
hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size))
chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)
hid_states_1 = chunked_hidden_states.clone()
LongformerSelfAttention._mask_invalid_locations(hid_states_1, 1)
self.assertTrue(torch.isinf(hid_states_1).sum().item() == 8)
hid_states_2 = chunked_hidden_states.clone()
LongformerSelfAttention._mask_invalid_locations(hid_states_2, 2)
self.assertTrue(torch.isinf(hid_states_2).sum().item() == 24)
hid_states_3 = chunked_hidden_states.clone()[:, :, :, :3]
LongformerSelfAttention._mask_invalid_locations(hid_states_3, 2)
self.assertTrue(torch.isinf(hid_states_3).sum().item() == 24)
hid_states_4 = chunked_hidden_states.clone()[:, :, 2:, :]
LongformerSelfAttention._mask_invalid_locations(hid_states_4, 2)
self.assertTrue(torch.isinf(hid_states_4).sum().item() == 12)
def test_layer_local_attn(self):
model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
model.eval()
layer = model.encoder.layer[0].attention.self.to(torch_device)
hidden_states = self._get_hidden_states()
batch_size, seq_length, hidden_size = hidden_states.size()
attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)
attention_mask[:, -2:] = -10000
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
output_hidden_states = layer(
hidden_states,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
)[0]
self.assertTrue(output_hidden_states.shape, (1, 4, 8))
self.assertTrue(
torch.allclose(
output_hidden_states[0, 1],
torch.tensor(
[0.0019, 0.0122, -0.0171, -0.0256, -0.0300, 0.0173, -0.0115, 0.0048],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
def test_layer_global_attn(self):
model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
model.eval()
layer = model.encoder.layer[0].attention.self.to(torch_device)
hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0)
batch_size, seq_length, hidden_size = hidden_states.size()
attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)
# create attn mask
attention_mask[0, -2:] = 10000.0
attention_mask[0, -1:] = -10000.0
attention_mask[1, 1:] = 10000.0
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
output_hidden_states = layer(
hidden_states,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
)[0]
self.assertTrue(output_hidden_states.shape, (2, 4, 8))
self.assertTrue(
torch.allclose(
output_hidden_states[0, 2],
torch.tensor(
[-0.0651, -0.0393, 0.0309, -0.0342, -0.0066, -0.0155, -0.0209, -0.0494],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
self.assertTrue(
torch.allclose(
output_hidden_states[1, -2],
torch.tensor(
[-0.0405, -0.0384, 0.0396, -0.0374, -0.0341, 0.0136, 0.0014, -0.0571],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
def test_layer_attn_probs(self):
model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
model.eval()
layer = model.encoder.layer[0].attention.self.to(torch_device)
hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0)
batch_size, seq_length, hidden_size = hidden_states.size()
attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)
# create attn mask
attention_mask[0, -2:] = 10000.0
attention_mask[0, -1:] = -10000.0
attention_mask[1, 1:] = 10000.0
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
output_hidden_states, local_attentions, global_attentions = layer(
hidden_states,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=True,
)
self.assertEqual(local_attentions.shape, (2, 4, 2, 8))
self.assertEqual(global_attentions.shape, (2, 2, 3, 4))
# All tokens with global attention have weight 0 in local attentions.
self.assertTrue(torch.all(local_attentions[0, 2:4, :, :] == 0))
self.assertTrue(torch.all(local_attentions[1, 1:4, :, :] == 0))
# The weight of all tokens with local attention must sum to 1.
self.assertTrue(torch.all(torch.abs(global_attentions[0, :, :2, :].sum(dim=-1) - 1) < 1e-6))
self.assertTrue(torch.all(torch.abs(global_attentions[1, :, :1, :].sum(dim=-1) - 1) < 1e-6))
self.assertTrue(
torch.allclose(
local_attentions[0, 0, 0, :],
torch.tensor(
[0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
self.assertTrue(
torch.allclose(
local_attentions[1, 0, 0, :],
torch.tensor(
[0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
# All the global attention weights must sum to 1.
self.assertTrue(torch.all(torch.abs(global_attentions.sum(dim=-1) - 1) < 1e-6))
self.assertTrue(
torch.allclose(
global_attentions[0, 0, 1, :],
torch.tensor(
[0.2500, 0.2500, 0.2500, 0.2500],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
self.assertTrue(
torch.allclose(
global_attentions[1, 0, 0, :],
torch.tensor(
[0.2497, 0.2500, 0.2499, 0.2504],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
@slow
def test_inference_no_head(self):
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
model.to(torch_device)
# 'Hello world!'
input_ids = torch.tensor([[0, 20920, 232, 328, 1437, 2]], dtype=torch.long, device=torch_device)
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
output = model(input_ids, attention_mask=attention_mask)[0]
output_without_mask = model(input_ids)[0]
expected_output_slice = torch.tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], device=torch_device)
self.assertTrue(torch.allclose(output[0, 0, -5:], expected_output_slice, atol=1e-4))
self.assertTrue(torch.allclose(output_without_mask[0, 0, -5:], expected_output_slice, atol=1e-4))
@slow
def test_inference_no_head_long(self):
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
model.to(torch_device)
# 'Hello world! ' repeated 1000 times
input_ids = torch.tensor(
[[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device
) # long input
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device)
global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device)
global_attention_mask[:, [1, 4, 21]] = 1 # Set global attention on a few random positions
output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0]
expected_output_sum = torch.tensor(74585.8594, device=torch_device)
expected_output_mean = torch.tensor(0.0243, device=torch_device)
self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4))
self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4))
@slow
def test_inference_masked_lm_long(self):
model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096")
model.to(torch_device)
# 'Hello world! ' repeated 1000 times
input_ids = torch.tensor(
[[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device
) # long input
input_ids = input_ids.to(torch_device)
loss, prediction_scores = model(input_ids, labels=input_ids).to_tuple()
expected_loss = torch.tensor(0.0074, device=torch_device)
expected_prediction_scores_sum = torch.tensor(-6.1048e08, device=torch_device)
expected_prediction_scores_mean = torch.tensor(-3.0348, device=torch_device)
self.assertTrue(torch.allclose(loss, expected_loss, atol=1e-4))
self.assertTrue(torch.allclose(prediction_scores.sum(), expected_prediction_scores_sum, atol=1e-4))
self.assertTrue(torch.allclose(prediction_scores.mean(), expected_prediction_scores_mean, atol=1e-4))
| [
"[email protected]"
] | |
09dc4b8c4826d5758b97747fe4b4d51aa543a01a | 5053116ea6876add7903bf9433a9cf5da6aa5cbb | /CI_final project/rbfnet/RBF.py | 37606ac2732850630bf4bcd555651a1ffac725a9 | [] | no_license | Yasaman1997/Computatioanal-Intelligence | 4c400643bc4eb8ab30e51fc1fe7e76cf2d2ca2e5 | 65607a5d3ff6e08269584f9055f4feba5358abe2 | refs/heads/master | 2020-12-27T07:41:18.100841 | 2020-02-02T19:29:28 | 2020-02-02T19:29:28 | 237,819,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,066 | py | import math
import matplotlib.pyplot as plt
import numpy as np
import FCM
import code
class RBF:
def __init__(self, path, clusters, fuzziness_parameter, gama, n_class):
        self.path = path
self.dataset = code.prepare_data()
self.n_cluster = clusters
self.m = fuzziness_parameter
self.n_class = n_class
self.c_raduis = gama
self.G_matrix = np.array([[0.0 for i in range(self.n_cluster)] for j in range(int(len(self.dataset) * 0.7))])
self.Y_matrix = np.array([[0 for i in range(self.n_class)] for j in range(int(len(self.dataset) * 0.7))])
self.W_matrix = None
self.G_matrix_test = np.array(
[[0.0 for i in range(self.n_cluster)] for j in range(int(len(self.dataset) - len(self.dataset) * 0.7))])
self.Y = [0.0 for i in range(int(len(self.dataset) - len(self.dataset) * 0.7))]
self.Output_matrix = np.array([[0.0 for i in range(self.n_class)] for j in range(int(len(self.dataset) * 0.7))])
def distance(self, point1, point2):
d = math.sqrt(sum((a - b) ** 2 for a, b in zip(point1, point2)))
if d == 0:
return 0.00000001
else:
return d
def get_uik(self, x, vi):
T1 = 0
T2 = float((self.distance(x, vi)))
for ck in self.C_matrix:
T3 = float(self.distance(x, ck))
T1 += pow(float(T2 / T3), 2 / (self.m - 1))
uik = 1 / T1
return uik
def compute_G(self, start, end, G):
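        # Build the RBF design matrix for dataset rows [start, end).
        # For each cluster a membership-weighted scatter matrix `ci` is accumulated
        # from (x - c_i) outer products, then every sample x gets the basis value
        # exp(-self.c_raduis * (x - c_i)^T ci^{-1} (x - c_i)).
        # G == 0 fills the training matrix (self.G_matrix) using the stored FCM
        # memberships; any other value fills self.G_matrix_test, recomputing the
        # memberships with get_uik for unseen rows.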
g1 = []
g2 = []
for i in range(len(self.C_matrix)):
ci = np.array([[0.0, 0.0],
[0.0, 0.0]])
uik = 0
u = 0
for j in range(start, end):
if G == 0:
u = self.U_matrix[j - start][i]
else:
u = self.get_uik(self.dataset[j][0], self.C_matrix[i])
g = np.array([u ** self.m * self.dataset[j][0][0],
u ** self.m * self.dataset[j][0][1]]) - \
np.array([u ** self.m * float(self.C_matrix[i][0]),
u ** self.m * float(self.C_matrix[i][1])])
ci += [[g[0] ** 2, g[0] * g[1]], [g[0] * g[1], g[1] ** 2]]
uik += (u ** self.m)
ci = ci / uik
for j in range(start, end):
x = np.array([self.dataset[j][0][0],
self.dataset[j][0][1]])
if G == 0:
self.G_matrix[j - start][i] = math.exp(
-self.c_raduis * np.matmul(np.matmul(np.transpose(x - self.C_matrix[i]),
np.linalg.inv(ci)),
x - self.C_matrix[i]))
# g1.append(self.G_matrix)
# np.savetxt("G1.txt", g1)
else:
self.G_matrix_test[j - start][i] = math.exp(
-self.c_raduis * np.matmul(np.matmul(np.transpose(x - self.C_matrix[i]),
np.linalg.inv(ci)),
x - self.C_matrix[i]))
# g2.append(self.G_matrix_test)
# np.savetxt("G2.txt", g2)
def Run_Rbf(self):
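        # Training pipeline: shuffle, one-hot encode the labels of the first 70% of
        # the data into Y, cluster that split with fuzzy c-means, build the basis
        # matrix G, then solve the output weights with the normal equations
        # W = (G^T G)^{-1} G^T Y (a least-squares fit of G W ~ Y).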
np.random.shuffle(self.dataset)
for i in range(int(len(self.dataset) * 0.7)):
self.Y_matrix[i][self.dataset[i][1] - 1] = 1
        fcm = FCM.FCM(self.n_cluster, self.dataset[0:int(len(self.dataset) * 0.7)], self.m)  # use FCM to cluster the training split
self.U_matrix, self.C_matrix = fcm.clustering_algorithm()
self.compute_G(0, int(len(self.dataset) * 0.7), 0)
self.W_matrix = np.matmul(np.matmul(np.linalg.inv(np.matmul(np.transpose(self.G_matrix),
self.G_matrix)), np.transpose(self.G_matrix)),
self.Y_matrix)
self.Output_matrix = np.matmul(self.G_matrix, self.W_matrix)
print('W_matrix:')
print(self.W_matrix)
print('output:')
print(self.Output_matrix)
def rbf_test(self):
self.compute_G(int(len(self.dataset) * 0.7) + 1, len(self.dataset), 1)
self.Output_matrix = np.matmul(self.G_matrix_test, self.W_matrix)
# print(self.dataset[int(len(self.dataset) * 0.7)+1:len(self.dataset)])
for i in range(len(self.Output_matrix)):
self.Y[i] = np.argmax(self.Output_matrix[i]) + 1
print('y:')
print(self.Y)
print('predicted_output:')
print(self.Output_matrix)
def accuracy(self):
sum = 0.0
acc = []
start = int(len(self.dataset) * 0.7) + 1
end = len(self.dataset)
for i in range(start, end):
dif = self.dataset[i][1] - self.Y[i - start]
# plt.scatter(self.Y[i - start], c='green')
# plt.scatter(self.dataset[i][1], c='red')
plt.show()
            if dif != 0:
sum += 1
accuracy = 1 - sum / int(len(self.dataset) * 0.3)
acc.append(accuracy)
np.savetxt("acc.txt", acc)
print('accuracy:')
print(accuracy)
def run():
for i in range(2, 32, 2):
rbf = RBF("2clstrain1200.csv", 10, 2, 1, 2)
rbf.Run_Rbf()
rbf.rbf_test()
plt.scatter([rbf.C_matrix[0][0], rbf.C_matrix[1][0], rbf.C_matrix[2][0], rbf.C_matrix[3][0], rbf.C_matrix[4][0],
rbf.C_matrix[5][0], rbf.C_matrix[6][0], rbf.C_matrix[7][0]],
[rbf.C_matrix[0][1], rbf.C_matrix[1][1], rbf.C_matrix[2][1], rbf.C_matrix[3][1], rbf.C_matrix[4][1],
rbf.C_matrix[5][1], rbf.C_matrix[6][1], rbf.C_matrix[7][1]], color='black')
plt.show()
# print('accuracy:')
        rbf.accuracy()
run()
| [
"[email protected]"
] | |
2a38b18643398080afdf4fbe89533401fdd3c67d | 4c300a18ba13bed9e0fa933a9f6d01187e005468 | /devrun/cmd/web.py | d2a193d5aa0a8dded14213a28127cad236a46f7a | [] | no_license | smurfix/devrun | 08def56eda1090e9489b2f3c33d2a26d9a143277 | d6098fafc79c81d65468a6cbdaaf0b8633ebde97 | refs/heads/master | 2021-01-25T01:22:05.091114 | 2017-06-22T10:54:40 | 2017-06-22T10:54:40 | 94,753,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of devrun, a comprehensive controller and monitor for
## various typed code.
##
## devrun is Copyright © 2016 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
import sys
import asyncio
import inspect
from collections.abc import Mapping
from . import BaseCommand
from devrun.web import App
class Command(BaseCommand):
"Run a web server"
help = """\
web
-- run a web server. Usage: web [[bind-to] port]
Defaults: any 9980
"""
app = None
bindto = '0.0.0.0'
port = 9980
async def run(self, *args):
self.loop = self.opt.loop
if len(args) > 2:
print("Usage: run", file=sys.stderr)
return 1
if args:
            self.port = int(args[-1])
if len(args) > 1:
self.bindto = args[0]
self.app = App(self)
await self.app.start(self.bindto,self.port)
while True:
await asyncio.sleep(9999,loop=self.loop)
async def stop(self):
if self.app is not None:
await self.app.stop()
await super().stop()
| [
"[email protected]"
] | |
485b464e4b94a31de2a1e612ab6f6c40411628a1 | 4de0c6d3a820d7669fcef5fd035416cf85b35f23 | /ITcoach/xlrd_xlwt处理数据/第1章 python基础/1.7 学Python,不愁没对象!/1.7.2.py | 41358a812e40297cbed3f5cf30e10307d8cf4862 | [
"AFL-3.0"
] | permissive | ww35133634/chenxusheng | 5e1b7391a94387b73bcd7c4d12f1247b79be8016 | 666e0eb3aedde46342faf0d4030f5c72b10c9732 | refs/heads/master | 2022-11-12T03:46:47.953680 | 2020-07-02T20:50:56 | 2020-07-02T20:50:56 | 275,168,080 | 0 | 0 | AFL-3.0 | 2020-07-02T20:58:37 | 2020-06-26T13:54:48 | HTML | UTF-8 | Python | false | false | 34 | py | print(type(99))
print(type('abc')) | [
"[email protected]"
] | |
a32077954feaaf66d2e4a23b6ccbfb1d5d8009ef | bcc199a7e71b97af6fbfd916d5a0e537369c04d9 | /leetcode/solved/53_Maximum_Subarray/solution.py | 4b9dd9a6ff3ffd63395f61fdf9315a2b1fb9c7b9 | [] | no_license | sungminoh/algorithms | 9c647e82472905a2c4e505c810b622b734d9d20d | 1389a009a02e90e8700a7a00e0b7f797c129cdf4 | refs/heads/master | 2023-05-01T23:12:53.372060 | 2023-04-24T06:34:12 | 2023-04-24T06:34:12 | 87,406,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <[email protected]>
#
# Distributed under terms of the MIT license.
"""
Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
A subarray is a contiguous part of an array.
Example 1:
Input: nums = [-2,1,-3,4,-1,2,1,-5,4]
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
Example 2:
Input: nums = [1]
Output: 1
Example 3:
Input: nums = [5,4,-1,7,8]
Output: 23
Constraints:
1 <= nums.length <= 10^5
-10^4 <= nums[i] <= 10^4
Follow up: If you have figured out the O(n) solution, try coding another solution using the divide and conquer approach, which is more subtle.
"""
import sys
from typing import List
import pytest
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
"""
Time complexity: O(n)
Space complexity: O(1)
"""
ret = nums[0]
prevsum = nums[0]
for n in nums[1:]:
prevsum = max(n, prevsum + n)
ret = max(ret, prevsum)
return ret
@pytest.mark.parametrize('nums, expected', [
([-2,1,-3,4,-1,2,1,-5,4], 6),
([1], 1),
([5,4,-1,7,8], 23),
])
def test(nums, expected):
assert expected == Solution().maxSubArray(nums)
if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
| [
"[email protected]"
] | |
eeed637ae329962457dea76e8a46dd6381f588f2 | 1832a909b2c564bc623bca36dd3eea8c5587e2db | /server/core/migrations/0007_auto_20170206_0100.py | 30aa6f8b8b22f8af086a24dce4fe314c5837a1d1 | [] | no_license | bravesoftdz/tramsdaol | ea4370b93ccba6ba569e948d552bab89042fb337 | e726a00a0296454582f18d18956140c67be8cf8d | refs/heads/master | 2020-03-20T02:57:25.088779 | 2017-04-01T01:31:49 | 2017-04-01T01:31:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 01:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0006_geographiccoordinate_search_address'),
]
operations = [
migrations.AlterUniqueTogether(
name='geographiccoordinate',
unique_together=set([('lat', 'lng', 'address', 'search_address')]),
),
]
| [
"[email protected]"
] | |
9aa63aac3e6bf6b02a6ac01ef808c52247f95e0c | 649255f0d9b6d90be3d3f68263680081f893a089 | /test/test_excluded_scan_targets.py | 85f9366583d08f39747f863208b29caf424513f5 | [] | no_license | khantext/r7ivm3 | 611e1bbc988d9eb8fbb53294d3ed488130e46818 | bd9b25f511f9e7479ea7069d71929700bed09e87 | refs/heads/master | 2023-05-01T10:01:16.336656 | 2021-05-03T18:16:12 | 2021-05-03T18:16:12 | 237,514,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,254 | py | # coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" rel=\"noopener noreferrer\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. 
| Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. | `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
#### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. ### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": <value>,] [\"lower\": <value>,] [\"upper\": <value>] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Depending on the data type of the operator the value may be a numeric or string format. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-like` `not-like` | | `container-status` | `is` `is-not` | | `containers` | `are` | | `criticality-tag` | `is` `is-not` `is-greater-than` `is-less-than` `is-applied` ` is-not-applied` | | `custom-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `cve` | `is` `is-not` `contains` `does-not-contain` | | `cvss-access-complexity` | `is` `is-not` | | `cvss-authentication-required` | `is` `is-not` | | `cvss-access-vector` | `is` `is-not` | | `cvss-availability-impact` | `is` `is-not` | | `cvss-confidentiality-impact` | `is` `is-not` | | `cvss-integrity-impact` | `is` `is-not` | | `cvss-v3-confidentiality-impact` | `is` `is-not` | | `cvss-v3-integrity-impact` | `is` `is-not` | | `cvss-v3-availability-impact` | `is` `is-not` | | `cvss-v3-attack-vector` | `is` `is-not` | | `cvss-v3-attack-complexity` | `is` `is-not` | | `cvss-v3-user-interaction` | `is` `is-not` | | `cvss-v3-privileges-required` | `is` `is-not` | | `host-name` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-empty` `is-not-empty` `is-like` `not-like` | | `host-type` | `in` `not-in` | | `ip-address` | `is` `is-not` `in-range` `not-in-range` `is-like` `not-like` | | `ip-address-type` | `in` `not-in` | | `last-scan-date` | `is-on-or-before` `is-on-or-after` `is-between` `is-earlier-than` `is-within-the-last` | | `location-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` `is-earlier-than` | | `open-ports` | `is` `is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` `is-not` `is-greater-than` `is-less-than` `in-range` | | `service-name` | `contains` `does-not-contain` | | `site-id` | `in` `not-in` | | `software` | `contains` `does-not-contain` | | `vAsset-cluster` | `is` `is-not` `contains` `does-not-contain` `starts-with` | | 
`vAsset-datacenter` | `is` `is-not` | | `vAsset-host-name` | `is` `is-not` `contains` `does-not-contain` `starts-with` | | `vAsset-power-state` | `in` `not-in` | | `vAsset-resource-pool-path` | `contains` `does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` `is-on-or-after` `is-between` `is-earlier-than` `is-within-the-last` | | `vulnerability-category` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` | | `vulnerability-cvss-v3-score` | `is` `is-not` | | `vulnerability-cvss-score` | `is` `is-not` `in-range` `is-greater-than` `is-less-than` | | `vulnerability-exposures` | `includes` `does-not-include` | | `vulnerability-title` | `contains` `does-not-contain` `is` `is-not` `starts-with` `ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|------------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `string` (yyyy-MM-dd) | `numeric` (yyyy-MM-dd) | | `is-earlier-than` | `numeric` (days) | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` (days) | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.excluded_scan_targets import ExcludedScanTargets # noqa: E501
from swagger_client.rest import ApiException
class TestExcludedScanTargets(unittest.TestCase):
"""ExcludedScanTargets unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testExcludedScanTargets(self):
"""Test ExcludedScanTargets"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.excluded_scan_targets.ExcludedScanTargets() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
fe74a07943cbbc198295913a72e0c97901d333e8 | 4bbb67ae8d51c29641b153371e1f404b9af404f0 | /tetris/tetris.py | d54e00fbee9dd349be46d1875ea764db646c4d88 | [] | no_license | timurbakibayev/python_advanced | e328dd3b26d4b94a05e9218be483e97b149fa8bf | da7beace64e2c17c447efec314d757f8181b6acf | refs/heads/master | 2023-01-30T04:07:19.296989 | 2020-12-08T12:47:34 | 2020-12-08T12:47:34 | 291,979,925 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,905 | py | import pygame
import random
pygame.init()
colors = [
(0, 0, 0),
(120, 37, 179),
(100, 179, 179),
(80, 34, 22),
(80, 134, 22),
(180, 34, 22),
(180, 34, 122),
]
transparent = (255,255,255,40)
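# Each tetromino is encoded as a list of rotations; a rotation is a list of cell
# indices (0-15) into a 4x4 grid read row by row (index = row * 4 + column).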
figures = [
[[1, 5, 9, 13], [4, 5, 6, 7]],
[[4, 5, 9, 10], [2, 6, 5, 9]],
[[6, 7, 9, 10], [1, 5, 6, 10]],
[[1, 2, 5, 9], [0, 4, 5, 6], [1, 5, 9, 8], [4, 5, 6, 10]],
[[1, 2, 6, 10], [5, 6, 7, 9], [2, 6, 10, 11], [3, 5, 6, 7]],
[[1, 4, 5, 6], [1, 4, 5, 9], [4, 5, 6, 9], [1, 5, 6, 9]],
[[1, 2, 5, 6]],
]
score = 0
class Figure:
def __init__(self,x,y):
self.x = x
self.y = y
self.type = random.randint(0, len(figures)-1)
self.color = random.randint(1, len(colors)-1)
self.rotation = 0
def image(self):
return figures[self.type][self.rotation]
def rotate(self):
self.rotation = (self.rotation + 1) % len(figures[self.type])
if self.intersects():
self.rotation = (self.rotation - 1) % len(figures[self.type])
def intersects(self):
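        # True if any occupied cell of the piece falls outside the board
        # or overlaps a block already frozen into `field`.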
for i in range(4):
for j in range(4):
p = i * 4 + j
if p in self.image():
x = self.x + j
y = self.y + i
if x > width-1:
return True
if x < 0:
return True
if y > height-1:
return True
if field[y][x] > 0:
return True
return False
def freeze(self):
global score
for i in range(4):
for j in range(4):
p = i * 4 + j
if p in self.image():
x = self.x + j
y = self.y + i
field[y][x] = self.color
lines = 0
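        # Scan for completed rows; each full row is erased by copying every row
        # above it down one step (rows are processed from row i up to row 2).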
for i in range(1, height):
zeros = field[i].count(0)
if zeros == 0:
lines += 1
for i1 in range(i,1,-1):
for j in range(width):
field[i1][j] = field[i1-1][j]
score += lines*2
size = (400, 500)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Tetris")
done = False
clock = pygame.time.Clock()
fps = 25
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GRAY = (128, 128, 128)
RED = (200, 0, 0)
font = pygame.font.SysFont('Calibri', 50, True, False)
text = font.render("Game Over", True, RED)
font_score = pygame.font.SysFont('Calibri', 25, True, False)
text = font.render("Game Over", True, RED)
height = 20
width = 10
field = []
zoom = 20
x,y = 100,40
counter = 0
game_over = False
for i in range(height):
new_line = []
for j in range(width):
new_line.append(0)
field.append(new_line)
figure = Figure(3,0)
while not done:
# Game update
if not game_over:
counter += 1
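        # Gravity: the falling piece moves down one row every 5 frames
        # (5 drops per second at 25 fps); freeze it once it can no longer move.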
if counter % 5 == 0:
figure.y += 1
if figure.intersects():
figure.y -= 1
figure.freeze()
figure = Figure(3,0)
if figure.intersects():
game_over = True
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if not game_over:
if event.key == pygame.K_LEFT:
figure.x -= 1
if figure.intersects():
figure.x += 1
if event.key == pygame.K_RIGHT:
figure.x += 1
if figure.intersects():
figure.x -= 1
if event.key == pygame.K_UP:
figure.rotate()
if event.key == pygame.K_DOWN or event.key == pygame.K_SPACE:
while not figure.intersects():
figure.y += 1
figure.y -= 1
screen.fill(WHITE)
for i in range(height):
for j in range(width):
pygame.draw.rect(screen, GRAY, [x + zoom * j, y + zoom * i, zoom, zoom], 1)
if field[i][j] > 0:
pygame.draw.rect(screen, colors[field[i][j]], [x + zoom * j, y + zoom * i, zoom, zoom])
if figure is not None:
for i in range(4):
for j in range(4):
p = i * 4 + j
if p in figure.image():
pygame.draw.rect(screen, colors[figure.color], [
x + zoom * (j + figure.x),
y + zoom * (i + figure.y),
zoom, zoom])
score_pic = font.render(str(score), True, RED)
screen.blit(score_pic, (25, 25))
if game_over:
screen.blit(text, (100,(height*zoom+y)//2))
pygame.display.flip()
clock.tick(fps)
pygame.quit()
| [
"[email protected]"
] | |
3a559961d460a100177dfb05a88dd44d35beacac | 17993dcca87d490bc9841437309f309a5592ab38 | /Codes/support_vector_machine/lib/svm_smo.py | 0e73fa2184ef4d2fcb86116b01efa95cec3c0255 | [] | no_license | dreamlikexin/machine_learning | bc86ea15ef8552ad1be78a5bc65fb74a2cdb274e | 850e87025270847210b6ad188d2da181983a72c7 | refs/heads/master | 2022-01-16T09:51:20.538340 | 2019-06-19T16:27:26 | 2019-06-19T16:27:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | import numpy as np
class SVM:
def get_H(self, Lambda, i, j, y):
if y[i]==y[j]:
return Lambda[i] + Lambda[j]
else:
return float("inf")
def get_L(self, Lambda, i, j, y):
if y[i]==y[j]:
return 0.0
else:
return max(0, Lambda[j] - Lambda[i])
def smo(self, X, y, K, N):
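        # Simplified SMO for a hard-margin SVM: sweep over all (i, j) pairs for N
        # epochs, compute the prediction errors E_i and E_j, move Lambda[j] by
        # y_j * (E_j - E_i) / D_ij clipped to the feasible box [L, H], and adjust
        # Lambda[i] so that sum(Lambda * y) stays constant. The bias b is then
        # recomputed from a pair member whose multiplier is still positive
        # (a support vector).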
m, n = X.shape
Lambda = np.zeros((m,1))
epsilon = 1e-6
for t in range(N):
for i in range(m):
for j in range(m):
D_ij = 2 * K[i][j] - K[i][i] - K[j][j]
if abs(D_ij) < epsilon:
continue
E_i = K[:, i].dot(Lambda * y) - y[i]
E_j = K[:, j].dot(Lambda * y) - y[j]
delta_j = 1.0 * y[j] * (E_j - E_i) / D_ij
H_ij = self.get_H(Lambda, i, j, y)
L_ij = self.get_L(Lambda, i, j, y)
if Lambda[j] + delta_j > H_ij:
delta_j = H_ij - Lambda[j]
Lambda[j] = H_ij
elif Lambda[j] + delta_j < L_ij:
delta_j = L_ij - Lambda[j]
Lambda[j] = L_ij
else:
Lambda[j] += delta_j
delta_i = - y[i] * y[j] * delta_j
Lambda[i] += delta_i
if Lambda[i] > epsilon:
b = y[i] - K[:, i].dot(Lambda * y)
elif Lambda[j] > epsilon:
b = y[j] - K[:, j].dot(Lambda * y)
self.Lambda = Lambda
self.b = b
def fit(self, X, y, N = 10):
K = X.dot(X.T)
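        # Linear kernel Gram matrix; X is an (m, n) sample matrix and y is expected
        # to be an (m, 1) column of +1/-1 labels so that Lambda * y broadcasts.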
self.smo(X, y, K, N)
self.w = X.T.dot(self.Lambda * y)
def predict(self, X):
return np.sign(X.dot(self.w) + self.b)
| [
"[email protected]"
] | |
5dc3bef4afd928a08151a42b378ae0cc9051a420 | a8062308fb3bf6c8952257504a50c3e97d801294 | /problems/N539_Minimum_Time_Difference.py | 606f46d1a86602b05472d086e54f47d8e7fe6dbb | [] | no_license | wan-catherine/Leetcode | 650d697a873ad23c0b64d08ad525bf9fcdb62b1b | 238995bd23c8a6c40c6035890e94baa2473d4bbc | refs/heads/master | 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | class Solution(object):
def findMinDifference(self, timePoints):
"""
:type timePoints: List[str]
:rtype: int
"""
if not timePoints:
return
times = []
for point in timePoints:
times.append(int(point[:2])*60 + int(point[3:]))
times.sort()
res = 24*60
length = len(times)
for i in range(1, length):
res = min(res, times[i] - times[i-1])
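        # Also compare the earliest and latest times across midnight (wrap-around).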
res = min(res, times[0] + 24*60 - times[-1])
return res | [
"[email protected]"
] | |
91e4afe9a69b64b1252d3f879b3ec018d529bbad | 5dd190725aaaeb7287d935b3c99c20480b208816 | /object_detection/utils/np_mask_ops_test.py | a0ee46eff01f165f5ae94346b8a1b7fe2636149c | [
"MIT"
] | permissive | DemonDamon/mask-detection-based-on-tf2odapi | 32d947164fb54395b9e45368c0d4bcf3a6ea1c28 | 192ae544169c1230c21141c033800aa1bd94e9b6 | refs/heads/main | 2023-05-13T05:05:44.534885 | 2021-06-08T05:56:09 | 2021-06-08T05:56:09 | 369,463,131 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,719 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.np_mask_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_mask_ops
class MaskOpsTests(tf.test.TestCase):
def setUp(self):
masks1_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks1_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks1 = np.stack([masks1_0, masks1_1])
masks2_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.uint8)
masks2_2 = np.array([[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0]],
dtype=np.uint8)
masks2 = np.stack([masks2_0, masks2_1, masks2_2])
self.masks1 = masks1
self.masks2 = masks2
def testArea(self):
areas = np_mask_ops.area(self.masks1)
expected_areas = np.array([8.0, 10.0], dtype=np.float32)
self.assertAllClose(expected_areas, areas)
def testIntersection(self):
intersection = np_mask_ops.intersection(self.masks1, self.masks2)
expected_intersection = np.array(
[[8.0, 0.0, 8.0], [0.0, 9.0, 7.0]], dtype=np.float32)
self.assertAllClose(intersection, expected_intersection)
def testIOU(self):
iou = np_mask_ops.iou(self.masks1, self.masks2)
expected_iou = np.array(
[[1.0, 0.0, 8.0/25.0], [0.0, 9.0 / 16.0, 7.0 / 28.0]], dtype=np.float32)
self.assertAllClose(iou, expected_iou)
def testIOA(self):
ioa21 = np_mask_ops.ioa(self.masks1, self.masks2)
expected_ioa21 = np.array([[1.0, 0.0, 8.0/25.0],
[0.0, 9.0/15.0, 7.0/25.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
e831296b955c4901d82f0f076e71adc6e4910c97 | 43dec81f8466acb925a9c4830fe274de9cd1f51e | /backend/home/migrations/0004_auto_20201124_1916.py | 31248f325cd782e66c1f5f5fd515ded3f3aaa5f0 | [] | no_license | crowdbotics-apps/lizz-11-10-mob2-22484 | 6cd2af76c92fb2f2c9047b70017b9e0c5adfbcdb | 61ab9254234534b8384ec2b64451cf5ba7d587de | refs/heads/master | 2023-02-03T02:07:07.406375 | 2020-12-22T18:53:04 | 2020-12-22T18:53:04 | 311,748,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # Generated by Django 2.2.17 on 2020-11-24 19:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0003_homepage_hello'),
]
operations = [
migrations.RemoveField(
model_name='homepage',
name='body',
),
migrations.AddField(
model_name='homepage',
name='body2',
field=models.TextField(blank=True),
),
]
| [
"[email protected]"
] | |
bf29aac5c207544133fb68b24af753d74d7b9796 | a3746020cf091f433beb41bde1b62818b4de569b | /new_rule/ticket-rules/oracle/SQL_TO_CHANGE_TYPE.py | 1109b2501f3b147a116f67fc69a8723297cb7134 | [] | no_license | kk71/sqlaudit | 59bab5765a67f56f1dd2f3103812051c5acbbc49 | 747aaa02573a9c2b46a9e14415d27c0ab8e6158c | refs/heads/master | 2023-02-04T18:38:46.125746 | 2020-06-05T09:49:46 | 2020-06-05T09:49:46 | 323,559,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import re
def code(rule, entries, **kwargs):
sql_plan_qs = kwargs["sql_plan_qs"]
plans = sql_plan_qs.filter(
filter_predicates=re.compile(r"(SYS_OP|TO_NUMBER|INTERNAL_FUNCTION)", re.I)
)
for x in plans:
return -rule.weight, [
x.statement_id,
x.plan_id,
x.object_name,
x.the_id,
x.cost
]
return None, []
code_hole.append(code)
| [
"[email protected]"
] | |
d039f7c165307c3cab9557169d0d0820f5754329 | cb4e07b2a5dd30804ce428ec84d9e9f77709fcd5 | /swea/D3/10570. 제곱 팰린드롬 수.py | 9345f0450146e6efeba20eb3d88dd2e9b24a99b9 | [] | no_license | jbsam2/algo_problem | 141c17003e88a69afdeea93a723e7f27c4626fdc | 18f2cab5a9af2dec57b7fd6f8218badd7de822e4 | refs/heads/master | 2023-05-18T10:03:00.408300 | 2021-06-02T10:36:50 | 2021-06-02T10:36:50 | 282,104,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | def c(num):return 1 if num==int(num**(0.5))**2 else 0
for t in range(int(input())):
a,b=map(int,input().split());ans=0
for i in range(a,b+1):
p=int(i**(0.5))
if c(i) and str(i)==str(i)[::-1] and str(p)==str(p)[::-1]:ans+=1
print(f'#{t+1}',ans) | [
"[email protected]"
] | |
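The one-liner above is quite dense; an equivalent, more explicit sketch of the same per-case check (same SWEA input format assumed):
def is_palindrome(s):
    return s == s[::-1]
T = int(input())
for tc in range(1, T + 1):
    a, b = map(int, input().split())
    count = 0
    for i in range(a, b + 1):
        root = int(i ** 0.5)
        # i must be a perfect square, and both i and its square root must read the same backwards
        if root * root == i and is_palindrome(str(i)) and is_palindrome(str(root)):
            count += 1
    print(f'#{tc}', count)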
5634ebaa358971f4de28704f86ff95ab91d76915 | 22b93005b05aa4cbfa6287c42e07244b9bf83be9 | /mlflow/ml_package_versions.py | b5be01dffc43abf48dbe7e9ab8fc1f6dee71ebf8 | [
"Apache-2.0"
] | permissive | dbczumar/mlflow | 63ede1f21966def17ded0da9c8e92a207b34b90d | e293a73b510c924cbca50b6337b6d6f9fd9f8f1b | refs/heads/master | 2023-08-31T23:40:55.475707 | 2023-07-15T04:22:18 | 2023-07-15T04:22:18 | 138,797,518 | 1 | 3 | Apache-2.0 | 2023-08-23T23:01:08 | 2018-06-26T21:51:19 | Python | UTF-8 | Python | false | false | 5,987 | py | # This file was auto-generated by update_ml_package_versions.py.
# Please do not edit it manually.
_ML_PACKAGE_VERSIONS = {
"sklearn": {
"package_info": {
"pip_release": "scikit-learn"
},
"models": {
"minimum": "0.22.1",
"maximum": "1.3.0"
},
"autologging": {
"minimum": "0.22.1",
"maximum": "1.3.0"
}
},
"pytorch": {
"package_info": {
"pip_release": "torch"
},
"models": {
"minimum": "1.6.0",
"maximum": "2.0.1"
},
"autologging": {
"minimum": "1.6.0",
"maximum": "2.0.1"
}
},
"pytorch-lightning": {
"package_info": {
"pip_release": "pytorch-lightning"
},
"autologging": {
"minimum": "1.0.5",
"maximum": "2.0.5"
}
},
"tensorflow": {
"package_info": {
"pip_release": "tensorflow"
},
"models": {
"minimum": "2.3.0",
"maximum": "2.13.0"
},
"autologging": {
"minimum": "2.3.0",
"maximum": "2.13.0"
}
},
"xgboost": {
"package_info": {
"pip_release": "xgboost"
},
"models": {
"minimum": "1.1.1",
"maximum": "1.7.6"
},
"autologging": {
"minimum": "1.1.1",
"maximum": "1.7.6"
}
},
"lightgbm": {
"package_info": {
"pip_release": "lightgbm"
},
"models": {
"minimum": "2.3.1",
"maximum": "4.0.0"
},
"autologging": {
"minimum": "2.3.1",
"maximum": "4.0.0"
}
},
"catboost": {
"package_info": {
"pip_release": "catboost"
},
"models": {
"minimum": "0.23.1",
"maximum": "1.2"
}
},
"gluon": {
"package_info": {
"pip_release": "mxnet"
},
"models": {
"minimum": "1.5.1",
"maximum": "1.9.1"
},
"autologging": {
"minimum": "1.5.1",
"maximum": "1.9.1"
}
},
"fastai": {
"package_info": {
"pip_release": "fastai"
},
"models": {
"minimum": "2.4.1",
"maximum": "2.7.12"
},
"autologging": {
"minimum": "2.4.1",
"maximum": "2.7.12"
}
},
"onnx": {
"package_info": {
"pip_release": "onnx"
},
"models": {
"minimum": "1.7.0",
"maximum": "1.14.0"
}
},
"spacy": {
"package_info": {
"pip_release": "spacy"
},
"models": {
"minimum": "2.2.4",
"maximum": "3.6.0"
}
},
"statsmodels": {
"package_info": {
"pip_release": "statsmodels"
},
"models": {
"minimum": "0.11.1",
"maximum": "0.14.0"
},
"autologging": {
"minimum": "0.11.1",
"maximum": "0.14.0"
}
},
"spark": {
"package_info": {
"pip_release": "pyspark"
},
"models": {
"minimum": "3.0.0",
"maximum": "3.4.1"
},
"autologging": {
"minimum": "3.0.0",
"maximum": "3.4.1"
}
},
"mleap": {
"package_info": {
"pip_release": "mleap"
},
"models": {
"minimum": "0.18.0",
"maximum": "0.23.0"
}
},
"prophet": {
"package_info": {
"pip_release": "prophet"
},
"models": {
"minimum": "1.0.1",
"maximum": "1.1.4"
}
},
"pmdarima": {
"package_info": {
"pip_release": "pmdarima"
},
"models": {
"minimum": "1.8.0",
"maximum": "2.0.3"
}
},
"diviner": {
"package_info": {
"pip_release": "diviner"
},
"models": {
"minimum": "0.1.0",
"maximum": "0.1.1"
}
},
"h2o": {
"package_info": {
"pip_release": "h2o"
},
"models": {
"minimum": "3.40.0.1",
"maximum": "3.42.0.1"
}
},
"shap": {
"package_info": {
"pip_release": "shap"
},
"models": {
"minimum": "0.41.0",
"maximum": "0.42.0"
}
},
"paddle": {
"package_info": {
"pip_release": "paddlepaddle"
},
"models": {
"minimum": "2.4.1",
"maximum": "2.5.0"
}
},
"transformers": {
"package_info": {
"pip_release": "transformers"
},
"models": {
"minimum": "4.25.1",
"maximum": "4.30.2"
},
"autologging": {
"minimum": "4.25.1",
"maximum": "4.30.2"
}
},
"openai": {
"package_info": {
"pip_release": "openai"
},
"models": {
"minimum": "0.27.2",
"maximum": "0.27.8"
}
},
"langchain": {
"package_info": {
"pip_release": "langchain"
},
"models": {
"minimum": "0.0.169",
"maximum": "0.0.232"
}
},
"sentence_transformers": {
"package_info": {
"pip_release": "sentence-transformers"
},
"models": {
"minimum": "2.2.2",
"maximum": "2.2.2"
}
},
"johnsnowlabs": {
"package_info": {
"pip_release": "johnsnowlabs"
},
"models": {
"minimum": "4.4.6",
"maximum": "5.0.0"
}
}
}
| [
"[email protected]"
] | |
615af758d32d2c1268ec81eea7298e8b8de7ef55 | 576cc83449e10fd3f98281970c46016ea7a5aea2 | /demo01/demo04-resize.py | 7fe6931b6b3219fd8402aa1089ff29d446980c6a | [] | no_license | HotView/PycharmProjects | 215ab9edd341e3293daebcf86d97537f8cd28d75 | 61393fe5ba781a8c1216a5cbe7e0d06149a10190 | refs/heads/master | 2020-06-02T07:41:53.608742 | 2019-11-13T08:31:57 | 2019-11-13T08:31:57 | 191,085,178 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from matplotlib import pyplot as plt
import numpy as np
import cv2
img = cv2.imread("book01-.jpg")
img_cut = img[1000:,:]
img_resize = cv2.resize(img_cut,(600,600),interpolation=cv2.INTER_CUBIC)
#cv2.imwrite("book01resize-.jpg",img_resize)
cv2.imshow("origin",img)
cv2.imshow("reszie",img_resize)
cv2.waitKey()
cv2.destroyAllWindows() | [
"[email protected]"
] | |
2df9bb0f75e0aa888664ef8141109604c4fb80ce | 2f557f60fc609c03fbb42badf2c4f41ef2e60227 | /DQMServices/Components/python/test/test_good_online_run_cfg.py | b7f1fc42e7ebfd7330bb6887869aa39da91b28bf | [
"Apache-2.0"
] | permissive | CMS-TMTT/cmssw | 91d70fc40a7110832a2ceb2dc08c15b5a299bd3b | 80cb3a25c0d63594fe6455b837f7c3cbe3cf42d7 | refs/heads/TMTT_1060 | 2020-03-24T07:49:39.440996 | 2020-03-04T17:21:36 | 2020-03-04T17:21:36 | 142,576,342 | 3 | 5 | Apache-2.0 | 2019-12-05T21:16:34 | 2018-07-27T12:48:13 | C++ | UTF-8 | Python | false | false | 470 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("TestDQMFileSaver")
process.load("DQMServices.Components.test.test_good_online_basic_cfi")
process.load("DQMServices.Components.test.MessageLogger_cfi")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.dqmmodules = cms.Path(process.dqmEnv+process.dqmSaver)
process.dqmSaver.convention = 'Online'
process.dqmEnv.subSystemFolder = 'TestSystem'
| [
"[email protected]"
] | |
9137f27ec4c8f050a41a19a14e938c78fb1cd0e9 | 3dd43ff0dab514a39f611487ab421256b3b5b13b | /scripts/client/gui/Scaleform/daapi/view/lobby/hangar/carousels/ranked/carousel_data_provider.py | 048d8fce65aedbf46febbd1c5c6f3b312913163c | [] | no_license | kusaku/wotscripts | 04ab289e3fec134e290355ecf81cf703af189f72 | a89c2f825d3c7dade7bc5163a6c04e7f5bab587d | refs/heads/master | 2023-08-20T00:17:36.852522 | 2018-02-26T14:53:44 | 2018-02-26T14:53:44 | 80,610,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | # Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/hangar/carousels/ranked/carousel_data_provider.py
from gui.Scaleform.daapi.view.lobby.hangar.carousels.basic.carousel_data_provider import HangarCarouselDataProvider
from gui.Scaleform.locale.RANKED_BATTLES import RANKED_BATTLES
from gui.shared.formatters import text_styles
from gui.shared.gui_items.Vehicle import Vehicle
from gui.shared.utils.functions import makeTooltip
class RankedCarouselDataProvider(HangarCarouselDataProvider):
@classmethod
def _vehicleComparisonKey(cls, vehicle):
result = [vehicle.getCustomState() == Vehicle.VEHICLE_STATE.UNSUITABLE_TO_QUEUE]
result.extend(super(RankedCarouselDataProvider, cls)._vehicleComparisonKey(vehicle))
return result
def _buildVehicle(self, vehicle):
result = super(RankedCarouselDataProvider, self)._buildVehicle(vehicle)
state, _ = vehicle.getState()
if state == Vehicle.VEHICLE_STATE.UNSUITABLE_TO_QUEUE:
result['lockedTooltip'] = makeTooltip(RANKED_BATTLES.RANKEDBATTLESCAROUSEL_LOCKEDTOOLTIP_HEADER, RANKED_BATTLES.RANKEDBATTLESCAROUSEL_LOCKEDTOOLTIP_BODY)
result['clickEnabled'] = True
return result | [
"[email protected]"
] | |
a0adb96467c96dad6dd4c36cc41e36636bc0d50b | b2625b1a1ef4a3a255ae88b6d77c425727187eeb | /.dev_scripts/github/update_copyright.py | 74320198598ed850edad4f2404605c54f1b4e17f | [
"Apache-2.0"
] | permissive | wojiazaiyugang/mmpose | acd4083d142c5c4c2dd87e6be94a5891a42d2797 | 8947b39294b037e8272c6cf2f53ae4aa7d22193b | refs/heads/master | 2023-09-01T23:45:43.857657 | 2021-11-23T03:03:02 | 2021-11-23T03:03:02 | 356,105,054 | 0 | 0 | Apache-2.0 | 2021-09-16T06:36:44 | 2021-04-09T02:08:50 | Python | UTF-8 | Python | false | false | 2,928 | py | #!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import re
import sys
HEADER = 'Copyright (c) OpenMMLab. All rights reserved.\n'
HEADER_KEYWORDS = {'Copyright', 'License'}
def contains_header(lines, comment_symbol, max_header_lines):
for line in lines[:max_header_lines]:
if line.startswith('#!'):
# skip shebang line
continue
elif re.match(f'{comment_symbol}.*({"|".join(HEADER_KEYWORDS)})',
line):
return True
return False
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'files',
type=str,
nargs='*',
help='Files to add copyright header. If an empty list is given, '
'search target files according to "--src", "--exclude" and '
'"--suffixes"')
parser.add_argument(
'--src', type=str, default=None, help='Root path to search files.')
parser.add_argument(
'--exclude', type=str, default=None, help='Path to exclude in search.')
parser.add_argument(
'--suffixes',
type=str,
nargs='+',
default=['.py', '.c', '.cpp', '.cu', '.sh'],
help='Only files with one of the given suffixes will be searched.')
parser.add_argument(
'--max-header-lines',
type=int,
default=5,
help='Only checkout copyright information in the first several lines '
'of a file.')
args = parser.parse_args()
return args
def main():
args = parse_args()
file_list = []
if args.files:
file_list = args.files
else:
assert args.src is not None
for root, _, files in os.walk(args.src):
if args.exclude and osp.realpath(root).startswith(
osp.realpath(args.exclude)):
continue
for file in files:
if osp.splitext(file)[1] in args.suffixes:
file_list.append(osp.join(root, file))
modified = False
for file in file_list:
suffix = osp.splitext(file)[1]
if suffix in {'.py', '.sh'}:
comment_symbol = '# '
elif suffix in {'.c', '.cpp', '.cu'}:
comment_symbol = '// '
else:
raise ValueError(f'Comment symbol of files with suffix {suffix} '
'is unspecified.')
with open(file, 'r') as f:
lines = f.readlines()
if not contains_header(lines, comment_symbol, args.max_header_lines):
if lines and lines[0].startswith('#!'):
lines.insert(1, comment_symbol + HEADER)
else:
lines.insert(0, comment_symbol + HEADER)
with open(file, 'w') as f:
f.writelines(lines)
modified = True
return int(modified)
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
3dc4fcb72c535bd5635233808c4b20178c15fe20 | c2849586a8f376cf96fcbdc1c7e5bce6522398ca | /ch39/interfacetracer.py | 033591fd7329b9bc42e2527b180411b9735c1e96 | [] | no_license | freebz/Learning-Python | 0559d7691517b4acb0228d1cc76de3e93915fb27 | 7f577edb6249f4bbcac4f590908b385192dbf308 | refs/heads/master | 2020-09-23T01:48:24.009383 | 2019-12-02T12:26:40 | 2019-12-02T12:26:40 | 225,371,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,033 | py | def Tracer(aClass): # @ 데코레이션할 때
class Wrapper:
        def __init__(self, *args, **kargs):           # On instance creation
            self.fetches = 0
            self.wrapped = aClass(*args, **kargs)     # Use enclosing scope name
        def __getattr__(self, attrname):
            print('Trace: ' + attrname)               # Catches all but its own attributes
            self.fetches += 1
            return getattr(self.wrapped, attrname)    # Delegate to the wrapped object
    return Wrapper
if __name__ == '__main__':
    @Tracer
    class Spam:                                       # Spam = Tracer(Spam)
        def display(self):                            # Spam is rebound to Wrapper
            print('Spam!' * 8)
    @Tracer
    class Person:                                     # Person = Tracer(Person)
        def __init__(self, name, hours, rate):        # Wrapper remembers Person
            self.name = name
            self.hours = hours
            self.rate = rate
        def pay(self):                                # Accesses outside the class are traced
            return self.hours * self.rate             # In-method accesses are not traced
    food = Spam()                                     # Triggers Wrapper()
    food.display()                                    # Triggers __getattr__
    print([food.fetches])
    bob = Person('Bob', 40, 50)                       # bob is really a Wrapper
    print(bob.name)                                   # Wrapper embeds a Person
    print(bob.pay())
    print('')
    sue = Person('Sue', rate=100, hours=60)           # sue is a different Wrapper
    print(sue.name)                                   # sue has a different Person
    print(sue.pay())
    print(bob.name)                                   # bob has different state
    print(bob.pay())
    print([bob.fetches, sue.fetches])                 # Wrapper attributes are not traced
| [
"[email protected]"
] | |
2b5dca7db0f58ee69e6318f2c079d184c1943643 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/python2.7/UserList.py | 873357b193bd393a87bea69e9ef416b43aad7303 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | /home/action/.parts/packages/python2/2.7.6/lib/python2.7/UserList.py | [
"[email protected]"
] | |
76884be84b73cffc74cbf5157ec534df610216e9 | 6a5ce7d885db1baa5a9d43b26f0ae623a5ef0f01 | /azure-mgmt-web/azure/mgmt/web/models/web_site_management_client_enums.py | fcb642b79aa76c81394fb900d1b60c22ede03d1c | [
"Apache-2.0"
] | permissive | JammyBrand82/azure-sdk-for-python | 333af194ff9143ec77f49203a5a71f15c399f278 | c65e189cd41bd3464556b17bfcdee1303867996c | refs/heads/master | 2021-01-17T18:31:10.661151 | 2016-03-17T21:03:08 | 2016-03-17T21:03:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,118 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class DomainStatus(Enum):
active = "Active"
awaiting = "Awaiting"
cancelled = "Cancelled"
confiscated = "Confiscated"
disabled = "Disabled"
excluded = "Excluded"
expired = "Expired"
failed = "Failed"
held = "Held"
locked = "Locked"
parked = "Parked"
pending = "Pending"
reserved = "Reserved"
reverted = "Reverted"
suspended = "Suspended"
transferred = "Transferred"
unknown = "Unknown"
unlocked = "Unlocked"
unparked = "Unparked"
updated = "Updated"
json_converter_failed = "JsonConverterFailed"
class ProvisioningState(Enum):
succeeded = "Succeeded"
failed = "Failed"
canceled = "Canceled"
in_progress = "InProgress"
deleting = "Deleting"
class AzureResourceType(Enum):
website = "Website"
traffic_manager = "TrafficManager"
class CustomHostNameDnsRecordType(Enum):
cname = "CName"
a = "A"
class HostNameType(Enum):
verified = "Verified"
managed = "Managed"
class StatusOptions(Enum):
ready = "Ready"
pending = "Pending"
class UsageState(Enum):
normal = "Normal"
exceeded = "Exceeded"
class SiteAvailabilityState(Enum):
normal = "Normal"
limited = "Limited"
disaster_recovery_mode = "DisasterRecoveryMode"
class SslState(Enum):
disabled = "Disabled"
sni_enabled = "SniEnabled"
ip_based_enabled = "IpBasedEnabled"
class DatabaseServerType(Enum):
my_sql = "MySql"
sql_server = "SQLServer"
sql_azure = "SQLAzure"
custom = "Custom"
class ManagedPipelineMode(Enum):
integrated = "Integrated"
classic = "Classic"
class SiteLoadBalancing(Enum):
weighted_round_robin = "WeightedRoundRobin"
least_requests = "LeastRequests"
least_response_time = "LeastResponseTime"
weighted_total_traffic = "WeightedTotalTraffic"
request_hash = "RequestHash"
class AutoHealActionType(Enum):
recycle = "Recycle"
log_event = "LogEvent"
custom_action = "CustomAction"
class UnauthenticatedClientAction(Enum):
redirect_to_login_page = "RedirectToLoginPage"
allow_anonymous = "AllowAnonymous"
class BuiltInAuthenticationProvider(Enum):
azure_active_directory = "AzureActiveDirectory"
facebook = "Facebook"
google = "Google"
microsoft_account = "MicrosoftAccount"
twitter = "Twitter"
class HostingEnvironmentStatus(Enum):
preparing = "Preparing"
ready = "Ready"
scaling = "Scaling"
deleting = "Deleting"
class InternalLoadBalancingMode(Enum):
none = "None"
web = "Web"
publishing = "Publishing"
class ComputeModeOptions(Enum):
shared = "Shared"
dedicated = "Dedicated"
class WorkerSizeOptions(Enum):
default = "Default"
small = "Small"
medium = "Medium"
large = "Large"
class AccessControlEntryAction(Enum):
permit = "Permit"
deny = "Deny"
class ManagedHostingEnvironmentStatus(Enum):
preparing = "Preparing"
ready = "Ready"
deleting = "Deleting"
class DomainType(Enum):
regular = "Regular"
soft_deleted = "SoftDeleted"
class NotificationLevel(Enum):
critical = "Critical"
warning = "Warning"
information = "Information"
non_urgent_suggestion = "NonUrgentSuggestion"
class Channels(Enum):
notification = "Notification"
api = "Api"
email = "Email"
all = "All"
class CloneAbilityResult(Enum):
cloneable = "Cloneable"
partially_cloneable = "PartiallyCloneable"
not_cloneable = "NotCloneable"
class LogLevel(Enum):
off = "Off"
verbose = "Verbose"
information = "Information"
warning = "Warning"
error = "Error"
class FrequencyUnit(Enum):
day = "Day"
hour = "Hour"
class BackupRestoreOperationType(Enum):
default = "Default"
clone = "Clone"
relocation = "Relocation"
class BackupItemStatus(Enum):
in_progress = "InProgress"
failed = "Failed"
succeeded = "Succeeded"
timed_out = "TimedOut"
created = "Created"
skipped = "Skipped"
partially_succeeded = "PartiallySucceeded"
delete_in_progress = "DeleteInProgress"
delete_failed = "DeleteFailed"
deleted = "Deleted"
| [
"[email protected]"
] | |
8d0a0b0dec63a2130b9ada6f938cfbddbaeeb4a8 | 10920b11a22a20f9a7f63157818327f3c4e41888 | /jibby_opencv/Object Recognition/two.py | 10e258ca8fedf5c5342ee07ae449937c19975227 | [] | no_license | dsall/computerv | e331b3d025c8cec0119b789107d1fef18d08f02a | 40671d618c31ad9d9b20fc902a218a8e281098bc | refs/heads/master | 2021-09-15T09:33:08.495580 | 2018-05-29T23:41:42 | 2018-05-29T23:41:42 | 135,363,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 5 00:23:15 2018
@author: djibrilsall
"""
import numpy as np
import cv2
# multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
cap = cv2.VideoCapture(1)
while 1:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
cv2.imshow('img',img)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows() | [
"[email protected]"
] | |
f01c9f4cb6e578f40f952d27a77dbdab38e9b181 | 0b5ab7349485da4ea40ca343bc50f4cab74c917c | /week09/tutorial/snippets/urls.py | 2dcc6decba1a0c26454d9a83c0f3e37011525c86 | [] | no_license | workherd/Python006-006 | 9bf2782ccda037de9af98eb7daa87fd1edeb3caf | 7aa176c3cf4effd015802b550edfb70f859e94d9 | refs/heads/main | 2023-04-29T14:37:43.545376 | 2021-05-16T04:13:08 | 2021-05-16T04:13:08 | 323,247,475 | 1 | 0 | null | 2020-12-21T06:13:42 | 2020-12-21T06:13:42 | null | UTF-8 | Python | false | false | 2,227 | py |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from snippets import views
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register(r'snippets', views.SnippetViewSet)
router.register(r'users', views.UserViewSet)
# The API URLs are now determined automatically by the router.
urlpatterns = [
path('', include(router.urls)),
path('aaa/', views.api_root01),
]
# from rest_framework.urlpatterns import format_suffix_patterns
# from snippets.views import SnippetViewSet, UserViewSet, api_root
# from rest_framework import renderers
# snippet_list = SnippetViewSet.as_view({
# 'get': 'list',
# 'post': 'create'
# })
# snippet_detail = SnippetViewSet.as_view({
# 'get': 'retrieve',
# 'put': 'update',
# 'patch': 'partial_update',
# 'delete': 'destroy'
# })
# snippet_highlight = SnippetViewSet.as_view({
# 'get': 'highlight'
# }, renderer_classes=[renderers.StaticHTMLRenderer])
# user_list = UserViewSet.as_view({
# 'get': 'list'
# })
# user_detail = UserViewSet.as_view({
# 'get': 'retrieve'
# })
# urlpatterns = [
# path('', views.api_root),
# path('snippets/',
# views.SnippetList.as_view(),
# name='snippet-list'),
# path('snippets/<int:pk>/',
# views.SnippetDetail.as_view(),
# name='snippet-detail'),
# path('snippets/<int:pk>/highlight/',
# views.SnippetHighlight.as_view(),
# name='snippet-highlight'),
# path('users/',
# views.UserList.as_view(),
# name='user-list'),
# path('users/<int:pk>/',
# views.UserDetail.as_view(),
# name='user-detail')
# ]
# # Handles URLs such as http://example.com/api/items/4.json
# urlpatterns = format_suffix_patterns(urlpatterns)
# urlpatterns = format_suffix_patterns([
# path('', api_root),
# path('snippets/', snippet_list, name='snippet-list'),
# path('snippets/<int:pk>/', snippet_detail, name='snippet-detail'),
# path('snippets/<int:pk>/highlight/', snippet_highlight, name='snippet-highlight'),
# path('users/', user_list, name='user-list'),
# path('users/<int:pk>/', user_detail, name='user-detail')
# ]) | [
"[email protected]"
] | |
27686a5573582bba05fe1c037ddb797cb55f040b | 3af6960c805e9903eb27c09d8bc7ebc77f5928fe | /problems/0216_Combination_Sum_III/solution.py | e332183e542f99042b9b4fe0e2ddce1af912459b | [] | no_license | romain-li/leetcode | b3c8d9d4473eebd039af16ad2d4d99abc2768bdd | 5e82b69bd041c2c168d75cb9179a8cbd7bf0173e | refs/heads/master | 2020-06-04T20:05:03.592558 | 2015-06-08T18:05:03 | 2015-06-08T18:05:03 | 27,431,664 | 2 | 1 | null | 2015-06-08T18:05:04 | 2014-12-02T12:31:58 | Python | UTF-8 | Python | false | false | 139 | py | class Solution:
# @param {integer} k
# @param {integer} n
# @return {integer[][]}
    def combinationSum3(self, k, n):
        # choose k distinct numbers from 1..9 whose sum is n
        from itertools import combinations
        return [list(c) for c in combinations(range(1, 10), k) if sum(c) == n]
| [
"[email protected]"
] | |
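An alternative backtracking sketch of the same search (k distinct numbers from 1-9 summing to n), closer to how the recursion is usually written:
def combination_sum3(k, n):
    res = []
    def backtrack(start, remaining, path):
        if len(path) == k:
            if remaining == 0:
                res.append(path[:])
            return
        for i in range(start, 10):
            if i > remaining:
                break
            backtrack(i + 1, remaining - i, path + [i])
    backtrack(1, n, [])
    return res
print(combination_sum3(3, 7))   # [[1, 2, 4]]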
0ef3a3afbb0a5ad1f607d1f4fa56b8207a2c978d | 857a9e588a04b40a66b6ca115063cb67ef0427ea | /timemachines/skaters/glu/glusimple.py | 45f7a1894d2d9f43387ba47e6408b68c40771bb8 | [
"MIT"
] | permissive | rambam613/timemachines | 81b88357498871f77efed0faf9c25b4c408d822c | cd243d4606b4ad9c1d419988fc6c04b0964af2e6 | refs/heads/main | 2023-07-03T07:06:24.421114 | 2021-08-07T17:42:40 | 2021-08-07T17:42:40 | 393,793,785 | 1 | 0 | MIT | 2021-08-07T21:13:35 | 2021-08-07T21:13:34 | null | UTF-8 | Python | false | false | 1,092 | py | from timemachines.skatertools.utilities.conventions import Y_TYPE, A_TYPE, R_TYPE, E_TYPE, T_TYPE, wrap
from typing import Any
from timemachines.skatertools.components.parade import parade
from timemachines.skatertools.utilities.nonemath import nonecast
from timemachines.skatertools.ensembling.ensemblefactory import precision_weighted_ensemble_factory
def glu_simple(y :Y_TYPE, s, k:int, a:A_TYPE =None, t:T_TYPE =None, e:E_TYPE =None, r:R_TYPE=None):
""" Rolling gluon
"""
assert r is not None
y0 = wrap(y)[0]
if not s.get('p'):
s = {'p':{},
'x':y0,
'rho':r}
assert 0 <= s['rho'] <= 1, 'Expecting rho=r to be between 0 and 1'
else:
assert abs(r-s['rho'])<1e-6,'rho=r is immutable'
if y0 is None:
return None, s, None
else:
s['x'] = s['rho']*s['x'] + (1-s['rho'])*y0 # Make me better !
x = [s['x']]*k
_we_ignore_bias, x_std, s['p'] = parade(p=s['p'], x=x, y=y0)
x_std_fallback = nonecast(x_std,fill_value=1.0)
return [s['x']] * k, x_std_fallback, s
| [
"[email protected]"
] | |
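A hedged sketch of how a skater such as glu_simple above is typically driven: the state dict s starts empty and is threaded through successive calls; the synthetic series and the k and r values are illustrative only.
import numpy as np
ys = np.sin(np.linspace(0, 10, 200)) + 0.1 * np.random.randn(200)   # toy series
s = {}
for yi in ys:
    x_mean, x_std, s = glu_simple(y=yi, s=s, k=3, r=0.9)
print('k-step-ahead point forecasts:', x_mean)
print('scale estimates:', x_std)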
4d4fa4d95d6b0e2d819c40e250b7941f2d805da8 | 7ac506e9890ff3295541bdd00bc3c40a13b23826 | /sr/imsitu_scorer.py | 122bbe01b33b7def418dc9ba864a9d0f3ba9df79 | [] | no_license | thilinicooray/CARN | dd64cde17e5c9f0e68e94b340c9271ae2a79f58a | 7f5e85c98d3178a3fe9b9f1b5a93a39ace01ccc5 | refs/heads/master | 2020-07-25T05:42:38.988848 | 2020-04-07T03:43:01 | 2020-04-07T03:43:01 | 208,182,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,433 | py | import torch
import json
class imsitu_scorer():
def __init__(self, encoder,topk, nref, write_to_file=False):
self.score_cards = []
self.topk = topk
self.nref = nref
self.encoder = encoder
self.hico_pred = None
self.hico_target = None
self.write_to_file = write_to_file
if self.write_to_file:
self.role_dict = {}
self.value_all_dict = {}
self.role_pred = {}
self.vall_all_correct = {}
self.fail_verb_role = {}
self.all_verb_role = {}
self.fail_agent = {}
self.pass_list = []
self.all_res = {}
self.correct_roles = {}
self.topk_issue = {}
def clear(self):
self.score_cards = {}
def add_point_noun(self, gt_verbs, labels_predict, gt_labels):
batch_size = gt_verbs.size()[0]
for i in range(batch_size):
gt_verb = gt_verbs[i]
label_pred = labels_predict[i]
gt_label = gt_labels[i]
gt_v = gt_verb
role_set = self.encoder.get_role_ids(gt_v)
new_card = {"verb":0.0, "value":0.0, "value*":0.0, "n_value":0.0, "value-all":0.0, "value-all*":0.0}
score_card = new_card
verb_found = False
gt_role_count = self.encoder.get_role_count(gt_v)
gt_role_list = self.encoder.verb2_role_dict[self.encoder.verb_list[gt_v]]
score_card["n_value"] += gt_role_count
all_found = True
pred_list = []
for k in range(gt_role_count):
label_id = torch.max(label_pred[k],0)[1]
pred_list.append(label_id.item())
found = False
for r in range(0,self.nref):
gt_label_id = gt_label[r][k]
if label_id == gt_label_id:
found = True
break
if not found: all_found = False
#both verb and at least one val found
if found and verb_found: score_card["value"] += 1
#at least one val found
if found: score_card["value*"] += 1
#both verb and all values found
score_card["value*"] /= gt_role_count
score_card["value"] /= gt_role_count
if all_found and verb_found: score_card["value-all"] += 1
#all values found
if all_found: score_card["value-all*"] += 1
self.score_cards.append(new_card)
def add_point_noun_single_role(self, labels_predict, gt_labels):
batch_size = gt_labels.size()[0]
for i in range(batch_size):
label_pred = labels_predict[i]
gt_label = gt_labels[i]
new_card = {"verb":0.0, "value":0.0, "value*":0.0, "n_value":0.0, "value-all":0.0, "value-all*":0.0}
score_card = new_card
verb_found = False
score_card["n_value"] += 1
all_found = True
label_id = torch.max(label_pred,0)[1]
found = False
for r in range(0,self.nref):
gt_label_id = gt_label[r]
if label_id == gt_label_id:
found = True
break
if not found: all_found = False
#both verb and at least one val found
if found and verb_found: score_card["value"] += 1
#at least one val found
if found: score_card["value*"] += 1
score_card["value*"] /= 1
score_card["value"] /= 1
if all_found and verb_found: score_card["value-all"] += 1
#all values found
if all_found: score_card["value-all*"] += 1
self.score_cards.append(new_card)
def add_point_noun_log(self, img_id, gt_verbs, labels_predict, gt_labels):
batch_size = gt_verbs.size()[0]
for i in range(batch_size):
imgid = img_id[i]
gt_verb = gt_verbs[i]
label_pred = labels_predict[i]
gt_label = gt_labels[i]
gt_v = gt_verb
new_card = {"verb":0.0, "value":0.0, "value*":0.0, "n_value":0.0, "value-all":0.0, "value-all*":0.0}
score_card = new_card
verb_found = False
gt_role_count = self.encoder.get_role_count(gt_v)
gt_role_list = self.encoder.verb2_role_dict[self.encoder.verb_list[gt_v]]
score_card["n_value"] += gt_role_count
if self.write_to_file:
self.all_res[imgid] = {'gtv': self.encoder.verb_list[gt_v], 'role_pred':{}, 'all_correct': True}
all_found = True
pred_situ = []
for k in range(0, gt_role_count):
if self.write_to_file:
all_val = self.encoder.verb_list[gt_v] + '_' + gt_role_list[k]
if all_val not in self.all_verb_role:
self.all_verb_role[all_val] = 1
else:
self.all_verb_role[all_val] += 1
label_id = torch.max(label_pred[k],0)[1]
found = False
pred_situ.append({gt_role_list[k] : self.encoder.all_words[self.encoder.labelid2nlword[self.encoder.label_list[label_id]]]})
if self.write_to_file:
self.all_res[imgid]['role_pred'][gt_role_list[k]] = {'pred' : self.encoder.all_words[self.encoder.labelid2nlword[self.encoder.label_list[label_id]]],'is_correct':False}
for r in range(0,self.nref):
gt_label_id = gt_label[r][k]
#################################
if self.write_to_file: # logginf predictions for analysis
role = gt_role_list[k]
gt_label_name = self.encoder.label_list[gt_label_id]
pred_label_name = self.encoder.label_list[label_id]
if role not in self.role_dict:
self.role_dict[role] = {gt_label_name : [pred_label_name]}
elif gt_label_name not in self.role_dict[role]:
self.role_dict[role][gt_label_name] = [pred_label_name]
else:
self.role_dict[role][gt_label_name].append(pred_label_name)
#######################################################################
if label_id == gt_label_id:
if self.write_to_file:
self.all_res[imgid]['role_pred'][gt_role_list[k]]['is_correct'] = True
found = True
break
if not found:
all_found = False
if self.write_to_file:
self.all_res[imgid]['all_correct'] = False
fail_val = self.encoder.verb_list[gt_v] + '_' + gt_role_list[k]
if fail_val not in self.fail_verb_role:
self.fail_verb_role[fail_val] = 1
else:
self.fail_verb_role[fail_val] += 1
#both verb and at least one val found
if found and verb_found: score_card["value"] += 1
#at least one val found
if found: score_card["value*"] += 1
#both verb and all values found
score_card["value*"] /= gt_role_count
score_card["value"] /= gt_role_count
if all_found and verb_found: score_card["value-all"] += 1
#all values found
if all_found:
score_card["value-all*"] += 1
if self.write_to_file:
self.vall_all_correct[imgid] = pred_situ
else:
if self.write_to_file:
self.value_all_dict[imgid] = pred_situ
self.score_cards.append(new_card)
def add_point_noun_log_topk(self, img_id, gt_verbs, labels_predict, gt_labels):
batch_size = gt_verbs.size()[0]
for i in range(batch_size):
imgid = img_id[i]
gt_verb = gt_verbs[i]
label_pred = labels_predict[i]
gt_label = gt_labels[i]
gt_v = gt_verb
new_card = {"verb":0.0, "value":0.0, "value*":0.0, "n_value":0.0, "value-all":0.0, "value-all*":0.0}
score_card = new_card
verb_found = False
gt_role_count = self.encoder.get_role_count(gt_v)
gt_role_list = self.encoder.verb2_role_dict[self.encoder.verb_list[gt_v]]
score_card["n_value"] += gt_role_count
if self.write_to_file:
self.all_res[imgid] = {'gtv': self.encoder.verb_list[gt_v], 'role_pred':[], 'all_correct': True}
all_found = True
pred_situ = []
for k in range(0, gt_role_count):
if self.write_to_file:
all_val = self.encoder.verb_list[gt_v] + '_' + gt_role_list[k]
if all_val not in self.all_verb_role:
self.all_verb_role[all_val] = 1
else:
self.all_verb_role[all_val] += 1
#label_id = torch.max(label_pred[k],0)[1]
sorted_idx = torch.sort(label_pred[k], 0, True)[1]
found = False
for r in range(0,self.nref):
gt_label_id = gt_label[r][k]
role_found = (torch.sum(sorted_idx[0:5] == gt_label_id) == 1)
if role_found:
found = True
break
if not found:
all_found = False
#both verb and at least one val found
if found and verb_found: score_card["value"] += 1
#at least one val found
if found: score_card["value*"] += 1
#both verb and all values found
score_card["value*"] /= gt_role_count
score_card["value"] /= gt_role_count
if all_found and verb_found: score_card["value-all"] += 1
#all values found
if all_found:
score_card["value-all*"] += 1
if self.write_to_file:
self.vall_all_correct[imgid] = pred_situ
else:
if self.write_to_file:
self.value_all_dict[imgid] = pred_situ
self.score_cards.append(new_card)
def add_point_verb_only_eval(self, img_id, verb_predict, gt_verbs):
#encoded predictions should be batch x verbs x values #assumes the are the same order as the references
#encoded reference should be batch x 1+ references*roles,values (sorted)
batch_size = verb_predict.size()[0]
for i in range(batch_size):
verb_pred = verb_predict[i]
gt_verb = gt_verbs[i]
current_id = img_id[i]
#print('check sizes:', verb_pred.size(), gt_verb.size(), label_pred.size(), gt_label.size())
sorted_idx = torch.sort(verb_pred, 0, True)[1]
gt_v = gt_verb
new_card = {"verb":0.0, "value":0.0, "value*":0.0, "n_value":0.0, "value-all":0.0, "value-all*":0.0}
if self.write_to_file:
self.all_res[current_id] = {'gtv': self.encoder.verb_list[gt_verb.item()],
'predicted' : self.encoder.verb_list[sorted_idx[0]]}
score_card = new_card
verb_found = (torch.sum(sorted_idx[0:self.topk] == gt_v) == 1)
if verb_found:
score_card["verb"] += 1
if self.write_to_file:
self.pass_list.append(current_id)
self.all_res[current_id]['found'] = 0
self.score_cards.append(score_card)
def add_point_both(self, verb_predict, gt_verbs, labels_predict, gt_labels):
batch_size = gt_verbs.size()[0]
for i in range(batch_size):
verb_pred = verb_predict[i]
gt_verb = gt_verbs[i]
label_pred = labels_predict[i]
gt_label = gt_labels[i]
gt_v = gt_verb
role_set = self.encoder.get_role_ids(gt_v)
new_card = {"verb":0.0, "value":0.0, "value*":0.0, "n_value":0.0, "value-all":0.0, "value-all*":0.0}
score_card = new_card
sorted_idx = torch.sort(verb_pred, 0, True)[1]
verb_found = (torch.sum(sorted_idx[0:self.topk] == gt_v) == 1)
if verb_found:
score_card["verb"] += 1
#verb_found = False
gt_role_count = self.encoder.get_role_count(gt_v)
gt_role_list = self.encoder.verb2_role_dict[self.encoder.verb_list[gt_v]]
score_card["n_value"] += gt_role_count
all_found = True
pred_list = []
for k in range(gt_role_count):
label_id = torch.max(label_pred[k],0)[1]
pred_list.append(label_id.item())
found = False
for r in range(0,self.nref):
gt_label_id = gt_label[r][k]
if label_id == gt_label_id:
found = True
break
if not found: all_found = False
#both verb and at least one val found
if found and verb_found: score_card["value"] += 1
#at least one val found
if found: score_card["value*"] += 1
#both verb and all values found
score_card["value*"] /= gt_role_count
score_card["value"] /= gt_role_count
if all_found and verb_found: score_card["value-all"] += 1
#all values found
if all_found: score_card["value-all*"] += 1
self.score_cards.append(new_card)
def add_point_eval5_log_sorted(self, img_id, verb_predict, gt_verbs, labels_predict, gt_labels):
#encoded predictions should be batch x verbs x values #assumes the are the same order as the references
#encoded reference should be batch x 1+ references*roles,values (sorted)
batch_size = verb_predict.size()[0]
for i in range(batch_size):
current_id = img_id[i]
verb_pred = verb_predict[i]
gt_verb = gt_verbs[i]
label_pred = labels_predict[i]
gt_label = gt_labels[i]
sorted_idx = verb_pred
gt_v = gt_verb
new_card = {"verb":0.0, "value":0.0, "value*":0.0, "n_value":0.0, "value-all":0.0, "value-all*":0.0}
if self.write_to_file:
self.all_res[current_id] = {'gtv': gt_verb.item(),'found':-1, 'verbs':sorted_idx[:5].tolist(),
'pred_role_labels':[]}
score_card = new_card
verb_found = (torch.sum(sorted_idx[0:self.topk] == gt_v) == 1)
if verb_found:
score_card["verb"] += 1
if self.write_to_file:
self.all_res[current_id]['found'] = 0
if verb_found and self.topk == 5:
gt_idx = 0
for cur_idx in range(0,self.topk):
if sorted_idx[cur_idx] == gt_v:
gt_idx = cur_idx
break
label_pred = label_pred[self.encoder.max_role_count*gt_idx : self.encoder.max_role_count*(gt_idx+1)]
else:
label_pred = label_pred[:self.encoder.max_role_count]
gt_role_count = self.encoder.get_role_count(gt_v)
gt_role_list = self.encoder.verb2_role_dict[self.encoder.verb_list[gt_v]]
score_card["n_value"] += gt_role_count
all_found = True
for k in range(0, gt_role_count):
label_id = label_pred[k]
found = False
for r in range(0,self.nref):
gt_label_id = gt_label[r][k]
if label_id == gt_label_id:
found = True
break
if not found: all_found = False
#both verb and at least one val found
if found and verb_found: score_card["value"] += 1
#at least one val found
if found: score_card["value*"] += 1
#both verb and all values found
score_card["value*"] /= gt_role_count
score_card["value"] /= gt_role_count
if all_found and verb_found: score_card["value-all"] += 1
#all values found
if all_found: score_card["value-all*"] += 1
self.score_cards.append(score_card)
def get_average_results(self):
#average across score cards for the entire frame.
rv = {"verb":0, "value":0 , "value*":0 , "value-all":0, "value-all*":0}
total_len = len(self.score_cards)
for card in self.score_cards:
rv["verb"] += card["verb"]
rv["value-all"] += card["value-all"]
rv["value"] += card["value"]
rv["verb"] /= total_len
rv["value-all"] /= total_len
#rv["value-all*"] /= total_len
rv["value"] /= total_len
#rv["value*"] /= total_len
return rv
def get_average_results_both(self):
#average across score cards for the entire frame.
rv = {"verb":0, "value":0 , "value*":0 , "value-all":0, "value-all*":0}
total_len = len(self.score_cards)
for card in self.score_cards:
rv["verb"] += card["verb"]
rv["value-all*"] += card["value-all*"]
rv["value*"] += card["value*"]
rv["verb"] /= total_len
rv["value-all*"] /= total_len
rv["value*"] /= total_len
return rv
def get_average_results_nouns(self, groups = []):
#average across score cards for nouns only.
rv = {"verb":0, "value":0 , "value*":0 , "value-all":0, "value-all*":0}
total_len = len(self.score_cards)
for card in self.score_cards:
rv["value-all*"] += card["value-all*"]
rv["value*"] += card["value*"]
rv["value-all*"] /= total_len
rv["value*"] /= total_len
return rv | [
"[email protected]"
] | |
4febe8ed7ba4ae0ec44e162ddac26a0a35201331 | e73761fd861010f4dd2e2be09507d86bd905f4f5 | /scud/main/migrations/0002_sessionstouser.py | 78e5450bb8e12f64aa092230e4fccdbb5aac8446 | [] | no_license | BakdauletBolatE/rfid-system | a57ca2fbb3518f9df6683bf899be1d9455e55c2a | 24f023cc801dc0d24dedb4e7ecd27091c439c068 | refs/heads/main | 2023-03-13T05:14:37.043832 | 2021-02-22T06:12:31 | 2021-02-22T06:12:31 | 341,097,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | # Generated by Django 3.1.6 on 2021-02-18 17:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SessionsToUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Member_ID', models.CharField(max_length=255, verbose_name='Мембер ID')),
('allowed_members', models.BooleanField(default=False, verbose_name='Авторизован ли')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='main.users')),
],
),
]
| [
"[email protected]"
] | |
de41905ee008a0a8004b2f583f9b16d0ab569823 | 95e9ec4b3b0d86063da53a0e62e138cf794cce3a | /python/Django/20190523/test01/test01/settings.py | 199c6f60e7fba053de9340c0fb1a68759f7ad78a | [] | no_license | wjl626nice/1902 | c3d350d91925a01628c9402cbceb32ebf812e43c | 5a1a6dd59cdd903563389fa7c73a283e8657d731 | refs/heads/master | 2023-01-05T23:51:47.667675 | 2019-08-19T06:42:09 | 2019-08-19T06:42:09 | 180,686,044 | 4 | 1 | null | 2023-01-04T07:35:24 | 2019-04-11T00:46:43 | Python | UTF-8 | Python | false | false | 4,057 | py | """
Django settings for test01 project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root directory
# __file__ is the path of the current file
# os.path.abspath returns the file's absolute path
# os.path.dirname returns the directory containing the file
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# BASE_DIR = /Users/qingyun/1902/python/Django/20190523/test01
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wv+64-c5u)4iyx)v3jl*ix&3j=gu1+*&)djfido(7^nus$l21n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Module that maps URL routes (URIs) to view functions
ROOT_URLCONF = 'test01.urls'
# Template configuration
TEMPLATES = [
{
        # Template engine; jinja2 is another option
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Template locations; os.path.join keeps the code cross-platform, since path separators differ between operating systems.
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        # Windows path style: C:\\abc\\ab\\aa
        # Linux path style: /abc/ab/aa
        # os.path.join(BASE_DIR, 'templates') result: /Users/qingyun/1902/python/Django/20190523/test01/templates
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test01.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Database configuration
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
# Use the China time zone; PRC = People's Republic of China
TIME_ZONE = 'PRC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# URL prefix (alias) for static files such as css/js/images; effectively an alias for STATICFILES_DIRS
STATIC_URL = '/static/'
# All css/js/image files in the Django project are looked up from the directories configured here
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'statics')
]
| [
"[email protected]"
] | |
a4f1eb5a73bb211daab14e6aac02273d7a07b9c7 | 093b9569be9d1c4e5daf92efbebc38f680917b2d | /.history/base/models_20210829142734.py | 4c98bfd01c1d244a8078fbe1666e967e01960773 | [] | no_license | Justin-Panagos/todoList | 95b1e97ff71af1b0be58e7f8937d726a687cea4d | 10539219b59fcea00f8b19a406db3d4c3f4d289e | refs/heads/master | 2023-08-04T13:27:13.309769 | 2021-08-29T14:06:43 | 2021-08-29T14:06:43 | 400,827,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Task(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, null = True, blank=True)
    STATUS = [(0, 'P1'), (1, 'P2'), (2, 'P3'),]
    priority = models.CharField(max_length=5, choices=STATUS, default='',)
    title = models.CharField(max_length=200)
description = models.TextField(null=True, blank=True)
duedate = models.DateField(null=True, blank=True)
complete = models.BooleanField(default=False)
create =models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
class Meta:
ordering =['complete'] | [
"[email protected]"
] | |
aca6239bb1902abcf69c2fa4ab44b62d104cc3ee | 78f43f8bd07ae0fc91738a63cd7bbca08ae26066 | /leetcode/interval/least_interval.py | b5418240bed39df4bf2a1b61baecf70e18b8cf95 | [] | no_license | hanrick2000/LeetcodePy | 2f3a841f696005e8f0bf4cd33fe586f97173731f | b24fb0e7403606127d26f91ff86ddf8d2b071318 | refs/heads/master | 2022-04-14T01:34:05.044542 | 2020-04-12T06:11:29 | 2020-04-12T06:11:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | class Solution(object):
def leastInterval(self, tasks, n):
"""
:type tasks: List[str]
:type n: int
:rtype: int
"""
LETTER_NUM = 26
freqs = [0] * LETTER_NUM
for c in tasks:
freqs[ord(c) - ord('A')] += 1
freqs.sort() # in-place sort
ret = 0
while freqs[-1] > 0:
i = 0
while i <= n:
if freqs[-1] == 0:
break
# use most frequently appeared letter by turn
if i < LETTER_NUM and freqs[LETTER_NUM - i - 1] > 0:
freqs[LETTER_NUM - i - 1] -= 1
ret += 1
i += 1
freqs.sort()
return ret
| [
"[email protected]"
] | |
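A quick check of leastInterval above, using the classic cooling-interval example (LeetCode 621):
s = Solution()
print(s.leastInterval(["A", "A", "A", "B", "B", "B"], 2))   # 8: A B _ A B _ A B
print(s.leastInterval(["A", "A", "A", "B", "B", "B"], 0))   # 6: no idle slots needed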
434f2a4e5d4c626ba4768123e191fc6823872f15 | c9f1cc3a6715917d658a6e525b7c2d35b0380f9f | /Non_canonical_introns/Analisys/Join_final_tables_seq.py | cf7f3dde6d7fa321b0e71a9c47f123d3081f1b1d | [] | no_license | geparada/my_src | 4f84887130b985e84aad3d0d35e85911087d9b4f | 8d64f7ef51e1f74303ca88beb0ee964f546d8301 | refs/heads/master | 2021-01-17T01:50:50.414690 | 2017-03-14T10:01:50 | 2017-03-14T10:01:50 | 20,638,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,188 | py | import sys
import csv
def main(hg19, SJ_hg19, hg19_reads_seq_tags, GM12878, SJ_GM12878_paternal, SJ_GM12878_maternal, GM12878_reads_seq_tags_paternal, GM12878_reads_seq_tags_maternal, TOTAL_final_table):
csv.field_size_limit(1000000000)
reader1 = csv.reader(open(hg19), delimiter = ' ')
reader2 = csv.reader(open(SJ_hg19), delimiter = ' ')
reader3 = csv.reader(open(hg19_reads_seq_tags), delimiter = ' ')
reader4 = csv.reader(open(GM12878), delimiter = ' ')
reader5 = csv.reader(open(SJ_GM12878_paternal), delimiter = ' ')
reader6 = csv.reader(open(SJ_GM12878_maternal), delimiter = ' ')
reader7 = csv.reader(open(GM12878_reads_seq_tags_paternal), delimiter = ' ')
reader8 = csv.reader(open(GM12878_reads_seq_tags_maternal), delimiter = ' ')
reader9 = csv.reader(open(TOTAL_final_table), delimiter = ' ')
hg19_intron_reads = {}
GM12878_intron_reads = {}
reads_seq = {}
for row in reader1:
intron = row[0]
read = row[10].split(",")[:5]
hg19_intron_reads[intron] = read
for row in reader2:
read = row[0]
dn = row[7]
seq = row[14]
if dn!="GTAG" and dn!="GCAG" and dn!="ATAC":
reads_seq[read] = seq
for row in reader3:
read = row[0]
seq = row[1]
reads_seq[read] = seq
for row in reader4:
intron = row[0]
read = row[10].split(",")[:5]
GM12878_intron_reads[intron] = read
for row in reader5:
read = row[0]
dn = row[7]
seq = row[14]
if dn!="GTAG" and dn!="GCAG" and dn!="ATAC":
reads_seq[read] = seq
for row in reader6:
read = row[0]
dn = row[7]
seq = row[14]
if dn!="GTAG" and dn!="GCAG" and dn!="ATAC":
reads_seq[read] = seq
for row in reader7:
read = row[0]
seq = row[1]
reads_seq[read] = seq
for row in reader8:
read = row[0]
seq = row[1]
reads_seq[read] = seq
for row in reader9:
intron = row[0]
dn = row[6]
hg19 = int(row[9])
GM12878 = int(row[10])
if dn!="GTAG" and dn!="GCAG" and dn!="ATAC":
seqs_hg19 = []
seqs_GM12878 = []
try:
reads_hg19 = hg19_intron_reads[intron]
for read in reads_hg19:
seq = reads_seq[read]
seqs_hg19.append(seq)
except KeyError:
pass
try:
reads_GM12878 = GM12878_intron_reads[intron]
for read in reads_GM12878:
seq = reads_seq[read]
seqs_GM12878.append(seq)
except KeyError:
pass
if seqs_hg19 == []:
seqs_hg19 = "0"
if seqs_GM12878 == []:
seqs_GM12878 = "0"
print " ".join(row), ",".join(seqs_hg19), ",".join(seqs_GM12878)
#python ~/my_src/Analisys/Join_final_tables_seq.py ../hg19/ALL/introns.final_table.hg19.fixed.tags ../hg19/ALL/SJ.introns.blat1.TOTAL tags/hg19/TOTAL.tags.filter.final ../GM12878/NA12878_Joel_Rozowsky/STRANDED/TOTAL/introns.final_table.hg19.fixed.tags ../GM12878/NA12878_Joel_Rozowsky/STRANDED/TOTAL_paternal/ALL/SJ.introns.blat1.TOTAL ../GM12878/NA12878_Joel_Rozowsky/STRANDED/TOTAL_maternal/ALL/SJ.introns.blat1.TOTAL tags/GM12878/paternal/TOTAL.tags.filter.final tags/GM12878/maternal/TOTAL.tags.filter.final TOTAL_introns.final_table.tags
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8], sys.argv[9])
| [
"geparada@omics.(none)"
] | geparada@omics.(none) |
3fdea1ed28d82a773de42cb6859f5ce3fa0ceefd | aab2f6f5f673bf16424d592142ba3af414423adb | /kafkaconsumer.py | 5b972353177ac3b7eef951403c753ffb888eaab2 | [] | no_license | ashishjsharda/KafkaUsingPython | 75f46ba4df25f264e853615c8fde73ed59aa620e | cbd9aff94b9d896736e14befcb42dfa64efc1562 | refs/heads/master | 2020-12-19T11:07:08.891269 | 2020-01-23T03:22:39 | 2020-01-23T03:22:39 | 235,715,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | '''
Created on Jan 22, 2020
@author: ashish
'''
from kafka import KafkaConsumer
consumer=KafkaConsumer('sample')
for message in consumer:
print(message)
| [
"[email protected]"
] | |
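The consumer above relies on kafka-python defaults (broker at localhost:9092, no consumer group). A slightly more explicit sketch; the broker address and group id below are placeholders:
from kafka import KafkaConsumer
consumer = KafkaConsumer(
    'sample',
    bootstrap_servers=['localhost:9092'],          # placeholder broker address
    group_id='sample-readers',                     # placeholder consumer group
    auto_offset_reset='earliest',
    value_deserializer=lambda v: v.decode('utf-8'),
)
for message in consumer:
    print(message.topic, message.partition, message.offset, message.value)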
0d7c3cc6491bf482131fbbb00556b7368044d75d | 391d648132c1a05e7da575205eef89a7208a892a | /compare.py | 311165b27e102d591a5f36b47d2bd374e0a1c43b | [] | no_license | michaelbateman/DemographicInference | c3ceaf69f8b554f3973473607d6b5201cca423f9 | b1e2529b1ce0710f82d2867d08588ae4a6c72bb2 | refs/heads/master | 2021-01-10T06:28:43.698581 | 2015-10-06T18:25:45 | 2015-10-06T18:25:45 | 43,770,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | import sys
import matplotlib.pyplot as plt
import numpy as np
import subprocess
import time
import pylab
from scipy import stats
num_haps = int(sys.argv[1])
num_times = int(sys.argv[2])
def readfire(s): # reads the output of a fire file and
# returns the time vector and the population vector
    # the parser skips everything up to the 'START HERE' marker, then reads
    # two-column lines: first column is time in generations, second column is population
time = []
pop = []
with open(s, 'r') as input_file:
throwaway = input_file.readline()
while throwaway.strip() != 'START HERE':
throwaway = input_file.readline()
for line in input_file:
temp = line.strip()
L = temp.split()
if 'START' in temp:
print 'START'
time = []
pop = []
elif 'f' in temp:
print temp
elif len(L) >= 2:
#print 'no'
#print temp
temp = line.strip()
a,b = temp.split()
time.append(float(a))
pop.append(float(b))
#with open(s, 'r') as input_file:
#throwaway = input_file.readline()
#while throwaway.strip() != 'START HERE':
#throwaway = input_file.readline()
#for line in input_file:
#print 'hello'
#temp = line.strip()
#a,b = temp.split()
#time.append(float(a))
#pop.append(float(b))
#print a, b
print 'readfire is done'
return [time, pop]
pop_list = ['GBR', 'CEU','YRI', 'FIN', 'PEL', 'ESN']
IC_list = ['FIN']
for pop in pop_list:
for start in IC_list:
root = pop + '.' + str(num_haps/2) + '.composite.max.binned'
t = root + '.fire'
print pop, 'starting from flat 50000 ',
[T, P] = readfire(t)
#print t
#t = pop + '.fire'
ic_file = start + '.ic.txt'
t = root +'.'+ ic_file + '.fire'
print pop, 'starting from', start
[T, P] = readfire(t)
#plt.plot(np.multiply(28,T) ,P, '-o', label = pop )
#plt.yscale('log')
#plt.xlabel('years')
#plt.title('re-started from ' + start +' curve')
#fig = plt.figure()
#fig.set_yscale('log')
#plt.legend(loc = 'lower right')
#title = 'ic.' + start + '.' +str(num_haps/2)
#for pop in pop_list:
#title+= '.' + pop
#pylab.savefig(title + '.png', bbox_inches='tight')
#plt.show()
| [
"[email protected]"
] | |
c8ef2e7798953aa2231f6ae172d6891809e33e43 | c155d27bf74255b8315603518c5ab76d0638dfea | /uv/serpens/profiles/3sigma_cn_3.py | f6f8c1ebbcce36349089a536d0cded0167ff74b4 | [] | no_license | amirocha/doktorat | 406c2e4476a5a22c863c37eb5f581a369800e936 | 22c90228f6bca9d0b116c73457b7e86ae4462167 | refs/heads/master | 2021-07-05T14:18:26.324886 | 2020-08-03T19:28:50 | 2020-08-03T19:28:50 | 152,316,686 | 0 | 0 | null | 2020-04-03T20:22:37 | 2018-10-09T20:28:29 | Python | UTF-8 | Python | false | false | 7,857 | py | #-*-coding: utf-8-*-
'''
DESCRIPTION: This script displays the averaged and resampled spectrum (0.5 km/s)
DESCRIPTION: for a given region on map and plots 3*RMS and 1*RMS levels
DESCRIPTION: and shows X ranges for flux calculation
The averaged region is consistent with HCN 1-0 beam size after convolution (27.8"),
because it's the biggest for this molecule. We used the same beam size for other molecules.
'''
#!/usr/bin/python3.5
# name the output file
psname = 'smm8_cn.eps'
# import packages
from numpy import *
from pylab import *
import matplotlib.pyplot as plt
from matplotlib import *
import pandas as pd
# ------------------------------------------------
# ------------------------------------------------
# find the x ranges (in km/s), which are above 3RMS
# level - for flux integration of line
rms = 0.122 # rms taken from CLASS
rms_3 = 3*rms
rms_2 = 2*rms
name = 'serpens_cn10_smm8.txt'
# read the spectrum
spec_df = pd.read_table(name, delim_whitespace=True, header=None)
### 3 SIGMA ### 3 SIGMA ### 3 SIGMA ### 3 SIGMA ### 3 SIGMA ###
# left (x1) and right (x2) ranges in which we are looking for minima
x1_ran_df = spec_df[(spec_df[0] > -20) & (spec_df[0] < -15)] #change ranges!!
x2_ran_df = spec_df[(spec_df[0] > -15) & (spec_df[0] < -10)]
#change ranges!!
# SERPENS HCN10: -5.0 - -0.0 and 10. - 20.
# for both X ranges take the column with flux and calculate abs(yi - 3rms)
y1_i_rms_3 = (x1_ran_df[1]-rms_3).abs()
y2_i_rms_3 = (x2_ran_df[1]-rms_3).abs()
# join two dataframes, reset and drop old index
# then change the names of column indexes from 011 to 123
final1_df = pd.concat([x1_ran_df, y1_i_rms_3], axis = 1).reset_index(drop=True)
final1_df.columns = [1,2,3]
final2_df = pd.concat([x2_ran_df, y2_i_rms_3], axis = 1).reset_index(drop=True)
final2_df.columns = [1,2,3]
# find the index of item which contains row with the minimum
min1 = final1_df[3].idxmin(axis=1, skipna=True)
min2 = final2_df[3].idxmin(axis=1, skipna=True)
# print the x value of minimum (in km/s)
print ('X1 (3s) =', final1_df[1].ix[min1].round(1))
print ('X2 (3s) =', final2_df[1].ix[min2].round(1))
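# (added note) X1 and X2 are simply the velocities inside the two search
# windows where the spectrum comes closest to the 3*RMS level (minimum of
# |T_MB - 3*RMS|); they serve as the limits of the window used for the
# line-flux calculation mentioned in the header docstring.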
# ------------------------------------------------
# ------------------------------------------------
### 1 SIGMA ### 1 SIGMA ### 1 SIGMA ### 1 SIGMA ### 1 SIGMA ###
# left (x3) and right (x4) ranges in which we are looking for minima
x3_ran_df = spec_df[(spec_df[0] > 5.0) & (spec_df[0] < 8.0)] #change ranges!!
x4_ran_df = spec_df[(spec_df[0] > 8.0) & (spec_df[0] < 12.1)]
#change ranges!!
# NGC1333 HCN10: -30.0 - -20.0 and -5. - -1.
# for both X ranges take the column with flux and calculate abs(yi - 3rms)
y3_i_rms = (x3_ran_df[1]-rms_2).abs()
y4_i_rms = (x4_ran_df[1]-rms_2).abs()
# join two dataframes, reset and drop old index
# then change the names of column indexes from 011 to 123
final3_df = pd.concat([x3_ran_df, y3_i_rms], axis = 1).reset_index(drop=True)
final3_df.columns = [1,2,3]
final4_df = pd.concat([x4_ran_df, y4_i_rms], axis = 1).reset_index(drop=True)
final4_df.columns = [1,2,3]
# find the index of item which contains row with the minimum
min3 = final3_df[3].idxmin(axis=1, skipna=True)
min4 = final4_df[3].idxmin(axis=1, skipna=True)
# print the x value of minimum (in km/s)
print ('X3 (2s) =', final3_df[1].ix[min3].round(1))
print ('X4 (2s) =', final4_df[1].ix[min4].round(1))
# ------------------------------------------------
# ------------------------------------------------
# ------------------------------------------------
# NOW PLOT THE SPEC WITH 3*RMS LEVEL AND X RANGES #
# ------------------------------------------------
fig = plt.figure(figsize = (9,7), dpi = 400)
#plt.rcParams["font.family"] = "Times New Roman"
rc('font', **{'family':'serif', 'serif':['Times New Roman']})
params = {'backend': 'pdf',
#'axes.labelsize': 12,
#'text.fontsize': 12,
#'legend.fontsize': 12,
#'xtick.labelsize': 7,
#'ytick.labelsize': 7,
# The comm. below determines whether you use LaTeX
# for all text in matplotlib (you probably don't want
# to turn this on, but may)
'text.usetex': False,
# four comm. below (math) determines what is used for math rendering
'mathtext.rm': 'serif',
'mathtext.it': 'serif:italic',
'mathtext.bf': 'serif:bold',
'mathtext.fontset': 'custom',
#'figure.figsize': fig_size,
'axes.unicode_minus': True}
matplotlib.rcParams.update(params)
""" READ INPUT DATA
########## SERPENS, HCN 1-0, center of ave.: 163.5 -142.7, range: 149.6 177.4 -156.6 -128.8 ##########
"""
v_hcn10, Tmb_hcn10 = loadtxt(name, usecols=(0, 1), unpack=True, skiprows=1)
ax = fig.add_subplot(111)
"""
CREATE A PLOT
"""
ax.set_xlabel(r'$\mathrm{V_{LSR}\;[km/s]}$', fontsize = 9)
ax.set_ylabel(r'$\mathrm{T_{MB}\;[K]}$', fontsize = 9)
# major x ticks every 20, minor ticks every 10
# major y ticks every 1, minor ticks every 0.5
major_ticks_x = np.arange(-80, 50, 5)
minor_ticks_x = np.arange(-80, 50, 1)
major_ticks_y = np.arange(0.0, 1.2, 0.2)
minor_ticks_y = np.arange(0.0, 1.2, 0.1)
ax.set_xticks(major_ticks_x)
ax.set_xticks(minor_ticks_x, minor=True)
ax.set_yticks(major_ticks_y)
ax.set_yticks(minor_ticks_y, minor=True)
# Set the tick labels font
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
# label.set_fontname('Arial')
label.set_fontsize(7)
"""
########## SERPENS, HCN 1-0, center of ave.: 163.5 -142.7, range: 149.6 177.4 -156.6 -128.8 ##########
"""
ax.plot(v_hcn10, Tmb_hcn10, color = 'black', linewidth=1.0, linestyle = '-')
plt.axhline(y=rms_3, xmin = -60.0, xmax = 40.0, color = 'red', linewidth=1.5, linestyle = '-')
plt.axhline(y=rms, xmin = -60.0, xmax = 40.0, color = 'green', linewidth=1.5, linestyle = '-')
# THE ANNOTATIONS ON A GRAPH
#---------------------------
# alpha - transparency, fc - a color of inner part of arrow, ec - a color of an edge of arrow
# headwidth - the size of arrow, frac - a lenght of the head of arrow
# shrink - fraction of total length to ‘shrink’ from both ends
#ax.annotate(r'$\mathrm{RMS\;=%.5f\;K;3*RMS\;=%.3f\;K}$'%(rms,rms_3), fontsize=10, xy=(-38.0, 1.13), textcoords='data')
#ax.annotate(r'$\mathrm{set\;window\;-30\;40}$', fontsize=10, xy=(-38.0, 1.1), textcoords='data')
#ax.annotate(r'$\mathrm{X_{1}\;(3s)\;=%.1f \;km/s}$'%(final1_df[1].ix[min1].round(1)), fontsize=10, xy=(-38.0, 1.07), textcoords='data')
#ax.annotate(r'$\mathrm{X_{2}\;(3s)\;=%.1f \;km/s}$'%(final2_df[1].ix[min2].round(1)), fontsize=10, xy=(-38.0, 1.04), textcoords='data')
#ax.annotate(r'$\mathrm{X_{3}\;(1s)\;=%.1f \;km/s}$'%(final3_df[1].ix[min3].round(1)), fontsize=10, xy=(-38.0, 1.01), textcoords='data')
#ax.annotate(r'$\mathrm{X_{4}\;(1s)\;=%.1f \;km/s}$'%(final4_df[1].ix[min4].round(1)), fontsize=10, xy=(-38.0, 0.98), textcoords='data')
# plot the vertical lines for x = min1 and x = min2
plt.axvline(x=final1_df[1].ix[min1].round(1), color='red', linestyle='--')
plt.axvline(x=final2_df[1].ix[min2].round(1), color='red', linestyle='--')
# plot the vertical lines for x = min3 and x = min4
plt.axvline(x=final3_df[1].ix[min3].round(1), color='green', linestyle='--')
plt.axvline(x=final4_df[1].ix[min4].round(1), color='green', linestyle='--')
# the upper and lower axis limits on a LEFT GRAPH
ax.set_xlim([-80.0, 30.0])
ax.set_ylim([-0.1, 1.2])
# close and save file
savefig(psname, format = 'eps', bbox_inches = 'tight')
clf()
| [
"[email protected]"
] | |
31012c36efe1adf3e32c6e9600220d6f672511ec | 027f52cbbd4e9ccd52b73dcf9ed523137ec78815 | /python_language/Day_Base_Code/Day_07/lambda_function_2nd.py | b0b5df2ea3a317f5bc58267771c1524dd860305c | [] | no_license | Jade2290/bigdata_class | 0c851440852857ee44496b7112db580cf9b60d57 | 380ad58d56ea4fbcea81f78f9648b1edf27e0554 | refs/heads/master | 2022-04-11T05:05:25.429853 | 2020-03-27T14:30:36 | 2020-03-27T14:30:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,789 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 14 11:48:50 2019
@author: sundooedu
"""
list1=list(range(0,10))
list2=list(filter(lambda x:x%2==0,list1))
list2
list3=list(filter(lambda x:x%2==0 and x%3==0,list1))
def factorial(s):
factor=1
for i in list(range(1,s+1)):
factor *= i
return factor
factorial(5)
# the line below originally read `list4=list(filter(lambda x: x*3 if x%3==0, list1))`,
# which is a SyntaxError (a conditional expression needs an else); one runnable
# reading of the intent -- triple the multiples of 3 -- is:
list4=list(map(lambda x: x*3, filter(lambda x: x%3==0, list1)))
from functools import reduce
lst5=list(range(9,0,-1))
factorial=reduce(lambda x,y:x*y,lst5)  # lst5 must be defined before it is reduced
def factorial(x,y):
return x*y
reduce(factorial,lst5)
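#%%
# (added example) filter/map/reduce combined on list1: keep the even numbers,
# square them, then sum the squares with reduce.
evens_squared_sum = reduce(lambda acc, x: acc + x,
                           map(lambda x: x**2,
                               filter(lambda x: x % 2 == 0, list1)))
evens_squared_sum  # 0 + 4 + 16 + 36 + 64 = 120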
#%%
bool(0) # only 0 is False; every other number is True (negatives too)
#%%
bool([]) # an empty list is False
#%%
bool('') # an empty string is False
#%%
# Q.01
def is_odd(number):
if number%2==0:
answer="짝수"
else:
answer="홀수"
print(answer)
def is_odds(number):
answer=(lambda x : "짝수" if (x%2==0) else "홀수")(number)
print(answer)
is_odds(5)
#%%
# Q.02
def input_avg(*number):
    # the original body was `a=(lambda x: x+=i for i in number)`, a SyntaxError;
    # returning the mean of the arguments keeps the intent of Q.02
    return sum(number)/len(number)
input_avg(*[int(n) for n in input('Enter numbers separated by spaces: ').split()])
#%%
# Q.03
input1 = int(input("Enter the first number:"))
input2 = int(input("Enter the second number:"))
total = input1 + input2
print("The sum of the two numbers is %s" % total)
#%%
# Q.04
#print("you" "need" "python")
#print("you"+"need"+"python")
print("you", "need", "python")
#print("".join(["you", "need", "python"]))
#%%
# Q.05
with open("test.txt", 'w') as f1:
f1.write("Life is too short")
with open("test.txt", 'r') as f2:
line= f2.read()
print(line)
#%%
# Q.06
from datetime import datetime
def writeDiary():
fstr='%Y%m%d'
today_str=datetime.strftime(datetime.now(),fstr)
file_name= today_str +'.diary'
lst=[]
    #total_line='' # all lines entered so far
while True:
        line = input('Enter the content:')
if(line == '!quit'):break
#total_line += line + "\n"
lst.append(line+'\n')
with open(file_name,'w',encoding='utf-8') as f:
#f.write(total_line)
f.writelines(lst)
#f.close()
    print('diary file for today '+'('+ file_name +')'+' created')
def readDiary():
    filename = input('diary file name : ')
with open(filename,'r',encoding='utf-8') as fp:
#data=fp.read()
data=fp.readlines()
print("파일내용 : ",data)
#fp.close()
def errorDiary():
    print('no such menu number')
menu='read 1 write 2 : '
menu_bunho=input(menu)
if menu_bunho == '1' :
readDiary()
elif menu_bunho == '2' :
writeDiary()
else:
errorDiary()
| [
"[email protected]"
] | |
fbc6323c971bfc05403d49f31975f0959e172e9e | ae8a1d8f23ed08fcc14ecc9a6651cd738790ac00 | /tests/func/test_fetchdata.py | a4f6b1013135688f78c6a4ae54adee32aeb7ddcd | [] | no_license | murakami10/crawling-naist-lecture | 438ef9a6311630178641d2534b553e6ba20a8b5b | 7e4345983f452f20e9ba03495c42b2e922cb56f7 | refs/heads/main | 2023-05-19T10:45:34.863849 | 2021-06-08T09:17:47 | 2021-06-08T09:17:47 | 367,010,980 | 0 | 0 | null | 2021-06-08T09:17:48 | 2021-05-13T10:16:51 | Python | UTF-8 | Python | false | false | 4,696 | py | import threading
import time
from http.server import HTTPServer, SimpleHTTPRequestHandler
import pytest
import requests
from src.crawling_naist_syllabus.fetch import FetchData
from src.crawling_naist_syllabus.structure import Lecture
@pytest.fixture(scope="session")
def fetch_and_save_syllabus_html(tmpdir_factory):
"""
    Fetch the NAIST syllabus pages and save them in a temporary directory.
    :return the directory that contains syllabus.html
"""
syllabus_directory = tmpdir_factory.mktemp("syllabus_directory")
response = requests.get("https://syllabus.naist.jp/subjects/preview_list")
syllabus_file = syllabus_directory.join("syllabus.html")
syllabus_file.write(response.content)
    # we are scraping the real site, so leave an interval between requests
time.sleep(1)
response = requests.get("https://syllabus.naist.jp/subjects/preview_detail/666")
detail_file = syllabus_directory.join("detail_1.html")
detail_file.write(response.content)
return syllabus_file.dirpath()
@pytest.fixture(scope="session")
def start_http_server():
"""
    Serve everything under the current directory.
"""
host, port = ("127.0.0.1", 8888)
url = f"http://{host}:{port}/tests/index.html"
server = HTTPServer((host, port), SimpleHTTPRequestHandler)
thred = threading.Thread(target=server.serve_forever)
thred.start()
yield url
server.shutdown()
thred.join()
@pytest.fixture(scope="session")
def start_http_server_with_specific_directory(fetch_and_save_syllabus_html):
"""
    Serve the specified directory on localhost.
    :param fetch_and_save_syllabus_html directory to serve
"""
class HandlerWithDirectory(SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
"""
            Serve the specified directory
"""
super().__init__(*args, directory=fetch_and_save_syllabus_html, **kwargs)
host, port = ("127.0.0.1", 8889)
server = HTTPServer((host, port), HandlerWithDirectory)
url = f"http://{host}:{port}/"
    # start the server thread
thred = threading.Thread(target=server.serve_forever)
thred.start()
yield url
server.shutdown()
thred.join()
@pytest.fixture()
def fetch_data(start_http_server_with_specific_directory):
"""
    Return a FetchData instance.
"""
fd = FetchData(start_http_server_with_specific_directory + "syllabus.html")
return fd
@pytest.mark.parametrize(
"invalid_url",
[
"http://127.0.0.1:8888/not_existed_index.html",
"httpaaaa",
],
)
def test_init_with_invalid_url(start_http_server, invalid_url):
with pytest.raises(Exception):
FetchData(invalid_url)
def test_init_with_valid_url(start_http_server):
try:
_ = FetchData(start_http_server)
except Exception:
pytest.fail("Exception raised")
general_lecture = Lecture(
name="技術と倫理",
url="http://127.0.0.1:8889/subjects/preview_detail/644",
)
introduction_lecture = Lecture(
name="情報理工学序論",
url="http://127.0.0.1:8889/subjects/preview_detail/662",
)
basic_lecture = Lecture(
name="情報科学基礎Ⅰ",
url="http://127.0.0.1:8889/subjects/preview_detail/791",
)
specialized_lecture = Lecture(
name="ソフトウェア工学",
url="http://127.0.0.1:8889/subjects/preview_detail/688",
)
@pytest.mark.parametrize(
"lecture_type, contained_data",
[
(FetchData.LECTURE_TYPE_GENERAL, general_lecture),
(FetchData.LECTURE_TYPE_INTRODUCTION, introduction_lecture),
(FetchData.LECTURE_TYPE_BASIC, basic_lecture),
(FetchData.LECTURE_TYPE_SPECIALIZED, specialized_lecture),
],
)
def test_scrape_name_and_url(fetch_data, lecture_type, contained_data):
name_and_url_list = fetch_data.scrape_name_and_url(lecture_type)
assert contained_data in name_and_url_list
def test_scrape_name_and_url_key_error(fetch_data):
with pytest.raises(KeyError):
fetch_data.scrape_name_and_url("key error")
def dummy_init(self, url):
pass
def test_scrape_detail_of_lecture(
start_http_server_with_specific_directory, monkeypatch
):
monkeypatch.setattr(FetchData, "__init__", dummy_init)
fetch_data = FetchData("url")
detail_url = start_http_server_with_specific_directory + "/detail_1.html"
lecture = Lecture(name="高性能計算基盤", url=detail_url)
lecture = fetch_data.scrape_detail(lecture)
assert 1 == lecture.details[0].number
assert "4/22 [2]" == lecture.details[0].date
assert "スーパスカラとVLIW (日本語教科書8章)" == lecture.details[0].theme
| [
"[email protected]"
] | |
809258fbebe5a4d58326b515a82977274a9a9cba | 0bcd128368e2de959ca648960ffd7944067fcf27 | /infra/bots/assets/protoc/create.py | e363cc5068230dadf10809450d4bff5a04c530b9 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google/skia | ac6e39179cd33cf0c8a46d29c1a70bf78b4d74ee | bf6b239838d3eb56562fffd0856f4047867ae771 | refs/heads/main | 2023-08-31T21:03:04.620734 | 2023-08-31T18:24:15 | 2023-08-31T20:20:26 | 15,773,229 | 8,064 | 1,487 | BSD-3-Clause | 2023-09-11T13:42:07 | 2014-01-09T17:09:57 | C++ | UTF-8 | Python | false | false | 768 | py | #!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset."""
import argparse
import subprocess
ZIP_URL = ('https://github.com/google/protobuf/releases/download/v3.3.0/'
'protoc-3.3.0-linux-x86_64.zip')
def create_asset(target_dir):
"""Create the asset."""
local_zip = '/tmp/protoc.zip'
subprocess.check_call(['curl', '-L', ZIP_URL, '-o', local_zip])
subprocess.check_call(['unzip', local_zip, '-d', target_dir])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
create_asset(args.target_dir)
if __name__ == '__main__':
main()
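# (added usage note, not part of the original) Example invocation, assuming a
# writable target directory:
#   python create.py --target_dir /tmp/protoc_asset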
| [
"[email protected]"
] | |
154bb8fc3ec4ecff7d4664dd60a36d89d9e9c287 | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /atcoder/abc/abc034/d.py | 39cae32435a6c76186d919f097f04446d71aa1b3 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # -*- coding: utf-8 -*-
import sys,copy,math,heapq,itertools as it,fractions,re,bisect,collections as coll
N, K = map(int, raw_input().split())
w, p = [], []
for i in xrange(N):
wi, pi = map(int, raw_input().split())
w.append(wi)
p.append(pi)
wp = [[1.0*p[i]/w[i],i] for i in xrange(N)]
wp = sorted(wp, key = lambda x: x[0], reverse = True)
W, S = 0, 0
for ratio, idx in wp[:K]:  # take the K beakers with the largest p/w ratio (the sort above was otherwise unused)
    W += w[idx]
    S += p[idx] * w[idx]
print 1.0*S/W
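# (added note) A greedy pick is not guaranteed to maximise the mixed
# concentration; the standard technique for "choose K items maximising a
# weighted average" is to binary-search the answer x and test whether some K
# beakers give sum(w_i * (p_i - x)) >= 0, e.g.:
#
# lo, hi = 0.0, 100.0
# for _ in xrange(100):
#     mid = (lo + hi) / 2
#     gains = sorted((wi * (pi - mid) for wi, pi in zip(w, p)), reverse=True)
#     if sum(gains[:K]) >= 0:
#         lo = mid
#     else:
#         hi = mid
# print lo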
| [
"[email protected]"
] | |
874a61ad25e6f5e3fb13dfdfa02f814cebfc737b | 631847fafbcfa07bf33eee078d9b59b464ce4b50 | /optimization/second_sdEta_mjj_optimization/lumi_and_kin_plots/four_cuts_lum3000/Output/Histos/MadAnalysis5job_0/selection_8.py | 63a5075a9c405cdb0b8114324ea73fc498ae8bb1 | [
"MIT"
] | permissive | sheride/axion_pheno | 7b46aeb7cc562800d78edd9048504fdbc0f5fa42 | 7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5 | refs/heads/master | 2021-07-01T08:47:59.981416 | 2021-02-03T23:03:50 | 2021-02-03T23:03:50 | 219,261,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,794 | py | def selection_8():
# Library import
import numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Library version
matplotlib_version = matplotlib.__version__
numpy_version = numpy.__version__
# Histo binning
xBinning = numpy.linspace(0.0,1000.0,101,endpoint=True)
# Creating data sequence: middle of each bin
xData = numpy.array([5.0,15.0,25.0,35.0,45.0,55.0,65.0,75.0,85.0,95.0,105.0,115.0,125.0,135.0,145.0,155.0,165.0,175.0,185.0,195.0,205.0,215.0,225.0,235.0,245.0,255.0,265.0,275.0,285.0,295.0,305.0,315.0,325.0,335.0,345.0,355.0,365.0,375.0,385.0,395.0,405.0,415.0,425.0,435.0,445.0,455.0,465.0,475.0,485.0,495.0,505.0,515.0,525.0,535.0,545.0,555.0,565.0,575.0,585.0,595.0,605.0,615.0,625.0,635.0,645.0,655.0,665.0,675.0,685.0,695.0,705.0,715.0,725.0,735.0,745.0,755.0,765.0,775.0,785.0,795.0,805.0,815.0,825.0,835.0,845.0,855.0,865.0,875.0,885.0,895.0,905.0,915.0,925.0,935.0,945.0,955.0,965.0,975.0,985.0,995.0])
# Creating weights for histo: y9_MET_0
y9_MET_0_weights = numpy.array([57524.5413546,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_1
y9_MET_1_weights = numpy.array([3.6450972,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_2
y9_MET_2_weights = numpy.array([87.3487344433,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_3
y9_MET_3_weights = numpy.array([453.367357303,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_4
y9_MET_4_weights = numpy.array([332.76568512,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_5
y9_MET_5_weights = numpy.array([122.98208589,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_6
y9_MET_6_weights = numpy.array([46.7250859722,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_7
y9_MET_7_weights = numpy.array([4.11676537943,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_8
y9_MET_8_weights = numpy.array([0.435869195774,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_9
y9_MET_9_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_10
y9_MET_10_weights = numpy.array([236.977528846,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_11
y9_MET_11_weights = numpy.array([1433.83800865,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_12
y9_MET_12_weights = numpy.array([920.034752885,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_13
y9_MET_13_weights = numpy.array([269.944953921,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_14
y9_MET_14_weights = numpy.array([110.337675,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_15
y9_MET_15_weights = numpy.array([7.8827508417,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y9_MET_16
y9_MET_16_weights = numpy.array([0.731272517308,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating a new Canvas
fig = plt.figure(figsize=(12,6),dpi=80)
frame = gridspec.GridSpec(1,1,right=0.7)
pad = fig.add_subplot(frame[0])
# Creating a new Stack
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights+y9_MET_8_weights+y9_MET_9_weights+y9_MET_10_weights+y9_MET_11_weights+y9_MET_12_weights+y9_MET_13_weights+y9_MET_14_weights+y9_MET_15_weights+y9_MET_16_weights,\
label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#e5e5e5", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights+y9_MET_8_weights+y9_MET_9_weights+y9_MET_10_weights+y9_MET_11_weights+y9_MET_12_weights+y9_MET_13_weights+y9_MET_14_weights+y9_MET_15_weights,\
label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#f2f2f2", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights+y9_MET_8_weights+y9_MET_9_weights+y9_MET_10_weights+y9_MET_11_weights+y9_MET_12_weights+y9_MET_13_weights+y9_MET_14_weights,\
label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights+y9_MET_8_weights+y9_MET_9_weights+y9_MET_10_weights+y9_MET_11_weights+y9_MET_12_weights+y9_MET_13_weights,\
label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights+y9_MET_8_weights+y9_MET_9_weights+y9_MET_10_weights+y9_MET_11_weights+y9_MET_12_weights,\
label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#c1bfa8", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights+y9_MET_8_weights+y9_MET_9_weights+y9_MET_10_weights+y9_MET_11_weights,\
label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#bab5a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights+y9_MET_8_weights+y9_MET_9_weights+y9_MET_10_weights,\
label="$bg\_dip\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b2a596", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights+y9_MET_8_weights+y9_MET_9_weights,\
label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b7a39b", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights+y9_MET_8_weights,\
label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ad998c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights,\
label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#9b8e82", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights,\
label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#876656", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights,\
label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#afcec6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights,\
label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#84c1a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights,\
label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#89a8a0", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights,\
label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#829e8c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights+y9_MET_1_weights,\
label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#adbcc6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y9_MET_0_weights,\
label="$signal$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#7a8e99", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
# Axis
plt.rc('text',usetex=False)
plt.xlabel(r"\slash{E}_{T} ( GeV ) ",\
fontsize=16,color="black")
plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 3000.0\ \mathrm{fb}^{-1})$ ",\
fontsize=16,color="black")
# Boundary of y-axis
ymax=(y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights+y9_MET_8_weights+y9_MET_9_weights+y9_MET_10_weights+y9_MET_11_weights+y9_MET_12_weights+y9_MET_13_weights+y9_MET_14_weights+y9_MET_15_weights+y9_MET_16_weights).max()*1.1
#ymin=0 # linear scale
ymin=min([x for x in (y9_MET_0_weights+y9_MET_1_weights+y9_MET_2_weights+y9_MET_3_weights+y9_MET_4_weights+y9_MET_5_weights+y9_MET_6_weights+y9_MET_7_weights+y9_MET_8_weights+y9_MET_9_weights+y9_MET_10_weights+y9_MET_11_weights+y9_MET_12_weights+y9_MET_13_weights+y9_MET_14_weights+y9_MET_15_weights+y9_MET_16_weights) if x])/100. # log scale
plt.gca().set_ylim(ymin,ymax)
# Log/Linear scale for X-axis
plt.gca().set_xscale("linear")
#plt.gca().set_xscale("log",nonposx="clip")
# Log/Linear scale for Y-axis
#plt.gca().set_yscale("linear")
plt.gca().set_yscale("log",nonposy="clip")
# Legend
plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
# Saving the image
plt.savefig('../../HTML/MadAnalysis5job_0/selection_8.png')
plt.savefig('../../PDF/MadAnalysis5job_0/selection_8.png')
plt.savefig('../../DVI/MadAnalysis5job_0/selection_8.eps')
# Running!
if __name__ == '__main__':
selection_8()
| [
"[email protected]"
] | |
5293142db0238e500bc8e112bb402720c56a2c77 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/vse-naloge-brez-testov/DN5-Z-146.py | 9e28868b4bb26ff8e9f3184e25bbf744f394f63e | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,661 | py | def unikati(s):
t = []
for i in s:
if i not in t:
t.append(i)
return t
def avtor(tvit):
a = tvit.split(": ")
return a[0]
def vsi_avtorji(tviti):
t = []
a = [i.split(': ')[0] for i in tviti]
for name in a:
if name not in t:
t.append(name)
return t
def izloci_besedo(beseda):
s = 0
z = 1
y = ""
x = ""
g = ""
for b in beseda:
if b.isalnum() == False:
s += 1
elif b.isalnum() == True:
break
y += beseda[s:]
for d in y[::-1]:
if d.isalnum() == False:
z += 1
elif d.isalnum() == True:
break
x += beseda[:-z]
for i in y:
if i in x:
g += i
return g
def se_zacne_z(tvit, c):
n = []
a = tvit.split(" ")
while (True):
for i in a:
if i.isalnum() == False and i[0][:1] == c:
n.append(i[1:])
for d in n:
if d.isalnum() == False:
n.append(d[:-1])
n.remove(d)
n.sort()
return n
def zberi_se_zacne_z(tviti, c):
n = []
s = []
a = [i.split(' ') for i in tviti]
for e in a:
for d in e:
if d[0] == c:
n.append(d[1:])
for k in n:
if k.isalnum() == False:
n.append(k[:-1])
n.remove(k)
for i in n:
if i not in s:
s.append(i)
return s
def vse_afne(tviti):
n = []
s = []
a = [i.split(" ") for i in tviti]
while (True):
for tvit in a:
for e in tvit:
if e[0] == "@":
n.append(e[1:])
for d in n:
if d.isalnum() == False:
n.append(d[:-1])
n.remove(d)
for i in n:
if i not in s:
s.append(i)
break
return s
def vsi_hashtagi(tviti):
a = [i.split(" ") for i in tviti]
n = []
s = []
while (True):
for tvit in a:
for e in tvit:
if e[0] == "#":
n.append(e[1:])
for d in n:
if d.isalnum() == False:
n.append(d[:-1])
n.remove(d)
for i in n:
if i not in s:
s.append(i)
break
return s
def vse_osebe(tviti):
a = vse_afne(tviti)
b = vsi_avtorji(tviti)
return sorted(unikati(a+b))
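# (added sketch, not part of the original submission) The helpers above expect
# tweets of the form "author: text".  A hypothetical run:
#
# tviti = [
#     "ana: Pozdrav @bor #prvi",
#     "bor: @ana seveda #prvi #drugi",
# ]
# vsi_avtorji(tviti)   # -> ['ana', 'bor']
# vse_afne(tviti)      # -> ['bor', 'ana']
# vsi_hashtagi(tviti)  # -> ['prvi', 'drugi']
# vse_osebe(tviti)     # -> ['ana', 'bor']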
| [
"[email protected]"
] | |
7c7c490a043db3015b8dbbef12cc43020cbffd1a | 67dd5749b247915ce7a0d3d95964e30503c4aa0c | /dev/getting_indexing_right.py | 2c5e5b0945cafd7cd5a79d587531fd4bb7a72c15 | [] | no_license | yddream/timspy | d74d78825844b69ed9730373809e3f09ab52060c | 034788db83d85dfca01fa31281a6de391ea2fe23 | refs/heads/master | 2022-11-11T13:10:48.880175 | 2020-06-23T07:38:21 | 2020-06-23T07:38:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | """How to get the data."""
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
from functools import update_wrapper  # used below but missing from the original imports
from timspy.timspy import TimsDIA
from timspy.plot import plot_spectrum
from timsdata import TimsData
# plt.style.use('dark_background')
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 500)
# I have my data here
p = Path('/home/matteo/Projects/bruker/BrukerMIDIA/MIDIA_CE10_precursor/20190912_HeLa_Bruker_TEN_MIDIA_200ng_CE10_100ms_Slot1-9_1_488.d')
# TD = TimsData(p) # timsdata does not support :
TD = TimsDIA(p) # timsdata does not support :
update_wrapper(TD.iter, TD.iter_arrays)
TD.iter.__getitem__
?TD.iter_arrays
TD[1:10,:]
next(TD.iter[1:10, 100:500])
list(TD.iter[1:10, 100:500])
TD[1:10, 100:500]
TD[1:100, 100]
list(TD.iter[1:100, 100])
TD[1:10, 100:500].dtype
TD[[10, 20, 30], 100:500]
TD[[10, 20, 30], [40, 49]]
TD[[10, 20, 30], [41, 60]]
TD[:20, [41, 60]]
TD[11552,10]
TD[11552:,10] # exception will be raised automatically!
TD[(i**2 for i in range(1,10)), 10:50]
| [
"[email protected]"
] | |
153eaf590327a3928e4f39de1f87e5e3b6434798 | cb0e7d6493b23e870aa625eb362384a10f5ee657 | /solutions/python3/0200.py | bf259566a4c651f2576a82f375668f59e4e04686 | [] | no_license | sweetpand/LeetCode-1 | 0acfa603af254a3350d457803449a91322f2d1a7 | 65f4ef26cb8b2db0b4bf8c42bfdc76421b479f94 | refs/heads/master | 2022-11-14T07:01:42.502172 | 2020-07-12T12:25:56 | 2020-07-12T12:25:56 | 279,088,171 | 1 | 0 | null | 2020-07-12T15:03:20 | 2020-07-12T15:03:19 | null | UTF-8 | Python | false | false | 739 | py | class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
def dfs(i: int, j: int) -> None:
if i < 0 or j < 0 or i >= len(grid) or j >= len(grid[0]) or visited[i][j] or grid[i][j] == '0':
return
visited[i][j] = True
dfs(i + 1, j)
dfs(i - 1, j)
dfs(i, j + 1)
dfs(i, j - 1)
if not grid:
return 0
m = len(grid)
n = len(grid[0])
ans = 0
visited = [[False] * n for _ in range(m)]
for i in range(m):
for j in range(n):
if not visited[i][j] and grid[i][j] == '1':
ans += 1
dfs(i, j)
return ans
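# (added note) The flood fill above visits each cell at most once, so it runs
# in O(m*n) time with O(m*n) extra space for `visited`.  Because the DFS is
# recursive, a grid that is one huge island can hit Python's recursion limit;
# an explicit stack or a BFS queue is the usual workaround when that matters.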
| [
"[email protected]"
] | |
493149b85cd8f8ac097c095a5d003bc8d8250e26 | 2f44cecd8fc447c9e2f2d9f55abdea36ebb40cc5 | /84.py | ef3779fbdfec69cf469c9f29d365c5b3495f449a | [] | no_license | yuzumei/leetcode | 751a234b429131169e3eaf4594ffeb3b94f6ab34 | b6708b03c92ec92e89fc7ecf13f1995dee346657 | refs/heads/master | 2023-07-28T05:48:53.192948 | 2021-09-11T06:16:07 | 2021-09-11T06:16:07 | 365,780,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | heights=[6,7,5,2,4,5,9,3]
def search(heights):
leftstack=[]
leftnum=[]
for i,num in enumerate(heights):
if not leftstack:
leftstack.append([num,i])
leftnum.append(-1)
else:
while leftstack:
if num<=leftstack[-1][0]:
leftstack.pop()
else:
break
if leftstack:
leftnum.append(leftstack[-1][1])
else:
leftnum.append(-1)
leftstack.append([num,i])
return leftnum
leftnum=search(heights)
rightnum=search(heights[::-1])[::-1]
print(leftnum,rightnum)
ans=-1
for i in range(len(heights)):
ans=max(ans,(len(heights)-2-rightnum[i]-leftnum[i])*heights[i])
print(ans)
'''traverse twice'''
def twoside(heights):
n=len(heights)
if n==0:
return 0
leftnum=[0]*n
rightnum=[n]*n
stack=[]
for i,num in enumerate(heights):
while stack and num<=stack[-1][1]:
temp=stack.pop()
rightnum[temp[0]]=i
leftnum[i]=stack[-1][0] if stack else -1
stack.append([i,num])
ans=max((rightnum[i]-leftnum[i]-1)*heights[i] for i in range(n))
return ans
print(twoside(heights))
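'''(added sketch) The same monotonic-stack idea is often written with a
zero-height sentinel appended to heights: every bar is then popped before the
loop ends, which removes the separate left/right boundary arrays.'''
def sentinel(heights):
    stack = []  # indices of bars forming strictly increasing heights
    best = 0
    for i, h in enumerate(heights + [0]):  # the trailing 0 flushes the stack
        while stack and heights[stack[-1]] >= h:
            height = heights[stack.pop()]
            left = stack[-1] if stack else -1  # nearest lower bar on the left
            best = max(best, height * (i - left - 1))
        stack.append(i)
    return best
print(sentinel(heights))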
| [
"[email protected]"
] | |
0b1ea520891319bd3ec29901ce458c89203a9974 | 15a992391375efd487b6442daf4e9dd963167379 | /tests/runner.py | 7356581365e84bd1ae22e702cf2f4a2df1dc1e59 | [
"Apache-2.0"
] | permissive | Bala93/MONAI | b0e68e1b513adcd20eab5158d4a0e5c56347a2cd | e0a7eff5066da307a73df9145077f6f1fec7a514 | refs/heads/master | 2022-08-22T18:01:25.892982 | 2022-08-12T18:13:53 | 2022-08-12T18:13:53 | 259,398,958 | 2 | 0 | null | 2020-04-27T17:09:12 | 2020-04-27T17:09:11 | null | UTF-8 | Python | false | false | 5,491 | py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import inspect
import os
import re
import sys
import time
import unittest
from monai.utils import PerfContext
results: dict = {}
class TimeLoggingTestResult(unittest.TextTestResult):
"""Overload the default results so that we can store the results."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.timed_tests = {}
def startTest(self, test): # noqa: N802
"""Start timer, print test name, do normal test."""
self.start_time = time.time()
name = self.getDescription(test)
self.stream.write(f"Starting test: {name}...\n")
super().startTest(test)
def stopTest(self, test): # noqa: N802
"""On test end, get time, print, store and do normal behaviour."""
elapsed = time.time() - self.start_time
name = self.getDescription(test)
self.stream.write(f"Finished test: {name} ({elapsed:.03}s)\n")
if name in results:
raise AssertionError("expected all keys to be unique")
results[name] = elapsed
super().stopTest(test)
def print_results(results, discovery_time, thresh, status):
# only keep results >= threshold
results = dict(filter(lambda x: x[1] > thresh, results.items()))
if len(results) == 0:
return
print(f"\n\n{status}, printing completed times >{thresh}s in ascending order...\n")
timings = dict(sorted(results.items(), key=lambda item: item[1]))
for r in timings:
if timings[r] >= thresh:
print(f"{r} ({timings[r]:.03}s)")
print(f"test discovery time: {discovery_time:.03}s")
print(f"total testing time: {sum(results.values()):.03}s")
print("Remember to check above times for any errors!")
def parse_args():
parser = argparse.ArgumentParser(description="Runner for MONAI unittests with timing.")
parser.add_argument(
"-s", action="store", dest="path", default=".", help="Directory to start discovery (default: '%(default)s')"
)
parser.add_argument(
"-p",
action="store",
dest="pattern",
default="test_*.py",
help="Pattern to match tests (default: '%(default)s')",
)
parser.add_argument(
"-t",
"--thresh",
dest="thresh",
default=10.0,
type=float,
help="Display tests longer than given threshold (default: %(default)d)",
)
parser.add_argument(
"-v",
"--verbosity",
action="store",
dest="verbosity",
type=int,
default=1,
help="Verbosity level (default: %(default)d)",
)
parser.add_argument("-q", "--quick", action="store_true", dest="quick", default=False, help="Only do quick tests")
parser.add_argument(
"-f", "--failfast", action="store_true", dest="failfast", default=False, help="Stop testing on first failure"
)
args = parser.parse_args()
print(f"Running tests in folder: '{args.path}'")
if args.pattern:
print(f"With file pattern: '{args.pattern}'")
return args
def get_default_pattern(loader):
signature = inspect.signature(loader.discover)
params = {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
return params["pattern"]
if __name__ == "__main__":
# Parse input arguments
args = parse_args()
# If quick is desired, set environment variable
if args.quick:
os.environ["QUICKTEST"] = "True"
# Get all test names (optionally from some path with some pattern)
with PerfContext() as pc:
# the files are searched from `tests/` folder, starting with `test_`
files = glob.glob(os.path.join(os.path.dirname(__file__), "test_*.py"))
cases = []
for test_module in {os.path.basename(f)[:-3] for f in files}:
if re.match(args.pattern, test_module):
cases.append(f"tests.{test_module}")
else:
print(f"monai test runner: excluding tests.{test_module}")
tests = unittest.TestLoader().loadTestsFromNames(cases)
discovery_time = pc.total_time
print(f"time to discover tests: {discovery_time}s, total cases: {tests.countTestCases()}.")
test_runner = unittest.runner.TextTestRunner(
resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast
)
# Use try catches to print the current results if encountering exception or keyboard interruption
try:
test_result = test_runner.run(tests)
print_results(results, discovery_time, args.thresh, "tests finished")
sys.exit(not test_result.wasSuccessful())
except KeyboardInterrupt:
print_results(results, discovery_time, args.thresh, "tests cancelled")
sys.exit(1)
except Exception:
print_results(results, discovery_time, args.thresh, "exception reached")
raise
| [
"[email protected]"
] | |
1fc3b7b27452778c90b70cbaf2d0890cbc7c878c | 2ea61e98627dd6b170590b69ead79a828614dec0 | /youtrack_api/models/__init__.py | b32db78643832993e6d781eddcfd07e3509c2415 | [] | no_license | alpduez/youtrack_api | 55dc25465f027645525efe5296c5699f7d824f33 | 2450523d87e6bdbbd53ca4908042a701a1a867e6 | refs/heads/master | 2023-09-01T01:34:33.356354 | 2021-10-20T15:32:05 | 2021-10-20T15:32:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,174 | py | # flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from from youtrack_api.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from youtrack_api.model.activity_category import ActivityCategory
from youtrack_api.model.activity_cursor_page import ActivityCursorPage
from youtrack_api.model.activity_item import ActivityItem
from youtrack_api.model.agile import Agile
from youtrack_api.model.agile_column import AgileColumn
from youtrack_api.model.agile_column_field_value import AgileColumnFieldValue
from youtrack_api.model.agile_column_field_value_all_of import AgileColumnFieldValueAllOf
from youtrack_api.model.agile_status import AgileStatus
from youtrack_api.model.appearance_settings import AppearanceSettings
from youtrack_api.model.attachment_activity_item import AttachmentActivityItem
from youtrack_api.model.attachment_activity_item_all_of import AttachmentActivityItemAllOf
from youtrack_api.model.attribute_based_swimlane_settings import AttributeBasedSwimlaneSettings
from youtrack_api.model.attribute_based_swimlane_settings_all_of import AttributeBasedSwimlaneSettingsAllOf
from youtrack_api.model.backup_error import BackupError
from youtrack_api.model.backup_file import BackupFile
from youtrack_api.model.backup_status import BackupStatus
from youtrack_api.model.base_bundle import BaseBundle
from youtrack_api.model.build_bundle import BuildBundle
from youtrack_api.model.build_bundle_all_of import BuildBundleAllOf
from youtrack_api.model.build_bundle_custom_field_defaults import BuildBundleCustomFieldDefaults
from youtrack_api.model.build_bundle_custom_field_defaults_all_of import BuildBundleCustomFieldDefaultsAllOf
from youtrack_api.model.build_bundle_element import BuildBundleElement
from youtrack_api.model.build_bundle_element_all_of import BuildBundleElementAllOf
from youtrack_api.model.build_project_custom_field import BuildProjectCustomField
from youtrack_api.model.bundle import Bundle
from youtrack_api.model.bundle_custom_field_defaults import BundleCustomFieldDefaults
from youtrack_api.model.bundle_element import BundleElement
from youtrack_api.model.bundle_project_custom_field import BundleProjectCustomField
from youtrack_api.model.color_coding import ColorCoding
from youtrack_api.model.column_settings import ColumnSettings
from youtrack_api.model.command_limited_visibility import CommandLimitedVisibility
from youtrack_api.model.command_limited_visibility_all_of import CommandLimitedVisibilityAllOf
from youtrack_api.model.command_list import CommandList
from youtrack_api.model.command_unlimited_visibility import CommandUnlimitedVisibility
from youtrack_api.model.command_visibility import CommandVisibility
from youtrack_api.model.comment_activity_item import CommentActivityItem
from youtrack_api.model.comment_activity_item_all_of import CommentActivityItemAllOf
from youtrack_api.model.comment_attachments_activity_item import CommentAttachmentsActivityItem
from youtrack_api.model.comment_attachments_activity_item_all_of import CommentAttachmentsActivityItemAllOf
from youtrack_api.model.created_deleted_activity_item import CreatedDeletedActivityItem
from youtrack_api.model.custom_field import CustomField
from youtrack_api.model.custom_field_activity_item import CustomFieldActivityItem
from youtrack_api.model.custom_field_activity_item_all_of import CustomFieldActivityItemAllOf
from youtrack_api.model.custom_field_defaults import CustomFieldDefaults
from youtrack_api.model.custom_filter_field import CustomFilterField
from youtrack_api.model.custom_filter_field_all_of import CustomFilterFieldAllOf
from youtrack_api.model.database_attribute_value import DatabaseAttributeValue
from youtrack_api.model.database_backup_settings import DatabaseBackupSettings
from youtrack_api.model.database_multi_value_issue_custom_field import DatabaseMultiValueIssueCustomField
from youtrack_api.model.database_multi_value_issue_custom_field_all_of import DatabaseMultiValueIssueCustomFieldAllOf
from youtrack_api.model.database_single_value_issue_custom_field import DatabaseSingleValueIssueCustomField
from youtrack_api.model.date_format_descriptor import DateFormatDescriptor
from youtrack_api.model.date_issue_custom_field import DateIssueCustomField
from youtrack_api.model.date_issue_custom_field_all_of import DateIssueCustomFieldAllOf
from youtrack_api.model.duplicate_vote import DuplicateVote
from youtrack_api.model.duration_value import DurationValue
from youtrack_api.model.email_settings import EmailSettings
from youtrack_api.model.enum_bundle import EnumBundle
from youtrack_api.model.enum_bundle_all_of import EnumBundleAllOf
from youtrack_api.model.enum_bundle_custom_field_defaults import EnumBundleCustomFieldDefaults
from youtrack_api.model.enum_bundle_custom_field_defaults_all_of import EnumBundleCustomFieldDefaultsAllOf
from youtrack_api.model.enum_bundle_element import EnumBundleElement
from youtrack_api.model.enum_project_custom_field import EnumProjectCustomField
from youtrack_api.model.external_issue import ExternalIssue
from youtrack_api.model.field_based_color_coding import FieldBasedColorCoding
from youtrack_api.model.field_based_color_coding_all_of import FieldBasedColorCodingAllOf
from youtrack_api.model.field_style import FieldStyle
from youtrack_api.model.field_type import FieldType
from youtrack_api.model.filter_field import FilterField
from youtrack_api.model.general_user_profile import GeneralUserProfile
from youtrack_api.model.global_settings import GlobalSettings
from youtrack_api.model.global_time_tracking_settings import GlobalTimeTrackingSettings
from youtrack_api.model.group_project_custom_field import GroupProjectCustomField
from youtrack_api.model.group_project_custom_field_all_of import GroupProjectCustomFieldAllOf
from youtrack_api.model.issue import Issue
from youtrack_api.model.issue_attachment import IssueAttachment
from youtrack_api.model.issue_based_swimlane_settings import IssueBasedSwimlaneSettings
from youtrack_api.model.issue_based_swimlane_settings_all_of import IssueBasedSwimlaneSettingsAllOf
from youtrack_api.model.issue_comment import IssueComment
from youtrack_api.model.issue_created_activity_item import IssueCreatedActivityItem
from youtrack_api.model.issue_created_activity_item_all_of import IssueCreatedActivityItemAllOf
from youtrack_api.model.issue_custom_field import IssueCustomField
from youtrack_api.model.issue_folder import IssueFolder
from youtrack_api.model.issue_link import IssueLink
from youtrack_api.model.issue_link_type import IssueLinkType
from youtrack_api.model.issue_resolved_activity_item import IssueResolvedActivityItem
from youtrack_api.model.issue_resolved_activity_item_all_of import IssueResolvedActivityItemAllOf
from youtrack_api.model.issue_tag import IssueTag
from youtrack_api.model.issue_tag_all_of import IssueTagAllOf
from youtrack_api.model.issue_time_tracker import IssueTimeTracker
from youtrack_api.model.issue_voters import IssueVoters
from youtrack_api.model.issue_watcher import IssueWatcher
from youtrack_api.model.issue_watchers import IssueWatchers
from youtrack_api.model.issue_work_item import IssueWorkItem
from youtrack_api.model.jabber_settings import JabberSettings
from youtrack_api.model.license import License
from youtrack_api.model.limited_visibility import LimitedVisibility
from youtrack_api.model.links_activity_item import LinksActivityItem
from youtrack_api.model.locale_descriptor import LocaleDescriptor
from youtrack_api.model.locale_settings import LocaleSettings
from youtrack_api.model.localizable_bundle_element import LocalizableBundleElement
from youtrack_api.model.localizable_bundle_element_all_of import LocalizableBundleElementAllOf
from youtrack_api.model.logo import Logo
from youtrack_api.model.me import Me
from youtrack_api.model.multi_build_issue_custom_field import MultiBuildIssueCustomField
from youtrack_api.model.multi_build_issue_custom_field_all_of import MultiBuildIssueCustomFieldAllOf
from youtrack_api.model.multi_enum_issue_custom_field import MultiEnumIssueCustomField
from youtrack_api.model.multi_enum_issue_custom_field_all_of import MultiEnumIssueCustomFieldAllOf
from youtrack_api.model.multi_group_issue_custom_field import MultiGroupIssueCustomField
from youtrack_api.model.multi_group_issue_custom_field_all_of import MultiGroupIssueCustomFieldAllOf
from youtrack_api.model.multi_owned_issue_custom_field import MultiOwnedIssueCustomField
from youtrack_api.model.multi_owned_issue_custom_field_all_of import MultiOwnedIssueCustomFieldAllOf
from youtrack_api.model.multi_user_issue_custom_field import MultiUserIssueCustomField
from youtrack_api.model.multi_user_issue_custom_field_all_of import MultiUserIssueCustomFieldAllOf
from youtrack_api.model.multi_value_activity_item import MultiValueActivityItem
from youtrack_api.model.multi_version_issue_custom_field import MultiVersionIssueCustomField
from youtrack_api.model.multi_version_issue_custom_field_all_of import MultiVersionIssueCustomFieldAllOf
from youtrack_api.model.notification_settings import NotificationSettings
from youtrack_api.model.notifications_user_profile import NotificationsUserProfile
from youtrack_api.model.online_users import OnlineUsers
from youtrack_api.model.owned_bundle import OwnedBundle
from youtrack_api.model.owned_bundle_all_of import OwnedBundleAllOf
from youtrack_api.model.owned_bundle_custom_field_defaults import OwnedBundleCustomFieldDefaults
from youtrack_api.model.owned_bundle_custom_field_defaults_all_of import OwnedBundleCustomFieldDefaultsAllOf
from youtrack_api.model.owned_bundle_element import OwnedBundleElement
from youtrack_api.model.owned_bundle_element_all_of import OwnedBundleElementAllOf
from youtrack_api.model.owned_project_custom_field import OwnedProjectCustomField
from youtrack_api.model.parsed_command import ParsedCommand
from youtrack_api.model.period_field_format import PeriodFieldFormat
from youtrack_api.model.period_issue_custom_field import PeriodIssueCustomField
from youtrack_api.model.period_issue_custom_field_all_of import PeriodIssueCustomFieldAllOf
from youtrack_api.model.period_project_custom_field import PeriodProjectCustomField
from youtrack_api.model.period_value import PeriodValue
from youtrack_api.model.predefined_filter_field import PredefinedFilterField
from youtrack_api.model.project import Project
from youtrack_api.model.project_activity_item import ProjectActivityItem
from youtrack_api.model.project_all_of import ProjectAllOf
from youtrack_api.model.project_based_color_coding import ProjectBasedColorCoding
from youtrack_api.model.project_based_color_coding_all_of import ProjectBasedColorCodingAllOf
from youtrack_api.model.project_color import ProjectColor
from youtrack_api.model.project_custom_field import ProjectCustomField
from youtrack_api.model.project_time_tracking_settings import ProjectTimeTrackingSettings
from youtrack_api.model.rest_cors_settings import RestCorsSettings
from youtrack_api.model.saved_query import SavedQuery
from youtrack_api.model.saved_query_all_of import SavedQueryAllOf
from youtrack_api.model.search_suggestions import SearchSuggestions
from youtrack_api.model.simple_issue_custom_field import SimpleIssueCustomField
from youtrack_api.model.simple_project_custom_field import SimpleProjectCustomField
from youtrack_api.model.simple_value_activity_item import SimpleValueActivityItem
from youtrack_api.model.simple_value_activity_item_all_of import SimpleValueActivityItemAllOf
from youtrack_api.model.single_build_issue_custom_field import SingleBuildIssueCustomField
from youtrack_api.model.single_enum_issue_custom_field import SingleEnumIssueCustomField
from youtrack_api.model.single_group_issue_custom_field import SingleGroupIssueCustomField
from youtrack_api.model.single_owned_issue_custom_field import SingleOwnedIssueCustomField
from youtrack_api.model.single_user_issue_custom_field import SingleUserIssueCustomField
from youtrack_api.model.single_value_activity_item import SingleValueActivityItem
from youtrack_api.model.single_version_issue_custom_field import SingleVersionIssueCustomField
from youtrack_api.model.sprint import Sprint
from youtrack_api.model.sprint_activity_item import SprintActivityItem
from youtrack_api.model.sprint_activity_item_all_of import SprintActivityItemAllOf
from youtrack_api.model.sprints_settings import SprintsSettings
from youtrack_api.model.state_bundle import StateBundle
from youtrack_api.model.state_bundle_all_of import StateBundleAllOf
from youtrack_api.model.state_bundle_custom_field_defaults import StateBundleCustomFieldDefaults
from youtrack_api.model.state_bundle_custom_field_defaults_all_of import StateBundleCustomFieldDefaultsAllOf
from youtrack_api.model.state_bundle_element import StateBundleElement
from youtrack_api.model.state_bundle_element_all_of import StateBundleElementAllOf
from youtrack_api.model.state_issue_custom_field import StateIssueCustomField
from youtrack_api.model.state_issue_custom_field_all_of import StateIssueCustomFieldAllOf
from youtrack_api.model.state_project_custom_field import StateProjectCustomField
from youtrack_api.model.storage_entry import StorageEntry
from youtrack_api.model.suggestion import Suggestion
from youtrack_api.model.swimlane_entity_attribute_value import SwimlaneEntityAttributeValue
from youtrack_api.model.swimlane_settings import SwimlaneSettings
from youtrack_api.model.swimlane_value import SwimlaneValue
from youtrack_api.model.system_settings import SystemSettings
from youtrack_api.model.tags_activity_item import TagsActivityItem
from youtrack_api.model.tags_activity_item_all_of import TagsActivityItemAllOf
from youtrack_api.model.telemetry import Telemetry
from youtrack_api.model.text_custom_field_activity_item import TextCustomFieldActivityItem
from youtrack_api.model.text_custom_field_activity_item_all_of import TextCustomFieldActivityItemAllOf
from youtrack_api.model.text_field_value import TextFieldValue
from youtrack_api.model.text_issue_custom_field import TextIssueCustomField
from youtrack_api.model.text_issue_custom_field_all_of import TextIssueCustomFieldAllOf
from youtrack_api.model.text_markup_activity_item import TextMarkupActivityItem
from youtrack_api.model.text_markup_activity_item_all_of import TextMarkupActivityItemAllOf
from youtrack_api.model.text_project_custom_field import TextProjectCustomField
from youtrack_api.model.time_tracking_user_profile import TimeTrackingUserProfile
from youtrack_api.model.time_zone_descriptor import TimeZoneDescriptor
from youtrack_api.model.unlimited_visibility import UnlimitedVisibility
from youtrack_api.model.user import User
from youtrack_api.model.user_bundle import UserBundle
from youtrack_api.model.user_bundle_all_of import UserBundleAllOf
from youtrack_api.model.user_custom_field_defaults import UserCustomFieldDefaults
from youtrack_api.model.user_custom_field_defaults_all_of import UserCustomFieldDefaultsAllOf
from youtrack_api.model.user_group import UserGroup
from youtrack_api.model.user_profiles import UserProfiles
from youtrack_api.model.user_project_custom_field import UserProjectCustomField
from youtrack_api.model.uses_markup_activity_item import UsesMarkupActivityItem
from youtrack_api.model.uses_markup_activity_item_all_of import UsesMarkupActivityItemAllOf
from youtrack_api.model.vcs_change_activity_item import VcsChangeActivityItem
from youtrack_api.model.vcs_change_activity_item_all_of import VcsChangeActivityItemAllOf
from youtrack_api.model.vcs_unresolved_user import VcsUnresolvedUser
from youtrack_api.model.vcs_unresolved_user_all_of import VcsUnresolvedUserAllOf
from youtrack_api.model.version_bundle import VersionBundle
from youtrack_api.model.version_bundle_all_of import VersionBundleAllOf
from youtrack_api.model.version_bundle_custom_field_defaults import VersionBundleCustomFieldDefaults
from youtrack_api.model.version_bundle_custom_field_defaults_all_of import VersionBundleCustomFieldDefaultsAllOf
from youtrack_api.model.version_bundle_element import VersionBundleElement
from youtrack_api.model.version_bundle_element_all_of import VersionBundleElementAllOf
from youtrack_api.model.version_project_custom_field import VersionProjectCustomField
from youtrack_api.model.visibility import Visibility
from youtrack_api.model.visibility_activity_item import VisibilityActivityItem
from youtrack_api.model.visibility_activity_item_all_of import VisibilityActivityItemAllOf
from youtrack_api.model.visibility_group_activity_item import VisibilityGroupActivityItem
from youtrack_api.model.visibility_group_activity_item_all_of import VisibilityGroupActivityItemAllOf
from youtrack_api.model.visibility_user_activity_item import VisibilityUserActivityItem
from youtrack_api.model.visibility_user_activity_item_all_of import VisibilityUserActivityItemAllOf
from youtrack_api.model.voters_activity_item import VotersActivityItem
from youtrack_api.model.voters_activity_item_all_of import VotersActivityItemAllOf
from youtrack_api.model.wip_limit import WIPLimit
from youtrack_api.model.watch_folder import WatchFolder
from youtrack_api.model.watch_folder_all_of import WatchFolderAllOf
from youtrack_api.model.work_item_activity_item import WorkItemActivityItem
from youtrack_api.model.work_item_activity_item_all_of import WorkItemActivityItemAllOf
from youtrack_api.model.work_item_author_activity_item import WorkItemAuthorActivityItem
from youtrack_api.model.work_item_author_activity_item_all_of import WorkItemAuthorActivityItemAllOf
from youtrack_api.model.work_item_duration_activity_item import WorkItemDurationActivityItem
from youtrack_api.model.work_item_duration_activity_item_all_of import WorkItemDurationActivityItemAllOf
from youtrack_api.model.work_item_type import WorkItemType
from youtrack_api.model.work_item_type_activity_item import WorkItemTypeActivityItem
from youtrack_api.model.work_item_type_activity_item_all_of import WorkItemTypeActivityItemAllOf
from youtrack_api.model.work_time_settings import WorkTimeSettings
| [
"[email protected]"
] | |
4c130f1d7ab50ea7707adef47c427ff415ec9ec4 | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/aio/operations/_vpn_sites_configuration_operations.py | 528c2beb6e372606f8298a7459a2824db3f21598 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 8,266 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnSitesConfigurationOperations:
"""VpnSitesConfigurationOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _download_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
request: "_models.GetVpnSitesConfigurationRequest",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._download_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'GetVpnSitesConfigurationRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_download_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore
async def begin_download(
self,
resource_group_name: str,
virtual_wan_name: str,
request: "_models.GetVpnSitesConfigurationRequest",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Gives the sas-url to download the configurations for vpn-sites in a resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN for which configuration of all vpn-sites is
needed.
:type virtual_wan_name: str
:param request: Parameters supplied to download vpn-sites configuration.
:type request: ~azure.mgmt.network.v2019_08_01.models.GetVpnSitesConfigurationRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._download_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
request=request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_download.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/vpnConfiguration'} # type: ignore
| [
"[email protected]"
] | |
988fcc3bad6c71902716794d5af98b5ed49ce94a | 1f936103af336af6bbd335f45d6baa55c426922b | /monatbx/test_cov.py | fed4bc94b6147527eeec78e5faf7cafd539bf891 | [] | no_license | monarin/monatbx | 2ec342d67f1fbccb82656218ffd136f2eb7d96ab | 43f56974f811e5b2b0dcc428d4f9b36043ed9d04 | refs/heads/master | 2020-06-18T13:08:58.893701 | 2016-11-30T00:58:18 | 2016-11-30T00:58:18 | 75,136,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | import numpy as np
from cctbx.array_family import flex
G = [5, 6, 0.1, 20, 16, 12, 11, 10, 11.5, 15]
B = [100, 80, 200, 60, 70, 80, 85, 90, 70, 40]
rotx = [0.01, 0.002, 0.001, 0.05, 0.1, 0.025, 0.008, 0.01, 0.002, 0.001]
X = np.array([G, B, rotx])
print(X)
COV = np.cov(X)
print(COV)
# np.correlate works on two 1-D sequences; np.corrcoef gives the correlation matrix
CORR = np.corrcoef(X)
print(CORR)
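# Individual entries can be read off directly; for example COV[0, 1] is the
# covariance between G and B (illustrative check only):
print(COV[0, 1])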
| [
"[email protected]"
] | |
f4e9e05d8f49cab8f597e4920e0ebf279ac1185a | 32274e14bce27331626f734aaf2e9074dec9bbf0 | /great_expectations/data_asset/__init__.py | 9de24563be3757e723d06cee5b996be4c8d615f5 | [
"Apache-2.0"
] | permissive | dataguage/great_expectations | 1036a3d20dc320ce2e9986ae18a84932bb1fad3b | dc94522ca9b6742c0c1844cf378b781f74dfc9b5 | refs/heads/master | 2022-07-04T18:38:21.332245 | 2020-05-04T17:56:02 | 2020-05-04T17:56:02 | 261,509,508 | 2 | 0 | Apache-2.0 | 2020-05-05T15:24:58 | 2020-05-05T15:24:57 | null | UTF-8 | Python | false | false | 76 | py | from .data_asset import DataAsset
from .file_data_asset import FileDataAsset | [
"[email protected]"
] | |
706658a5ad3c0ba6ba13b0b90069ae2c6aca8d3d | 47deebe6fefedb01fdce5d4e82f58bb08f8e1e92 | /python core/Lesson_2/io_4.py | b4c12d1fa85d38f06e385bcd672cb795c9629322 | [] | no_license | developeryuldashev/python-core | 5bb162603bdb5782acf05e3fb25ca5dd6347067a | 08fca77c9cfde69d93a7875b3fb65b98f3dabd78 | refs/heads/main | 2023-08-21T03:33:12.160133 | 2021-10-19T04:56:53 | 2021-10-19T04:56:53 | 393,383,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | d=10
pi = 3.14
l = pi * d   # circumference of a circle with diameter d
print(l) | [
"[email protected]"
] | |
dca8dd72172381372dcb94f00fbeecad81c8ddd6 | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg_old/scg_Route/test_c37280.py | e1f0802b3947f00158ecb0ded8f58e68ea24a2c8 | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,359 | py | import pytest
import time
import sys
from page_obj.common.rail import *
from os.path import dirname, abspath
from page_obj.common.ssh import *
from page_obj.scg.scg_def_static_route import *
from page_obj.scg.scg_def_interface import *
sys.path.insert(0, dirname(dirname(abspath(__file__))))
test_id = 37280
# Verify bulk deletion of entries and deletion of a single route
def test_route_wxw(browser):
try:
        # Verify bulk deletion of routes
login_web(browser, url="10.2.2.83")
add_static_route_single_wxw(browser, ip='20.1.1.0', mask='24', out_device='ge0/2', gateway='13.1.1.1',
enable='yes')
add_static_route_single_wxw(browser, ip='21.1.1.0', mask='24', out_device='ge0/2', gateway='13.1.1.1',
enable='yes')
login_web(browser, url="10.2.2.81")
change_physical_interface_workmode_wxw(browser, interface='ge0/5',
route="yes", ip='21.1.1.1', mask='24',
trans='no')
ssh = SSH("10.1.1.202", 'root', 'root', 22)
ssh.connect()
ssh.execute('route add -net 13.1.1.0/24 gw 20.1.1.1')
result1 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result1)
ssh.close()
ssh = SSH("10.1.1.212", 'root', 'root', 22)
ssh.connect()
ssh.execute('route add -net 13.1.1.0/24 gw 21.1.1.1')
result2 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result2)
ssh.close()
login_web(browser, url="10.2.2.83")
        # Delete both routes
del_static_route_single_wxw(browser, destination1='20.1.1.0/255.255.255.0',
destination2='21.1.1.0/255.255.255.0')
ssh = SSH("10.1.1.202", 'root', 'root', 22)
ssh.connect()
result1_1 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result1_1)
ssh.close()
ssh = SSH("10.1.1.212", 'root', 'root', 22)
ssh.connect()
result2_1 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result2_1)
ssh.execute('route del -net 13.1.1.0/24 gw 21.1.1.1')
ssh.close()
        # Verify deletion of a single route
login_web(browser, url="10.2.2.83")
add_static_route_single_wxw(browser, ip='20.1.1.0', mask='24', out_device='ge0/2', gateway='13.1.1.1',
enable='yes')
ssh = SSH("10.1.1.202", 'root', 'root', 22)
ssh.connect()
ssh.execute('route add -net 13.1.1.0/24 gw 20.1.1.1')
result3 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result3)
ssh.close()
del_ipv4_static_route_bydestination(browser, destination='20.1.1.0/255.255.255.0')
ssh = SSH("10.1.1.202", 'root', 'root', 22)
ssh.connect()
ssh.execute('route add -net 13.1.1.0/24 gw 20.1.1.1')
result3_1 = ssh.execute('ping 13.1.1.3 -c 3')
# print(result3_1)
ssh.execute('route del -net 13.1.1.0/24 gw 21.1.1.1')
ssh.close()
login_web(browser, url="10.2.2.81")
change_physical_interface_workmode_wxw(browser, interface='ge0/5',
route="no", ip='21.1.1.1', mask='24',
trans='yes')
try:
assert "ttl" in result1
assert "ttl" in result2
assert "100% packet loss" in result1_1
assert "100% packet loss" in result2_1
assert "ttl"in result3
assert "100% packet loss" in result3_1
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "ttl" in result1
assert "ttl" in result2
assert "100% packet loss" in result1_1
assert "100% packet loss" in result2_1
assert "ttl" in result3
assert "100% packet loss" in result3_1
except Exception as err:
        # If any of the steps above raised an error, reload the device to restore the configuration
print(err)
rail_fail(test_run_id, test_id)
reload()
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c"+str(test_id)+".py"]) | [
"[email protected]"
] | |
dc4851b40cb641b66fe8f67c685289776001336a | 66fa0bcfe8fcd9e5087caf7e0b42e944ec636fc6 | /catalog/models.py | bcfdd700c38e2be4f0d8b7dac61af1245336ebd9 | [] | no_license | vintkor/for_prom | 899a7eb70978c1d4d77bf3a958d11423fd1bcda5 | 4b692c192d931e2ffb97dcd36bb7f31b242f848b | refs/heads/master | 2022-12-09T05:49:11.150879 | 2017-08-21T20:37:51 | 2017-08-21T20:37:51 | 99,605,489 | 0 | 0 | null | 2022-12-08T00:41:44 | 2017-08-07T18:02:53 | Python | UTF-8 | Python | false | false | 9,434 | py | from django.db import models
from testsite.baseModel import BaseModel
from ckeditor_uploader.fields import RichTextUploadingField
from mptt.models import MPTTModel, TreeForeignKey
from feature.models import Set, Feature, Unit, Value
from django.utils.crypto import get_random_string
from PIL import Image
from resizeimage import resizeimage
from testsite.settings import BASE_DIR
from information.models import Condition, ProviderFile
def set_image_name(instance, filename):
name = get_random_string(40)
ext = filename.split('.')[-1]
path = 'images/{}.{}'.format(name, ext)
return path
class CatalogCategory(BaseModel, MPTTModel):
title = models.CharField(verbose_name='Категория', max_length=255)
slug = models.SlugField(verbose_name="Слаг", max_length=255, default='')
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
image = models.ImageField(blank=True, default="", upload_to=set_image_name)
description = RichTextUploadingField(verbose_name="Описание категории", blank=True, default="")
active = models.BooleanField(default=True, verbose_name="Вкл/Выкл")
feature_set = models.ManyToManyField(Set, verbose_name="Набор характеристик")
class Meta:
verbose_name = "Категория"
verbose_name_plural = "Категории"
class MPTTMeta:
order_insertion_by = ['title']
def __str__(self):
return "{}".format(self.title)
def show_image(self):
""" Показывает превью изображения в админке """
if self.image:
return '<img src="{}" width=40>'.format(self.image.url)
else:
return "--"
def get_count_product(self):
""" Получает количество постов """
count = CatalogProduct.objects.filter(category=self.id).count()
return count
show_image.allow_tags = True
show_image.short_description = "Изображение"
get_count_product.short_description = "Кол-во товаров"
class CatalogCurrency(BaseModel):
title = models.CharField(max_length=100, verbose_name="Название")
short_title = models.CharField(max_length=5, verbose_name="Сокращение")
code = models.CharField(max_length=3, verbose_name="Код валюты")
course = models.DecimalField(verbose_name="Курс", max_digits=10, decimal_places=4)
is_main = models.BooleanField(verbose_name="Главная")
class Meta:
verbose_name = "Валюта"
verbose_name_plural = "Валюты"
def __str__(self):
return "{}".format(self.title)
class CatalogProduct(BaseModel):
title = models.CharField(max_length=255, verbose_name="Заголовок")
slug = models.SlugField(verbose_name="Слаг", max_length=255, default='', blank=True, null=True)
category = TreeForeignKey(CatalogCategory, blank=True, null=True)
price = models.DecimalField(verbose_name="Цена", max_digits=8, decimal_places=2, blank=True, null=True)
currency = models.ForeignKey(CatalogCurrency, verbose_name="Валюта", blank=True, null=True, on_delete=models.SET_NULL)
unit = models.ForeignKey(Unit, verbose_name="Единица измерения", default=None, on_delete=models.SET_NULL, blank=True, null=True)
step = models.DecimalField(verbose_name="Шаг", max_digits=8, decimal_places=3, default=1)
description = models.CharField(max_length=170, blank=True, verbose_name="META DESC", default="")
text = RichTextUploadingField(verbose_name="Текст поста", blank=True, default="")
image = models.ImageField(verbose_name="Изображение", blank=True, default='', upload_to=set_image_name)
active = models.BooleanField(default=True, verbose_name="Вкл/Выкл")
    # note: null/default are ignored by ManyToManyField, so only blank=True is kept
    condition = models.ManyToManyField(Condition, verbose_name='Условия поставщика', blank=True)
    provider_price = models.ManyToManyField(ProviderFile, verbose_name='Прайс поставщика', blank=True)
def __str__(self):
return "{}".format(self.title)
def show_image(self):
""" Показывает превью главного изображения в админке """
if self.image:
return '<img src="{}" width=40>'.format(self.image.url)
else:
return "--"
def get_files(self):
files = ProviderFile.objects.filter(catalogproduct=self)
return files
def get_conditions(self):
return Condition.objects.filter(catalogproduct=self)
def get_thumb_image(self):
if self.image:
url = self.image.url.split('.')
url = url[0] + '__100x160.' + url[1]
else:
url = '//placehold.it/300x360'
return url
def get_medium_image(self):
if self.image:
url = self.image.url.split('.')
url = url[0] + '__400x400.' + url[1]
else:
url = '//placehold.it/400x400'
return url
def get_count_comments(self):
""" Получает количество комментарив поста """
count = CatalogComment.objects.filter(parent=self.id).count()
return count
def get_all_images(self):
return CatalogImage.objects.filter(active=True, parent=self)
def get_price_in_main_currency(self):
return self.currency.course * self.price
def save(self, *args, **kwargs):
super(CatalogProduct, self).save(*args, **kwargs)
if self.category:
for s in Set.objects.filter(catalogcategory=self.category):
for f in Feature.objects.filter(set=s):
try:
ProductFeature.objects.get(product=self, feature=f)
except ProductFeature.DoesNotExist:
feature = ProductFeature(product=self, feature=f)
feature.save()
if self.image:
old_name = self.image.url.split('/')[-1].split('.')[0]
ext = self.image.url.split('/')[-1].split('.')[1]
thumb_size = [100, 160]
medium_size = [400, 400]
with open('{}{}'.format(BASE_DIR, self.image.url), 'r+b') as f:
with Image.open(f) as image:
cover = resizeimage.resize_cover(image, thumb_size)
cover.save('{}/media/images/{}__{}x{}.{}'.format(BASE_DIR, old_name, thumb_size[0], thumb_size[1], ext))
with Image.open(f) as image:
cover = resizeimage.resize_cover(image, medium_size)
cover.save('{}/media/images/{}__{}x{}.{}'.format(BASE_DIR, old_name, medium_size[0], medium_size[1], ext))
show_image.allow_tags = True
show_image.short_description = "Главное изображение"
get_count_comments.short_description = "Кол-во комментариев"
class Meta:
verbose_name = "Товар"
verbose_name_plural = "Товары"
class ProductFeature(BaseModel):
    feature = models.ForeignKey(Feature, default=None, null=True, verbose_name="Характеристика", on_delete=models.SET_NULL)
value = models.CharField(default=None, null=True, blank=True, verbose_name="Значение", max_length=150)
unit = models.ForeignKey(Unit, default=None, blank=True, null=True, verbose_name="Единица измерения", on_delete=models.SET_NULL)
product = models.ForeignKey(CatalogProduct, default=None, null=True, verbose_name="Товар", on_delete=models.SET_NULL)
class Meta:
verbose_name = "Характеристика товара"
verbose_name_plural = "Характеристики товара"
class CatalogComment(BaseModel):
parent = models.ForeignKey(CatalogProduct, related_name="parent", verbose_name="Товар коментария")
text = models.TextField()
def __str__(self):
return "Комментарий записи - {}".format(self.parent.title)
class Meta:
verbose_name = "Комментарий"
verbose_name_plural = "Комментарии"
class CatalogImage(BaseModel):
parent = models.ForeignKey(CatalogProduct, related_name="images", verbose_name="Изображение")
image = models.ImageField(blank=True, default='', upload_to=set_image_name, verbose_name="Изображение")
active = models.BooleanField(default=True, verbose_name="Вкл/Выкл")
class Meta:
verbose_name = "Изображение"
verbose_name_plural = "Изображения"
def __str__(self):
return "Изображение товара - {}".format(self.parent.title)
def show_image(self):
""" Показывает превью изображения в админке """
if self.image:
return '<img src="{}" width=30>'.format(self.image.url)
else:
return "--"
# def get_thumb(self):
# if self.image:
# url = self.image.url.split('.')
# url = url[0] + '__300x360.' + url[1]
# else:
# url = '//placehold.it/300x360'
# return url
show_image.allow_tags = True
show_image.short_description = "Изображение"
| [
"[email protected]"
] | |
4605daa21593dccfe9560412c3fb87185a00fb91 | 726754863696235c66c8ed9aa184fc13ade33afe | /basics/rotation_count.py | 116e425096403014edc34a74e8a6695ac5528788 | [] | no_license | harshalms/python | ef6a3eeb93c5051528cb0b76fd600a2943e10616 | bfea8f00795c4308b09d80852cb995a8109c1568 | refs/heads/master | 2021-07-17T18:52:36.053780 | 2020-07-25T15:45:41 | 2020-07-25T15:45:41 | 189,624,661 | 0 | 0 | null | 2019-06-17T05:37:24 | 2019-05-31T16:20:52 | Python | UTF-8 | Python | false | false | 461 | py | '''GeeksForGeeks
Find the Rotation Count in Rotated Sorted array
Consider an array of distinct numbers sorted in increasing order.
The array has been rotated (clockwise) k number of times. Given such an array,
find the value of k.
Approach: just find the index of the minimum element.
'''
A = [15, 18, 2, 3, 6, 12]
def indexOFmin(A):
    # start k at 0 so an unrotated (already sorted) array returns 0
    smallest, k = A[0], 0
    for i in range(len(A)):
        if smallest > A[i]:
            smallest, k = A[i], i
    return k
print(indexOFmin(A))
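# A couple of extra illustrative checks (expected rotation counts in comments):
print(indexOFmin([2, 3, 6, 12, 15, 18]))   # not rotated -> 0
print(indexOFmin([6, 12, 15, 18, 2, 3]))   # rotated 4 times -> 4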
| [
"[email protected]"
] | |
f04cfa1c8e45c8afac3256b904185d5a59c54180 | f8e6234ff0b3a4f2a30ad8c6e09f7b6d40ba9f0e | /mcscf/mc_ao2mo.py | 40301c13da0721d768b95a01fdf64d76bc6ecf8e | [
"BSD-2-Clause"
] | permissive | armunoz/pyscf | b358b90cfea46e4527eed997eaf509a11a282551 | 258167cabf2101148feadd3bc0da2dd2eccd725a | refs/heads/master | 2020-06-26T10:08:25.454426 | 2017-07-12T15:05:21 | 2017-07-12T15:05:21 | 97,016,486 | 0 | 0 | null | 2017-07-12T14:19:35 | 2017-07-12T14:19:34 | null | UTF-8 | Python | false | false | 14,827 | py | #!/usr/bin/env python
import sys
import ctypes
import time
import tempfile
from functools import reduce
import numpy
import h5py
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo import outcore
# least memory requirements:
# nmo ncore ncas outcore incore
# 200 40 16 0.8GB 3.7 GB (_eri 1.6GB intermediates 1.3G)
# 250 50 16 1.7GB 8.2 GB (_eri 3.9GB intermediates 2.6G)
# 300 60 16 3.1GB 16.8GB (_eri 8.1GB intermediates 5.6G)
# 400 80 16 8.5GB 53 GB (_eri 25.6GB intermediates 19G)
# 500 100 16 19 GB
# 600 120 16 37 GB
# 750 150 16 85 GB
libmcscf = lib.load_library('libmcscf')
def trans_e1_incore(eri_ao, mo, ncore, ncas):
nmo = mo.shape[1]
nocc = ncore + ncas
eri1 = ao2mo.incore.half_e1(eri_ao, (mo,mo[:,:nocc]), compact=False)
eri1 = eri1.reshape(nmo,nocc,-1)
klppshape = (0, nmo, 0, nmo)
klpashape = (0, nmo, ncore, nocc)
aapp = numpy.empty((ncas,ncas,nmo,nmo))
for i in range(ncas):
_ao2mo.nr_e2(eri1[ncore+i,ncore:nocc], mo, klppshape,
aosym='s4', mosym='s1', out=aapp[i])
ppaa = lib.transpose(aapp.reshape(ncas*ncas,-1)).reshape(nmo,nmo,ncas,ncas)
aapp = None
papa = numpy.empty((nmo,ncas,nmo,ncas))
for i in range(nmo):
_ao2mo.nr_e2(eri1[i,ncore:nocc], mo, klpashape,
aosym='s4', mosym='s1', out=papa[i])
pp = numpy.empty((nmo,nmo))
j_cp = numpy.zeros((ncore,nmo))
k_pc = numpy.zeros((nmo,ncore))
for i in range(ncore):
_ao2mo.nr_e2(eri1[i,i:i+1], mo, klppshape, aosym='s4', mosym='s1', out=pp)
j_cp[i] = pp.diagonal()
j_pc = j_cp.T.copy()
pp = numpy.empty((ncore,ncore))
for i in range(nmo):
klshape = (i, i+1, 0, ncore)
_ao2mo.nr_e2(eri1[i,:ncore], mo, klshape, aosym='s4', mosym='s1', out=pp)
k_pc[i] = pp.diagonal()
return j_pc, k_pc, ppaa, papa
# level = 1: ppaa, papa and jpc, kpc
# level = 2 or 3: ppaa, papa
def trans_e1_outcore(mol, mo, ncore, ncas, erifile,
max_memory=None, level=1, verbose=logger.WARN):
time0 = (time.clock(), time.time())
if isinstance(verbose, logger.Logger):
log = verbose
else:
log = logger.Logger(mol.stdout, verbose)
log.debug1('trans_e1_outcore level %d max_memory %d', level, max_memory)
nao, nmo = mo.shape
nao_pair = nao*(nao+1)//2
nocc = ncore + ncas
_tmpfile1 = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
faapp_buf = h5py.File(_tmpfile1.name)
feri = h5py.File(erifile, 'w')
mo_c = numpy.asarray(mo, order='C')
mo = numpy.asarray(mo, order='F')
pashape = (0, nmo, ncore, nocc)
papa_buf = numpy.zeros((nao,ncas,nmo*ncas))
j_pc = numpy.zeros((nmo,ncore))
k_pc = numpy.zeros((nmo,ncore))
mem_words = int(max(2000,max_memory-papa_buf.nbytes/1e6)*1e6/8)
aobuflen = mem_words//(nao_pair+nocc*nmo) + 1
ao_loc = numpy.array(mol.ao_loc_nr(), dtype=numpy.int32)
shranges = outcore.guess_shell_ranges(mol, True, aobuflen, None, ao_loc)
ao2mopt = _ao2mo.AO2MOpt(mol, 'cint2e_sph',
'CVHFnr_schwarz_cond', 'CVHFsetnr_direct_scf')
nstep = len(shranges)
paapp = 0
maxbuflen = max([x[2] for x in shranges])
log.debug('mem_words %.8g MB, maxbuflen = %d', mem_words*8/1e6, maxbuflen)
bufs1 = numpy.empty((maxbuflen, nao_pair))
bufs2 = numpy.empty((maxbuflen, nmo*ncas))
if level == 1:
bufs3 = numpy.empty((maxbuflen, nao*ncore))
log.debug('mem cache %.8g MB',
(bufs1.nbytes+bufs2.nbytes+bufs3.nbytes)/1e6)
else:
log.debug('mem cache %.8g MB', (bufs1.nbytes+bufs2.nbytes)/1e6)
ti0 = log.timer('Initializing trans_e1_outcore', *time0)
# fmmm, ftrans, fdrv for level 1
fmmm = libmcscf.AO2MOmmm_ket_nr_s2
ftrans = libmcscf.AO2MOtranse1_nr_s4
fdrv = libmcscf.AO2MOnr_e2_drv
for istep,sh_range in enumerate(shranges):
log.debug('[%d/%d], AO [%d:%d], len(buf) = %d',
istep+1, nstep, *sh_range)
buf = bufs1[:sh_range[2]]
_ao2mo.nr_e1fill('cint2e_sph', sh_range,
mol._atm, mol._bas, mol._env, 's4', 1, ao2mopt, buf)
if log.verbose >= logger.DEBUG1:
ti1 = log.timer('AO integrals buffer', *ti0)
bufpa = bufs2[:sh_range[2]]
_ao2mo.nr_e1(buf, mo, pashape, 's4', 's1', out=bufpa)
# jc_pp, kc_pp
if level == 1: # ppaa, papa and vhf, jcp, kcp
if log.verbose >= logger.DEBUG1:
ti1 = log.timer('buffer-pa', *ti1)
buf1 = bufs3[:sh_range[2]]
fdrv(ftrans, fmmm,
buf1.ctypes.data_as(ctypes.c_void_p),
buf.ctypes.data_as(ctypes.c_void_p),
mo.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(sh_range[2]), ctypes.c_int(nao),
(ctypes.c_int*4)(0, nao, 0, ncore),
ctypes.POINTER(ctypes.c_void_p)(), ctypes.c_int(0))
p0 = 0
for ij in range(sh_range[0], sh_range[1]):
i,j = _ao2mo._extract_pair(ij)
i0 = ao_loc[i]
j0 = ao_loc[j]
i1 = ao_loc[i+1]
j1 = ao_loc[j+1]
di = i1 - i0
dj = j1 - j0
if i == j:
dij = di * (di+1) // 2
buf = numpy.empty((di,di,nao*ncore))
idx = numpy.tril_indices(di)
buf[idx] = buf1[p0:p0+dij]
buf[idx[1],idx[0]] = buf1[p0:p0+dij]
buf = buf.reshape(di,di,nao,ncore)
mo1 = mo_c[i0:i1]
tmp = numpy.einsum('uvpc,pc->uvc', buf, mo[:,:ncore])
tmp = lib.dot(mo1.T, tmp.reshape(di,-1))
j_pc += numpy.einsum('vp,pvc->pc', mo1, tmp.reshape(nmo,di,ncore))
tmp = numpy.einsum('uvpc,uc->vcp', buf, mo1[:,:ncore])
tmp = lib.dot(tmp.reshape(-1,nmo), mo).reshape(di,ncore,nmo)
k_pc += numpy.einsum('vp,vcp->pc', mo1, tmp)
else:
dij = di * dj
buf = buf1[p0:p0+dij].reshape(di,dj,nao,ncore)
mo1 = mo_c[i0:i1]
mo2 = mo_c[j0:j1]
tmp = numpy.einsum('uvpc,pc->uvc', buf, mo[:,:ncore])
tmp = lib.dot(mo1.T, tmp.reshape(di,-1))
j_pc += numpy.einsum('vp,pvc->pc',
mo2, tmp.reshape(nmo,dj,ncore)) * 2
tmp = numpy.einsum('uvpc,uc->vcp', buf, mo1[:,:ncore])
tmp = lib.dot(tmp.reshape(-1,nmo), mo).reshape(dj,ncore,nmo)
k_pc += numpy.einsum('vp,vcp->pc', mo2, tmp)
tmp = numpy.einsum('uvpc,vc->ucp', buf, mo2[:,:ncore])
tmp = lib.dot(tmp.reshape(-1,nmo), mo).reshape(di,ncore,nmo)
k_pc += numpy.einsum('up,ucp->pc', mo1, tmp)
p0 += dij
if log.verbose >= logger.DEBUG1:
ti1 = log.timer('j_cp and k_cp', *ti1)
if log.verbose >= logger.DEBUG1:
ti1 = log.timer('half transformation of the buffer', *ti1)
# ppaa, papa
faapp_buf[str(istep)] = \
bufpa.reshape(sh_range[2],nmo,ncas)[:,ncore:nocc].reshape(-1,ncas**2).T
p0 = 0
for ij in range(sh_range[0], sh_range[1]):
i,j = _ao2mo._extract_pair(ij)
i0 = ao_loc[i]
j0 = ao_loc[j]
i1 = ao_loc[i+1]
j1 = ao_loc[j+1]
di = i1 - i0
dj = j1 - j0
if i == j:
dij = di * (di+1) // 2
buf1 = numpy.empty((di,di,nmo*ncas))
idx = numpy.tril_indices(di)
buf1[idx] = bufpa[p0:p0+dij]
buf1[idx[1],idx[0]] = bufpa[p0:p0+dij]
else:
dij = di * dj
buf1 = bufpa[p0:p0+dij].reshape(di,dj,-1)
mo1 = mo[j0:j1,ncore:nocc].copy()
for i in range(di):
lib.dot(mo1.T, buf1[i], 1, papa_buf[i0+i], 1)
mo1 = mo[i0:i1,ncore:nocc].copy()
buf1 = lib.dot(mo1.T, buf1.reshape(di,-1))
papa_buf[j0:j1] += buf1.reshape(ncas,dj,-1).transpose(1,0,2)
p0 += dij
if log.verbose >= logger.DEBUG1:
ti1 = log.timer('ppaa and papa buffer', *ti1)
ti0 = log.timer('gen AO/transform MO [%d/%d]'%(istep+1,nstep), *ti0)
buf = buf1 = bufpa = None
bufs1 = bufs2 = bufs3 = None
time1 = log.timer('mc_ao2mo pass 1', *time0)
log.debug1('Half transformation done. Current memory %d',
lib.current_memory()[0])
nblk = int(max(8, min(nmo, (max_memory*1e6/8-papa_buf.size)/(ncas**2*nmo))))
log.debug1('nblk for papa = %d', nblk)
dset = feri.create_dataset('papa', (nmo,ncas,nmo,ncas), 'f8')
for i0, i1 in prange(0, nmo, nblk):
tmp = lib.dot(mo[:,i0:i1].T, papa_buf.reshape(nao,-1))
dset[i0:i1] = tmp.reshape(i1-i0,ncas,nmo,ncas)
papa_buf = tmp = None
time1 = log.timer('papa pass 2', *time1)
tmp = numpy.empty((ncas**2,nao_pair))
p0 = 0
for istep, sh_range in enumerate(shranges):
tmp[:,p0:p0+sh_range[2]] = faapp_buf[str(istep)]
p0 += sh_range[2]
nblk = int(max(8, min(nmo, (max_memory*1e6/8-tmp.size)/(ncas**2*nmo)-1)))
log.debug1('nblk for ppaa = %d', nblk)
dset = feri.create_dataset('ppaa', (nmo,nmo,ncas,ncas), 'f8')
for i0, i1 in prange(0, nmo, nblk):
tmp1 = _ao2mo.nr_e2(tmp, mo, (i0,i1,0,nmo), 's4', 's1', ao_loc=ao_loc)
tmp1 = tmp1.reshape(ncas,ncas,i1-i0,nmo)
for j in range(i1-i0):
dset[i0+j] = tmp1[:,:,j].transpose(2,0,1)
tmp = tmp1 = None
time1 = log.timer('ppaa pass 2', *time1)
faapp_buf.close()
feri.close()
_tmpfile1 = None
time0 = log.timer('mc_ao2mo', *time0)
return j_pc, k_pc
# level = 1: ppaa, papa and vhf, jpc, kpc
# level = 2: ppaa, papa, vhf, jpc=0, kpc=0
class _ERIS(object):
def __init__(self, casscf, mo, method='incore', level=1):
mol = casscf.mol
nao, nmo = mo.shape
ncore = casscf.ncore
ncas = casscf.ncas
mem_incore, mem_outcore, mem_basic = _mem_usage(ncore, ncas, nmo)
mem_now = lib.current_memory()[0]
eri = casscf._scf._eri
if (method == 'incore' and eri is not None and
(mem_incore+mem_now < casscf.max_memory*.9) or
mol.incore_anyway):
if eri is None:
from pyscf.scf import _vhf
eri = _vhf.int2e_sph(mol._atm, mol._bas, mol._env)
self.j_pc, self.k_pc, self.ppaa, self.papa = \
trans_e1_incore(eri, mo, casscf.ncore, casscf.ncas)
else:
import gc
gc.collect()
log = logger.Logger(casscf.stdout, casscf.verbose)
self._tmpfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
max_memory = max(3000, casscf.max_memory*.9-mem_now)
if max_memory < mem_basic:
log.warn('Calculation needs %d MB memory, over CASSCF.max_memory (%d MB) limit',
(mem_basic+mem_now)/.9, casscf.max_memory)
self.j_pc, self.k_pc = \
trans_e1_outcore(mol, mo, casscf.ncore, casscf.ncas,
self._tmpfile.name,
max_memory=max_memory,
level=level, verbose=log)
self.feri = lib.H5TmpFile(self._tmpfile.name, 'r')
self.ppaa = self.feri['ppaa']
self.papa = self.feri['papa']
dm_core = numpy.dot(mo[:,:ncore], mo[:,:ncore].T)
vj, vk = casscf._scf.get_jk(mol, dm_core)
self.vhf_c = reduce(numpy.dot, (mo.T, vj*2-vk, mo))
def _mem_usage(ncore, ncas, nmo):
nvir = nmo - ncore
outcore = basic = ncas**2*nmo**2*2 * 8/1e6
incore = outcore + (ncore+ncas)*nmo**3*4/1e6
if outcore > 10000:
        sys.stderr.write('Be careful with the virtual memory address space `ulimit -v`\n')
return incore, outcore, basic
def prange(start, end, step):
for i in range(start, end, step):
yield i, min(i+step, end)
if __name__ == '__main__':
from pyscf import scf
from pyscf import gto
from pyscf import ao2mo
from pyscf.mcscf import mc1step
mol = gto.Mole()
mol.verbose = 0
mol.output = None#"out_h2o"
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'cc-pvtz',
'O': 'cc-pvtz',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
mc = mc1step.CASSCF(m, 6, 4)
mc.verbose = 4
mo = m.mo_coeff.copy()
eris0 = _ERIS(mc, mo, 'incore')
eris1 = _ERIS(mc, mo, 'outcore')
eris2 = _ERIS(mc, mo, 'outcore', level=1)
eris3 = _ERIS(mc, mo, 'outcore', level=2)
print('vhf_c', numpy.allclose(eris0.vhf_c, eris1.vhf_c))
print('j_pc ', numpy.allclose(eris0.j_pc , eris1.j_pc ))
print('k_pc ', numpy.allclose(eris0.k_pc , eris1.k_pc ))
print('ppaa ', numpy.allclose(eris0.ppaa , eris1.ppaa ))
print('papa ', numpy.allclose(eris0.papa , eris1.papa ))
print('vhf_c', numpy.allclose(eris0.vhf_c, eris2.vhf_c))
print('j_pc ', numpy.allclose(eris0.j_pc , eris2.j_pc ))
print('k_pc ', numpy.allclose(eris0.k_pc , eris2.k_pc ))
print('ppaa ', numpy.allclose(eris0.ppaa , eris2.ppaa ))
print('papa ', numpy.allclose(eris0.papa , eris2.papa ))
print('vhf_c', numpy.allclose(eris0.vhf_c, eris3.vhf_c))
print('ppaa ', numpy.allclose(eris0.ppaa , eris3.ppaa ))
print('papa ', numpy.allclose(eris0.papa , eris3.papa ))
ncore = mc.ncore
ncas = mc.ncas
nocc = ncore + ncas
nmo = mo.shape[1]
eri = ao2mo.incore.full(m._eri, mo, compact=False).reshape((nmo,)*4)
aaap = numpy.array(eri[ncore:nocc,ncore:nocc,ncore:nocc,:])
ppaa = numpy.array(eri[:,:,ncore:nocc,ncore:nocc])
papa = numpy.array(eri[:,ncore:nocc,:,ncore:nocc])
jc_pp = numpy.einsum('iipq->ipq', eri[:ncore,:ncore,:,:])
kc_pp = numpy.einsum('ipqi->ipq', eri[:ncore,:,:,:ncore])
vhf_c = numpy.einsum('cij->ij', jc_pp)*2 - numpy.einsum('cij->ij', kc_pp)
j_pc = numpy.einsum('ijj->ji', jc_pp)
k_pc = numpy.einsum('ijj->ji', kc_pp)
print('vhf_c', numpy.allclose(vhf_c, eris1.vhf_c))
print('j_pc ', numpy.allclose(j_pc, eris1.j_pc))
print('k_pc ', numpy.allclose(k_pc, eris1.k_pc))
print('ppaa ', numpy.allclose(ppaa , eris0.ppaa ))
print('papa ', numpy.allclose(papa , eris0.papa ))
| [
"[email protected]"
] | |
b7c6cb163f6f35915e4fd76d905f8bb4b2ff6646 | 23bdcb74f9c4d98ff790d140bea56fbb0fd7bbbd | /app/env/Lib/site-packages/sqlalchemy/dialects/mysql/base.py | 2bba2f81a7249980748bccf376d1a90900ce35f0 | [
"MIT"
] | permissive | OswaldoCaleone/transporte_uniescolar | 85c680c062fa14d0bc90afa60f7434b059fdf245 | 1ef10869358457a6a9d0f04df891ce4eb0905887 | refs/heads/main | 2023-08-30T18:37:11.496451 | 2021-11-12T13:35:45 | 2021-11-12T13:35:45 | 429,673,776 | 1 | 0 | MIT | 2021-11-19T04:53:24 | 2021-11-19T04:53:24 | null | UTF-8 | Python | false | false | 117,932 | py | # mysql/base.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: mysql
:name: MySQL / MariaDB
:full_support: 5.6, 5.7, 8.0 / 10.4, 10.5
:normal_support: 5.6+ / 10+
:best_effort: 5.0.2+ / 5.0.2+
Supported Versions and Features
-------------------------------
SQLAlchemy supports MySQL starting with version 5.0.2 through modern releases,
as well as all modern versions of MariaDB. See the official MySQL
documentation for detailed information about features supported in any given
server release.
.. versionchanged:: 1.4 minimum MySQL version supported is now 5.0.2.
MariaDB Support
~~~~~~~~~~~~~~~
The MariaDB variant of MySQL retains fundamental compatibility with MySQL's
protocols however the development of these two products continues to diverge.
Within the realm of SQLAlchemy, the two databases have a small number of
syntactical and behavioral differences that SQLAlchemy accommodates automatically.
To connect to a MariaDB database, no changes to the database URL are required::
engine = create_engine("mysql+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4")
Upon first connect, the SQLAlchemy dialect employs a
server version detection scheme that determines if the
backing database reports as MariaDB. Based on this flag, the dialect
can make different choices in those of areas where its behavior
must be different.
.. _mysql_mariadb_only_mode:
MariaDB-Only Mode
~~~~~~~~~~~~~~~~~
The dialect also supports an **optional** "MariaDB-only" mode of connection, which may be
useful for the case where an application makes use of MariaDB-specific features
and is not compatible with a MySQL database. To use this mode of operation,
replace the "mysql" token in the above URL with "mariadb"::
engine = create_engine("mariadb+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4")
The above engine, upon first connect, will raise an error if the server version
detection detects that the backing database is not MariaDB.
When using an engine with ``"mariadb"`` as the dialect name, **all mysql-specific options
that include the name "mysql" in them are now named with "mariadb"**. This means
options like ``mysql_engine`` should be named ``mariadb_engine``, etc. Both
"mysql" and "mariadb" options can be used simultaneously for applications that
use URLs with both "mysql" and "mariadb" dialects::
my_table = Table(
"mytable",
metadata,
Column("id", Integer, primary_key=True),
Column("textdata", String(50)),
mariadb_engine="InnoDB",
mysql_engine="InnoDB",
)
Index(
"textdata_ix",
my_table.c.textdata,
mysql_prefix="FULLTEXT",
mariadb_prefix="FULLTEXT",
)
Similar behavior will occur when the above structures are reflected, i.e. the
"mariadb" prefix will be present in the option names when the database URL
is based on the "mariadb" name.
.. versionadded:: 1.4 Added "mariadb" dialect name supporting "MariaDB-only mode"
for the MySQL dialect.
.. _mysql_connection_timeouts:
Connection Timeouts and Disconnects
-----------------------------------
MySQL / MariaDB feature an automatic connection close behavior, for connections that
have been idle for a fixed period of time, defaulting to eight hours.
To circumvent this issue, use
the :paramref:`_sa.create_engine.pool_recycle` option which ensures that
a connection will be discarded and replaced with a new one if it has been
present in the pool for a fixed number of seconds::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
For more comprehensive disconnect detection of pooled connections, including
accommodation of server restarts and network issues, a pre-ping approach may
be employed. See :ref:`pool_disconnects` for current approaches.
.. seealso::
:ref:`pool_disconnects` - Background on several techniques for dealing
with timed out connections as well as database restarts.
.. _mysql_storage_engines:
CREATE TABLE arguments including Storage Engines
------------------------------------------------
Both MySQL's and MariaDB's CREATE TABLE syntax includes a wide array of special options,
including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``,
``INSERT_METHOD``, and many more.
To accommodate the rendering of these arguments, specify the form
``mysql_argument_name="value"``. For example, to specify a table with
``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8mb4``, and ``KEY_BLOCK_SIZE``
of ``1024``::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8mb4',
mysql_key_block_size="1024"
)
When supporting :ref:`mysql_mariadb_only_mode` mode, similar keys against
the "mariadb" prefix must be included as well. The values can of course
vary independently so that different settings on MySQL vs. MariaDB may
be maintained::
# support both "mysql" and "mariadb-only" engine URLs
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mariadb_engine='InnoDB',
mysql_charset='utf8mb4',
mariadb_charset='utf8',
mysql_key_block_size="1024"
mariadb_key_block_size="1024"
)
The MySQL / MariaDB dialects will normally transfer any keyword specified as
``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the
``CREATE TABLE`` statement. A handful of these names will render with a space
instead of an underscore; to support this, the MySQL dialect has awareness of
these particular names, which include ``DATA DIRECTORY``
(e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g.
``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g.
``mysql_index_directory``).
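As an illustrative sketch (the table name, column, and directory path below are
placeholders, and the exact DDL emitted may differ slightly between versions),
an option such as ``mysql_data_directory`` is rendered with a space in the
keyword name::
    Table('mytable', metadata,
          Column('data', String(32)),
          mysql_data_directory="/var/lib/mysql-data"
         )
    # renders approximately as:
    # CREATE TABLE mytable (data VARCHAR(32)) DATA DIRECTORY='/var/lib/mysql-data'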
The most common argument is ``mysql_engine``, which refers to the storage
engine for the table. Historically, MySQL server installations would default
to ``MyISAM`` for this value, although newer versions may be defaulting
to ``InnoDB``. The ``InnoDB`` engine is typically preferred for its support
of transactions and foreign keys.
A :class:`_schema.Table`
that is created in a MySQL / MariaDB database with a storage engine
of ``MyISAM`` will be essentially non-transactional, meaning any
INSERT/UPDATE/DELETE statement referring to this table will be invoked as
autocommit. It also will have no support for foreign key constraints; while
the ``CREATE TABLE`` statement accepts foreign key options, when using the
``MyISAM`` storage engine these arguments are discarded. Reflecting such a
table will also produce no foreign key constraint information.
For fully atomic transactions as well as support for foreign key
constraints, all participating ``CREATE TABLE`` statements must specify a
transactional engine, which in the vast majority of cases is ``InnoDB``.
Case Sensitivity and Table Reflection
-------------------------------------
Both MySQL and MariaDB have inconsistent support for case-sensitive identifier
names, basing support on specific details of the underlying
operating system. However, it has been observed that no matter
what case sensitivity behavior is present, the names of tables in
foreign key declarations are *always* received from the database
as all-lower case, making it impossible to accurately reflect a
schema where inter-related tables use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as
all lower case both within SQLAlchemy as well as on the MySQL / MariaDB
database itself, especially if database reflection features are
to be used.
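In practice this simply means spelling names in lower case within table
metadata, as in this minimal sketch (table and column names are placeholders)::
    Table('customer_order', metadata,
          Column('id', Integer, primary_key=True)
         )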
.. _mysql_isolation_level:
Transaction Isolation Level
---------------------------
All MySQL / MariaDB dialects support setting of transaction isolation level both via a
dialect-specific parameter :paramref:`_sa.create_engine.isolation_level`
accepted
by :func:`_sa.create_engine`, as well as the
:paramref:`.Connection.execution_options.isolation_level` argument as passed to
:meth:`_engine.Connection.execution_options`.
This feature works by issuing the
command ``SET SESSION TRANSACTION ISOLATION LEVEL <level>`` for each new
connection. For the special AUTOCOMMIT isolation level, DBAPI-specific
techniques are used.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"mysql://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
The special ``AUTOCOMMIT`` value makes use of the various "autocommit"
attributes provided by specific DBAPIs, and is currently supported by
MySQLdb, MySQL-Client, MySQL-Connector Python, and PyMySQL. Using it,
the database connection will return true for the value of
``SELECT @@autocommit;``.
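As a brief sketch reusing the per-connection pattern shown above (the result
shown in the comment is what these drivers typically report), the effect can be
observed directly::
    with engine.connect() as connection:
        connection = connection.execution_options(
            isolation_level="AUTOCOMMIT"
        )
        # the driver-level autocommit flag is now enabled
        connection.exec_driver_sql("SELECT @@autocommit").scalar()   # 1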
.. seealso::
:ref:`dbapi_autocommit`
AUTO_INCREMENT Behavior
-----------------------
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
the first :class:`.Integer` primary key column which is not marked as a
foreign key::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
id INTEGER NOT NULL AUTO_INCREMENT,
PRIMARY KEY (id)
)
You can disable this behavior by passing ``False`` to the
:paramref:`_schema.Column.autoincrement` argument of :class:`_schema.Column`.
This flag
can also be used to enable auto-increment on a secondary column in a
multi-column key for some storage engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
.. _mysql_ss_cursors:
Server Side Cursors
-------------------
Server-side cursor support is available for the mysqlclient, PyMySQL,
mariadbconnector dialects and may also be available in others. This makes use
of either the "buffered=True/False" flag if available or by using a class such
as ``MySQLdb.cursors.SSCursor`` or ``pymysql.cursors.SSCursor`` internally.
Server side cursors are enabled on a per-statement basis by using the
:paramref:`.Connection.execution_options.stream_results` connection execution
option::
with engine.connect() as conn:
result = conn.execution_options(stream_results=True).execute(text("select * from table"))
Note that some kinds of SQL statements may not be supported with
server side cursors; generally, only SQL statements that return rows should be
used with this option.
.. deprecated:: 1.4 The dialect-level server_side_cursors flag is deprecated
and will be removed in a future release. Please use the
:paramref:`_engine.Connection.stream_results` execution option for
unbuffered cursor support.
.. seealso::
:ref:`engine_stream_results`
.. _mysql_unicode:
Unicode
-------
Charset Selection
~~~~~~~~~~~~~~~~~
Most MySQL / MariaDB DBAPIs offer the option to set the client character set for
a connection. This is typically delivered using the ``charset`` parameter
in the URL, such as::
e = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
This charset is the **client character set** for the connection. Some
MySQL DBAPIs will default this to a value such as ``latin1``, and some
will make use of the ``default-character-set`` setting in the ``my.cnf``
file as well. Documentation for the DBAPI in use should be consulted
for specific behavior.
The encoding used for Unicode has traditionally been ``'utf8'``. However, for
MySQL versions 5.5.3 and MariaDB 5.5 on forward, a new MySQL-specific encoding
``'utf8mb4'`` has been introduced, and as of MySQL 8.0 a warning is emitted by
the server if plain ``utf8`` is specified within any server-side directives,
replaced with ``utf8mb3``. The rationale for this new encoding is due to the
fact that MySQL's legacy utf-8 encoding only supports codepoints up to three
bytes instead of four. Therefore, when communicating with a MySQL or MariaDB
database that includes codepoints more than three bytes in size, this new
charset is preferred, if supported by both the database as well as the client
DBAPI, as in::
e = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
All modern DBAPIs should support the ``utf8mb4`` charset.
In order to use ``utf8mb4`` encoding for a schema that was created with legacy
``utf8``, changes to the MySQL/MariaDB schema and/or server configuration may be
required.
.. seealso::
`The utf8mb4 Character Set \
<https://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html>`_ - \
in the MySQL documentation
.. _mysql_binary_introducer:
Dealing with Binary Data Warnings and Unicode
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL versions 5.6, 5.7 and later (not MariaDB at the time of this writing) now
emit a warning when attempting to pass binary data to the database, while a
character set encoding is also in place, when the binary data itself is not
valid for that encoding::
default.py:509: Warning: (1300, "Invalid utf8mb4 character string:
'F9876A'")
cursor.execute(statement, parameters)
This warning is due to the fact that the MySQL client library is attempting to
interpret the binary string as a unicode object even if a datatype such
as :class:`.LargeBinary` is in use. To resolve this, the SQL statement requires
a binary "character set introducer" be present before any non-NULL value
that renders like this::
INSERT INTO table (data) VALUES (_binary %s)
These character set introducers are provided by the DBAPI driver, assuming the
use of mysqlclient or PyMySQL (both of which are recommended). Add the query
string parameter ``binary_prefix=true`` to the URL to repair this warning::
# mysqlclient
engine = create_engine(
"mysql+mysqldb://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
# PyMySQL
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
The ``binary_prefix`` flag may or may not be supported by other MySQL drivers.
SQLAlchemy itself cannot render this ``_binary`` prefix reliably, as it does
not work with the NULL value, which is valid to be sent as a bound parameter.
As the MySQL driver renders parameters directly into the SQL string, it's the
most efficient place for this additional keyword to be passed.
.. seealso::
`Character set introducers <https://dev.mysql.com/doc/refman/5.7/en/charset-introducer.html>`_ - on the MySQL website
ANSI Quoting Style
------------------
MySQL / MariaDB feature two varieties of identifier "quoting style", one using
backticks and the other using quotes, e.g. ```some_identifier``` vs.
``"some_identifier"``. All MySQL dialects detect which version
is in use by checking the value of ``sql_mode`` when a connection is first
established with a particular :class:`_engine.Engine`.
This quoting style comes
into play when rendering table and column names as well as when reflecting
existing database structures. The detection is entirely automatic and
no special configuration is needed to use either quoting style.
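As a brief illustration, independent of any particular server configuration,
compiling a statement against the MySQL dialect with its default backtick
style quotes reserved words such as ``order`` and ``desc``::
    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects import mysql
    t = Table("order", MetaData(), Column("desc", Integer))
    print(select(t.c["desc"]).compile(dialect=mysql.dialect()))
    # e.g. SELECT `order`.`desc` FROM `order`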
MySQL / MariaDB SQL Extensions
------------------------------
Many of the MySQL / MariaDB SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid SQL statement can be executed as a string as well.
Some limited direct support for MySQL / MariaDB extensions to SQL is currently
available.
* INSERT..ON DUPLICATE KEY UPDATE: See
:ref:`mysql_insert_on_duplicate_key_update`
* SELECT pragma, use :meth:`_expression.Select.prefix_with` and
:meth:`_query.Query.prefix_with`::
select(...).prefix_with(['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., mysql_limit=10, mariadb_limit=10)
* optimizer hints, use :meth:`_expression.Select.prefix_with` and
:meth:`_query.Query.prefix_with`::
select(...).prefix_with("/*+ NO_RANGE_OPTIMIZATION(t4 PRIMARY) */")
* index hints, use :meth:`_expression.Select.with_hint` and
:meth:`_query.Query.with_hint`::
select(...).with_hint(some_table, "USE INDEX xyz")
* MATCH operator support::
from sqlalchemy.dialects.mysql import match
select(...).where(match(col1, col2, against="some expr").in_boolean_mode())
.. seealso::
:class:`_mysql.match`
.. _mysql_insert_on_duplicate_key_update:
INSERT...ON DUPLICATE KEY UPDATE (Upsert)
------------------------------------------
MySQL / MariaDB allow "upserts" (update or insert)
of rows into a table via the ``ON DUPLICATE KEY UPDATE`` clause of the
``INSERT`` statement. A candidate row will only be inserted if that row does
not match an existing primary or unique key in the table; otherwise, an UPDATE
will be performed. The statement allows for separate specification of the
values to INSERT versus the values for UPDATE.
SQLAlchemy provides ``ON DUPLICATE KEY UPDATE`` support via the MySQL-specific
:func:`.mysql.insert()` function, which provides
the generative method :meth:`~.mysql.Insert.on_duplicate_key_update`:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.dialects.mysql import insert
>>> insert_stmt = insert(my_table).values(
... id='some_existing_id',
... data='inserted value')
>>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
... data=insert_stmt.inserted.data,
... status='U'
... )
>>> print(on_duplicate_key_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (%s, %s)
ON DUPLICATE KEY UPDATE data = VALUES(data), status = %s
Unlike PostgreSQL's "ON CONFLICT" phrase, the "ON DUPLICATE KEY UPDATE"
phrase will always match on any primary key or unique key, and will always
perform an UPDATE if there's a match; there are no options for it to raise
an error or to skip performing an UPDATE.
``ON DUPLICATE KEY UPDATE`` is used to perform an update of the already
existing row, using any combination of new values as well as values
from the proposed insertion. These values are normally specified using
keyword arguments passed to the
:meth:`_mysql.Insert.on_duplicate_key_update` method, using column keys
(usually the name of the column, unless :paramref:`_schema.Column.key` is
specified) as keys and literal values or SQL expressions as values:
.. sourcecode:: pycon+sql
>>> insert_stmt = insert(my_table).values(
... id='some_existing_id',
... data='inserted value')
>>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
... data="some data",
... updated_at=func.current_timestamp(),
... )
>>> print(on_duplicate_key_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (%s, %s)
ON DUPLICATE KEY UPDATE data = %s, updated_at = CURRENT_TIMESTAMP
In a manner similar to that of :meth:`.UpdateBase.values`, other parameter
forms are accepted, including a single dictionary:
.. sourcecode:: pycon+sql
>>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
... {"data": "some data", "updated_at": func.current_timestamp()},
... )
as well as a list of 2-tuples, which will automatically provide
a parameter-ordered UPDATE statement in a manner similar to that described
at :ref:`updates_order_parameters`. Unlike the :class:`_expression.Update`
object,
no special flag is needed to specify the intent since the argument form in
this context is unambiguous:
.. sourcecode:: pycon+sql
>>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
... [
... ("data", "some data"),
... ("updated_at", func.current_timestamp()),
... ]
... )
>>> print(on_duplicate_key_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (%s, %s)
ON DUPLICATE KEY UPDATE data = %s, updated_at = CURRENT_TIMESTAMP
.. versionchanged:: 1.3 support for parameter-ordered UPDATE clause within
MySQL ON DUPLICATE KEY UPDATE
.. warning::
The :meth:`_mysql.Insert.on_duplicate_key_update`
method does **not** take into
account Python-side default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON DUPLICATE KEY style of UPDATE,
unless they are manually specified explicitly in the parameters.
In order to refer to the proposed insertion row, the special alias
:attr:`_mysql.Insert.inserted` is available as an attribute on
the :class:`_mysql.Insert` object; this object is a
:class:`_expression.ColumnCollection` which contains all columns of the target
table:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh')
>>> do_update_stmt = stmt.on_duplicate_key_update(
... data="updated value",
... author=stmt.inserted.author
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data, author) VALUES (%s, %s, %s)
ON DUPLICATE KEY UPDATE data = %s, author = VALUES(author)
When rendered, the "inserted" namespace will produce the expression
``VALUES(<columnname>)``.
.. versionadded:: 1.2 Added support for MySQL ON DUPLICATE KEY UPDATE clause
rowcount Support
----------------
SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to mean
"number of rows matched by an UPDATE or DELETE statement". This contradicts
the default setting on most MySQL DBAPI drivers, which is "number of rows
actually modified/deleted". For this reason, the
SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS``
flag, or whatever is equivalent for the target dialect, upon connection.
This setting is currently hardcoded.
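As a brief hypothetical illustration using the ``my_table`` construct from
earlier examples, an UPDATE that matches a row but leaves its value unchanged
still counts that row in ``rowcount``::
    with engine.begin() as conn:
        result = conn.execute(
            my_table.update()
            .where(my_table.c.data == "some data")
            .values(data="some data")
        )
        print(result.rowcount)  # rows matched, not rows actually changed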
.. seealso::
:attr:`_engine.CursorResult.rowcount`
.. _mysql_indexes:
MySQL / MariaDB-Specific Index Options
-----------------------------------------
MySQL and MariaDB-specific extensions to the :class:`.Index` construct are available.
Index Length
~~~~~~~~~~~~~
MySQL and MariaDB both provide an option to create index entries with a certain length, where
"length" refers to the number of characters or bytes in each value which will
become part of the index. SQLAlchemy provides this feature via the
``mysql_length`` and/or ``mariadb_length`` parameters::
Index('my_index', my_table.c.data, mysql_length=10, mariadb_length=10)
Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
'b': 9})
Index('a_b_idx', my_table.c.a, my_table.c.b, mariadb_length={'a': 4,
'b': 9})
Prefix lengths are given in characters for nonbinary string types and in bytes
for binary string types. The value passed to the keyword argument *must* be
either an integer (and, thus, specify the same prefix length value for all
columns of the index) or a dict in which keys are column names and values are
prefix length values for corresponding columns. MySQL and MariaDB only allow a
length for an index column if the column is of the CHAR, VARCHAR, TEXT, BINARY,
VARBINARY, or BLOB type.
Index Prefixes
~~~~~~~~~~~~~~
MySQL storage engines permit you to specify an index prefix when creating
an index. SQLAlchemy provides this feature via the
``mysql_prefix`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_prefix='FULLTEXT')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX, so it *must* be a valid index prefix for your MySQL
storage engine.
.. versionadded:: 1.1.5
.. seealso::
`CREATE INDEX <https://dev.mysql.com/doc/refman/5.0/en/create-index.html>`_ - MySQL documentation
Index Types
~~~~~~~~~~~~~
Some MySQL storage engines permit you to specify an index type when creating
an index or primary key constraint. SQLAlchemy provides this feature via the
``mysql_using`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_using='hash', mariadb_using='hash')
As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
PrimaryKeyConstraint("data", mysql_using='hash', mariadb_using='hash')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
type for your MySQL storage engine.
More information can be found at:
https://dev.mysql.com/doc/refman/5.0/en/create-index.html
https://dev.mysql.com/doc/refman/5.0/en/create-table.html
Index Parsers
~~~~~~~~~~~~~
CREATE FULLTEXT INDEX in MySQL also supports a "WITH PARSER" option. This
is available using the keyword argument ``mysql_with_parser``::
Index(
'my_index', my_table.c.data,
mysql_prefix='FULLTEXT', mysql_with_parser="ngram",
mariadb_prefix='FULLTEXT', mariadb_with_parser="ngram",
)
.. versionadded:: 1.3
.. _mysql_foreign_keys:
MySQL / MariaDB Foreign Keys
-----------------------------
MySQL and MariaDB's behavior regarding foreign keys has some important caveats.
Foreign Key Arguments to Avoid
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Neither MySQL nor MariaDB support the foreign key arguments "DEFERRABLE", "INITIALLY",
or "MATCH". Using the ``deferrable`` or ``initially`` keyword argument with
:class:`_schema.ForeignKeyConstraint` or :class:`_schema.ForeignKey`
will have the effect of
these keywords being rendered in a DDL expression, which will then raise an
error on MySQL or MariaDB. In order to use these keywords on a foreign key while having
them ignored on a MySQL / MariaDB backend, use a custom compile rule::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import ForeignKeyConstraint
@compiles(ForeignKeyConstraint, "mysql", "mariadb")
def process(element, compiler, **kw):
element.deferrable = element.initially = None
return compiler.visit_foreign_key_constraint(element, **kw)
The "MATCH" keyword is in fact more insidious, and is explicitly disallowed
by SQLAlchemy in conjunction with the MySQL or MariaDB backends. This argument is
silently ignored by MySQL / MariaDB, but in addition has the effect of ON UPDATE and ON
DELETE options also being ignored by the backend. Therefore MATCH should
never be used with the MySQL / MariaDB backends; as is the case with DEFERRABLE and
INITIALLY, custom compilation rules can be used to correct a
ForeignKeyConstraint at DDL definition time.
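As a sketch, the compile rule shown above may be extended to also clear the
``match`` attribute before invoking the default compilation::
    @compiles(ForeignKeyConstraint, "mysql", "mariadb")
    def process(element, compiler, **kw):
        element.deferrable = element.initially = element.match = None
        return compiler.visit_foreign_key_constraint(element, **kw)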
Reflection of Foreign Key Constraints
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Not all MySQL / MariaDB storage engines support foreign keys. When using the
very common ``MyISAM`` MySQL storage engine, the information loaded by table
reflection will not include foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload_with=engine
)
.. seealso::
:ref:`mysql_storage_engines`
.. _mysql_unique_constraints:
MySQL / MariaDB Unique Constraints and Reflection
----------------------------------------------------
SQLAlchemy supports both the :class:`.Index` construct with the
flag ``unique=True``, indicating a UNIQUE index, as well as the
:class:`.UniqueConstraint` construct, representing a UNIQUE constraint.
Both objects/syntaxes are supported by MySQL / MariaDB when emitting DDL to create
these constraints. However, MySQL / MariaDB does not have a unique constraint
construct that is separate from a unique index; that is, the "UNIQUE"
constraint on MySQL / MariaDB is equivalent to creating a "UNIQUE INDEX".
When reflecting these constructs, the
:meth:`_reflection.Inspector.get_indexes`
and the :meth:`_reflection.Inspector.get_unique_constraints`
methods will **both**
return an entry for a UNIQUE index in MySQL / MariaDB. However, when performing
full table reflection using ``Table(..., autoload_with=engine)``,
the :class:`.UniqueConstraint` construct is
**not** part of the fully reflected :class:`_schema.Table` construct under any
circumstances; this construct is always represented by a :class:`.Index`
with the ``unique=True`` setting present in the :attr:`_schema.Table.indexes`
collection.
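For example, using the hypothetical table name ``mytable``, both inspector
methods report the unique index, while full reflection represents it only as
an :class:`.Index` with ``unique=True``::
    from sqlalchemy import MetaData, Table, inspect
    insp = inspect(engine)
    insp.get_indexes("mytable")             # includes the UNIQUE index
    insp.get_unique_constraints("mytable")  # reports an entry for it as well
    reflected = Table("mytable", MetaData(), autoload_with=engine)
    unique_indexes = [ix for ix in reflected.indexes if ix.unique]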
TIMESTAMP / DATETIME issues
---------------------------
.. _mysql_timestamp_onupdate:
Rendering ON UPDATE CURRENT TIMESTAMP for MySQL / MariaDB's explicit_defaults_for_timestamp
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL / MariaDB have historically expanded the DDL for the :class:`_types.TIMESTAMP`
datatype into the phrase "TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE
CURRENT_TIMESTAMP", which includes non-standard SQL that automatically updates
the column with the current timestamp when an UPDATE occurs, eliminating the
usual need to use a trigger in such a case where server-side update changes are
desired.
MySQL 5.6 introduced a new flag `explicit_defaults_for_timestamp
<https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html
#sysvar_explicit_defaults_for_timestamp>`_ which disables the above behavior,
and in MySQL 8 this flag defaults to true, meaning in order to get a MySQL
"on update timestamp" without changing this flag, the above DDL must be
rendered explicitly. Additionally, the same DDL is valid for use of the
``DATETIME`` datatype as well.
SQLAlchemy's MySQL dialect does not yet have an option to generate
MySQL's "ON UPDATE CURRENT_TIMESTAMP" clause, noting that this is not a general
purpose "ON UPDATE" as there is no such syntax in standard SQL. SQLAlchemy's
:paramref:`_schema.Column.server_onupdate` parameter is currently not related
to this special MySQL behavior.
To generate this DDL, make use of the :paramref:`_schema.Column.server_default`
parameter and pass a textual clause that also includes the ON UPDATE clause::
from sqlalchemy import Table, MetaData, Column, Integer, String, TIMESTAMP
from sqlalchemy import text
metadata = MetaData()
mytable = Table(
"mytable",
metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50)),
Column(
'last_updated',
TIMESTAMP,
server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP")
)
)
The same instructions apply to use of the :class:`_types.DateTime` and
:class:`_types.DATETIME` datatypes::
from sqlalchemy import DateTime
mytable = Table(
"mytable",
metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50)),
Column(
'last_updated',
DateTime,
server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP")
)
)
Even though the :paramref:`_schema.Column.server_onupdate` feature does not
generate this DDL, it still may be desirable to signal to the ORM that this
updated value should be fetched. This syntax looks like the following::
from sqlalchemy.schema import FetchedValue
class MyClass(Base):
__tablename__ = 'mytable'
id = Column(Integer, primary_key=True)
data = Column(String(50))
last_updated = Column(
TIMESTAMP,
server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"),
server_onupdate=FetchedValue()
)
.. _mysql_timestamp_null:
TIMESTAMP Columns and NULL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL historically enforces that a column which specifies the
TIMESTAMP datatype implicitly includes a default value of
CURRENT_TIMESTAMP, even though this is not stated, and additionally
sets the column as NOT NULL, the opposite behavior vs. that of all
other datatypes::
mysql> CREATE TABLE ts_test (
-> a INTEGER,
-> b INTEGER NOT NULL,
-> c TIMESTAMP,
-> d TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-> e TIMESTAMP NULL);
Query OK, 0 rows affected (0.03 sec)
mysql> SHOW CREATE TABLE ts_test;
+---------+-----------------------------------------------------
| Table | Create Table
+---------+-----------------------------------------------------
| ts_test | CREATE TABLE `ts_test` (
`a` int(11) DEFAULT NULL,
`b` int(11) NOT NULL,
`c` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`e` timestamp NULL DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
Above, we see that an INTEGER column defaults to NULL, unless it is specified
with NOT NULL. But when the column is of type TIMESTAMP, an implicit
default of CURRENT_TIMESTAMP is generated which also coerces the column
to be a NOT NULL, even though we did not specify it as such.
This behavior of MySQL can be changed on the MySQL side using the
`explicit_defaults_for_timestamp
<https://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html
#sysvar_explicit_defaults_for_timestamp>`_ configuration flag introduced in
MySQL 5.6. With this server setting enabled, TIMESTAMP columns behave like
any other datatype on the MySQL side with regards to defaults and nullability.
However, to accommodate the vast majority of MySQL databases that do not
specify this new flag, SQLAlchemy emits the "NULL" specifier explicitly with
any TIMESTAMP column that does not specify ``nullable=False``. In order to
accommodate newer databases that specify ``explicit_defaults_for_timestamp``,
SQLAlchemy also emits NOT NULL for TIMESTAMP columns that do specify
``nullable=False``. The following example illustrates::
from sqlalchemy import MetaData, Integer, Table, Column, text
from sqlalchemy.dialects.mysql import TIMESTAMP
m = MetaData()
t = Table('ts_test', m,
Column('a', Integer),
Column('b', Integer, nullable=False),
Column('c', TIMESTAMP),
Column('d', TIMESTAMP, nullable=False)
)
from sqlalchemy import create_engine
e = create_engine("mysql://scott:tiger@localhost/test", echo=True)
m.create_all(e)
output::
CREATE TABLE ts_test (
a INTEGER,
b INTEGER NOT NULL,
c TIMESTAMP NULL,
d TIMESTAMP NOT NULL
)
.. versionchanged:: 1.0.0 - SQLAlchemy now renders NULL or NOT NULL in all
cases for TIMESTAMP columns, to accommodate
``explicit_defaults_for_timestamp``. Prior to this version, it would not
render "NOT NULL" for a TIMESTAMP column that is ``nullable=False``.
""" # noqa
from array import array as _array
from collections import defaultdict
from itertools import compress
import re
from sqlalchemy import literal_column
from sqlalchemy import text
from sqlalchemy.sql import visitors
from . import reflection as _reflection
from .enumerated import ENUM
from .enumerated import SET
from .json import JSON
from .json import JSONIndexType
from .json import JSONPathType
from .types import _FloatType
from .types import _IntegerType
from .types import _MatchType
from .types import _NumericType
from .types import _StringType
from .types import BIGINT
from .types import BIT
from .types import CHAR
from .types import DATETIME
from .types import DECIMAL
from .types import DOUBLE
from .types import FLOAT
from .types import INTEGER
from .types import LONGBLOB
from .types import LONGTEXT
from .types import MEDIUMBLOB
from .types import MEDIUMINT
from .types import MEDIUMTEXT
from .types import NCHAR
from .types import NUMERIC
from .types import NVARCHAR
from .types import REAL
from .types import SMALLINT
from .types import TEXT
from .types import TIME
from .types import TIMESTAMP
from .types import TINYBLOB
from .types import TINYINT
from .types import TINYTEXT
from .types import VARCHAR
from .types import YEAR
from ... import exc
from ... import log
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import coercions
from ...sql import compiler
from ...sql import elements
from ...sql import functions
from ...sql import operators
from ...sql import roles
from ...sql import util as sql_util
from ...sql.sqltypes import Unicode
from ...types import BINARY
from ...types import BLOB
from ...types import BOOLEAN
from ...types import DATE
from ...types import VARBINARY
from ...util import topological
RESERVED_WORDS = set(
[
"accessible",
"action",
"add",
"admin",
"all",
"alter",
"analyze",
"and",
"array", # 8.0
"as",
"asc",
"asensitive",
"before",
"between",
"bigint",
"binary",
"blob",
"both",
"by",
"call",
"cascade",
"case",
"change",
"char",
"character",
"check",
"collate",
"column",
"columns",
"condition",
"constraint",
"continue",
"convert",
"create",
"cross",
"cube",
"cume_dist",
"current_date",
"current_time",
"current_timestamp",
"current_user",
"cursor",
"database",
"databases",
"day_hour",
"day_microsecond",
"day_minute",
"day_second",
"dec",
"decimal",
"declare",
"default",
"delayed",
"delete",
"desc",
"describe",
"deterministic",
"distinct",
"distinctrow",
"div",
"double",
"drop",
"dual",
"each",
"else",
"elseif",
"empty",
"enclosed",
"escaped",
"except",
"exists",
"exit",
"explain",
"false",
"fetch",
"fields",
"first_value",
"float",
"float4",
"float8",
"for",
"force",
"foreign",
"from",
"fulltext",
"function",
"general",
"generated",
"get",
"grant",
"group",
"grouping",
"groups",
"having",
"high_priority",
"hour_microsecond",
"hour_minute",
"hour_second",
"if",
"ignore",
"ignore_server_ids",
"in",
"index",
"infile",
"inner",
"inout",
"insensitive",
"insert",
"int",
"int1",
"int2",
"int3",
"int4",
"int8",
"integer",
"interval",
"into",
"io_after_gtids",
"io_before_gtids",
"is",
"iterate",
"join",
"json_table",
"key",
"keys",
"kill",
"last_value",
"lateral",
"leading",
"leave",
"left",
"level",
"like",
"limit",
"linear",
"linear",
"lines",
"load",
"localtime",
"localtimestamp",
"lock",
"long",
"longblob",
"longtext",
"loop",
"low_priority",
"master_bind",
"master_heartbeat_period",
"master_ssl_verify_server_cert",
"master_ssl_verify_server_cert",
"match",
"maxvalue",
"mediumblob",
"mediumint",
"mediumtext",
"member", # 8.0
"middleint",
"minute_microsecond",
"minute_second",
"mod",
"mode",
"modifies",
"natural",
"no_write_to_binlog",
"not",
"nth_value",
"ntile",
"null",
"numeric",
"of",
"on",
"one_shot",
"optimize",
"optimizer_costs",
"option",
"optionally",
"or",
"order",
"out",
"outer",
"outfile",
"over",
"partition",
"percent_rank",
"persist",
"persist_only",
"precision",
"primary",
"privileges",
"procedure",
"purge",
"range",
"range",
"rank",
"read",
"read_only",
"read_only",
"read_write",
"read_write", # 5.1
"reads",
"real",
"recursive",
"references",
"regexp",
"release",
"rename",
"repeat",
"replace",
"require",
"resignal",
"restrict",
"return",
"revoke",
"right",
"rlike",
"role",
"row",
"row_number",
"rows",
"schema",
"schemas",
"second_microsecond",
"select",
"sensitive",
"separator",
"set",
"show",
"signal",
"slow", # 5.5
"smallint",
"soname",
"spatial",
"specific",
"sql",
"sql_after_gtids",
"sql_before_gtids", # 5.6
"sql_big_result",
"sql_calc_found_rows",
"sql_small_result",
"sqlexception",
"sqlstate",
"sqlwarning",
"ssl",
"starting",
"status",
"stored",
"straight_join",
"system",
"table",
"tables", # 4.1
"terminated",
"text",
"then",
"time",
"tinyblob",
"tinyint",
"tinytext",
"to",
"trailing",
"trigger",
"true",
"undo",
"union",
"unique",
"unlock",
"unsigned",
"update",
"usage",
"use",
"using",
"utc_date",
"utc_time",
"utc_timestamp",
"values",
"varbinary",
"varchar",
"varcharacter",
"varying",
"virtual", # 5.7
"when",
"where",
"while",
"window", # 8.0
"with",
"write",
"x509",
"xor",
"year_month",
"zerofill", # 5.0
]
)
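# AUTOCOMMIT_RE detects DML/DDL statements that should trigger autocommit
# under legacy autocommit detection (see
# MySQLExecutionContext.should_autocommit_text()); SET_RE matches SET
# statements, optionally scoped with GLOBAL or SESSION.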
AUTOCOMMIT_RE = re.compile(
r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)",
re.I | re.UNICODE,
)
SET_RE = re.compile(
r"\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w", re.I | re.UNICODE
)
# old names
MSTime = TIME
MSSet = SET
MSEnum = ENUM
MSLongBlob = LONGBLOB
MSMediumBlob = MEDIUMBLOB
MSTinyBlob = TINYBLOB
MSBlob = BLOB
MSBinary = BINARY
MSVarBinary = VARBINARY
MSNChar = NCHAR
MSNVarChar = NVARCHAR
MSChar = CHAR
MSString = VARCHAR
MSLongText = LONGTEXT
MSMediumText = MEDIUMTEXT
MSTinyText = TINYTEXT
MSText = TEXT
MSYear = YEAR
MSTimeStamp = TIMESTAMP
MSBit = BIT
MSSmallInteger = SMALLINT
MSTinyInteger = TINYINT
MSMediumInteger = MEDIUMINT
MSBigInteger = BIGINT
MSNumeric = NUMERIC
MSDecimal = DECIMAL
MSDouble = DOUBLE
MSReal = REAL
MSFloat = FLOAT
MSInteger = INTEGER
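# map generic SQLAlchemy types (and the shared MySQL variant base classes)
# to their MySQL-specific implementations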
colspecs = {
_IntegerType: _IntegerType,
_NumericType: _NumericType,
_FloatType: _FloatType,
sqltypes.Numeric: NUMERIC,
sqltypes.Float: FLOAT,
sqltypes.Time: TIME,
sqltypes.Enum: ENUM,
sqltypes.MatchType: _MatchType,
sqltypes.JSON: JSON,
sqltypes.JSON.JSONIndexType: JSONIndexType,
sqltypes.JSON.JSONPathType: JSONPathType,
}
# Everything 3.23 through 5.1 excepting OpenGIS types.
ischema_names = {
"bigint": BIGINT,
"binary": BINARY,
"bit": BIT,
"blob": BLOB,
"boolean": BOOLEAN,
"char": CHAR,
"date": DATE,
"datetime": DATETIME,
"decimal": DECIMAL,
"double": DOUBLE,
"enum": ENUM,
"fixed": DECIMAL,
"float": FLOAT,
"int": INTEGER,
"integer": INTEGER,
"json": JSON,
"longblob": LONGBLOB,
"longtext": LONGTEXT,
"mediumblob": MEDIUMBLOB,
"mediumint": MEDIUMINT,
"mediumtext": MEDIUMTEXT,
"nchar": NCHAR,
"nvarchar": NVARCHAR,
"numeric": NUMERIC,
"set": SET,
"smallint": SMALLINT,
"text": TEXT,
"time": TIME,
"timestamp": TIMESTAMP,
"tinyblob": TINYBLOB,
"tinyint": TINYINT,
"tinytext": TINYTEXT,
"varbinary": VARBINARY,
"varchar": VARCHAR,
"year": YEAR,
}
class MySQLExecutionContext(default.DefaultExecutionContext):
def should_autocommit_text(self, statement):
return AUTOCOMMIT_RE.match(statement)
def create_server_side_cursor(self):
if self.dialect.supports_server_side_cursors:
return self._dbapi_connection.cursor(self.dialect._sscursor)
else:
raise NotImplementedError()
def fire_sequence(self, seq, type_):
return self._execute_scalar(
(
"select nextval(%s)"
% self.identifier_preparer.format_sequence(seq)
),
type_,
)
class MySQLCompiler(compiler.SQLCompiler):
render_table_with_column_in_update_from = True
"""Overridden from base SQLCompiler value"""
extract_map = compiler.SQLCompiler.extract_map.copy()
extract_map.update({"milliseconds": "millisecond"})
def default_from(self):
"""Called when a ``SELECT`` statement has no froms,
and no ``FROM`` clause is to be appended.
"""
if self.stack:
stmt = self.stack[-1]["selectable"]
if stmt._where_criteria:
return " FROM DUAL"
return ""
def visit_random_func(self, fn, **kw):
return "rand%s" % self.function_argspec(fn)
def visit_sequence(self, seq, **kw):
return "nextval(%s)" % self.preparer.format_sequence(seq)
def visit_sysdate_func(self, fn, **kw):
return "SYSDATE()"
def _render_json_extract_from_binary(self, binary, operator, **kw):
# note we are intentionally calling upon the process() calls in the
# order in which they appear in the SQL String as this is used
# by positional parameter rendering
if binary.type._type_affinity is sqltypes.JSON:
return "JSON_EXTRACT(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
# for non-JSON, MySQL doesn't handle JSON null at all so it has to
# be explicit
case_expression = "CASE JSON_EXTRACT(%s, %s) WHEN 'null' THEN NULL" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
if binary.type._type_affinity is sqltypes.Integer:
type_expression = (
"ELSE CAST(JSON_EXTRACT(%s, %s) AS SIGNED INTEGER)"
% (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
)
elif binary.type._type_affinity is sqltypes.Numeric:
if (
binary.type.scale is not None
and binary.type.precision is not None
):
# using DECIMAL here because MySQL does not recognize NUMERIC
type_expression = (
"ELSE CAST(JSON_EXTRACT(%s, %s) AS DECIMAL(%s, %s))"
% (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
binary.type.precision,
binary.type.scale,
)
)
else:
# FLOAT / REAL not added in MySQL til 8.0.17
type_expression = (
"ELSE JSON_EXTRACT(%s, %s)+0.0000000000000000000000"
% (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
)
elif binary.type._type_affinity is sqltypes.Boolean:
# the NULL handling is particularly weird with boolean, so
# explicitly return true/false constants
type_expression = "WHEN true THEN true ELSE false"
elif binary.type._type_affinity is sqltypes.String:
# (gord): this fails with a JSON value that's a four byte unicode
# string. SQLite has the same problem at the moment
# (zzzeek): I'm not really sure. let's take a look at a test case
# that hits each backend and maybe make a requires rule for it?
type_expression = "ELSE JSON_UNQUOTE(JSON_EXTRACT(%s, %s))" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
else:
# other affinity....this is not expected right now
type_expression = "ELSE JSON_EXTRACT(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
return case_expression + " " + type_expression + " END"
def visit_json_getitem_op_binary(self, binary, operator, **kw):
return self._render_json_extract_from_binary(binary, operator, **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self._render_json_extract_from_binary(binary, operator, **kw)
def visit_on_duplicate_key_update(self, on_duplicate, **kw):
statement = self.current_executable
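        # when a list of 2-tuples was passed to on_duplicate_key_update(),
        # render the explicitly ordered columns first, followed by the
        # remaining table columns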
if on_duplicate._parameter_ordering:
parameter_ordering = [
coercions.expect(roles.DMLColumnRole, key)
for key in on_duplicate._parameter_ordering
]
ordered_keys = set(parameter_ordering)
cols = [
statement.table.c[key]
for key in parameter_ordering
if key in statement.table.c
] + [c for c in statement.table.c if c.key not in ordered_keys]
else:
cols = statement.table.c
clauses = []
# traverses through all table columns to preserve table column order
for column in (col for col in cols if col.key in on_duplicate.update):
val = on_duplicate.update[column.key]
if coercions._is_literal(val):
val = elements.BindParameter(None, val, type_=column.type)
value_text = self.process(val.self_group(), use_schema=False)
else:
def replace(obj):
if (
isinstance(obj, elements.BindParameter)
and obj.type._isnull
):
obj = obj._clone()
obj.type = column.type
return obj
elif (
isinstance(obj, elements.ColumnClause)
and obj.table is on_duplicate.inserted_alias
):
obj = literal_column(
"VALUES(" + self.preparer.quote(column.name) + ")"
)
return obj
else:
# element is not replaced
return None
val = visitors.replacement_traverse(val, {}, replace)
value_text = self.process(val.self_group(), use_schema=False)
name_text = self.preparer.quote(column.name)
clauses.append("%s = %s" % (name_text, value_text))
non_matching = set(on_duplicate.update) - set(c.key for c in cols)
if non_matching:
util.warn(
"Additional column names not matching "
"any column keys in table '%s': %s"
% (
self.statement.table.name,
(", ".join("'%s'" % c for c in non_matching)),
)
)
return "ON DUPLICATE KEY UPDATE " + ", ".join(clauses)
def visit_concat_op_binary(self, binary, operator, **kw):
return "concat(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
_match_valid_flag_combinations = frozenset(
(
# (boolean_mode, natural_language, query_expansion)
(False, False, False),
(True, False, False),
(False, True, False),
(False, False, True),
(False, True, True),
)
)
_match_flag_expressions = (
"IN BOOLEAN MODE",
"IN NATURAL LANGUAGE MODE",
"WITH QUERY EXPANSION",
)
def visit_mysql_match(self, element, **kw):
return self.visit_match_op_binary(element, element.operator, **kw)
def visit_match_op_binary(self, binary, operator, **kw):
"""
Note that `mysql_boolean_mode` is enabled by default because of
backward compatibility
"""
modifiers = binary.modifiers
boolean_mode = modifiers.get("mysql_boolean_mode", True)
natural_language = modifiers.get("mysql_natural_language", False)
query_expansion = modifiers.get("mysql_query_expansion", False)
flag_combination = (boolean_mode, natural_language, query_expansion)
if flag_combination not in self._match_valid_flag_combinations:
flags = (
"in_boolean_mode=%s" % boolean_mode,
"in_natural_language_mode=%s" % natural_language,
"with_query_expansion=%s" % query_expansion,
)
flags = ", ".join(flags)
raise exc.CompileError("Invalid MySQL match flags: %s" % flags)
match_clause = binary.left
match_clause = self.process(match_clause, **kw)
against_clause = self.process(binary.right, **kw)
if any(flag_combination):
flag_expressions = compress(
self._match_flag_expressions,
flag_combination,
)
against_clause = [against_clause]
against_clause.extend(flag_expressions)
against_clause = " ".join(against_clause)
return "MATCH (%s) AGAINST (%s)" % (match_clause, against_clause)
def get_from_hint_text(self, table, text):
return text
def visit_typeclause(self, typeclause, type_=None, **kw):
if type_ is None:
type_ = typeclause.type.dialect_impl(self.dialect)
if isinstance(type_, sqltypes.TypeDecorator):
return self.visit_typeclause(typeclause, type_.impl, **kw)
elif isinstance(type_, sqltypes.Integer):
if getattr(type_, "unsigned", False):
return "UNSIGNED INTEGER"
else:
return "SIGNED INTEGER"
elif isinstance(type_, sqltypes.TIMESTAMP):
return "DATETIME"
elif isinstance(
type_,
(
sqltypes.DECIMAL,
sqltypes.DateTime,
sqltypes.Date,
sqltypes.Time,
),
):
return self.dialect.type_compiler.process(type_)
elif isinstance(type_, sqltypes.String) and not isinstance(
type_, (ENUM, SET)
):
adapted = CHAR._adapt_string_for_cast(type_)
return self.dialect.type_compiler.process(adapted)
elif isinstance(type_, sqltypes._Binary):
return "BINARY"
elif isinstance(type_, sqltypes.JSON):
return "JSON"
elif isinstance(type_, sqltypes.NUMERIC):
return self.dialect.type_compiler.process(type_).replace(
"NUMERIC", "DECIMAL"
)
elif (
isinstance(type_, sqltypes.Float)
and self.dialect._support_float_cast
):
return self.dialect.type_compiler.process(type_)
else:
return None
def visit_cast(self, cast, **kw):
type_ = self.process(cast.typeclause)
if type_ is None:
util.warn(
"Datatype %s does not support CAST on MySQL/MariaDb; "
"the CAST will be skipped."
% self.dialect.type_compiler.process(cast.typeclause.type)
)
return self.process(cast.clause.self_group(), **kw)
return "CAST(%s AS %s)" % (self.process(cast.clause, **kw), type_)
def render_literal_value(self, value, type_):
value = super(MySQLCompiler, self).render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace("\\", "\\\\")
return value
# override native_boolean=False behavior here, as
# MySQL still supports native boolean
def visit_true(self, element, **kw):
return "true"
def visit_false(self, element, **kw):
return "false"
def get_select_precolumns(self, select, **kw):
"""Add special MySQL keywords in place of DISTINCT.
        .. deprecated:: 1.4 This usage is deprecated.
:meth:`_expression.Select.prefix_with` should be used for special
keywords at the start of a SELECT.
"""
if isinstance(select._distinct, util.string_types):
util.warn_deprecated(
"Sending string values for 'distinct' is deprecated in the "
"MySQL dialect and will be removed in a future release. "
"Please use :meth:`.Select.prefix_with` for special keywords "
"at the start of a SELECT statement",
version="1.4",
)
return select._distinct.upper() + " "
return super(MySQLCompiler, self).get_select_precolumns(select, **kw)
def visit_join(self, join, asfrom=False, from_linter=None, **kwargs):
if from_linter:
from_linter.edges.add((join.left, join.right))
if join.full:
join_type = " FULL OUTER JOIN "
elif join.isouter:
join_type = " LEFT OUTER JOIN "
else:
join_type = " INNER JOIN "
return "".join(
(
self.process(
join.left, asfrom=True, from_linter=from_linter, **kwargs
),
join_type,
self.process(
join.right, asfrom=True, from_linter=from_linter, **kwargs
),
" ON ",
self.process(join.onclause, from_linter=from_linter, **kwargs),
)
)
def for_update_clause(self, select, **kw):
if select._for_update_arg.read:
tmp = " LOCK IN SHARE MODE"
else:
tmp = " FOR UPDATE"
if select._for_update_arg.of and self.dialect.supports_for_update_of:
tables = util.OrderedSet()
for c in select._for_update_arg.of:
tables.update(sql_util.surface_selectables_only(c))
tmp += " OF " + ", ".join(
self.process(table, ashint=True, use_schema=False, **kw)
for table in tables
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
if select._for_update_arg.skip_locked:
tmp += " SKIP LOCKED"
return tmp
def limit_clause(self, select, **kw):
# MySQL supports:
# LIMIT <limit>
# LIMIT <offset>, <limit>
# and in server versions > 3.3:
# LIMIT <limit> OFFSET <offset>
# The latter is more readable for offsets but we're stuck with the
# former until we can refine dialects by server revision.
limit_clause, offset_clause = (
select._limit_clause,
select._offset_clause,
)
if limit_clause is None and offset_clause is None:
return ""
elif offset_clause is not None:
# As suggested by the MySQL docs, need to apply an
# artificial limit if one wasn't provided
# https://dev.mysql.com/doc/refman/5.0/en/select.html
if limit_clause is None:
# hardwire the upper limit. Currently
# needed by OurSQL with Python 3
# (https://bugs.launchpad.net/oursql/+bug/686232),
# but also is consistent with the usage of the upper
# bound as part of MySQL's "syntax" for OFFSET with
# no LIMIT
return " \n LIMIT %s, %s" % (
self.process(offset_clause, **kw),
"18446744073709551615",
)
else:
return " \n LIMIT %s, %s" % (
self.process(offset_clause, **kw),
self.process(limit_clause, **kw),
)
else:
# No offset provided, so just use the limit
return " \n LIMIT %s" % (self.process(limit_clause, **kw),)
def update_limit_clause(self, update_stmt):
limit = update_stmt.kwargs.get("%s_limit" % self.dialect.name, None)
if limit:
return "LIMIT %s" % limit
else:
return None
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
kw["asfrom"] = True
return ", ".join(
t._compiler_dispatch(self, **kw)
for t in [from_table] + list(extra_froms)
)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
return None
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
"""If we have extra froms make sure we render any alias as hint."""
ashint = False
if extra_froms:
ashint = True
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, ashint=ashint
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. USING clause specific to MySQL."""
kw["asfrom"] = True
return "USING " + ", ".join(
t._compiler_dispatch(self, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
def visit_empty_set_expr(self, element_types):
return (
"SELECT %(outer)s FROM (SELECT %(inner)s) "
"as _empty_set WHERE 1!=1"
% {
"inner": ", ".join(
"1 AS _in_%s" % idx
for idx, type_ in enumerate(element_types)
),
"outer": ", ".join(
"_in_%s" % idx for idx, type_ in enumerate(element_types)
),
}
)
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "NOT (%s <=> %s)" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_is_not_distinct_from_binary(self, binary, operator, **kw):
return "%s <=> %s" % (
self.process(binary.left),
self.process(binary.right),
)
def _mariadb_regexp_flags(self, flags, pattern, **kw):
return "CONCAT('(?', %s, ')', %s)" % (
self.process(flags, **kw),
self.process(pattern, **kw),
)
def _regexp_match(self, op_string, binary, operator, **kw):
flags = binary.modifiers["flags"]
if flags is None:
return self._generate_generic_binary(binary, op_string, **kw)
elif self.dialect.is_mariadb:
return "%s%s%s" % (
self.process(binary.left, **kw),
op_string,
self._mariadb_regexp_flags(flags, binary.right),
)
else:
text = "REGEXP_LIKE(%s, %s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
self.process(flags, **kw),
)
if op_string == " NOT REGEXP ":
return "NOT %s" % text
else:
return text
def visit_regexp_match_op_binary(self, binary, operator, **kw):
return self._regexp_match(" REGEXP ", binary, operator, **kw)
def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
return self._regexp_match(" NOT REGEXP ", binary, operator, **kw)
def visit_regexp_replace_op_binary(self, binary, operator, **kw):
flags = binary.modifiers["flags"]
replacement = binary.modifiers["replacement"]
if flags is None:
return "REGEXP_REPLACE(%s, %s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
self.process(replacement, **kw),
)
elif self.dialect.is_mariadb:
return "REGEXP_REPLACE(%s, %s, %s)" % (
self.process(binary.left, **kw),
self._mariadb_regexp_flags(flags, binary.right),
self.process(replacement, **kw),
)
else:
return "REGEXP_REPLACE(%s, %s, %s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
self.process(replacement, **kw),
self.process(flags, **kw),
)
class MySQLDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kw):
"""Builds column DDL."""
colspec = [
self.preparer.format_column(column),
self.dialect.type_compiler.process(
column.type, type_expression=column
),
]
if column.computed is not None:
colspec.append(self.process(column.computed))
is_timestamp = isinstance(
column.type._unwrapped_dialect_impl(self.dialect),
sqltypes.TIMESTAMP,
)
if not column.nullable:
colspec.append("NOT NULL")
# see: https://docs.sqlalchemy.org/en/latest/dialects/mysql.html#mysql_timestamp_null # noqa
elif column.nullable and is_timestamp:
colspec.append("NULL")
comment = column.comment
if comment is not None:
literal = self.sql_compiler.render_literal_value(
comment, sqltypes.String()
)
colspec.append("COMMENT " + literal)
if (
column.table is not None
and column is column.table._autoincrement_column
and (
column.server_default is None
or isinstance(column.server_default, sa_schema.Identity)
)
and not (
self.dialect.supports_sequences
and isinstance(column.default, sa_schema.Sequence)
and not column.default.optional
)
):
colspec.append("AUTO_INCREMENT")
else:
default = self.get_column_default_string(column)
if default is not None:
colspec.append("DEFAULT " + default)
return " ".join(colspec)
def post_create_table(self, table):
"""Build table-level CREATE options like ENGINE and COLLATE."""
table_opts = []
opts = dict(
(k[len(self.dialect.name) + 1 :].upper(), v)
for k, v in table.kwargs.items()
if k.startswith("%s_" % self.dialect.name)
)
if table.comment is not None:
opts["COMMENT"] = table.comment
partition_options = [
"PARTITION_BY",
"PARTITIONS",
"SUBPARTITIONS",
"SUBPARTITION_BY",
]
nonpart_options = set(opts).difference(partition_options)
part_options = set(opts).intersection(partition_options)
for opt in topological.sort(
[
("DEFAULT_CHARSET", "COLLATE"),
("DEFAULT_CHARACTER_SET", "COLLATE"),
("CHARSET", "COLLATE"),
("CHARACTER_SET", "COLLATE"),
],
nonpart_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(
arg, sqltypes.String()
)
if opt in (
"DATA_DIRECTORY",
"INDEX_DIRECTORY",
"DEFAULT_CHARACTER_SET",
"CHARACTER_SET",
"DEFAULT_CHARSET",
"DEFAULT_COLLATE",
):
opt = opt.replace("_", " ")
joiner = "="
if opt in (
"TABLESPACE",
"DEFAULT CHARACTER SET",
"CHARACTER SET",
"COLLATE",
):
joiner = " "
table_opts.append(joiner.join((opt, arg)))
for opt in topological.sort(
[
("PARTITION_BY", "PARTITIONS"),
("PARTITION_BY", "SUBPARTITION_BY"),
("PARTITION_BY", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITION_BY"),
("SUBPARTITION_BY", "SUBPARTITIONS"),
],
part_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(
arg, sqltypes.String()
)
opt = opt.replace("_", " ")
joiner = " "
table_opts.append(joiner.join((opt, arg)))
return " ".join(table_opts)
def visit_create_index(self, create, **kw):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
table = preparer.format_table(index.table)
columns = [
self.sql_compiler.process(
elements.Grouping(expr)
if (
isinstance(expr, elements.BinaryExpression)
or (
isinstance(expr, elements.UnaryExpression)
and expr.modifier
not in (operators.desc_op, operators.asc_op)
)
or isinstance(expr, functions.FunctionElement)
)
else expr,
include_table=False,
literal_binds=True,
)
for expr in index.expressions
]
name = self._prepared_index_name(index)
text = "CREATE "
if index.unique:
text += "UNIQUE "
index_prefix = index.kwargs.get("%s_prefix" % self.dialect.name, None)
if index_prefix:
text += index_prefix + " "
text += "INDEX "
if create.if_not_exists:
text += "IF NOT EXISTS "
text += "%s ON %s " % (name, table)
length = index.dialect_options[self.dialect.name]["length"]
if length is not None:
if isinstance(length, dict):
# length value can be a (column_name --> integer value)
# mapping specifying the prefix length for each column of the
# index
columns = ", ".join(
"%s(%d)" % (expr, length[col.name])
if col.name in length
else (
"%s(%d)" % (expr, length[expr])
if expr in length
else "%s" % expr
)
for col, expr in zip(index.expressions, columns)
)
else:
# or can be an integer value specifying the same
# prefix length for all columns of the index
columns = ", ".join(
"%s(%d)" % (col, length) for col in columns
)
else:
columns = ", ".join(columns)
text += "(%s)" % columns
parser = index.dialect_options["mysql"]["with_parser"]
if parser is not None:
text += " WITH PARSER %s" % (parser,)
using = index.dialect_options["mysql"]["using"]
if using is not None:
text += " USING %s" % (preparer.quote(using))
return text
def visit_primary_key_constraint(self, constraint):
text = super(MySQLDDLCompiler, self).visit_primary_key_constraint(
constraint
)
using = constraint.dialect_options["mysql"]["using"]
if using:
text += " USING %s" % (self.preparer.quote(using))
return text
def visit_drop_index(self, drop):
index = drop.element
text = "\nDROP INDEX "
if drop.if_exists:
text += "IF EXISTS "
return text + "%s ON %s" % (
self._prepared_index_name(index, include_schema=False),
self.preparer.format_table(index.table),
)
def visit_drop_constraint(self, drop):
constraint = drop.element
if isinstance(constraint, sa_schema.ForeignKeyConstraint):
qual = "FOREIGN KEY "
const = self.preparer.format_constraint(constraint)
elif isinstance(constraint, sa_schema.PrimaryKeyConstraint):
qual = "PRIMARY KEY "
const = ""
elif isinstance(constraint, sa_schema.UniqueConstraint):
qual = "INDEX "
const = self.preparer.format_constraint(constraint)
elif isinstance(constraint, sa_schema.CheckConstraint):
if self.dialect.is_mariadb:
qual = "CONSTRAINT "
else:
qual = "CHECK "
const = self.preparer.format_constraint(constraint)
else:
qual = ""
const = self.preparer.format_constraint(constraint)
return "ALTER TABLE %s DROP %s%s" % (
self.preparer.format_table(constraint.table),
qual,
const,
)
def define_constraint_match(self, constraint):
if constraint.match is not None:
raise exc.CompileError(
"MySQL ignores the 'MATCH' keyword while at the same time "
"causes ON UPDATE/ON DELETE clauses to be ignored."
)
return ""
def visit_set_table_comment(self, create):
return "ALTER TABLE %s COMMENT %s" % (
self.preparer.format_table(create.element),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_table_comment(self, create):
return "ALTER TABLE %s COMMENT ''" % (
self.preparer.format_table(create.element)
)
def visit_set_column_comment(self, create):
return "ALTER TABLE %s CHANGE %s %s" % (
self.preparer.format_table(create.element.table),
self.preparer.format_column(create.element),
self.get_column_specification(create.element),
)
class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def _extend_numeric(self, type_, spec):
"Extend a numeric-type declaration with MySQL specific extensions."
if not self._mysql_type(type_):
return spec
if type_.unsigned:
spec += " UNSIGNED"
if type_.zerofill:
spec += " ZEROFILL"
return spec
def _extend_string(self, type_, defaults, spec):
"""Extend a string-type declaration with standard SQL CHARACTER SET /
COLLATE annotations and MySQL specific extensions.
"""
def attr(name):
return getattr(type_, name, defaults.get(name))
if attr("charset"):
charset = "CHARACTER SET %s" % attr("charset")
elif attr("ascii"):
charset = "ASCII"
elif attr("unicode"):
charset = "UNICODE"
else:
charset = None
if attr("collation"):
collation = "COLLATE %s" % type_.collation
elif attr("binary"):
collation = "BINARY"
else:
collation = None
if attr("national"):
# NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
return " ".join(
[c for c in ("NATIONAL", spec, collation) if c is not None]
)
return " ".join(
[c for c in (spec, charset, collation) if c is not None]
)
def _mysql_type(self, type_):
return isinstance(type_, (_StringType, _NumericType))
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return self._extend_numeric(type_, "NUMERIC")
elif type_.scale is None:
return self._extend_numeric(
type_,
"NUMERIC(%(precision)s)" % {"precision": type_.precision},
)
else:
return self._extend_numeric(
type_,
"NUMERIC(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return self._extend_numeric(type_, "DECIMAL")
elif type_.scale is None:
return self._extend_numeric(
type_,
"DECIMAL(%(precision)s)" % {"precision": type_.precision},
)
else:
return self._extend_numeric(
type_,
"DECIMAL(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
def visit_DOUBLE(self, type_, **kw):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(
type_,
"DOUBLE(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
else:
return self._extend_numeric(type_, "DOUBLE")
def visit_REAL(self, type_, **kw):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(
type_,
"REAL(%(precision)s, %(scale)s)"
% {"precision": type_.precision, "scale": type_.scale},
)
else:
return self._extend_numeric(type_, "REAL")
def visit_FLOAT(self, type_, **kw):
if (
self._mysql_type(type_)
and type_.scale is not None
and type_.precision is not None
):
return self._extend_numeric(
type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale)
)
elif type_.precision is not None:
return self._extend_numeric(
type_, "FLOAT(%s)" % (type_.precision,)
)
else:
return self._extend_numeric(type_, "FLOAT")
def visit_INTEGER(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"INTEGER(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "INTEGER")
def visit_BIGINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"BIGINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "BIGINT")
def visit_MEDIUMINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"MEDIUMINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "MEDIUMINT")
def visit_TINYINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_, "TINYINT(%s)" % type_.display_width
)
else:
return self._extend_numeric(type_, "TINYINT")
def visit_SMALLINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_,
"SMALLINT(%(display_width)s)"
% {"display_width": type_.display_width},
)
else:
return self._extend_numeric(type_, "SMALLINT")
def visit_BIT(self, type_, **kw):
if type_.length is not None:
return "BIT(%s)" % type_.length
else:
return "BIT"
def visit_DATETIME(self, type_, **kw):
if getattr(type_, "fsp", None):
return "DATETIME(%d)" % type_.fsp
else:
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
if getattr(type_, "fsp", None):
return "TIME(%d)" % type_.fsp
else:
return "TIME"
def visit_TIMESTAMP(self, type_, **kw):
if getattr(type_, "fsp", None):
return "TIMESTAMP(%d)" % type_.fsp
else:
return "TIMESTAMP"
def visit_YEAR(self, type_, **kw):
if type_.display_width is None:
return "YEAR"
else:
return "YEAR(%s)" % type_.display_width
def visit_TEXT(self, type_, **kw):
if type_.length:
return self._extend_string(type_, {}, "TEXT(%d)" % type_.length)
else:
return self._extend_string(type_, {}, "TEXT")
def visit_TINYTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "TINYTEXT")
def visit_MEDIUMTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "MEDIUMTEXT")
def visit_LONGTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "LONGTEXT")
def visit_VARCHAR(self, type_, **kw):
if type_.length:
return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
else:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" % self.dialect.name
)
def visit_CHAR(self, type_, **kw):
if type_.length:
return self._extend_string(
type_, {}, "CHAR(%(length)s)" % {"length": type_.length}
)
else:
return self._extend_string(type_, {}, "CHAR")
def visit_NVARCHAR(self, type_, **kw):
# We'll actually generate the equiv. "NATIONAL VARCHAR" instead
# of "NVARCHAR".
if type_.length:
return self._extend_string(
type_,
{"national": True},
"VARCHAR(%(length)s)" % {"length": type_.length},
)
else:
raise exc.CompileError(
"NVARCHAR requires a length on dialect %s" % self.dialect.name
)
def visit_NCHAR(self, type_, **kw):
# We'll actually generate the equiv.
# "NATIONAL CHAR" instead of "NCHAR".
if type_.length:
return self._extend_string(
type_,
{"national": True},
"CHAR(%(length)s)" % {"length": type_.length},
)
else:
return self._extend_string(type_, {"national": True}, "CHAR")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY(%d)" % type_.length
def visit_JSON(self, type_, **kw):
return "JSON"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
def visit_enum(self, type_, **kw):
if not type_.native_enum:
return super(MySQLTypeCompiler, self).visit_enum(type_)
else:
return self._visit_enumerated_values("ENUM", type_, type_.enums)
def visit_BLOB(self, type_, **kw):
if type_.length:
return "BLOB(%d)" % type_.length
else:
return "BLOB"
def visit_TINYBLOB(self, type_, **kw):
return "TINYBLOB"
def visit_MEDIUMBLOB(self, type_, **kw):
return "MEDIUMBLOB"
def visit_LONGBLOB(self, type_, **kw):
return "LONGBLOB"
def _visit_enumerated_values(self, name, type_, enumerated_values):
quoted_enums = []
for e in enumerated_values:
quoted_enums.append("'%s'" % e.replace("'", "''"))
return self._extend_string(
type_, {}, "%s(%s)" % (name, ",".join(quoted_enums))
)
def visit_ENUM(self, type_, **kw):
return self._visit_enumerated_values("ENUM", type_, type_.enums)
def visit_SET(self, type_, **kw):
return self._visit_enumerated_values("SET", type_, type_.values)
def visit_BOOLEAN(self, type_, **kw):
return "BOOL"
class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect, server_ansiquotes=False, **kw):
if not server_ansiquotes:
quote = "`"
else:
quote = '"'
super(MySQLIdentifierPreparer, self).__init__(
dialect, initial_quote=quote, escape_quote=quote
)
def _quote_free_identifiers(self, *ids):
"""Unilaterally identifier-quote any number of strings."""
return tuple([self.quote_identifier(i) for i in ids if i is not None])
@log.class_logger
class MySQLDialect(default.DefaultDialect):
"""Details of the MySQL dialect.
Not used directly in application code.
"""
name = "mysql"
supports_statement_cache = True
supports_alter = True
# MySQL has no true "boolean" type; we
# allow for the "true" and "false" keywords, however
supports_native_boolean = False
# identifiers are 64, however aliases can be 255...
max_identifier_length = 255
max_index_name_length = 64
max_constraint_name_length = 64
supports_native_enum = True
supports_sequences = False # default for MySQL ...
# ... may be updated to True for MariaDB 10.3+ in initialize()
sequences_optional = False
supports_for_update_of = False # default for MySQL ...
# ... may be updated to True for MySQL 8+ in initialize()
# MySQL doesn't support "DEFAULT VALUES" but *does* support
# "VALUES (DEFAULT)"
supports_default_values = False
supports_default_metavalue = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
supports_multivalues_insert = True
supports_comments = True
inline_comments = True
default_paramstyle = "format"
colspecs = colspecs
cte_follows_insert = True
statement_compiler = MySQLCompiler
ddl_compiler = MySQLDDLCompiler
type_compiler = MySQLTypeCompiler
ischema_names = ischema_names
preparer = MySQLIdentifierPreparer
is_mariadb = False
_mariadb_normalized_version_info = None
# default SQL compilation settings -
# these are modified upon initialize(),
# i.e. first connect
_backslash_escapes = True
_server_ansiquotes = False
construct_arguments = [
(sa_schema.Table, {"*": None}),
(sql.Update, {"limit": None}),
(sa_schema.PrimaryKeyConstraint, {"using": None}),
(
sa_schema.Index,
{
"using": None,
"length": None,
"prefix": None,
"with_parser": None,
},
),
]
def __init__(
self,
isolation_level=None,
json_serializer=None,
json_deserializer=None,
is_mariadb=None,
**kwargs
):
kwargs.pop("use_ansiquotes", None) # legacy
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_serializer = json_serializer
self._json_deserializer = json_deserializer
self._set_mariadb(is_mariadb, None)
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
]
)
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
# adjust for ConnectionFairy being present
# allows attribute set e.g. "connection.autocommit = True"
# to work properly
if hasattr(connection, "dbapi_connection"):
connection = connection.dbapi_connection
self._set_isolation_level(connection, level)
def _set_isolation_level(self, connection, level):
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level)
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
if self._is_mysql and self.server_version_info >= (5, 7, 20):
cursor.execute("SELECT @@transaction_isolation")
else:
cursor.execute("SELECT @@tx_isolation")
row = cursor.fetchone()
if row is None:
util.warn(
"Could not retrieve transaction isolation level for MySQL "
"connection."
)
raise NotImplementedError()
val = row[0]
cursor.close()
if util.py3k and isinstance(val, bytes):
val = val.decode()
return val.upper().replace("-", " ")
@classmethod
def _is_mariadb_from_url(cls, url):
dbapi = cls.dbapi()
dialect = cls(dbapi=dbapi)
cargs, cparams = dialect.create_connect_args(url)
conn = dialect.connect(*cargs, **cparams)
try:
cursor = conn.cursor()
cursor.execute("SELECT VERSION() LIKE '%MariaDB%'")
val = cursor.fetchone()[0]
except:
raise
else:
return bool(val)
finally:
conn.close()
def _get_server_version_info(self, connection):
# get database server version info explicitly over the wire
# to avoid proxy servers like MaxScale getting in the
# way with their own values, see #4205
dbapi_con = connection.connection
cursor = dbapi_con.cursor()
cursor.execute("SELECT VERSION()")
val = cursor.fetchone()[0]
cursor.close()
if util.py3k and isinstance(val, bytes):
val = val.decode()
return self._parse_server_version(val)
def _parse_server_version(self, val):
version = []
is_mariadb = False
r = re.compile(r"[.\-+]")
tokens = r.split(val)
for token in tokens:
parsed_token = re.match(
r"^(?:(\d+)(?:a|b|c)?|(MariaDB\w*))$", token
)
if not parsed_token:
continue
elif parsed_token.group(2):
self._mariadb_normalized_version_info = tuple(version[-3:])
is_mariadb = True
else:
digit = int(parsed_token.group(1))
version.append(digit)
server_version_info = tuple(version)
self._set_mariadb(server_version_info and is_mariadb, val)
if not is_mariadb:
self._mariadb_normalized_version_info = server_version_info
if server_version_info < (5, 0, 2):
raise NotImplementedError(
"the MySQL/MariaDB dialect supports server "
"version info 5.0.2 and above."
)
        # setting it here to help with the test suite
self.server_version_info = server_version_info
return server_version_info
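    # Illustrative sketch (an assumption, not part of the original source):
    # with the token split above, a hypothetical version string "5.7.20-log"
    # parses to server_version_info == (5, 7, 20), while a MariaDB-style
    # string such as "10.3.13-MariaDB" parses to (10, 3, 13) and also marks
    # the dialect as MariaDB via _set_mariadb().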
def _set_mariadb(self, is_mariadb, server_version_info):
if is_mariadb is None:
return
if not is_mariadb and self.is_mariadb:
raise exc.InvalidRequestError(
"MySQL version %s is not a MariaDB variant."
% (server_version_info,)
)
self.is_mariadb = is_mariadb
def do_begin_twophase(self, connection, xid):
connection.execute(sql.text("XA BEGIN :xid"), dict(xid=xid))
def do_prepare_twophase(self, connection, xid):
connection.execute(sql.text("XA END :xid"), dict(xid=xid))
connection.execute(sql.text("XA PREPARE :xid"), dict(xid=xid))
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
connection.execute(sql.text("XA END :xid"), dict(xid=xid))
connection.execute(sql.text("XA ROLLBACK :xid"), dict(xid=xid))
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
connection.execute(sql.text("XA COMMIT :xid"), dict(xid=xid))
def do_recover_twophase(self, connection):
resultset = connection.exec_driver_sql("XA RECOVER")
return [row["data"][0 : row["gtrid_length"]] for row in resultset]
def is_disconnect(self, e, connection, cursor):
if isinstance(
e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)
):
return self._extract_error_code(e) in (
1927,
2006,
2013,
2014,
2045,
2055,
)
elif isinstance(
e, (self.dbapi.InterfaceError, self.dbapi.InternalError)
):
# if underlying connection is closed,
# this is the error you get
return "(0, '')" in str(e)
else:
return False
def _compat_fetchall(self, rp, charset=None):
"""Proxy result rows to smooth over MySQL-Python driver
inconsistencies."""
return [_DecodingRow(row, charset) for row in rp.fetchall()]
def _compat_fetchone(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver
inconsistencies."""
row = rp.fetchone()
if row:
return _DecodingRow(row, charset)
else:
return None
def _compat_first(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver
inconsistencies."""
row = rp.first()
if row:
return _DecodingRow(row, charset)
else:
return None
def _extract_error_code(self, exception):
raise NotImplementedError()
def _get_default_schema_name(self, connection):
return connection.exec_driver_sql("SELECT DATABASE()").scalar()
def has_table(self, connection, table_name, schema=None):
self._ensure_has_table_connection(connection)
if schema is None:
schema = self.default_schema_name
rs = connection.execute(
text(
"SELECT COUNT(*) FROM information_schema.tables WHERE "
"table_schema = :table_schema AND "
"table_name = :table_name"
).bindparams(
sql.bindparam("table_schema", type_=Unicode),
sql.bindparam("table_name", type_=Unicode),
),
{
"table_schema": util.text_type(schema),
"table_name": util.text_type(table_name),
},
)
return bool(rs.scalar())
def has_sequence(self, connection, sequence_name, schema=None):
if not self.supports_sequences:
self._sequences_not_supported()
if not schema:
schema = self.default_schema_name
# MariaDB implements sequences as a special type of table
#
cursor = connection.execute(
sql.text(
"SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES "
"WHERE TABLE_TYPE='SEQUENCE' and TABLE_NAME=:name AND "
"TABLE_SCHEMA=:schema_name"
),
dict(
name=util.text_type(sequence_name),
schema_name=util.text_type(schema),
),
)
return cursor.first() is not None
def _sequences_not_supported(self):
raise NotImplementedError(
"Sequences are supported only by the "
"MariaDB series 10.3 or greater"
)
@reflection.cache
def get_sequence_names(self, connection, schema=None, **kw):
if not self.supports_sequences:
self._sequences_not_supported()
if not schema:
schema = self.default_schema_name
# MariaDB implements sequences as a special type of table
cursor = connection.execute(
sql.text(
"SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES "
"WHERE TABLE_TYPE='SEQUENCE' and TABLE_SCHEMA=:schema_name"
),
dict(schema_name=schema),
)
return [
row[0]
for row in self._compat_fetchall(
cursor, charset=self._connection_charset
)
]
def initialize(self, connection):
self._connection_charset = self._detect_charset(connection)
self._detect_sql_mode(connection)
self._detect_ansiquotes(connection)
self._detect_casing(connection)
if self._server_ansiquotes:
# if ansiquotes == True, build a new IdentifierPreparer
# with the new setting
self.identifier_preparer = self.preparer(
self, server_ansiquotes=self._server_ansiquotes
)
default.DefaultDialect.initialize(self, connection)
self.supports_sequences = (
self.is_mariadb and self.server_version_info >= (10, 3)
)
self.supports_for_update_of = (
self._is_mysql and self.server_version_info >= (8,)
)
self._needs_correct_for_88718_96365 = (
not self.is_mariadb and self.server_version_info >= (8,)
)
self._warn_for_known_db_issues()
def _warn_for_known_db_issues(self):
if self.is_mariadb:
mdb_version = self._mariadb_normalized_version_info
if mdb_version > (10, 2) and mdb_version < (10, 2, 9):
util.warn(
"MariaDB %r before 10.2.9 has known issues regarding "
"CHECK constraints, which impact handling of NULL values "
"with SQLAlchemy's boolean datatype (MDEV-13596). An "
"additional issue prevents proper migrations of columns "
"with CHECK constraints (MDEV-11114). Please upgrade to "
"MariaDB 10.2.9 or greater, or use the MariaDB 10.1 "
"series, to avoid these issues." % (mdb_version,)
)
@property
def _support_float_cast(self):
if not self.server_version_info:
return False
elif self.is_mariadb:
# ref https://mariadb.com/kb/en/mariadb-1045-release-notes/
return self.server_version_info >= (10, 4, 5)
else:
# ref https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-17.html#mysqld-8-0-17-feature # noqa
return self.server_version_info >= (8, 0, 17)
@property
def _is_mariadb(self):
return self.is_mariadb
@property
def _is_mysql(self):
return not self.is_mariadb
@property
def _is_mariadb_102(self):
return self.is_mariadb and self._mariadb_normalized_version_info > (
10,
2,
)
@reflection.cache
def get_schema_names(self, connection, **kw):
rp = connection.exec_driver_sql("SHOW schemas")
return [r[0] for r in rp]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
"""Return a Unicode SHOW TABLES from a given schema."""
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
charset = self._connection_charset
rp = connection.exec_driver_sql(
"SHOW FULL TABLES FROM %s"
% self.identifier_preparer.quote_identifier(current_schema)
)
return [
row[0]
for row in self._compat_fetchall(rp, charset=charset)
if row[1] == "BASE TABLE"
]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
charset = self._connection_charset
rp = connection.exec_driver_sql(
"SHOW FULL TABLES FROM %s"
% self.identifier_preparer.quote_identifier(schema)
)
return [
row[0]
for row in self._compat_fetchall(rp, charset=charset)
if row[1] in ("VIEW", "SYSTEM VIEW")
]
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return parsed_state.table_options
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return parsed_state.columns
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
for key in parsed_state.keys:
if key["type"] == "PRIMARY":
# There can be only one.
cols = [s[0] for s in key["columns"]]
return {"constrained_columns": cols, "name": None}
return {"constrained_columns": [], "name": None}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
default_schema = None
fkeys = []
for spec in parsed_state.fk_constraints:
ref_name = spec["table"][-1]
ref_schema = len(spec["table"]) > 1 and spec["table"][-2] or schema
if not ref_schema:
if default_schema is None:
default_schema = connection.dialect.default_schema_name
if schema == default_schema:
ref_schema = schema
loc_names = spec["local"]
ref_names = spec["foreign"]
con_kw = {}
for opt in ("onupdate", "ondelete"):
if spec.get(opt, False) not in ("NO ACTION", None):
con_kw[opt] = spec[opt]
fkey_d = {
"name": spec["name"],
"constrained_columns": loc_names,
"referred_schema": ref_schema,
"referred_table": ref_name,
"referred_columns": ref_names,
"options": con_kw,
}
fkeys.append(fkey_d)
if self._needs_correct_for_88718_96365:
self._correct_for_mysql_bugs_88718_96365(fkeys, connection)
return fkeys
def _correct_for_mysql_bugs_88718_96365(self, fkeys, connection):
# Foreign key is always in lower case (MySQL 8.0)
# https://bugs.mysql.com/bug.php?id=88718
# issue #4344 for SQLAlchemy
# table name also for MySQL 8.0
# https://bugs.mysql.com/bug.php?id=96365
# issue #4751 for SQLAlchemy
# for lower_case_table_names=2, information_schema.columns
# preserves the original table/schema casing, but SHOW CREATE
# TABLE does not. this problem is not in lower_case_table_names=1,
# but use case-insensitive matching for these two modes in any case.
if self._casing in (1, 2):
def lower(s):
return s.lower()
else:
# if on case sensitive, there can be two tables referenced
# with the same name different casing, so we need to use
# case-sensitive matching.
def lower(s):
return s
default_schema_name = connection.dialect.default_schema_name
col_tuples = [
(
lower(rec["referred_schema"] or default_schema_name),
lower(rec["referred_table"]),
col_name,
)
for rec in fkeys
for col_name in rec["referred_columns"]
]
if col_tuples:
correct_for_wrong_fk_case = connection.execute(
sql.text(
"""
select table_schema, table_name, column_name
from information_schema.columns
where (table_schema, table_name, lower(column_name)) in
:table_data;
"""
).bindparams(sql.bindparam("table_data", expanding=True)),
dict(table_data=col_tuples),
)
# in casing=0, table name and schema name come back in their
# exact case.
# in casing=1, table name and schema name come back in lower
# case.
# in casing=2, table name and schema name come back from the
# information_schema.columns view in the case
# that was used in CREATE DATABASE and CREATE TABLE, but
# SHOW CREATE TABLE converts them to *lower case*, therefore
# not matching. So for this case, case-insensitive lookup
# is necessary
d = defaultdict(dict)
for schema, tname, cname in correct_for_wrong_fk_case:
d[(lower(schema), lower(tname))]["SCHEMANAME"] = schema
d[(lower(schema), lower(tname))]["TABLENAME"] = tname
d[(lower(schema), lower(tname))][cname.lower()] = cname
for fkey in fkeys:
rec = d[
(
lower(fkey["referred_schema"] or default_schema_name),
lower(fkey["referred_table"]),
)
]
fkey["referred_table"] = rec["TABLENAME"]
if fkey["referred_schema"] is not None:
fkey["referred_schema"] = rec["SCHEMANAME"]
fkey["referred_columns"] = [
rec[col.lower()] for col in fkey["referred_columns"]
]
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return [
{"name": spec["name"], "sqltext": spec["sqltext"]}
for spec in parsed_state.ck_constraints
]
@reflection.cache
def get_table_comment(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return {
"text": parsed_state.table_options.get(
"%s_comment" % self.name, None
)
}
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
indexes = []
for spec in parsed_state.keys:
dialect_options = {}
unique = False
flavor = spec["type"]
if flavor == "PRIMARY":
continue
if flavor == "UNIQUE":
unique = True
elif flavor in ("FULLTEXT", "SPATIAL"):
dialect_options["%s_prefix" % self.name] = flavor
elif flavor is None:
pass
else:
self.logger.info(
"Converting unknown KEY type %s to a plain KEY", flavor
)
pass
if spec["parser"]:
dialect_options["%s_with_parser" % (self.name)] = spec[
"parser"
]
index_d = {}
if dialect_options:
index_d["dialect_options"] = dialect_options
index_d["name"] = spec["name"]
index_d["column_names"] = [s[0] for s in spec["columns"]]
index_d["unique"] = unique
if flavor:
index_d["type"] = flavor
indexes.append(index_d)
return indexes
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw
)
return [
{
"name": key["name"],
"column_names": [col[0] for col in key["columns"]],
"duplicates_index": key["name"],
}
for key in parsed_state.keys
if key["type"] == "UNIQUE"
]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
charset = self._connection_charset
full_name = ".".join(
self.identifier_preparer._quote_free_identifiers(schema, view_name)
)
sql = self._show_create_table(
connection, None, charset, full_name=full_name
)
return sql
def _parsed_state_or_create(
self, connection, table_name, schema=None, **kw
):
return self._setup_parser(
connection,
table_name,
schema,
info_cache=kw.get("info_cache", None),
)
@util.memoized_property
def _tabledef_parser(self):
"""return the MySQLTableDefinitionParser, generate if needed.
The deferred creation ensures that the dialect has
retrieved server version information first.
"""
preparer = self.identifier_preparer
return _reflection.MySQLTableDefinitionParser(self, preparer)
@reflection.cache
def _setup_parser(self, connection, table_name, schema=None, **kw):
charset = self._connection_charset
parser = self._tabledef_parser
full_name = ".".join(
self.identifier_preparer._quote_free_identifiers(
schema, table_name
)
)
sql = self._show_create_table(
connection, None, charset, full_name=full_name
)
if re.match(r"^CREATE (?:ALGORITHM)?.* VIEW", sql):
# Adapt views to something table-like.
columns = self._describe_table(
connection, None, charset, full_name=full_name
)
sql = parser._describe_to_create(table_name, columns)
return parser.parse(sql, charset)
def _detect_charset(self, connection):
raise NotImplementedError()
def _detect_casing(self, connection):
"""Sniff out identifier case sensitivity.
Cached per-connection. This value can not change without a server
restart.
"""
# https://dev.mysql.com/doc/refman/en/identifier-case-sensitivity.html
charset = self._connection_charset
show_var = connection.execute(
sql.text("SHOW VARIABLES LIKE 'lower_case_table_names'")
)
row = self._compat_first(
show_var,
charset=charset,
)
if not row:
cs = 0
else:
# 4.0.15 returns OFF or ON according to [ticket:489]
# 3.23 doesn't, 4.0.27 doesn't..
if row[1] == "OFF":
cs = 0
elif row[1] == "ON":
cs = 1
else:
cs = int(row[1])
self._casing = cs
return cs
def _detect_collations(self, connection):
"""Pull the active COLLATIONS list from the server.
Cached per-connection.
"""
collations = {}
charset = self._connection_charset
rs = connection.exec_driver_sql("SHOW COLLATION")
for row in self._compat_fetchall(rs, charset):
collations[row[0]] = row[1]
return collations
def _detect_sql_mode(self, connection):
row = self._compat_first(
connection.exec_driver_sql("SHOW VARIABLES LIKE 'sql_mode'"),
charset=self._connection_charset,
)
if not row:
util.warn(
"Could not retrieve SQL_MODE; please ensure the "
"MySQL user has permissions to SHOW VARIABLES"
)
self._sql_mode = ""
else:
self._sql_mode = row[1] or ""
def _detect_ansiquotes(self, connection):
"""Detect and adjust for the ANSI_QUOTES sql mode."""
mode = self._sql_mode
if not mode:
mode = ""
elif mode.isdigit():
mode_no = int(mode)
mode = (mode_no | 4 == mode_no) and "ANSI_QUOTES" or ""
self._server_ansiquotes = "ANSI_QUOTES" in mode
# as of MySQL 5.0.1
self._backslash_escapes = "NO_BACKSLASH_ESCAPES" not in mode
def _show_create_table(
self, connection, table, charset=None, full_name=None
):
"""Run SHOW CREATE TABLE for a ``Table``."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "SHOW CREATE TABLE %s" % full_name
rp = None
try:
rp = connection.execution_options(
skip_user_error_events=True
).exec_driver_sql(st)
except exc.DBAPIError as e:
if self._extract_error_code(e.orig) == 1146:
util.raise_(exc.NoSuchTableError(full_name), replace_context=e)
else:
raise
row = self._compat_first(rp, charset=charset)
if not row:
raise exc.NoSuchTableError(full_name)
return row[1].strip()
def _describe_table(self, connection, table, charset=None, full_name=None):
"""Run DESCRIBE for a ``Table`` and return processed rows."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "DESCRIBE %s" % full_name
rp, rows = None, None
try:
try:
rp = connection.execution_options(
skip_user_error_events=True
).exec_driver_sql(st)
except exc.DBAPIError as e:
code = self._extract_error_code(e.orig)
if code == 1146:
util.raise_(
exc.NoSuchTableError(full_name), replace_context=e
)
elif code == 1356:
util.raise_(
exc.UnreflectableTableError(
"Table or view named %s could not be "
"reflected: %s" % (full_name, e)
),
replace_context=e,
)
else:
raise
rows = self._compat_fetchall(rp, charset=charset)
finally:
if rp:
rp.close()
return rows
class _DecodingRow(object):
"""Return unicode-decoded values based on type inspection.
Smooth over data type issues (esp. with alpha driver versions) and
normalize strings as Unicode regardless of user-configured driver
encoding settings.
"""
# Some MySQL-python versions can return some columns as
# sets.Set(['value']) (seriously) but thankfully that doesn't
# seem to come up in DDL queries.
_encoding_compat = {
"koi8r": "koi8_r",
"koi8u": "koi8_u",
"utf16": "utf-16-be", # MySQL's uft16 is always bigendian
"utf8mb4": "utf8", # real utf8
"eucjpms": "ujis",
}
def __init__(self, rowproxy, charset):
self.rowproxy = rowproxy
self.charset = self._encoding_compat.get(charset, charset)
def __getitem__(self, index):
item = self.rowproxy[index]
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, util.binary_type):
return item.decode(self.charset)
else:
return item
def __getattr__(self, attr):
item = getattr(self.rowproxy, attr)
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, util.binary_type):
return item.decode(self.charset)
else:
return item
c4194b35b0c4d19fbf61ada823bfe4a80aa83e71 | 40f4908483b98fc4f370ff4f2d520e1284d045b3 | /phase02/immortals_repo/harness/pymmortals/generated/com/securboration/immortals/ontology/resources/memory/physicalmemoryresource.py | 9be4f40b65705d8717bea20004f42fc927ec8dd1 | [] | no_license | TF-185/bbn-immortals | 7f70610bdbbcbf649f3d9021f087baaa76f0d8ca | e298540f7b5f201779213850291337a8bded66c7 | refs/heads/master | 2023-05-31T00:16:42.522840 | 2019-10-24T21:45:07 | 2019-10-24T21:45:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | from pymmortals.generated.com.securboration.immortals.ontology.property.property import Property
from pymmortals.generated.com.securboration.immortals.ontology.resources.memory.memoryresource import MemoryResource
from pymmortals.generated.com.securboration.immortals.ontology.resources.memory.memorytype import MemoryType
from typing import List
# noinspection PyPep8Naming
class PhysicalMemoryResource(MemoryResource):
_validator_values = dict()
_types = dict()
def __init__(self,
canRead: bool = None,
canWrite: bool = None,
humanReadableDescription: str = None,
maxAvailableBytes: int = None,
memoryType: MemoryType = None,
resourceProperty: List[Property] = None):
super().__init__(canRead=canRead, canWrite=canWrite, humanReadableDescription=humanReadableDescription, maxAvailableBytes=maxAvailableBytes, resourceProperty=resourceProperty)
self.memoryType = memoryType
9229b2f555f79cbed6c76a6d4cd3c57907fe831f | 4e92106302c784431115cea8e822f56234e2684b | /lib/plot.py | 5c515658ec0f0625310cab69f0e794fed096ddaf | [] | no_license | CardiacModelling/cside-2018 | 950a7cd92c791fd1840e38ce1726d1c13f68b267 | d91df2bc71a6e2fb18e1d009a3f5722b1e0fc271 | refs/heads/master | 2020-04-15T17:09:13.118292 | 2019-03-12T11:51:01 | 2019-03-12T11:51:01 | 164,862,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,223 | py | #
# Quick diagnostic plots.
#
# This file is part of PINTS.
# Copyright (c) 2017-2018, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import warnings
import numpy as np
import scipy.stats as stats
import pints
def function(f, x, lower=None, upper=None, evaluations=20):
"""
Creates 1d plots of a :class:`LogPDF` or a :class:`ErrorMeasure` around a
point `x` (i.e. a 1-dimensional plot in each direction).
Arguments:
``f``
A :class:`pints.LogPDF` or :class:`pints.ErrorMeasure` to plot.
``x``
A point in the function's input space.
``lower``
(Optional) Lower bounds for each parameter, used to specify the lower
bounds of the plot.
``upper``
(Optional) Upper bounds for each parameter, used to specify the upper
bounds of the plot.
``evaluations``
(Optional) The number of evaluations to use in each plot.
Returns a ``matplotlib`` figure object and axes handle.
"""
import matplotlib.pyplot as plt
# Check function get dimension
if not (isinstance(f, pints.LogPDF) or isinstance(f, pints.ErrorMeasure)):
raise ValueError(
'Given function must be pints.LogPDF or pints.ErrorMeasure.')
dimension = f.n_parameters()
# Check point
x = pints.vector(x)
if len(x) != dimension:
raise ValueError(
'Given point `x` must have same dimension as function.')
# Check boundaries
if lower is None:
# Guess boundaries based on point x
lower = x * 0.95
lower[lower == 0] = -1
else:
lower = pints.vector(lower)
if len(lower) != dimension:
raise ValueError(
'Lower bounds must have same dimension as function.')
if upper is None:
# Guess boundaries based on point x
upper = x * 1.05
upper[upper == 0] = 1
else:
upper = pints.vector(upper)
if len(upper) != dimension:
raise ValueError(
'Upper bounds must have same dimension as function.')
# Check number of evaluations
evaluations = int(evaluations)
if evaluations < 1:
raise ValueError('Number of evaluations must be greater than zero.')
# Create points to plot
xs = np.tile(x, (dimension * evaluations, 1))
for j in range(dimension):
i1 = j * evaluations
i2 = i1 + evaluations
xs[i1:i2, j] = np.linspace(lower[j], upper[j], evaluations)
# Evaluate points
fs = pints.evaluate(f, xs, parallel=False)
# Create figure
fig, axes = plt.subplots(4, 3, figsize=(12, 7))
for j, p in enumerate(x):
i1 = j * evaluations
i2 = i1 + evaluations
a1 = j % 4
a2 = j // 4
axes[a1, a2].plot(xs[i1:i2, j], fs[i1:i2], c='green', label='Function')
axes[a1, a2].axvline(p, c='blue', label='Value')
axes[a1, a2].set_xlabel('Parameter ' + str(1 + j))
if j == 0:
axes[a1, a2].legend()
plt.tight_layout()
return fig, axes
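# A minimal usage sketch (an assumption, not part of the original module):
# `log_posterior` stands for any pints.LogPDF and `x0` for a point in its
# parameter space. Note that the fixed 4x3 subplot grid above assumes at
# most 12 parameters.
#
#     fig, axes = function(log_posterior, x0, evaluations=50)
#     fig.savefig('function-slices.png')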
def pairwise(samples,
kde=False,
heatmap=False,
opacity=None,
ref_parameters=None,
n_percentiles=None,
fig_axes=None):
"""
Takes a markov chain or list of `samples` and creates a set of pairwise
scatterplots for all parameters (p1 versus p2, p1 versus p3, p2 versus p3,
etc.).
The returned plot is in a 'matrix' form, with histograms of each individual
parameter on the diagonal, and scatter plots of parameters ``i`` and ``j``
on each entry ``(i, j)`` below the diagonal.
Arguments:
``samples``
A list of samples, with shape ``(n_samples, dimension)``, where
``n_samples`` is the number of samples in the list and ``dimension`` is
the number of parameters.
    ``kde``
        (Optional) Set to ``True`` to use kernel-density estimation for the
        histograms and scatter plots.
    ``heatmap``
        (Optional) Set to ``True`` to plot the off-diagonal panels as 2d
        histograms (heatmaps) instead of scatter plots.
    ``opacity``
        (Optional) When ``kde=False``, this value can be used to manually set
        the opacity of the points in the scatter plots.
``ref_parameters``
(Optional) A set of parameters for reference in the plot. For example,
if true values of parameters are known, they can be passed in for
plotting.
``n_percentiles``
(Optional) Shows only the middle n-th percentiles of the distribution.
        Default shows all samples in ``samples``.
    ``fig_axes``
        (Optional) An existing ``(fig, axes)`` tuple to draw into instead of
        creating a new figure.
    Returns a ``matplotlib`` figure object and axes handle.
"""
import matplotlib.pyplot as plt
# Check samples size
try:
n_sample, n_param = samples.shape
except ValueError:
raise ValueError('`samples` must be of shape (n_sample, n_param).')
# Check reference parameters
if ref_parameters is not None:
if len(ref_parameters) != n_param:
raise ValueError(
'Length of `ref_parameters` must be same as number of'
' parameters.')
# Create figure
fig_size = (3 * n_param, 3 * n_param)
if fig_axes is None:
fig, axes = plt.subplots(n_param, n_param, figsize=fig_size)
else:
fig, axes = fig_axes
bins = 25
for i in range(n_param):
for j in range(n_param):
if i == j:
# Diagonal: Plot a histogram
if n_percentiles is None:
xmin, xmax = np.min(samples[:, i]), np.max(samples[:, i])
else:
xmin = np.percentile(samples[:, i],
50 - n_percentiles / 2.)
xmax = np.percentile(samples[:, i],
50 + n_percentiles / 2.)
xbins = np.linspace(xmin, xmax, bins)
axes[i, j].set_xlim(xmin, xmax)
axes[i, j].hist(samples[:, i], bins=xbins, normed=True)
# Add kde plot
if kde:
x = np.linspace(xmin, xmax, 100)
axes[i, j].plot(x, stats.gaussian_kde(samples[:, i])(x))
# Add reference parameters if given
if ref_parameters is not None:
ymin_tv, ymax_tv = axes[i, j].get_ylim()
axes[i, j].plot(
[ref_parameters[i], ref_parameters[i]],
[0.0, ymax_tv],
'--', c='k', lw=2)
elif i < j:
# Top-right: no plot
axes[i, j].axis('off')
else:
# Lower-left: Plot the samples as density map
if n_percentiles is None:
xmin, xmax = np.min(samples[:, j]), np.max(samples[:, j])
ymin, ymax = np.min(samples[:, i]), np.max(samples[:, i])
else:
xmin = np.percentile(samples[:, j],
50 - n_percentiles / 2.)
xmax = np.percentile(samples[:, j],
50 + n_percentiles / 2.)
ymin = np.percentile(samples[:, i],
50 - n_percentiles / 2.)
ymax = np.percentile(samples[:, i],
50 + n_percentiles / 2.)
axes[i, j].set_xlim(xmin, xmax)
axes[i, j].set_ylim(ymin, ymax)
if not kde and not heatmap:
# Create scatter plot
# Determine point opacity
num_points = len(samples[:, i])
if opacity is None:
if num_points < 10:
opacity = 1.0
else:
opacity = 1.0 / np.log10(num_points)
# Scatter points
axes[i, j].scatter(
samples[:, j], samples[:, i], alpha=opacity, s=0.1)
elif kde:
# Create a KDE-based plot
# Plot values
values = np.vstack([samples[:, j], samples[:, i]])
axes[i, j].imshow(
np.rot90(values), cmap=plt.cm.Blues,
extent=[xmin, xmax, ymin, ymax])
# Create grid
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
# Get kernel density estimate and plot contours
kernel = stats.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
axes[i, j].contourf(xx, yy, f, cmap='Blues')
axes[i, j].contour(xx, yy, f, colors='k')
# Force equal aspect ratio
# See: https://stackoverflow.com/questions/7965743
im = axes[i, j].get_images()
ex = im[0].get_extent()
# Matplotlib raises a warning here (on 2.7 at least)
# We can't do anything about it, so no other option than
# to suppress it at this stage...
with warnings.catch_warnings():
warnings.simplefilter('ignore', UnicodeWarning)
axes[i, j].set_aspect(
abs((ex[1] - ex[0]) / (ex[3] - ex[2])))
elif heatmap:
# Create a heatmap-like plot
xbins = np.linspace(xmin, xmax, bins)
ybins = np.linspace(ymin, ymax, bins)
axes[i, j].hist2d(samples[:, j], samples[:, i],
bins=[xbins, ybins], normed=True,
cmap='Blues')
# Add reference parameters if given
if ref_parameters is not None:
axes[i, j].plot(
[ref_parameters[j], ref_parameters[j]],
[ymin, ymax],
'--', c='k', lw=2)
axes[i, j].plot(
[xmin, xmax],
[ref_parameters[i], ref_parameters[i]],
'--', c='k', lw=2)
# Set tick labels
if i < n_param - 1:
# Only show x tick labels for the last row
axes[i, j].set_xticklabels([])
else:
# Rotate the x tick labels to fit in the plot
for tl in axes[i, j].get_xticklabels():
tl.set_rotation(45)
if j > 0:
# Only show y tick labels for the first column
axes[i, j].set_yticklabels([])
# Set axis labels
axes[-1, i].set_xlabel('Parameter %d' % (i + 1))
if i == 0:
# The first one is not a parameter
axes[i, 0].set_ylabel('Frequency')
else:
axes[i, 0].set_ylabel('Parameter %d' % (i + 1))
return fig, axes
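# A minimal usage sketch (an assumption, not part of the original module):
# `chain` stands for a hypothetical (n_samples, n_parameters) numpy array of
# MCMC samples.
#
#     fig, axes = pairwise(chain, kde=False, n_percentiles=95)
#     fig.savefig('pairwise.png')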
def hist(samples, ref_parameters=None, n_percentiles=None):
"""
    Takes one or more Markov chains or lists of samples as input and creates
    and returns a figure showing a histogram per parameter, with all chains or
    lists of samples overlaid.
Arguments:
``samples``
A list of lists of samples, with shape
``(n_lists, n_samples, dimension)``, where ``n_lists`` is the number of
lists of samples, ``n_samples`` is the number of samples in one list
and ``dimension`` is the number of parameters.
``ref_parameters``
(Optional) A set of parameters for reference in the plot. For example,
if true values of parameters are known, they can be passed in for
plotting.
``n_percentiles``
(Optional) Shows only the middle n-th percentiles of the distribution.
Default shows all samples in ``samples``.
Returns a ``matplotlib`` figure object and axes handle.
"""
import matplotlib.pyplot as plt
# If we switch to Python3 exclusively, bins and alpha can be keyword-only
# arguments
bins = 40
alpha = 0.5
n_list = len(samples)
_, n_param = samples[0].shape
# Check number of parameters
for samples_j in samples:
if n_param != samples_j.shape[1]:
raise ValueError(
'All samples must have the same number of parameters.'
)
# Check reference parameters
if ref_parameters is not None:
if len(ref_parameters) != n_param:
raise ValueError(
'Length of `ref_parameters` must be same as number of'
' parameters.')
# Set up figure
fig, axes = plt.subplots(4, 3, figsize=(12, 7))
# Plot first samples
for i in range(n_param):
a1 = i % 4
a2 = i // 4
for j_list, samples_j in enumerate(samples):
# Add histogram subplot
axes[a1, a2].set_xlabel('Parameter ' + str(i + 1))
if n_percentiles is None:
xmin = np.min(samples_j[:, i])
xmax = np.max(samples_j[:, i])
else:
xmin = np.percentile(samples_j[:, i],
50 - n_percentiles / 2.)
xmax = np.percentile(samples_j[:, i],
50 + n_percentiles / 2.)
xbins = np.linspace(xmin, xmax, bins)
            axes[a1, a2].hist(samples_j[:, i], bins=xbins, alpha=alpha,
                              label='Samples ' + str(1 + j_list))
# Add reference parameters if given
if ref_parameters is not None:
# For histogram subplot
                ymin_tv, ymax_tv = axes[a1, a2].get_ylim()
axes[a1, a2].plot(
[ref_parameters[i], ref_parameters[i]],
[0.0, ymax_tv],
'--', c='k')
if n_list > 1:
axes[0, 0].legend()
axes[1, 0].set_ylabel('Frequency', fontsize=12)
plt.tight_layout()
return fig, axes
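# A minimal usage sketch (an assumption, not part of the original module):
# `chains` stands for a list of (n_samples, n_parameters) arrays, one per
# MCMC chain; the fixed 4x3 grid above assumes at most 12 parameters.
#
#     fig, axes = hist(chains, ref_parameters=None, n_percentiles=99)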
9ebcfe440aadfcb8bf00181e22e0cfadd7c707ac | 9b6a8923e783bd2641d7af3b118ff83f38c1de31 | /review/list/list.py | db0572268d2cf805fbed1cf651fd1c590a2e4f44 | [] | no_license | mbrsagor/PyLearn | 1c625698802fc5325ea06b754dc9b80d716d9f31 | 94e68f10efd1f5b1a26d1fd965a29dbbe6c2253d | refs/heads/master | 2023-02-25T05:50:23.530150 | 2021-01-31T19:09:01 | 2021-01-31T19:09:01 | 263,316,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | students = [
{
"name": "Jacob Martin",
"father name": "Ros Martin",
"Address": "123 Hill Street",
}, {
"name": "Angela Stevens",
"father name": "Robert Stevens",
"Address": "3 Upper Street London",
}, {
"name": "Ricky Smart",
"father name": "William Smart",
"Address": "Unknown",
}
]
names_list = [student['name'] for student in students]
print(names_list)
f = ["Mango", "Apple", "Orange"]
a, b, c = f
print(a)
print(b)
print(c)
4971148ded922aab1c7bcceb73ea4136e79c4e7f | c7cb4e768cbb4110c1995234bae2967fb92b0bd3 | /mySite/mySite/urls.py | 52c38cceaf3621feb74b8355d96a957b9a943ae3 | [] | no_license | backtobasic18/main_roni_site | d50f7418f8e6a48855746bb2229a840a2af7d7f8 | 06cabff9b960f762bb2ad64421cc8e2a3e5422a4 | refs/heads/master | 2020-07-06T13:44:34.805318 | 2016-09-10T17:34:16 | 2016-09-10T17:34:16 | 67,798,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | """mySite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('mainApp.urls')),
]
544691d1ddac1a2ff9e0419bfc69e8b15f00a0b1 | a8c0867109974ff7586597fe2c58521277ab9d4d | /LC648.py | e3823a187d57e17a47a6641bfc77b27bfb8ab450 | [] | no_license | Qiao-Liang/LeetCode | 1491b01d2ddf11495fbc23a65bb6ecb74ac1cee2 | dbdb227e12f329e4ca064b338f1fbdca42f3a848 | refs/heads/master | 2023-05-06T15:00:58.939626 | 2021-04-21T06:30:33 | 2021-04-21T06:30:33 | 82,885,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | class Solution(object):
def replaceWords(self, dict, sentence):
"""
:type dict: List[str]
:type sentence: str
:rtype: str
"""
sen_list = sentence.split(' ')
for idx, word in enumerate(sen_list):
min_root = word
len_word = len(word)
for root in dict:
len_root = len(root)
if len_root < len(min_root) and len_root <= len_word and word[:len_root] == root:
min_root = root
sen_list[idx] = min_root
return ' '.join(sen_list)
sol = Solution()
# dict = ["cat", "bat", "rat"]
# sentence = "the cattle was rattled by the battery"
dict = ["a", "b", "c"]
sentence = "aadsfasf absbs bbab cadsfafs"
print(sol.replaceWords(dict, sentence))
a8c5c7e3b1280c05481bf5cf00de1d61e37f5aa5 | 923d035a4762a19b30d5900db91143a83837ae70 | /ichnaea/async/config.py | 1fd35092f9eaed4de2aaf9188da99570999ed30d | [
"Apache-2.0"
] | permissive | voolitels/ichnaea | d5d5da34cb30b3e0c85675e32dab3972cc31d7b0 | bd0350fcba9efb0bad3957309ed3a471ae07e41b | refs/heads/master | 2021-01-17T14:21:16.056481 | 2015-11-10T16:38:22 | 2015-11-10T16:57:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,402 | py | """
Contains celery specific one time configuration code.
"""
import os
from kombu import Queue
from kombu.serialization import register
from ichnaea.async.schedule import CELERYBEAT_SCHEDULE
from ichnaea.cache import configure_redis
from ichnaea.config import read_config
from ichnaea import internaljson
from ichnaea.db import configure_db
from ichnaea.geoip import configure_geoip
from ichnaea.log import (
configure_raven,
configure_stats,
)
from ichnaea.queue import (
DataQueue,
ExportQueue,
)
CELERY_QUEUES = (
Queue('celery_cell', routing_key='celery_cell'),
Queue('celery_default', routing_key='celery_default'),
Queue('celery_export', routing_key='celery_export'),
Queue('celery_incoming', routing_key='celery_incoming'),
Queue('celery_monitor', routing_key='celery_monitor'),
Queue('celery_ocid', routing_key='celery_ocid'),
Queue('celery_reports', routing_key='celery_reports'),
Queue('celery_upload', routing_key='celery_upload'),
Queue('celery_wifi', routing_key='celery_wifi'),
) #: List of :class:`kombu.Queue` instances.
register('internal_json',
internaljson.internal_dumps,
internaljson.internal_loads,
content_type='application/x-internaljson',
content_encoding='utf-8')
def configure_celery(celery_app):
"""
Configure the celery app stored in :data:`ichnaea.async.app.celery_app`.
This is executed both inside the master worker process and once in
each forked worker process.
This parses the application ini and reads in the
:mod:`ichnaea.async.settings`.
"""
conf = read_config()
if conf.has_section('celery'):
section = conf.get_map('celery')
else: # pragma: no cover
# happens while building docs locally and on rtfd.org
return
# testing settings
always_eager = bool(os.environ.get('CELERY_ALWAYS_EAGER', False))
redis_uri = os.environ.get('REDIS_URI', 'redis://localhost:6379/1')
if always_eager and redis_uri:
broker_url = redis_uri
result_url = redis_uri
else: # pragma: no cover
broker_url = section['broker_url']
result_url = section['result_url']
celery_app.config_from_object('ichnaea.async.settings')
celery_app.conf.update(
BROKER_URL=broker_url,
CELERY_RESULT_BACKEND=result_url,
CELERY_QUEUES=CELERY_QUEUES,
CELERYBEAT_SCHEDULE=CELERYBEAT_SCHEDULE,
)
def configure_data(redis_client):
"""
Configure fixed set of data queues.
"""
data_queues = {
'update_cell': DataQueue('update_cell', redis_client,
queue_key='update_cell'),
'update_cellarea': DataQueue('update_cellarea', redis_client,
queue_key='update_cellarea'),
'update_cellarea_ocid': DataQueue('update_cellarea_ocid', redis_client,
queue_key='update_cellarea_ocid'),
'update_score': DataQueue('update_score', redis_client,
queue_key='update_score'),
}
for shard_id in ('ne', 'nw', 'se', 'sw'):
name = 'update_datamap_' + shard_id
data_queues[name] = DataQueue(name, redis_client, queue_key=name)
for shard_id in ['%x' % i for i in range(16)]:
name = 'update_wifi_' + shard_id
data_queues[name] = DataQueue(
name, redis_client, queue_key=name)
return data_queues
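# Illustrative sketch (an assumption, not part of the original module): the
# mapping returned by configure_data() is keyed by queue name, e.g.
#
#     data_queues = configure_data(redis_client)
#     wifi_queue = data_queues['update_wifi_0']  # one of the 16 wifi shards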
def configure_export(redis_client, app_config):
"""
Configure export queues, based on the `[export:*]` sections from
the application ini file.
"""
export_queues = {}
for section_name in app_config.sections():
if section_name.startswith('export:'):
section = app_config.get_map(section_name)
name = section_name.split(':')[1]
export_queues[name] = ExportQueue(name, redis_client, section)
return export_queues
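# Illustrative sketch (an assumption, not part of the original module): an ini
# section named e.g. [export:backup] would be registered above as an
# ExportQueue under the name 'backup', with the section's key/value map passed
# through unchanged to ExportQueue.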
def init_worker(celery_app, app_config,
_db_rw=None, _db_ro=None, _geoip_db=None,
_raven_client=None, _redis_client=None, _stats_client=None):
"""
Configure the passed in celery app, usually stored in
:data:`ichnaea.async.app.celery_app`.
Does connection, settings and queue setup. Attaches some
additional functionality to the :class:`celery.Celery` instance.
This is executed inside each forked worker process.
The parameters starting with an underscore are test-only hooks
to provide pre-configured connection objects.
:param _db_ro: Ignored, read-only database connection isn't used.
"""
# make config file settings available
celery_app.settings = app_config.asdict()
# configure outside connections
celery_app.db_rw = configure_db(
app_config.get('database', 'rw_url'), _db=_db_rw)
celery_app.raven_client = raven_client = configure_raven(
app_config.get('sentry', 'dsn'),
transport='threaded', _client=_raven_client)
celery_app.redis_client = redis_client = configure_redis(
app_config.get('cache', 'cache_url'), _client=_redis_client)
celery_app.stats_client = configure_stats(
app_config, _client=_stats_client)
celery_app.geoip_db = configure_geoip(
app_config.get('geoip', 'db_path'), raven_client=raven_client,
_client=_geoip_db)
# configure data / export queues
celery_app.all_queues = all_queues = set([q.name for q in CELERY_QUEUES])
celery_app.data_queues = data_queues = configure_data(redis_client)
for queue in data_queues.values():
if queue.monitor_name:
all_queues.add(queue.monitor_name)
celery_app.export_queues = configure_export(redis_client, app_config)
for queue in celery_app.export_queues.values():
if queue.monitor_name:
all_queues.add(queue.monitor_name)
def shutdown_worker(celery_app):
"""
Close outbound connections and remove custom celery_app state.
This is executed inside each forked worker process.
"""
celery_app.db_rw.engine.pool.dispose()
del celery_app.db_rw
del celery_app.raven_client
celery_app.redis_client.connection_pool.disconnect()
del celery_app.redis_client
del celery_app.stats_client
del celery_app.all_queues
del celery_app.data_queues
del celery_app.export_queues
del celery_app.settings
be4e9e3a42b8c97562deeecd8c3b871fa6fb01c7 | f124cb2443577778d8708993c984eafbd1ae3ec3 | /saleor/account/migrations/0054_alter_user_language_code.py | c4843696a427a6c0f24f171c7feab251cf7eca57 | [
"BSD-3-Clause"
] | permissive | quangtynu/saleor | ac467193a7779fed93c80251828ac85d92d71d83 | 5b0e5206c5fd30d81438b6489d0441df51038a85 | refs/heads/master | 2023-03-07T19:41:20.361624 | 2022-10-20T13:19:25 | 2022-10-20T13:19:25 | 245,860,106 | 1 | 0 | BSD-3-Clause | 2023-03-06T05:46:25 | 2020-03-08T17:44:18 | Python | UTF-8 | Python | false | false | 40,337 | py | # Generated by Django 3.2.6 on 2021-08-17 11:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("account", "0053_auto_20210719_1048"),
]
operations = [
migrations.AlterField(
model_name="user",
name="language_code",
field=models.CharField(
choices=[
("af", "Afrikaans"),
("af-NA", "Afrikaans (Namibia)"),
("af-ZA", "Afrikaans (South Africa)"),
("agq", "Aghem"),
("agq-CM", "Aghem (Cameroon)"),
("ak", "Akan"),
("ak-GH", "Akan (Ghana)"),
("am", "Amharic"),
("am-ET", "Amharic (Ethiopia)"),
("ar", "Arabic"),
("ar-AE", "Arabic (United Arab Emirates)"),
("ar-BH", "Arabic (Bahrain)"),
("ar-DJ", "Arabic (Djibouti)"),
("ar-DZ", "Arabic (Algeria)"),
("ar-EG", "Arabic (Egypt)"),
("ar-EH", "Arabic (Western Sahara)"),
("ar-ER", "Arabic (Eritrea)"),
("ar-IL", "Arabic (Israel)"),
("ar-IQ", "Arabic (Iraq)"),
("ar-JO", "Arabic (Jordan)"),
("ar-KM", "Arabic (Comoros)"),
("ar-KW", "Arabic (Kuwait)"),
("ar-LB", "Arabic (Lebanon)"),
("ar-LY", "Arabic (Libya)"),
("ar-MA", "Arabic (Morocco)"),
("ar-MR", "Arabic (Mauritania)"),
("ar-OM", "Arabic (Oman)"),
("ar-PS", "Arabic (Palestinian Territories)"),
("ar-QA", "Arabic (Qatar)"),
("ar-SA", "Arabic (Saudi Arabia)"),
("ar-SD", "Arabic (Sudan)"),
("ar-SO", "Arabic (Somalia)"),
("ar-SS", "Arabic (South Sudan)"),
("ar-SY", "Arabic (Syria)"),
("ar-TD", "Arabic (Chad)"),
("ar-TN", "Arabic (Tunisia)"),
("ar-YE", "Arabic (Yemen)"),
("as", "Assamese"),
("as-IN", "Assamese (India)"),
("asa", "Asu"),
("asa-TZ", "Asu (Tanzania)"),
("ast", "Asturian"),
("ast-ES", "Asturian (Spain)"),
("az", "Azerbaijani"),
("az-Cyrl", "Azerbaijani (Cyrillic)"),
("az-Cyrl-AZ", "Azerbaijani (Cyrillic, Azerbaijan)"),
("az-Latn", "Azerbaijani (Latin)"),
("az-Latn-AZ", "Azerbaijani (Latin, Azerbaijan)"),
("bas", "Basaa"),
("bas-CM", "Basaa (Cameroon)"),
("be", "Belarusian"),
("be-BY", "Belarusian (Belarus)"),
("bem", "Bemba"),
("bem-ZM", "Bemba (Zambia)"),
("bez", "Bena"),
("bez-TZ", "Bena (Tanzania)"),
("bg", "Bulgarian"),
("bg-BG", "Bulgarian (Bulgaria)"),
("bm", "Bambara"),
("bm-ML", "Bambara (Mali)"),
("bn", "Bangla"),
("bn-BD", "Bangla (Bangladesh)"),
("bn-IN", "Bangla (India)"),
("bo", "Tibetan"),
("bo-CN", "Tibetan (China)"),
("bo-IN", "Tibetan (India)"),
("br", "Breton"),
("br-FR", "Breton (France)"),
("brx", "Bodo"),
("brx-IN", "Bodo (India)"),
("bs", "Bosnian"),
("bs-Cyrl", "Bosnian (Cyrillic)"),
("bs-Cyrl-BA", "Bosnian (Cyrillic, Bosnia & Herzegovina)"),
("bs-Latn", "Bosnian (Latin)"),
("bs-Latn-BA", "Bosnian (Latin, Bosnia & Herzegovina)"),
("ca", "Catalan"),
("ca-AD", "Catalan (Andorra)"),
("ca-ES", "Catalan (Spain)"),
("ca-ES-VALENCIA", "Catalan (Spain, Valencian)"),
("ca-FR", "Catalan (France)"),
("ca-IT", "Catalan (Italy)"),
("ccp", "Chakma"),
("ccp-BD", "Chakma (Bangladesh)"),
("ccp-IN", "Chakma (India)"),
("ce", "Chechen"),
("ce-RU", "Chechen (Russia)"),
("ceb", "Cebuano"),
("ceb-PH", "Cebuano (Philippines)"),
("cgg", "Chiga"),
("cgg-UG", "Chiga (Uganda)"),
("chr", "Cherokee"),
("chr-US", "Cherokee (United States)"),
("ckb", "Central Kurdish"),
("ckb-IQ", "Central Kurdish (Iraq)"),
("ckb-IR", "Central Kurdish (Iran)"),
("cs", "Czech"),
("cs-CZ", "Czech (Czechia)"),
("cu", "Church Slavic"),
("cu-RU", "Church Slavic (Russia)"),
("cy", "Welsh"),
("cy-GB", "Welsh (United Kingdom)"),
("da", "Danish"),
("da-DK", "Danish (Denmark)"),
("da-GL", "Danish (Greenland)"),
("dav", "Taita"),
("dav-KE", "Taita (Kenya)"),
("de", "German"),
("de-AT", "German (Austria)"),
("de-BE", "German (Belgium)"),
("de-CH", "German (Switzerland)"),
("de-DE", "German (Germany)"),
("de-IT", "German (Italy)"),
("de-LI", "German (Liechtenstein)"),
("de-LU", "German (Luxembourg)"),
("dje", "Zarma"),
("dje-NE", "Zarma (Niger)"),
("dsb", "Lower Sorbian"),
("dsb-DE", "Lower Sorbian (Germany)"),
("dua", "Duala"),
("dua-CM", "Duala (Cameroon)"),
("dyo", "Jola-Fonyi"),
("dyo-SN", "Jola-Fonyi (Senegal)"),
("dz", "Dzongkha"),
("dz-BT", "Dzongkha (Bhutan)"),
("ebu", "Embu"),
("ebu-KE", "Embu (Kenya)"),
("ee", "Ewe"),
("ee-GH", "Ewe (Ghana)"),
("ee-TG", "Ewe (Togo)"),
("el", "Greek"),
("el-CY", "Greek (Cyprus)"),
("el-GR", "Greek (Greece)"),
("en", "English"),
("en-AE", "English (United Arab Emirates)"),
("en-AG", "English (Antigua & Barbuda)"),
("en-AI", "English (Anguilla)"),
("en-AS", "English (American Samoa)"),
("en-AT", "English (Austria)"),
("en-AU", "English (Australia)"),
("en-BB", "English (Barbados)"),
("en-BE", "English (Belgium)"),
("en-BI", "English (Burundi)"),
("en-BM", "English (Bermuda)"),
("en-BS", "English (Bahamas)"),
("en-BW", "English (Botswana)"),
("en-BZ", "English (Belize)"),
("en-CA", "English (Canada)"),
("en-CC", "English (Cocos (Keeling) Islands)"),
("en-CH", "English (Switzerland)"),
("en-CK", "English (Cook Islands)"),
("en-CM", "English (Cameroon)"),
("en-CX", "English (Christmas Island)"),
("en-CY", "English (Cyprus)"),
("en-DE", "English (Germany)"),
("en-DG", "English (Diego Garcia)"),
("en-DK", "English (Denmark)"),
("en-DM", "English (Dominica)"),
("en-ER", "English (Eritrea)"),
("en-FI", "English (Finland)"),
("en-FJ", "English (Fiji)"),
("en-FK", "English (Falkland Islands)"),
("en-FM", "English (Micronesia)"),
("en-GB", "English (United Kingdom)"),
("en-GD", "English (Grenada)"),
("en-GG", "English (Guernsey)"),
("en-GH", "English (Ghana)"),
("en-GI", "English (Gibraltar)"),
("en-GM", "English (Gambia)"),
("en-GU", "English (Guam)"),
("en-GY", "English (Guyana)"),
("en-HK", "English (Hong Kong SAR China)"),
("en-IE", "English (Ireland)"),
("en-IL", "English (Israel)"),
("en-IM", "English (Isle of Man)"),
("en-IN", "English (India)"),
("en-IO", "English (British Indian Ocean Territory)"),
("en-JE", "English (Jersey)"),
("en-JM", "English (Jamaica)"),
("en-KE", "English (Kenya)"),
("en-KI", "English (Kiribati)"),
("en-KN", "English (St. Kitts & Nevis)"),
("en-KY", "English (Cayman Islands)"),
("en-LC", "English (St. Lucia)"),
("en-LR", "English (Liberia)"),
("en-LS", "English (Lesotho)"),
("en-MG", "English (Madagascar)"),
("en-MH", "English (Marshall Islands)"),
("en-MO", "English (Macao SAR China)"),
("en-MP", "English (Northern Mariana Islands)"),
("en-MS", "English (Montserrat)"),
("en-MT", "English (Malta)"),
("en-MU", "English (Mauritius)"),
("en-MW", "English (Malawi)"),
("en-MY", "English (Malaysia)"),
("en-NA", "English (Namibia)"),
("en-NF", "English (Norfolk Island)"),
("en-NG", "English (Nigeria)"),
("en-NL", "English (Netherlands)"),
("en-NR", "English (Nauru)"),
("en-NU", "English (Niue)"),
("en-NZ", "English (New Zealand)"),
("en-PG", "English (Papua New Guinea)"),
("en-PH", "English (Philippines)"),
("en-PK", "English (Pakistan)"),
("en-PN", "English (Pitcairn Islands)"),
("en-PR", "English (Puerto Rico)"),
("en-PW", "English (Palau)"),
("en-RW", "English (Rwanda)"),
("en-SB", "English (Solomon Islands)"),
("en-SC", "English (Seychelles)"),
("en-SD", "English (Sudan)"),
("en-SE", "English (Sweden)"),
("en-SG", "English (Singapore)"),
("en-SH", "English (St. Helena)"),
("en-SI", "English (Slovenia)"),
("en-SL", "English (Sierra Leone)"),
("en-SS", "English (South Sudan)"),
("en-SX", "English (Sint Maarten)"),
("en-SZ", "English (Eswatini)"),
("en-TC", "English (Turks & Caicos Islands)"),
("en-TK", "English (Tokelau)"),
("en-TO", "English (Tonga)"),
("en-TT", "English (Trinidad & Tobago)"),
("en-TV", "English (Tuvalu)"),
("en-TZ", "English (Tanzania)"),
("en-UG", "English (Uganda)"),
("en-UM", "English (U.S. Outlying Islands)"),
("en-US", "English (United States)"),
("en-VC", "English (St. Vincent & Grenadines)"),
("en-VG", "English (British Virgin Islands)"),
("en-VI", "English (U.S. Virgin Islands)"),
("en-VU", "English (Vanuatu)"),
("en-WS", "English (Samoa)"),
("en-ZA", "English (South Africa)"),
("en-ZM", "English (Zambia)"),
("en-ZW", "English (Zimbabwe)"),
("eo", "Esperanto"),
("es", "Spanish"),
("es-AR", "Spanish (Argentina)"),
("es-BO", "Spanish (Bolivia)"),
("es-BR", "Spanish (Brazil)"),
("es-BZ", "Spanish (Belize)"),
("es-CL", "Spanish (Chile)"),
("es-CO", "Spanish (Colombia)"),
("es-CR", "Spanish (Costa Rica)"),
("es-CU", "Spanish (Cuba)"),
("es-DO", "Spanish (Dominican Republic)"),
("es-EA", "Spanish (Ceuta & Melilla)"),
("es-EC", "Spanish (Ecuador)"),
("es-ES", "Spanish (Spain)"),
("es-GQ", "Spanish (Equatorial Guinea)"),
("es-GT", "Spanish (Guatemala)"),
("es-HN", "Spanish (Honduras)"),
("es-IC", "Spanish (Canary Islands)"),
("es-MX", "Spanish (Mexico)"),
("es-NI", "Spanish (Nicaragua)"),
("es-PA", "Spanish (Panama)"),
("es-PE", "Spanish (Peru)"),
("es-PH", "Spanish (Philippines)"),
("es-PR", "Spanish (Puerto Rico)"),
("es-PY", "Spanish (Paraguay)"),
("es-SV", "Spanish (El Salvador)"),
("es-US", "Spanish (United States)"),
("es-UY", "Spanish (Uruguay)"),
("es-VE", "Spanish (Venezuela)"),
("et", "Estonian"),
("et-EE", "Estonian (Estonia)"),
("eu", "Basque"),
("eu-ES", "Basque (Spain)"),
("ewo", "Ewondo"),
("ewo-CM", "Ewondo (Cameroon)"),
("fa", "Persian"),
("fa-AF", "Persian (Afghanistan)"),
("fa-IR", "Persian (Iran)"),
("ff", "Fulah"),
("ff-Adlm", "Fulah (Adlam)"),
("ff-Adlm-BF", "Fulah (Adlam, Burkina Faso)"),
("ff-Adlm-CM", "Fulah (Adlam, Cameroon)"),
("ff-Adlm-GH", "Fulah (Adlam, Ghana)"),
("ff-Adlm-GM", "Fulah (Adlam, Gambia)"),
("ff-Adlm-GN", "Fulah (Adlam, Guinea)"),
("ff-Adlm-GW", "Fulah (Adlam, Guinea-Bissau)"),
("ff-Adlm-LR", "Fulah (Adlam, Liberia)"),
("ff-Adlm-MR", "Fulah (Adlam, Mauritania)"),
("ff-Adlm-NE", "Fulah (Adlam, Niger)"),
("ff-Adlm-NG", "Fulah (Adlam, Nigeria)"),
("ff-Adlm-SL", "Fulah (Adlam, Sierra Leone)"),
("ff-Adlm-SN", "Fulah (Adlam, Senegal)"),
("ff-Latn", "Fulah (Latin)"),
("ff-Latn-BF", "Fulah (Latin, Burkina Faso)"),
("ff-Latn-CM", "Fulah (Latin, Cameroon)"),
("ff-Latn-GH", "Fulah (Latin, Ghana)"),
("ff-Latn-GM", "Fulah (Latin, Gambia)"),
("ff-Latn-GN", "Fulah (Latin, Guinea)"),
("ff-Latn-GW", "Fulah (Latin, Guinea-Bissau)"),
("ff-Latn-LR", "Fulah (Latin, Liberia)"),
("ff-Latn-MR", "Fulah (Latin, Mauritania)"),
("ff-Latn-NE", "Fulah (Latin, Niger)"),
("ff-Latn-NG", "Fulah (Latin, Nigeria)"),
("ff-Latn-SL", "Fulah (Latin, Sierra Leone)"),
("ff-Latn-SN", "Fulah (Latin, Senegal)"),
("fi", "Finnish"),
("fi-FI", "Finnish (Finland)"),
("fil", "Filipino"),
("fil-PH", "Filipino (Philippines)"),
("fo", "Faroese"),
("fo-DK", "Faroese (Denmark)"),
("fo-FO", "Faroese (Faroe Islands)"),
("fr", "French"),
("fr-BE", "French (Belgium)"),
("fr-BF", "French (Burkina Faso)"),
("fr-BI", "French (Burundi)"),
("fr-BJ", "French (Benin)"),
("fr-BL", "French (St. Barthélemy)"),
("fr-CA", "French (Canada)"),
("fr-CD", "French (Congo - Kinshasa)"),
("fr-CF", "French (Central African Republic)"),
("fr-CG", "French (Congo - Brazzaville)"),
("fr-CH", "French (Switzerland)"),
("fr-CI", "French (Côte d’Ivoire)"),
("fr-CM", "French (Cameroon)"),
("fr-DJ", "French (Djibouti)"),
("fr-DZ", "French (Algeria)"),
("fr-FR", "French (France)"),
("fr-GA", "French (Gabon)"),
("fr-GF", "French (French Guiana)"),
("fr-GN", "French (Guinea)"),
("fr-GP", "French (Guadeloupe)"),
("fr-GQ", "French (Equatorial Guinea)"),
("fr-HT", "French (Haiti)"),
("fr-KM", "French (Comoros)"),
("fr-LU", "French (Luxembourg)"),
("fr-MA", "French (Morocco)"),
("fr-MC", "French (Monaco)"),
("fr-MF", "French (St. Martin)"),
("fr-MG", "French (Madagascar)"),
("fr-ML", "French (Mali)"),
("fr-MQ", "French (Martinique)"),
("fr-MR", "French (Mauritania)"),
("fr-MU", "French (Mauritius)"),
("fr-NC", "French (New Caledonia)"),
("fr-NE", "French (Niger)"),
("fr-PF", "French (French Polynesia)"),
("fr-PM", "French (St. Pierre & Miquelon)"),
("fr-RE", "French (Réunion)"),
("fr-RW", "French (Rwanda)"),
("fr-SC", "French (Seychelles)"),
("fr-SN", "French (Senegal)"),
("fr-SY", "French (Syria)"),
("fr-TD", "French (Chad)"),
("fr-TG", "French (Togo)"),
("fr-TN", "French (Tunisia)"),
("fr-VU", "French (Vanuatu)"),
("fr-WF", "French (Wallis & Futuna)"),
("fr-YT", "French (Mayotte)"),
("fur", "Friulian"),
("fur-IT", "Friulian (Italy)"),
("fy", "Western Frisian"),
("fy-NL", "Western Frisian (Netherlands)"),
("ga", "Irish"),
("ga-GB", "Irish (United Kingdom)"),
("ga-IE", "Irish (Ireland)"),
("gd", "Scottish Gaelic"),
("gd-GB", "Scottish Gaelic (United Kingdom)"),
("gl", "Galician"),
("gl-ES", "Galician (Spain)"),
("gsw", "Swiss German"),
("gsw-CH", "Swiss German (Switzerland)"),
("gsw-FR", "Swiss German (France)"),
("gsw-LI", "Swiss German (Liechtenstein)"),
("gu", "Gujarati"),
("gu-IN", "Gujarati (India)"),
("guz", "Gusii"),
("guz-KE", "Gusii (Kenya)"),
("gv", "Manx"),
("gv-IM", "Manx (Isle of Man)"),
("ha", "Hausa"),
("ha-GH", "Hausa (Ghana)"),
("ha-NE", "Hausa (Niger)"),
("ha-NG", "Hausa (Nigeria)"),
("haw", "Hawaiian"),
("haw-US", "Hawaiian (United States)"),
("he", "Hebrew"),
("he-IL", "Hebrew (Israel)"),
("hi", "Hindi"),
("hi-IN", "Hindi (India)"),
("hr", "Croatian"),
("hr-BA", "Croatian (Bosnia & Herzegovina)"),
("hr-HR", "Croatian (Croatia)"),
("hsb", "Upper Sorbian"),
("hsb-DE", "Upper Sorbian (Germany)"),
("hu", "Hungarian"),
("hu-HU", "Hungarian (Hungary)"),
("hy", "Armenian"),
("hy-AM", "Armenian (Armenia)"),
("ia", "Interlingua"),
("id", "Indonesian"),
("id-ID", "Indonesian (Indonesia)"),
("ig", "Igbo"),
("ig-NG", "Igbo (Nigeria)"),
("ii", "Sichuan Yi"),
("ii-CN", "Sichuan Yi (China)"),
("is", "Icelandic"),
("is-IS", "Icelandic (Iceland)"),
("it", "Italian"),
("it-CH", "Italian (Switzerland)"),
("it-IT", "Italian (Italy)"),
("it-SM", "Italian (San Marino)"),
("it-VA", "Italian (Vatican City)"),
("ja", "Japanese"),
("ja-JP", "Japanese (Japan)"),
("jgo", "Ngomba"),
("jgo-CM", "Ngomba (Cameroon)"),
("jmc", "Machame"),
("jmc-TZ", "Machame (Tanzania)"),
("jv", "Javanese"),
("jv-ID", "Javanese (Indonesia)"),
("ka", "Georgian"),
("ka-GE", "Georgian (Georgia)"),
("kab", "Kabyle"),
("kab-DZ", "Kabyle (Algeria)"),
("kam", "Kamba"),
("kam-KE", "Kamba (Kenya)"),
("kde", "Makonde"),
("kde-TZ", "Makonde (Tanzania)"),
("kea", "Kabuverdianu"),
("kea-CV", "Kabuverdianu (Cape Verde)"),
("khq", "Koyra Chiini"),
("khq-ML", "Koyra Chiini (Mali)"),
("ki", "Kikuyu"),
("ki-KE", "Kikuyu (Kenya)"),
("kk", "Kazakh"),
("kk-KZ", "Kazakh (Kazakhstan)"),
("kkj", "Kako"),
("kkj-CM", "Kako (Cameroon)"),
("kl", "Kalaallisut"),
("kl-GL", "Kalaallisut (Greenland)"),
("kln", "Kalenjin"),
("kln-KE", "Kalenjin (Kenya)"),
("km", "Khmer"),
("km-KH", "Khmer (Cambodia)"),
("kn", "Kannada"),
("kn-IN", "Kannada (India)"),
("ko", "Korean"),
("ko-KP", "Korean (North Korea)"),
("ko-KR", "Korean (South Korea)"),
("kok", "Konkani"),
("kok-IN", "Konkani (India)"),
("ks", "Kashmiri"),
("ks-Arab", "Kashmiri (Arabic)"),
("ks-Arab-IN", "Kashmiri (Arabic, India)"),
("ksb", "Shambala"),
("ksb-TZ", "Shambala (Tanzania)"),
("ksf", "Bafia"),
("ksf-CM", "Bafia (Cameroon)"),
("ksh", "Colognian"),
("ksh-DE", "Colognian (Germany)"),
("ku", "Kurdish"),
("ku-TR", "Kurdish (Turkey)"),
("kw", "Cornish"),
("kw-GB", "Cornish (United Kingdom)"),
("ky", "Kyrgyz"),
("ky-KG", "Kyrgyz (Kyrgyzstan)"),
("lag", "Langi"),
("lag-TZ", "Langi (Tanzania)"),
("lb", "Luxembourgish"),
("lb-LU", "Luxembourgish (Luxembourg)"),
("lg", "Ganda"),
("lg-UG", "Ganda (Uganda)"),
("lkt", "Lakota"),
("lkt-US", "Lakota (United States)"),
("ln", "Lingala"),
("ln-AO", "Lingala (Angola)"),
("ln-CD", "Lingala (Congo - Kinshasa)"),
("ln-CF", "Lingala (Central African Republic)"),
("ln-CG", "Lingala (Congo - Brazzaville)"),
("lo", "Lao"),
("lo-LA", "Lao (Laos)"),
("lrc", "Northern Luri"),
("lrc-IQ", "Northern Luri (Iraq)"),
("lrc-IR", "Northern Luri (Iran)"),
("lt", "Lithuanian"),
("lt-LT", "Lithuanian (Lithuania)"),
("lu", "Luba-Katanga"),
("lu-CD", "Luba-Katanga (Congo - Kinshasa)"),
("luo", "Luo"),
("luo-KE", "Luo (Kenya)"),
("luy", "Luyia"),
("luy-KE", "Luyia (Kenya)"),
("lv", "Latvian"),
("lv-LV", "Latvian (Latvia)"),
("mai", "Maithili"),
("mai-IN", "Maithili (India)"),
("mas", "Masai"),
("mas-KE", "Masai (Kenya)"),
("mas-TZ", "Masai (Tanzania)"),
("mer", "Meru"),
("mer-KE", "Meru (Kenya)"),
("mfe", "Morisyen"),
("mfe-MU", "Morisyen (Mauritius)"),
("mg", "Malagasy"),
("mg-MG", "Malagasy (Madagascar)"),
("mgh", "Makhuwa-Meetto"),
("mgh-MZ", "Makhuwa-Meetto (Mozambique)"),
("mgo", "Metaʼ"),
("mgo-CM", "Metaʼ (Cameroon)"),
("mi", "Maori"),
("mi-NZ", "Maori (New Zealand)"),
("mk", "Macedonian"),
("mk-MK", "Macedonian (North Macedonia)"),
("ml", "Malayalam"),
("ml-IN", "Malayalam (India)"),
("mn", "Mongolian"),
("mn-MN", "Mongolian (Mongolia)"),
("mni", "Manipuri"),
("mni-Beng", "Manipuri (Bangla)"),
("mni-Beng-IN", "Manipuri (Bangla, India)"),
("mr", "Marathi"),
("mr-IN", "Marathi (India)"),
("ms", "Malay"),
("ms-BN", "Malay (Brunei)"),
("ms-ID", "Malay (Indonesia)"),
("ms-MY", "Malay (Malaysia)"),
("ms-SG", "Malay (Singapore)"),
("mt", "Maltese"),
("mt-MT", "Maltese (Malta)"),
("mua", "Mundang"),
("mua-CM", "Mundang (Cameroon)"),
("my", "Burmese"),
("my-MM", "Burmese (Myanmar (Burma))"),
("mzn", "Mazanderani"),
("mzn-IR", "Mazanderani (Iran)"),
("naq", "Nama"),
("naq-NA", "Nama (Namibia)"),
("nb", "Norwegian Bokmål"),
("nb-NO", "Norwegian Bokmål (Norway)"),
("nb-SJ", "Norwegian Bokmål (Svalbard & Jan Mayen)"),
("nd", "North Ndebele"),
("nd-ZW", "North Ndebele (Zimbabwe)"),
("nds", "Low German"),
("nds-DE", "Low German (Germany)"),
("nds-NL", "Low German (Netherlands)"),
("ne", "Nepali"),
("ne-IN", "Nepali (India)"),
("ne-NP", "Nepali (Nepal)"),
("nl", "Dutch"),
("nl-AW", "Dutch (Aruba)"),
("nl-BE", "Dutch (Belgium)"),
("nl-BQ", "Dutch (Caribbean Netherlands)"),
("nl-CW", "Dutch (Curaçao)"),
("nl-NL", "Dutch (Netherlands)"),
("nl-SR", "Dutch (Suriname)"),
("nl-SX", "Dutch (Sint Maarten)"),
("nmg", "Kwasio"),
("nmg-CM", "Kwasio (Cameroon)"),
("nn", "Norwegian Nynorsk"),
("nn-NO", "Norwegian Nynorsk (Norway)"),
("nnh", "Ngiemboon"),
("nnh-CM", "Ngiemboon (Cameroon)"),
("nus", "Nuer"),
("nus-SS", "Nuer (South Sudan)"),
("nyn", "Nyankole"),
("nyn-UG", "Nyankole (Uganda)"),
("om", "Oromo"),
("om-ET", "Oromo (Ethiopia)"),
("om-KE", "Oromo (Kenya)"),
("or", "Odia"),
("or-IN", "Odia (India)"),
("os", "Ossetic"),
("os-GE", "Ossetic (Georgia)"),
("os-RU", "Ossetic (Russia)"),
("pa", "Punjabi"),
("pa-Arab", "Punjabi (Arabic)"),
("pa-Arab-PK", "Punjabi (Arabic, Pakistan)"),
("pa-Guru", "Punjabi (Gurmukhi)"),
("pa-Guru-IN", "Punjabi (Gurmukhi, India)"),
("pcm", "Nigerian Pidgin"),
("pcm-NG", "Nigerian Pidgin (Nigeria)"),
("pl", "Polish"),
("pl-PL", "Polish (Poland)"),
("prg", "Prussian"),
("ps", "Pashto"),
("ps-AF", "Pashto (Afghanistan)"),
("ps-PK", "Pashto (Pakistan)"),
("pt", "Portuguese"),
("pt-AO", "Portuguese (Angola)"),
("pt-BR", "Portuguese (Brazil)"),
("pt-CH", "Portuguese (Switzerland)"),
("pt-CV", "Portuguese (Cape Verde)"),
("pt-GQ", "Portuguese (Equatorial Guinea)"),
("pt-GW", "Portuguese (Guinea-Bissau)"),
("pt-LU", "Portuguese (Luxembourg)"),
("pt-MO", "Portuguese (Macao SAR China)"),
("pt-MZ", "Portuguese (Mozambique)"),
("pt-PT", "Portuguese (Portugal)"),
("pt-ST", "Portuguese (São Tomé & Príncipe)"),
("pt-TL", "Portuguese (Timor-Leste)"),
("qu", "Quechua"),
("qu-BO", "Quechua (Bolivia)"),
("qu-EC", "Quechua (Ecuador)"),
("qu-PE", "Quechua (Peru)"),
("rm", "Romansh"),
("rm-CH", "Romansh (Switzerland)"),
("rn", "Rundi"),
("rn-BI", "Rundi (Burundi)"),
("ro", "Romanian"),
("ro-MD", "Romanian (Moldova)"),
("ro-RO", "Romanian (Romania)"),
("rof", "Rombo"),
("rof-TZ", "Rombo (Tanzania)"),
("ru", "Russian"),
("ru-BY", "Russian (Belarus)"),
("ru-KG", "Russian (Kyrgyzstan)"),
("ru-KZ", "Russian (Kazakhstan)"),
("ru-MD", "Russian (Moldova)"),
("ru-RU", "Russian (Russia)"),
("ru-UA", "Russian (Ukraine)"),
("rw", "Kinyarwanda"),
("rw-RW", "Kinyarwanda (Rwanda)"),
("rwk", "Rwa"),
("rwk-TZ", "Rwa (Tanzania)"),
("sah", "Sakha"),
("sah-RU", "Sakha (Russia)"),
("saq", "Samburu"),
("saq-KE", "Samburu (Kenya)"),
("sat", "Santali"),
("sat-Olck", "Santali (Ol Chiki)"),
("sat-Olck-IN", "Santali (Ol Chiki, India)"),
("sbp", "Sangu"),
("sbp-TZ", "Sangu (Tanzania)"),
("sd", "Sindhi"),
("sd-Arab", "Sindhi (Arabic)"),
("sd-Arab-PK", "Sindhi (Arabic, Pakistan)"),
("sd-Deva", "Sindhi (Devanagari)"),
("sd-Deva-IN", "Sindhi (Devanagari, India)"),
("se", "Northern Sami"),
("se-FI", "Northern Sami (Finland)"),
("se-NO", "Northern Sami (Norway)"),
("se-SE", "Northern Sami (Sweden)"),
("seh", "Sena"),
("seh-MZ", "Sena (Mozambique)"),
("ses", "Koyraboro Senni"),
("ses-ML", "Koyraboro Senni (Mali)"),
("sg", "Sango"),
("sg-CF", "Sango (Central African Republic)"),
("shi", "Tachelhit"),
("shi-Latn", "Tachelhit (Latin)"),
("shi-Latn-MA", "Tachelhit (Latin, Morocco)"),
("shi-Tfng", "Tachelhit (Tifinagh)"),
("shi-Tfng-MA", "Tachelhit (Tifinagh, Morocco)"),
("si", "Sinhala"),
("si-LK", "Sinhala (Sri Lanka)"),
("sk", "Slovak"),
("sk-SK", "Slovak (Slovakia)"),
("sl", "Slovenian"),
("sl-SI", "Slovenian (Slovenia)"),
("smn", "Inari Sami"),
("smn-FI", "Inari Sami (Finland)"),
("sn", "Shona"),
("sn-ZW", "Shona (Zimbabwe)"),
("so", "Somali"),
("so-DJ", "Somali (Djibouti)"),
("so-ET", "Somali (Ethiopia)"),
("so-KE", "Somali (Kenya)"),
("so-SO", "Somali (Somalia)"),
("sq", "Albanian"),
("sq-AL", "Albanian (Albania)"),
("sq-MK", "Albanian (North Macedonia)"),
("sq-XK", "Albanian (Kosovo)"),
("sr", "Serbian"),
("sr-Cyrl", "Serbian (Cyrillic)"),
("sr-Cyrl-BA", "Serbian (Cyrillic, Bosnia & Herzegovina)"),
("sr-Cyrl-ME", "Serbian (Cyrillic, Montenegro)"),
("sr-Cyrl-RS", "Serbian (Cyrillic, Serbia)"),
("sr-Cyrl-XK", "Serbian (Cyrillic, Kosovo)"),
("sr-Latn", "Serbian (Latin)"),
("sr-Latn-BA", "Serbian (Latin, Bosnia & Herzegovina)"),
("sr-Latn-ME", "Serbian (Latin, Montenegro)"),
("sr-Latn-RS", "Serbian (Latin, Serbia)"),
("sr-Latn-XK", "Serbian (Latin, Kosovo)"),
("su", "Sundanese"),
("su-Latn", "Sundanese (Latin)"),
("su-Latn-ID", "Sundanese (Latin, Indonesia)"),
("sv", "Swedish"),
("sv-AX", "Swedish (Åland Islands)"),
("sv-FI", "Swedish (Finland)"),
("sv-SE", "Swedish (Sweden)"),
("sw", "Swahili"),
("sw-CD", "Swahili (Congo - Kinshasa)"),
("sw-KE", "Swahili (Kenya)"),
("sw-TZ", "Swahili (Tanzania)"),
("sw-UG", "Swahili (Uganda)"),
("ta", "Tamil"),
("ta-IN", "Tamil (India)"),
("ta-LK", "Tamil (Sri Lanka)"),
("ta-MY", "Tamil (Malaysia)"),
("ta-SG", "Tamil (Singapore)"),
("te", "Telugu"),
("te-IN", "Telugu (India)"),
("teo", "Teso"),
("teo-KE", "Teso (Kenya)"),
("teo-UG", "Teso (Uganda)"),
("tg", "Tajik"),
("tg-TJ", "Tajik (Tajikistan)"),
("th", "Thai"),
("th-TH", "Thai (Thailand)"),
("ti", "Tigrinya"),
("ti-ER", "Tigrinya (Eritrea)"),
("ti-ET", "Tigrinya (Ethiopia)"),
("tk", "Turkmen"),
("tk-TM", "Turkmen (Turkmenistan)"),
("to", "Tongan"),
("to-TO", "Tongan (Tonga)"),
("tr", "Turkish"),
("tr-CY", "Turkish (Cyprus)"),
("tr-TR", "Turkish (Turkey)"),
("tt", "Tatar"),
("tt-RU", "Tatar (Russia)"),
("twq", "Tasawaq"),
("twq-NE", "Tasawaq (Niger)"),
("tzm", "Central Atlas Tamazight"),
("tzm-MA", "Central Atlas Tamazight (Morocco)"),
("ug", "Uyghur"),
("ug-CN", "Uyghur (China)"),
("uk", "Ukrainian"),
("uk-UA", "Ukrainian (Ukraine)"),
("ur", "Urdu"),
("ur-IN", "Urdu (India)"),
("ur-PK", "Urdu (Pakistan)"),
("uz", "Uzbek"),
("uz-Arab", "Uzbek (Arabic)"),
("uz-Arab-AF", "Uzbek (Arabic, Afghanistan)"),
("uz-Cyrl", "Uzbek (Cyrillic)"),
("uz-Cyrl-UZ", "Uzbek (Cyrillic, Uzbekistan)"),
("uz-Latn", "Uzbek (Latin)"),
("uz-Latn-UZ", "Uzbek (Latin, Uzbekistan)"),
("vai", "Vai"),
("vai-Latn", "Vai (Latin)"),
("vai-Latn-LR", "Vai (Latin, Liberia)"),
("vai-Vaii", "Vai (Vai)"),
("vai-Vaii-LR", "Vai (Vai, Liberia)"),
("vi", "Vietnamese"),
("vi-VN", "Vietnamese (Vietnam)"),
("vo", "Volapük"),
("vun", "Vunjo"),
("vun-TZ", "Vunjo (Tanzania)"),
("wae", "Walser"),
("wae-CH", "Walser (Switzerland)"),
("wo", "Wolof"),
("wo-SN", "Wolof (Senegal)"),
("xh", "Xhosa"),
("xh-ZA", "Xhosa (South Africa)"),
("xog", "Soga"),
("xog-UG", "Soga (Uganda)"),
("yav", "Yangben"),
("yav-CM", "Yangben (Cameroon)"),
("yi", "Yiddish"),
("yo", "Yoruba"),
("yo-BJ", "Yoruba (Benin)"),
("yo-NG", "Yoruba (Nigeria)"),
("yue", "Cantonese"),
("yue-Hans", "Cantonese (Simplified)"),
("yue-Hans-CN", "Cantonese (Simplified, China)"),
("yue-Hant", "Cantonese (Traditional)"),
("yue-Hant-HK", "Cantonese (Traditional, Hong Kong SAR China)"),
("zgh", "Standard Moroccan Tamazight"),
("zgh-MA", "Standard Moroccan Tamazight (Morocco)"),
("zh", "Chinese"),
("zh-Hans", "Chinese (Simplified)"),
("zh-Hans-CN", "Chinese (Simplified, China)"),
("zh-Hans-HK", "Chinese (Simplified, Hong Kong SAR China)"),
("zh-Hans-MO", "Chinese (Simplified, Macao SAR China)"),
("zh-Hans-SG", "Chinese (Simplified, Singapore)"),
("zh-Hant", "Chinese (Traditional)"),
("zh-Hant-HK", "Chinese (Traditional, Hong Kong SAR China)"),
("zh-Hant-MO", "Chinese (Traditional, Macao SAR China)"),
("zh-Hant-TW", "Chinese (Traditional, Taiwan)"),
("zu", "Zulu"),
("zu-ZA", "Zulu (South Africa)"),
],
default="en",
max_length=35,
),
),
]
| [
"[email protected]"
] | |
39a78544f35be6ef9bef4d434c0609cc0f0f6d53 | 5eb29ce7104e10a399d9afd7e253f029bf8bc0ff | /workflows/images-incremental-update/image_dl.py | 642aff4941ac111e9fcba03ef642bc5941dbcc25 | [
"BSD-2-Clause"
] | permissive | svebk/DeepSentiBank_memex | 69789dc09316e97aad711edeb251837a60184e7e | 4e69ce66e3a177817ff360ddc263f55c6e0b63f7 | refs/heads/master | 2021-01-18T18:55:10.870052 | 2017-10-19T22:51:29 | 2017-10-19T22:51:29 | 36,091,024 | 22 | 1 | null | 2017-02-09T20:31:20 | 2015-05-22T19:20:54 | Python | UTF-8 | Python | false | false | 11,040 | py | import requests
imagedltimeout=3
class UnknownImageFormat(Exception):
pass
def get_image_size_and_format(input):
# adapted from https://github.com/scardine/image_size
"""
Return (width, height, format) for a given img file content stream.
No external dependencies except the struct modules from core.
"""
import struct
height = -1
width = -1
format = None
data = input.read(25)
if data[:6] in ('GIF87a', 'GIF89a'):
# GIFs
w, h = struct.unpack("<HH", data[6:10])
width = int(w)
height = int(h)
format = 'GIF'
elif data.startswith('\211PNG\r\n\032\n') and (data[12:16] == 'IHDR'):
# PNGs
w, h = struct.unpack(">LL", data[16:24])
width = int(w)
height = int(h)
format = 'PNG'
elif data.startswith('\211PNG\r\n\032\n'):
# older PNGs?
w, h = struct.unpack(">LL", data[8:16])
width = int(w)
height = int(h)
format = 'PNG'
elif data.startswith('\377\330'):
# JPEG
format = 'JPEG'
msg = " raised while trying to decode as JPEG."
input.seek(0)
input.read(2)
b = input.read(1)
try:
while (b and ord(b) != 0xDA):
while (ord(b) != 0xFF): b = input.read(1)
while (ord(b) == 0xFF): b = input.read(1)
if (ord(b) >= 0xC0 and ord(b) <= 0xC3):
input.read(3)
h, w = struct.unpack(">HH", input.read(4))
break
else:
input.read(int(struct.unpack(">H", input.read(2))[0])-2)
b = input.read(1)
width = int(w)
height = int(h)
except struct.error:
raise UnknownImageFormat("StructError" + msg)
except ValueError:
raise UnknownImageFormat("ValueError" + msg)
except Exception as e:
raise UnknownImageFormat(e.__class__.__name__ + msg)
else:
raise UnknownImageFormat("Sorry, don't know how to get information from this file.")
return width, height, format
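# Illustrative helper (added, not part of the original module): probing a local
# file with the header parser above; `path` is a placeholder for any image file.
def example_probe_local_image(path, verbose=False):
    # Open in binary mode so the struct-based parser sees raw bytes.
    with open(path, 'rb') as f:
        try:
            width, height, format = get_image_size_and_format(f)
            if verbose:
                print "{}: {}x{} {}".format(path, width, height, format)
            return width, height, format
        except UnknownImageFormat as inst:
            if verbose:
                print "Could not identify {}: {}".format(path, inst)
            return None, None, None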
def mkpath(outpath):
import os
pos_slash=[pos for pos,c in enumerate(outpath) if c=="/"]
for pos in pos_slash:
try:
os.mkdir(outpath[:pos])
except:
pass
def dlimage(url,verbose=False):
import numpy as np
import shutil
import time
import os
pos_slash=[pos for pos,c in enumerate(url) if c=="/"]
file_img=url[pos_slash[-1]+1:]
# path with time and random to ensure unique names
outpath=os.path.join('./'+str(time.time())+'_'+str(np.int32(np.random.random()*(10e6)))+'_'+file_img)
mkpath(outpath)
if verbose:
print "Downloading image from {} to {}.".format(url,outpath)
try:
r = requests.get(url, stream=True, timeout=imagedltimeout)
if r.status_code == 200:
with open(outpath, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
return outpath
except Exception as inst:
if verbose:
print "Download failed for img that should be saved at {} from url {}.".format(outpath,url)
print inst
return None
def get_SHA1_from_data(data):
import hashlib
sha1hash = None
try:
sha1 = hashlib.sha1()
sha1.update(data)
sha1hash = sha1.hexdigest().upper()
except:
print "Could not read data to compute SHA1."
return sha1hash
def get_SHA1_from_URL_StringIO(url,verbose=0):
from cStringIO import StringIO
import sys
if verbose>1:
print "Downloading image from {}.".format(url)
try:
r = requests.get(url, timeout=imagedltimeout)
if r.status_code == 200:
r_sio = StringIO(r.content)
if int(r.headers['content-length']) == 0:
del r
raise ValueError("Empty image.")
else:
data = r_sio.read()
sha1hash = get_SHA1_from_data(data)
del r,r_sio,data
return sha1hash
else:
raise ValueError("Incorrect status_code: {}.".format(r.status_code))
except Exception as inst:
print "Download failed from url {}. [{}]".format(url, inst)
return None
def get_SHA1_imginfo_from_URL_StringIO_PIL(url,verbose=0):
from cStringIO import StringIO
import requests
if verbose>1:
print "Downloading image from {}.".format(url)
try:
r = requests.get(url, timeout=imagedltimeout)
if r.status_code == 200:
r_sio = StringIO(r.content)
if int(r.headers['content-length']) == 0:
del r
raise ValueError("Empty image.")
else:
# with PIL, takes 1 second...
#start = time.time()
from PIL import Image
img = Image.open(r_sio)
w,h = img.size
format = img.format
del img
#print "PIL get image size and format:",time.time()-start
r_sio.seek(0)
data = r_sio.read()
# use a dict for img info so we can store any other info we may need
img_info = dict()
img_info['size'] = dict()
img_info['size']['width'] = w
img_info['size']['height'] = h
img_info['format'] = format
sha1hash = get_SHA1_from_data(data)
del r,r_sio,data
return sha1hash,img_info
else:
raise ValueError("Incorrect status_code: {}.".format(r.status_code))
except Exception as inst:
print "Download failed from url {}. [{}]".format(url, inst)
        return None, None
def get_SHA1_imginfo_from_URL_StringIO(url,verbose=0):
from cStringIO import StringIO
import requests
if verbose>1:
print "Downloading image from {}.".format(url)
try:
r = requests.get(url, timeout=imagedltimeout)
if r.status_code == 200:
r_sio = StringIO(r.content)
if int(r.headers['content-length']) == 0:
del r
raise ValueError("Empty image.")
else:
                # No PIL dependency, ~1e-5 s (vs ~1 s with the PIL version above).
#start = time.time()
w,h,format = get_image_size_and_format(r_sio)
#print "get_image_size_and_format:",time.time()-start
# Seek back to compute SHA1 on the whole binary content!
r_sio.seek(0)
data = r_sio.read()
# use a dict for img info so we can store any other info we may need
img_info = dict()
img_info['size'] = dict()
img_info['size']['width'] = w
img_info['size']['height'] = h
img_info['format'] = format
sha1hash = get_SHA1_from_data(data)
del r,r_sio,data
return sha1hash,img_info
else:
raise ValueError("Incorrect status_code: {}.".format(r.status_code))
except Exception as inst:
print "Download failed from url {}. [{}]".format(url, inst)
return None,None
def get_SHA1_from_URL(url,verbose=False):
if verbose:
print "Downloading image from {}.".format(url)
try:
r = requests.get(url, stream=True, timeout=imagedltimeout)
if r.status_code == 200:
sha1hash = get_SHA1_from_data(r.raw.data)
return sha1hash
except Exception as inst:
if verbose:
print "Download failed from url {}.".format(url)
print inst
return None
def get_b64_from_data(data):
import base64
b64_from_data = None
try:
b64_from_data = base64.b64encode(data)
except:
print "Could not read data to compute base64 string."
return b64_from_data
def get_b64_from_URL(url,verbose=False):
if verbose:
print "Downloading image from {}.".format(url)
try:
r = requests.get(url, stream=True, timeout=imagedltimeout)
if r.status_code == 200:
b64_from_data = get_b64_from_data(r.raw.data)
return b64_from_data
except Exception as inst:
if verbose:
print "Download failed from url {}.".format(url)
print inst
return None
def get_b64_from_URL_StringIO(url,verbose=False):
from StringIO import StringIO
if verbose:
print "Downloading image from {}.".format(url)
try:
r = requests.get(url, timeout=imagedltimeout)
if r.status_code == 200:
r_sio = StringIO(r.content)
data = r_sio.read()
b64_from_data = get_b64_from_data(data)
return b64_from_data
else:
print "Incorrect status_code {} for url {}".format(r.status_code,url)
except Exception as inst:
if verbose:
print "Download failed from url {}.".format(url)
print inst
return None
def get_SHA1_b64_from_URL(url,verbose=False):
if verbose:
print "Downloading image from {}.".format(url)
try:
r = requests.get(url, stream=True, timeout=imagedltimeout)
if r.status_code == 200:
sha1hash = get_SHA1_from_data(r.raw.data)
b64_from_data = get_b64_from_data(r.raw.data)
return sha1hash,b64_from_data
except Exception as inst:
if verbose:
print "Download failed from url {}.".format(url)
print inst
return None,None
if __name__ == "__main__":
import profile
import time
#profile.run('sha1 = get_SHA1_from_URL("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")')
#profile.run('sha1_sio = get_SHA1_from_URL_StringIO("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")')
#profile.run('sha1_sio, img_info = get_SHA1_imginfo_from_URL_StringIO("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")')
start = time.time()
sha1 = get_SHA1_from_URL("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")
print sha1,time.time()-start
start = time.time()
sha1_sio = get_SHA1_from_URL_StringIO("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")
print sha1_sio,time.time()-start
start = time.time()
sha1_sio, img_info = get_SHA1_imginfo_from_URL_StringIO("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")
print sha1_sio,img_info,time.time()-start
start = time.time()
sha1_sio, img_info = get_SHA1_imginfo_from_URL_StringIO_PIL("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")
print sha1_sio,img_info,time.time()-start | [
"[email protected]"
] | |
c1763ba5175c207c332aa37cf46bb1aa24f588dd | a82aa8430e32eaf62df0f44b20afb0e7d50c3d7b | /ippon/group_phase/serializers.py | ed76683a6121808cf8e4196a5a8a7ebad298988f | [
"MIT"
] | permissive | morynicz/ippon_back | 314daac99f79247b749dc46d59a645a6eb840263 | dce901bfc649c6f8efbbf0907654e0860606b3e3 | refs/heads/master | 2022-12-20T23:33:10.898738 | 2021-10-17T09:25:39 | 2021-10-17T09:25:39 | 124,851,931 | 0 | 2 | MIT | 2022-12-08T12:37:26 | 2018-03-12T07:43:17 | Python | UTF-8 | Python | false | false | 307 | py | from rest_framework import serializers
import ippon.models
class GroupPhaseSerializer(serializers.ModelSerializer):
class Meta:
model = ippon.models.group_phase.GroupPhase
fields = (
'id',
'tournament',
'fight_length',
'name'
)
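# Illustrative note (added): serializing a GroupPhase instance with
# GroupPhaseSerializer(instance).data produces a dict limited to the four
# fields listed above.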
| [
"[email protected]"
] | |
d9e361b8b4602c4fc876a19fa99b58a00dd92860 | d07432b9969c4bfa5b4e0d0d2ce7b74236d6a33d | /pygeodesy/gars.py | 28c68e4904de16df15b071f6765348679771ba8a | [
"MIT"
] | permissive | cwilder23/PyGeodesy | aeb8301a7134bc5c19b35335ac1aff1fa0933e02 | 6e2f989fb9040c525040270b5834b259b0b4d0d9 | refs/heads/master | 2022-11-12T00:25:26.253946 | 2020-06-24T17:39:54 | 2020-06-24T17:39:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,312 | py |
# -*- coding: utf-8 -*-
u'''Classes L{Garef} and L{GARSError} and several functions to encode,
decode and inspect I{Global Area Reference System} (GARS) references.
Transcribed from C++ class U{GARS
<https://GeographicLib.SourceForge.io/html/classGeographicLib_1_1GARS.html>}
by I{Charles Karney}. See also U{Global Area Reference System
<https://WikiPedia.org/wiki/Global_Area_Reference_System>} and U{NGA (GARS)
<https://Earth-Info.NGA.mil/GandG/coordsys/grids/gars.html>}.
@newfield example: Example, Examples
'''
from pygeodesy.basics import EPS1_2, isstr, NN, property_RO
from pygeodesy.dms import parse3llh # parseDMS2
from pygeodesy.errors import _ValueError
from pygeodesy.lazily import _ALL_LAZY
from pygeodesy.named import LatLon2Tuple, LatLonPrec3Tuple, nameof, \
_xnamed
from pygeodesy.units import Int, Lat, Lon, Precision_, Scalar_, \
Str, _xStrError
from math import floor
# all public contants, classes and functions
__all__ = _ALL_LAZY.gars + ('decode3', # functions
'encode', 'precision', 'resolution')
__version__ = '20.05.14'
_Digits = '0123456789'
_LatLen = 2
_LatOrig = -90
_Letters = 'ABCDEFGHJKLMNPQRSTUVWXYZ'
_LonLen = 3
_LonOrig = -180
_MaxPrec = 2
_MinLen = _LonLen + _LatLen
_MaxLen = _MinLen + _MaxPrec
_M1 = 2
_M2 = 2
_M3 = 3
_M_ = _M1 * _M2 * _M3
_LatOrig_M_ = _LatOrig * _M_
_LonOrig_M_ = _LonOrig * _M_
_LatOrig_M1 = _LatOrig * _M1
_LonOrig_M1_1 = _LonOrig * _M1 - 1
_Resolutions = tuple(1.0 / _ for _ in (_M1, _M1 * _M2, _M_))
def _2divmod2(ll, Orig_M_):
x = int(floor(ll * _M_)) - Orig_M_
i = (x * _M1) // _M_
x -= i * _M_ // _M1
return i, x
def _2fll(lat, lon, *unused):
'''(INTERNAL) Convert lat, lon.
'''
# lat, lon = parseDMS2(lat, lon)
return (Lat(lat, Error=GARSError),
Lon(lon, Error=GARSError))
# def _2Garef(garef):
# '''(INTERNAL) Check or create a L{Garef} instance.
# '''
# if not isinstance(garef, Garef):
# try:
# garef = Garef(garef)
# except (TypeError, ValueError):
# raise _xStrError(Garef, Str, garef=garef)
# return garef
def _2garstr2(garef):
'''(INTERNAL) Check a garef string.
'''
try:
n, garstr = len(garef), garef.upper()
if n < _MinLen or n > _MaxLen \
or garstr[:3] == 'INV' \
or not garstr.isalnum():
raise ValueError
return garstr, _2Precision(n - _MinLen)
except (AttributeError, TypeError, ValueError) as x:
raise GARSError(Garef.__name__, garef, txt=str(x))
def _2Precision(precision):
'''(INTERNAL) Return a L{Precision_} instance.
'''
return Precision_(precision, Error=GARSError, low=0, high=_MaxPrec)
class GARSError(_ValueError):
'''Global Area Reference System (GARS) encode, decode or other L{Garef} issue.
'''
pass
class Garef(Str):
'''Garef class, a named C{str}.
'''
_latlon = None # cached latlon property
_precision = None
# no str.__init__ in Python 3
def __new__(cls, cll, precision=1, name=NN):
'''New L{Garef} from an other L{Garef} instance or garef
C{str} or from a C{LatLon} instance or lat-/longitude C{str}.
@arg cll: Cell or location (L{Garef} or C{str}, C{LatLon}
or C{str}).
@kwarg precision: Optional, the desired garef resolution
and length (C{int} 0..2), see function
L{gars.encode} for more details.
@kwarg name: Optional name (C{str}).
@return: New L{Garef}.
@raise RangeError: Invalid B{C{cll}} lat- or longitude.
@raise TypeError: Invalid B{C{cll}}.
@raise GARSError: INValid or non-alphanumeric B{C{cll}}.
'''
if isinstance(cll, Garef):
g, p = _2garstr2(str(cll))
self = Str.__new__(cls, g)
self._latlon = LatLon2Tuple(*cll._latlon)
self._name = cll._name
self._precision = p # cll._precision
elif isstr(cll):
if ',' in cll:
lat, lon = _2fll(*parse3llh(cll))
cll = encode(lat, lon, precision=precision) # PYCHOK false
self = Str.__new__(cls, cll)
self._latlon = LatLon2Tuple(lat, lon)
else:
self = Str.__new__(cls, cll.upper())
self._decode()
else: # assume LatLon
try:
lat, lon = _2fll(cll.lat, cll.lon)
except AttributeError:
raise _xStrError(Garef, cll=cll) # Error=GARSError
cll = encode(lat, lon, precision=precision) # PYCHOK false
self = Str.__new__(cls, cll)
self._latlon = LatLon2Tuple(lat, lon)
if self._precision is None:
self._precision = _2Precision(precision)
if name:
self.name = name
return self
def _decode(self):
# cache all decoded attrs
lat, lon, p = decode3(self)
if self._latlon is None:
self._latlon = LatLon2Tuple(lat, lon)
if self._precision is None:
self._precision = p
@property_RO
def latlon(self):
'''Get this garef's (center) lat- and longitude (L{LatLon2Tuple}).
'''
if self._latlon is None:
self._decode()
return self._latlon
@property_RO
def precision(self):
'''Get this garef's precision (C{int}).
'''
if self._precision is None:
self._decode()
return self._precision
def toLatLon(self, LatLon, **kwds):
'''Return (the center of) this garef cell as an instance
of the supplied C{LatLon} class.
@arg LatLon: Class to use (C{LatLon}).
@kwarg kwds: Optional keyword arguments for B{C{LatLon}}.
@return: This garef location (B{C{LatLon}}).
@raise GARSError: Invalid B{C{LatLon}}.
'''
if LatLon is None:
raise GARSError(LatLon=LatLon)
return self._xnamed(LatLon(*self.latlon, **kwds))
def decode3(garef, center=True):
'''Decode a C{garef} to lat-, longitude and precision.
@arg garef: To be decoded (L{Garef} or C{str}).
@kwarg center: If C{True} the center, otherwise the south-west,
lower-left corner (C{bool}).
@return: A L{LatLonPrec3Tuple}C{(lat, lon, precision)}.
@raise GARSError: Invalid B{C{garef}}, INValid, non-alphanumeric
or bad length B{C{garef}}.
'''
def _Error(i):
return GARSError(garef='%r[%s]' % (garef, i))
def _ll(chars, g, i, j, lo, hi):
ll, b = 0, len(chars)
for i in range(i, j):
d = chars.find(g[i])
if d < 0:
raise _Error(i)
ll = ll * b + d
if ll < lo or ll > hi:
raise _Error(j)
return ll
def _ll2(lon, lat, g, i, m):
d = _Digits.find(g[i])
if d < 1 or d > m * m:
raise _Error(i)
d, r = divmod(d - 1, m)
lon = lon * m + r
lat = lat * m + (m - 1 - d)
return lon, lat
g, precision = _2garstr2(garef)
lon = _ll(_Digits, g, 0, _LonLen, 1, 720) + _LonOrig_M1_1
lat = _ll(_Letters, g, _LonLen, _MinLen, 0, 359) + _LatOrig_M1
if precision > 0:
lon, lat = _ll2(lon, lat, g, _MinLen, _M2)
if precision > 1:
lon, lat = _ll2(lon, lat, g, _MinLen + 1, _M3)
if center: # ll = (ll * 2 + 1) / 2
lon += 0.5
lat += 0.5
r = _Resolutions[precision] # == 1.0 / unit
r = LatLonPrec3Tuple(Lat(lat * r, Error=GARSError),
Lon(lon * r, Error=GARSError), precision)
return _xnamed(r, nameof(garef))
def encode(lat, lon, precision=1): # MCCABE 14
'''Encode a lat-/longitude as a C{garef} of the given precision.
@arg lat: Latitude (C{degrees}).
@arg lon: Longitude (C{degrees}).
@kwarg precision: Optional, the desired C{garef} resolution
and length (C{int} 0..2).
@return: The C{garef} (C{str}).
@raise RangeError: Invalid B{C{lat}} or B{C{lon}}.
@raise GARSError: Invalid B{C{precision}}.
@note: The C{garef} length is M{precision + 5} and the C{garef}
resolution is B{30′} for B{C{precision}} 0, B{15′} for 1
and B{5′} for 2, respectively.
'''
def _digit(x, y, m):
return _Digits[m * (m - y - 1) + x + 1],
def _str(chars, x, n):
s, b = [], len(chars)
for i in range(n):
x, i = divmod(x, b)
s.append(chars[i])
return tuple(reversed(s))
p = _2Precision(precision)
lat, lon = _2fll(lat, lon)
if lat == 90:
lat *= EPS1_2
ix, x = _2divmod2(lon, _LonOrig_M_)
iy, y = _2divmod2(lat, _LatOrig_M_)
g = _str(_Digits, ix + 1, _LonLen) + _str(_Letters, iy, _LatLen)
if p > 0:
ix, x = divmod(x, _M3)
iy, y = divmod(y, _M3)
g += _digit(ix, iy, _M2)
if p > 1:
g += _digit(x, y, _M3)
return ''.join(g)
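# Illustrative sketch (added, not part of the original module): a simple round
# trip through encode and decode3; the coordinates are arbitrary example values.
def _example_round_trip(lat=48.86, lon=2.34, prec=2):
    '''Encode a point, then decode the garef back to its cell center.'''
    g = encode(lat, lon, precision=prec)  # garef string, 5' resolution at prec=2
    return g, decode3(g, center=True)  # LatLonPrec3Tuple(lat, lon, precision)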
def precision(res):
'''Determine the L{Garef} precision to meet a required (geographic)
resolution.
@arg res: The required resolution (C{degrees}).
@return: The L{Garef} precision (C{int} 0..2).
@raise ValueError: Invalid B{C{res}}.
@see: Function L{gars.encode} for more C{precision} details.
'''
r = Scalar_(res, name='res')
for p in range(_MaxPrec):
if resolution(p) <= r:
return p
return _MaxPrec
def resolution(prec):
'''Determine the (geographic) resolution of a given L{Garef} precision.
@arg prec: The given precision (C{int}).
@return: The (geographic) resolution (C{degrees}).
@raise ValueError: Invalid B{C{prec}}.
@see: Function L{gars.encode} for more C{precision} details.
'''
p = Int(prec, name='prec', Error=GARSError)
return _Resolutions[max(0, min(p, _MaxPrec))]
# **) MIT License
#
# Copyright (C) 2016-2020 -- mrJean1 at Gmail -- All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
| [
"[email protected]"
] | |
4f613303615c55d2729147ae5cb8c6cd97c4ca83 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02383/s686099688.py | 8350ca148ab207ab25ea1b72f0d85043ae5cfc38 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | dice = input().split()
direction = list(input())
dice2 = []
for i in range(len(direction)):
dice2 = dice
if direction[i] == 'E':
dice = [dice2[3],dice2[1],dice2[0],dice2[5],dice2[4],dice2[2]]
elif direction[i] == 'N':
dice = [dice2[1],dice2[5],dice2[2],dice2[3],dice2[0],dice2[4]]
elif direction[i] == 'S':
dice = [dice2[4],dice2[0],dice2[2],dice2[3],dice2[5],dice2[1]]
else:
dice = [dice2[2],dice2[1],dice2[5],dice2[0],dice2[4],dice2[3]]
print('{0}'.format(int(dice[0])))
| [
"[email protected]"
] | |
a778be45a738c428c09f03ec65096a832a8df811 | b66c83dbdb1181d3274cfb309637c0bdf590553f | /build/extrinsic_Industrial/intelligent_actuator/robo_cylinder/catkin_generated/pkg.develspace.context.pc.py | 50404616a97e27fc816577449f0fc4dc7749b87a | [] | no_license | Sinchiguano/repo_project | 9079c80f6544cbe39902c68f61f421bd7cfd55e6 | 666da1d6d91704302b69ec9e0b0d30db3a709f30 | refs/heads/master | 2020-04-30T18:06:16.162025 | 2019-04-25T18:47:58 | 2019-04-25T18:47:58 | 177,000,277 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/casch/yumi_depends_ws/devel/include".split(';') if "/home/casch/yumi_depends_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;rospy;std_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robo_cylinder"
PROJECT_SPACE_DIR = "/home/casch/yumi_depends_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
330465e9c7046e35f6e9643ad0783b8b0f75b9e3 | 466912406272829982f75854cf0104c6ce8c9814 | /data/nlp/task/tn.py | 1f80c0f141aa16e10b8cdf4cd2f2570f501f9aef | [] | no_license | logonmy/Codes | 9631fa103fc499663361fa7eeccd7cedb9bb08e4 | 92723efdeccfc193f9ee5d0ab77203c254f34bc2 | refs/heads/master | 2021-09-21T18:07:22.985184 | 2018-08-30T05:53:26 | 2018-08-30T05:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,710 | py | # -*- coding: utf-8 -*-
__author__ = 'victor'
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '..'))
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../util'))
import db as dbcon
from fundtag import FundingClassifier
from common import dbutil, dicts
from common.chunk import SentenceChunker
from common.feed import NewsFeeder
from common.dsutil import FrozenLenList
from news.mentioned import CompanyLinker
from task.news_postprocess import NewsTagger
from score.similar_news import ScorerNews
import re
import time
import codecs
import logging
import pymongo
from copy import copy
from itertools import chain
from collections import Counter
from datetime import datetime, timedelta
import fasttext
# logging
logging.getLogger('task_news').handlers = []
logger_tn = logging.getLogger('task_news')
logger_tn.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s %(asctime)s %(levelname)-8s %(message)s', '%a, %d %b %Y %H:%M:%S',)
stream_handler = logging.StreamHandler(sys.stderr)
stream_handler.setFormatter(formatter)
logger_tn.addHandler(stream_handler)
viptag_model = os.path.join(os.path.split(os.path.realpath(__file__))[0], '../keywords/models/20180319.bin')
class NoEventDetectError(Exception):
def __init__(self):
Exception.__init__(self)
class NewsTask(object):
def __init__(self):
global logger_tn, viptag_model
self.mongo = dbcon.connect_mongo()
self.db = dbcon.connect_torndb()
# self.fund_transformer, self.fund_classifier = get_fund_classifier()
self.funding_clf = FundingClassifier()
self.company_linker = CompanyLinker()
self.sentence_chunker = SentenceChunker()
self.snscorer = ScorerNews()
self.news_feeder = NewsFeeder()
self.news_tagger = NewsTagger()
self.viptag_clf = fasttext.load_model(viptag_model)
self.tags_l2 = dicts.get_vip_succession()
self.trusted_fund_source = [13800]
self.non_trusted_fund_source = [13848, 13866]
self.important_sources = dicts.get_important_news_source()
self.tags = self.load_tags()
category_file = os.path.join(os.path.split(os.path.realpath(__file__))[0], 'files/category')
self.categories = {int(line.split('#')[0]): line.strip().split('#')[1].split(',')
for line in codecs.open(category_file, encoding='utf-8') if not line.startswith('#')}
self.fund_tips = [u'融资', u'融资方', u'领投', u'跟投']
self.fund_keywords = [u'融资', u'融资方', u'领投', u'跟投', u'投资', u'收购']
self.round_tips = [u'A轮', u'B轮', u'C轮', u'D轮', u'Pre-A', u'A+轮', u'天使轮', u'天使融资', u'B+轮', u'种子轮',
u'战略性融资', u'战略投资', u'IPO']
self.amount_tips = [u'数千万', u'数百万', u'数十万', u'上百万', u'上千万', u'上亿', u'几十万', u'几百万', u'几千万',
u'百万级', u'千万级', u'亿级',
u'\d+\.*\d*万', u'\d+\.*\d*百万', u'\d+\.*\d*千万', u'\d+\.*\d*亿']
self.currency_tips = [u'人民币', u'美元', u'美金']
self.company_size = 5
self.brief_size = 5
self.tags_l2_min_appearance = 3
self.life_circle = 25
self.max_life = 100
self.latest_funding_companies = FrozenLenList(200)
self.date_3_days_ago = datetime.utcnow() - timedelta(days=3)
logger_tn.info('Fund event extract model inited')
def process_all(self):
global logger_tn
while True:
for record in list(self.mongo.article.news.find({'type': {'$in': [60001, 60003, 60005, 60006,
60007, 60008, 60009]},
'fund_extract': None}).sort('createTime',
pymongo.DESCENDING)):
if record.get('source', 0) == 13022:
self.mongo.article.news.update({'_id': record['_id']}, {'$set': {'fund_extract': -2}})
continue
if record.get('date') and record.get('date') < datetime.now() - timedelta(days=7):
self.mongo.article.news.update({'_id': record['_id']}, {'$set': {'fund_extract': -3}})
continue
if self.life_circle <= 0:
self.reload_models()
logger_tn.info('Model reloaded')
try:
logger_tn.info('Processing, %s' % record['_id'])
self.process_piece(record['_id'], record)
logger_tn.info('%s processed' % record['_id'])
self.life_circle -= 1
except NoEventDetectError, nede:
logger_tn.exception('%s not extracted, %s' % (record['_id'], nede))
self.mongo.article.news.update({'_id': record['_id']}, {'$set': {'fund_extract': -1}})
except Exception, e:
logger_tn.exception('%s failed, %s' % (record['_id'], e))
time.sleep(300)
logger_tn.info('Nice sleep')
def load_tags(self):
global logger_tn
try:
tags = {dbutil.get_tag_info(self.db, tid, 'name'): tid for tid in dbutil.get_industry_tags(self.db) if tid}
logger_tn.info('Tags: %s' % ','.join(tags.keys()))
except Exception, e:
tags = {}
logger_tn.exception('Fail to load tags, due to %s' % e)
return tags
def reload_models(self):
self.life_circle = self.max_life
self.company_linker = CompanyLinker()
self.tags = self.load_tags()
self.date_3_days_ago = datetime.utcnow() - timedelta(days=3)
for funding in dbutil.get_funding_by_date(self.db, (self.date_3_days_ago, datetime.now())):
if funding and funding.companyId:
self.latest_funding_companies.append(funding.companyId)
def process_piece(self, nid, given_record=None):
global logger_tn
record = self.mongo.article.news.find({'_id': nid}).limit(1)[0] if not given_record else given_record
# generate report task
if record.get('type', 0) == 60006:
task = {
'news_id': record['_id'],
'news_date': record['date'],
'type': 'report',
'createTime': datetime.utcnow(),
'processStatus': int(0)
}
self.mongo.task.news.update({'news_id': str(record['_id'])}, task, True)
self.mongo.article.news.update({'_id': record['_id']}, {'$set': {'fund_extract': 0}})
return
# fund classify
if record.get('category', 0) == 60101 and \
((record.get('category_confidence') is None) or record.get('category_confidence', 0) == 1):
is_fund, strict = True, False
elif record.get('source', 0) in self.non_trusted_fund_source:
is_fund, strict = False, True
elif record.get('source', 0) in self.trusted_fund_source:
is_fund, strict = True, False
else:
# label = self.fund_classifier.predict(
# self.fund_transformer.transform([' '.join(self.news_feeder.feed(record))]))[0]
label = self.funding_clf.predict(record)
is_fund = True if label == 1 else False
strict = True
is_fund_before_review = copy(is_fund)
is_fund = self.__review_fund(is_fund, record, strict)
logger_tn.info('Processing %s, fund %s, before review %s' % (nid, is_fund, is_fund_before_review))
# task base
if is_fund:
task = self.__generate_fund_task(record)
self.mongo.article.news.update({'_id': record['_id']}, {'$set': {'fund_extract': 1}})
else:
task = self.__generate_news_task(record)
if not task:
self.mongo.article.news.update({'_id': record['_id']}, {'$set': {'fund_extract': -6,
"processStatus": -7}})
logger_tn.info('Classify as buyao news, %s' % nid)
return
self.mongo.article.news.update({'_id': record['_id']}, {'$set': {'fund_extract': 0}})
task['categories'] = self.classify_category(record, task.get('categories', []))
# tags
tags = []
tags.extend(self.classify_tags(record))
# sectors
try:
tags.extend(self.classify_sector_tags(record, task.get('categories')))
except Exception, e:
logger_tn.exception('Fail to classify sector for %s' % record['_id'])
task['newsTags'] = tags
# sentiment
task['sentiment'] = 578361
# investorIds
task['investorIds'] = []
# dups company ids
task['companyIds_139'] = copy(task.get('companyIds'))
self.mongo.task.news.update({'news_id': str(record['_id'])}, task, True)
self.news_tagger.label_11800_record(record, record['_id'])
# relevant news
# relevant = self.snscorer.get_similar_news(nid)
# relevant = [str(task['_id'])
# for task in [self.mongo.task.news.find_one({'news_id': str(r)}) for r in relevant] if task]
# if relevant:
# self_id = str(self.mongo.task.news.find_one({'news_id': str(record['_id'])})['_id'])
# relevant.append(self_id)
# for tnid in set(relevant):
# my_relevant = set(relevant)
# my_relevant.remove(tnid)
# self.mongo.task.news.update({'_id': ObjectId(tnid)}, {'$set': {'relevant': list(my_relevant)}})
def __review_fund(self, is_fund, record, strict):
if not is_fund:
return is_fund
if not strict:
return is_fund
# other non fund source
if record.get('source', 0) not in self.trusted_fund_source:
try:
task = self.extract_funding_from_contents(**record)
if task and task.get('items'):
return True
return False
except TypeError:
return False
return is_fund
def __generate_fund_task(self, record):
try:
task = self.extract_funding_from_contents(**record)
except TypeError:
task = {}
task['news_id'] = str(record['_id'])
task['news_date'] = record['date']
task['createTime'] = datetime.utcnow()
task['processStatus'] = int(0)
task['type'] = 'fund'
task['categories'] = [578349]
task['section'] = 'step1'
task['subtype'] = self.classify_fund_subtype(task)
return task
def __generate_news_task(self, record):
# no need to have
if record.get('type', 60008) == 60008:
return False
# from weixin, needed by topic
if self.news_tagger.label_11800_record(record, record['_id']) \
or self.news_tagger.label_11810_record(record, record['_id']):
return {'news_id': str(record['_id']),
'news_date': record['date'],
'type': 'important',
'createTime': datetime.utcnow(),
'processStatus': int(0),
'section': 'step1'}
# check source
if record.get('source') in self.important_sources:
news_important = True
else:
news_important = False
# extract companies from news
companies = map(lambda y: y[0],
sorted(dict(self.company_linker.find_from_record(record)).items(), key=lambda x: -x[1]))[:3]
cids = map(lambda x: int(x), chain(*[dbutil.get_id_from_name(self.db, name) for name in companies]))
trusted_cid = record.get('companyId', False)
if trusted_cid and trusted_cid not in cids:
cids.append(trusted_cid)
if not (cids or news_important):
return False
task_type = 'important' if news_important else 'check'
task = {'news_id': str(record['_id']),
'news_date': record['date'],
'type': task_type,
'companyIds': cids,
'createTime': datetime.utcnow(),
'processStatus': int(0),
'section': 'step1'}
return task
def classify_category(self, record, prejudgement):
results = prejudgement
# 汇总类新闻
if u'日报' in record['title'] or u'周报' in record['title']:
return [578358]
for cate_id, category in self.categories.items():
if len(category) == 1:
if category[0] in (set(record.get('categoryNames', [])) | set(record.get('original_tags', []) or [])):
results.append(int(cate_id))
else:
if set(self.news_feeder.feed(record)) & set(category):
results.append(int(cate_id))
if len(results) > 0:
result = [results[0]]
return list(set(int(result) for result in results)) if results else [578359]
def classify_tags(self, record):
news_content = ' '.join([c.get('content', '') for c in record.get('contents')])
for tag, tid in self.tags.items():
if news_content.count(tag) > 5 or (float(news_content.count(tag)) / len(news_content) > 0.005):
yield tid
def classify_sector_tags(self, record, categories):
desc = ' '.join(self.news_feeder.feed(record))
if desc and len(desc) > 20:
classifier_vips = {int(tag.replace(u'__label__', '')): weight for (tag, weight) in
self.viptag_clf.predict_proba([desc], 2)[0]}
if max(classifier_vips.values()) < 0.25:
sectors = [(999, 0)]
elif max(classifier_vips.values()) - min(classifier_vips.values()) < 0.15:
sectors = [(dbutil.get_sector_from_tag(self.db, tid), confidence)
for (tid, confidence) in sorted(classifier_vips.iteritems(), key=lambda x: -x[1])]
else:
sectors = [(dbutil.get_sector_from_tag(self.db, tid), confidence)
for (tid, confidence) in sorted(classifier_vips.iteritems(), key=lambda x: -x[1])]
sectors = [sectors[0]]
else:
sectors = [(999, 0)]
sids = [sid for (sid, _) in sectors]
if 7 in sids and u'区块链' in desc and 20006 not in sids:
sids.remove(7)
sids.append(20006)
confidences = []
else:
confidences = [confidence for (_, confidence) in sectors]
tags = [dbutil.get_tag_from_sector(self.db, sid) for sid in sids]
tags = [t for t in tags if t]
if {128, 578353, 578349, 578351, 578356, 578351} & set(categories):
self.mongo.article.news.update({'_id': record['_id']}, {'$set': {'sectors': sids,
'features': tags,
'sector_confidence': confidences}})
return tags
def extract_funding_from_contents(self, **kwargs):
results = {}
# data with format of a mongodb news record
data = dict(kwargs)
# splits all sentences
contents = [piece.get('content') for piece in data.get('contents') if piece.get('content', '').strip()]
sentences = list(self.sentence_chunker.chunk(data.get('title'), *contents))
# find all company candidates
company_candidates = self.company_linker.find_candidates(data)
company_re = '|'.join(company_candidates.keys()).replace('.', '\.').replace('*', '\*')\
.replace('+', '\+').replace('?', '\?').replace('(', '\(').replace(')', '\)')
# fund event sentences
event_sentences = [i for i in xrange(len(sentences)) if self.__is_event_sentence(sentences[i])]
# extract company candidates, fund round, fund amount, fund currency
try:
for event_index in event_sentences:
results.setdefault('companyIds', []).extend(
re.findall(company_re, sentences[event_index]))
rounds = re.findall('|'.join(self.round_tips), sentences[event_index])
amounts = re.findall('|'.join(self.amount_tips), sentences[event_index])
supports = re.findall('|'.join(self.fund_tips), sentences[event_index])
currency = re.findall('|'.join(self.currency_tips), sentences[event_index])
support = sum([int(len(item) > 0) for item in [rounds, amounts, currency, supports]])
if sentences[event_index] in set(item.get('brief') for item in results.get('items', [])):
continue
results.setdefault('items', []).append({'brief': sentences[event_index],
'round': ' '.join(rounds),
'amount': ' '.join(amounts),
'currency': ' '.join(currency),
'sort': support + len(sentences[event_index])/1000.0})
except Exception, e:
pass
if self.__is_event_title(data.get('title', '')):
results.setdefault('items', []).append({'brief': data.get('title'),
'sort': 1})
# organize
if results:
results['companyIds'] = [int(cid) for cid in set(chain(*[dbutil.get_id_from_name(self.db, name)
for name in set(results.get('companyIds', []))
if name.strip()]))]
results['items'] = sorted(results.get('items'),
key=lambda x: x.get('sort', 0), reverse=True)[:self.brief_size]
return results
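    # Illustrative note (added): for a typical funding article, the dict built by
    # extract_funding_from_contents looks roughly like
    #     {'companyIds': [<ids of linked companies>],
    #      'items': [{'brief': <sentence>, 'round': u'A轮', 'amount': u'数千万',
    #                 'currency': u'人民币', 'sort': <heuristic score>}, ...]}
    # with at most self.brief_size items, ordered by the 'sort' score.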
def classify_fund_subtype(self, task):
company_candidates = task.get('companyIds', [])
if set(company_candidates) & set(self.latest_funding_companies):
subtype = 'duplicate'
elif self.mongo.task.news.find({'companyIds': {'$in': company_candidates}, 'processStatus': 1, 'type': 'fund',
'news_date': {'$gt': self.date_3_days_ago}}).count() > 0:
subtype = 'duplicate'
else:
subtype = 'major'
return subtype
def __is_event_sentence(self, sentence):
for fund_tip in self.fund_tips:
if fund_tip in sentence:
return True
for round_tip in self.round_tips:
if round_tip in sentence:
return True
return False
def __is_event_title(self, sentence):
for fund_keyword in self.fund_keywords:
if fund_keyword in sentence:
return True
return False
if __name__ == '__main__':
print __file__
logger_tn.info('News Task Model Initing')
nt = NewsTask()
logger_tn.info('News Task Model Inited')
nt.process_all()
# fe.process_piece(ObjectId('57ce684d4877af20e02b12a6'))
| [
"[email protected]"
] | |
c4228442ea7bb005562f42293f54db22ffd4a496 | 85738a4cacd2a6d93c4487cf856c883c3d9d314a | /tests/web/settings.py | 9a68dbff38a085e8bd59eb1e7053c05ef3ac066a | [
"Apache-2.0"
] | permissive | Kitware/tangelo | cc0cb1372bc5728e0585f739a9412a58a5069069 | 470034ee9b3d7a01becc1ce5fddc7adc1d5263ef | refs/heads/develop | 2023-08-28T11:57:57.909917 | 2016-01-25T15:56:18 | 2016-01-25T15:56:18 | 6,885,877 | 40 | 21 | Apache-2.0 | 2018-03-05T01:24:16 | 2012-11-27T15:38:26 | JavaScript | UTF-8 | Python | false | false | 320 | py | import cherrypy
import tangelo
# This service reports the value of cherrypy's thread pool setting
def run(**kwargs):
if kwargs.get('pool'):
tangelo.util.set_server_setting('server.thread_pool', int(kwargs['pool']))
response = 'pool="%r"' % cherrypy.config.get('server.thread_pool')
return response
| [
"[email protected]"
] | |
718d0f430682a0b47ec3c0d95755ecd74fa8612a | 7134e45563b2045837296cb5c4f1974a025e4f2b | /.history/MathmeticPracticeTimeLimit_20200411174234.py | 16d18819de876f7987bf70910f5d9fe1a1e15342 | [] | no_license | Nordenbox/Nordenbox_Python_Fundmental | dca175c471ac2c64453cc4bcf291dd0773be4add | 9c79fd5d0dada580072b523d5aa1d72f996e3a22 | refs/heads/master | 2022-01-21T06:37:15.084437 | 2022-01-06T13:55:30 | 2022-01-06T13:55:30 | 240,154,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,601 | py | import random
import time
import os
import subprocess
def mode_choice():
print('请选用练习模式:\n1,选择练习题目数。\n')
print('2, 选择练习时间')
choice_num = int(input('您的选择: '))
if choice_num == 1:
multipile_issues()
else:
multipile_time()
def multipile_issues():
    practicese_times = int(input('您要做几道题: '))
    corrected = 0
    wrong = 0
    sum_wrong_list = []
    minos_wrong_list = []
    for i in range(practicese_times):
        c, w, s_wrong, m_wrong = practicese_issues()
        corrected += c
        wrong += w
        sum_wrong_list.extend(s_wrong)
        minos_wrong_list.extend(m_wrong)
    print("正确为%d,错误为%d。" % (corrected, wrong),
          "你的分数是%d分" % (corrected / practicese_times * 100))
    if sum_wrong_list != [] and minos_wrong_list != []:
        print("错误的题目是:\n", sum_wrong_list,
              "\n", minos_wrong_list)
    elif sum_wrong_list == [] and minos_wrong_list != []:
        print("错误的题目是:\n", minos_wrong_list)
    elif sum_wrong_list != [] and minos_wrong_list == []:
        print("错误的题目是:\n", sum_wrong_list)
def multipile_time():
pass
def practicese_issues():
    # Ask a single addition or subtraction question and return the outcome.
    corrected = 0
    wrong = 0
    sum_wrong_list = []
    minos_wrong_list = []
    plused = random.randint(1, 20)
    plus = random.randint(1, 20)
    p = random.randint(0, 1)
    if p == 1:
        sum = int(input("%d+%d= " % (plused, plus)))
        if sum == plused + plus:
            corrected = corrected + 1
        else:
            sum_wrong_list.append("%d+%d= %d" % (plused, plus, sum))
            wrong = wrong + 1
    else:
        if plused < plus:
            minos = int(input("%d-%d= " % (plus, plused)))
            if minos == plus - plused:
                corrected = corrected + 1
            else:
                minos_wrong_list.append(
                    "%d-%d=%d " % (plus, plused, minos))
                wrong = wrong + 1
        else:
            minos = int(input("%d-%d= " % (plused, plus)))
            if minos == plused - plus:
                corrected = corrected + 1
            else:
                minos_wrong_list.append(
                    "%d-%d=%d " % (plused, plus, minos))
                wrong = wrong + 1
    return corrected, wrong, sum_wrong_list, minos_wrong_list
mode_choice()
| [
"[email protected]"
] | |
6dcb30fec438ec2e9fff5f0b0626da1774055b61 | 653eaef652627b155569b5fe9ab9bb3607fc1e78 | /alg/discriminative-jackknife/models/BNN.py | 3a17799407f89d0061443791a187ff674aeeaeab | [
"BSD-3-Clause"
] | permissive | IlyaTrofimov/mlforhealthlabpub | 11ab86a83bd2ffd2574364a956b322b0c62406ae | 190cbad2faae9e559ffe7a68143df7f747d70adc | refs/heads/main | 2023-04-16T03:58:38.423288 | 2021-04-21T10:22:43 | 2021-04-21T10:22:43 | 358,528,623 | 0 | 0 | NOASSERTION | 2021-04-16T08:25:26 | 2021-04-16T08:25:25 | null | UTF-8 | Python | false | false | 1,534 | py | import torch
import torch.nn as nn
class BNN(nn.Module):
def __init__(self, *layers):
super(BNN, self).__init__()
self.layers, self.params = [], nn.ParameterList()
for layer in layers:
self.layers.append(layer)
self.params.extend([*layer.parameters()]) # register module parameters
def forward(self, x, mode):
if mode == 'forward':
net_kl = 0
for layer in self.layers:
x, layer_kl = layer.forward(x, mode)
net_kl += layer_kl
return x, net_kl
else:
for layer in self.layers:
x = layer.forward(x, mode)
return x
def Forward(self, x, y, n_samples, type):
assert type in {'Gaussian', 'Softmax'}, 'Likelihood type not found'
# Sample N samples and average
total_kl, total_likelh = 0., 0.
for _ in range(n_samples):
out, kl = self.forward(x, mode='forward')
# Gaussian output (with unit var)
# lklh = torch.log(torch.exp(-(y - out) ** 2 / 2e-2) / math.sqrt(2e-2 * math.pi)).sum()
if type == 'Gaussian':
lklh = (-.5 * (y - out) ** 2).sum()
else: # softmax
lklh = torch.log(out.gather(1, y)).sum()
total_kl += kl
total_likelh += lklh
return total_kl / n_samples, total_likelh / n_samples
@staticmethod
def loss_fn(kl, lklh, n_batch):
return (kl / n_batch - lklh).mean()
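# Illustrative usage sketch (added, not part of the original file). The layer
# below is a stand-in assumption: BNN only requires that each layer's
# forward(x, 'forward') return an (output, kl_term) pair.
class _IdentityBayesLayer(nn.Module):
    """Minimal stand-in layer with zero KL, used only to demonstrate the API."""
    def forward(self, x, mode):
        if mode == 'forward':
            return x, torch.zeros(())
        return x
def _example_training_step(x, y, n_batch=1):
    """One step: accumulate KL and likelihood over samples, then combine."""
    net = BNN(_IdentityBayesLayer())
    kl, lklh = net.Forward(x, y, n_samples=4, type='Gaussian')
    return BNN.loss_fn(kl, lklh, n_batch)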
| [
"[email protected]"
] | |
59b111e7c48d6899e63795c608a24e3d51ca5fb3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02706/s901477236.py | e5cd388ef56bc4369f68551299e42a2e436c4640 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | #import numpy as np
#import math
#from decimal import *
#from numba import njit
#@njit
def main():
N,M = map(int, input().split())
A = list(map(int, input().split()))
s = sum(A)
if s > N:
print(-1)
else:
print(N-s)
main()
| [
"[email protected]"
] | |
010427a5856703a295156243fe70f85976250e8c | 2f17bb840634eab6f08a7bb488781f6951ce6b47 | /AOJ_courses/ITP1_4_D.py | 42e76330f54037ab4673edbc5a65b21db34f3da8 | [] | no_license | NHRD/Atcoderpractice | 3d5c1175e147a0bdbacf46f51b23db1a1b2dea22 | 958835069c84791afa36d119298b742d53e86ae0 | refs/heads/master | 2022-12-15T17:30:10.310049 | 2020-09-19T13:39:07 | 2020-09-19T13:39:07 | 279,771,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | n = int(input())
nums = list(map(int, input().split()))
nums = sorted(nums)
sumnum = sum(nums)
print("{} {} {}" .format(nums[0], nums[len(nums)-1], sumnum)) | [
"[email protected]"
] | |
80e9eeda1efc064f19b56be3222ec30e6dd1564d | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5744014401732608_1/Python/feigao/main.py | 9d1ebc3970ed5aa77f1c7df72eca82287fe1e3c1 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,015 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from functools import wraps, lru_cache
def io_wrapper(func):
@wraps(func)
def _func(in_file=None, out_file=None, lines_per_case=1):
in_buffers = []
if in_file is None:
while True:
try:
s = input()
if s.strip():
in_buffers.append(s.strip())
except:
break
else:
with open(in_file, 'r') as f:
in_buffers.extend([line.strip() for line in f.read().strip().splitlines()])
total_case_nums = int(in_buffers[0])
in_buffers = in_buffers[1:]
# print(in_buffers)
assert len(in_buffers) == total_case_nums * lines_per_case
out_buffers = []
for case_id in range(1, total_case_nums + 1):
case_result_str = func('\n'.join(in_buffers[(case_id - 1) * lines_per_case: case_id * lines_per_case]))
out_buffers.append('Case #{}: {}'.format(case_id, case_result_str))
if out_file is not None and os.path.exists(out_file):
print('Out file {} already exists!'.format(out_file), file=sys.stderr)
out_buffers = None
if out_file is None:
print('\n'.join(out_buffers))
else:
with open(out_file, 'w') as f:
f.write('\n'.join(out_buffers))
return _func
@io_wrapper
@lru_cache(maxsize=None)
def solution(line_str):
return "Answer Str"
@io_wrapper
def a(lines):
n, *parties = map(int, lines.split())
# print(n, parties)
    results = []
    total = sum(parties)
    import string
    names = string.ascii_uppercase[:n]
    numbers = dict(zip(names, parties))
    while total > 0:
        m = max(numbers, key=lambda c: numbers[c])
        results.append(m)
        total -= 1
        v = numbers[m]
        if v == 1:
            del numbers[m]
        else:
            numbers[m] = v - 1
    if len(results) % 2 == 1:
        results.insert(0, '')
    return ' '.join(a + b for a, b in zip(results[::2], results[1::2]))
@io_wrapper
def b(lines):
# print(lines)
b, m = map(int, lines.split())
if m > 2 ** (b - 2):
return 'IMPOSSIBLE'
resp = 'POSSIBLE'
if m == 2 ** (b - 2):
matrix = [[1 if r < c else 0 for c in range(b)] for r in range(b)]
else:
matrix = [[1 if r < c < b - 1 else 0 for c in range(b)] for r in range(b)]
for r in range(b):
if m & (2 ** r):
matrix[r + 1][b - 1] = 1
return '\n'.join([resp] + [''.join(map(str, row)) for row in matrix])
def c():
pass
if __name__ == '__main__':
# solution()
# a('A-sample.txt', lines_per_case=2)
# a('A-small-attempt0.in', lines_per_case=2)
# a('A-large.in.txt', 'A-large.out.txt', lines_per_case=2)
# b('B-sample.txt')
# b('B-small-attempt0.in.txt', 'B-small-attempt0.out.txt')
b('B-large.in', 'B-large.out.txt')
pass
| [
"[email protected]"
] | |
e9880143e1cf66275f3cb00db8e80924fd0897d1 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/keystone_create_protocol_request.py | ad11aa561d4c11a287678808d35c64ee2c118655 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,463 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class KeystoneCreateProtocolRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'idp_id': 'str',
'protocol_id': 'str',
'body': 'KeystoneCreateProtocolRequestBody'
}
attribute_map = {
'idp_id': 'idp_id',
'protocol_id': 'protocol_id',
'body': 'body'
}
def __init__(self, idp_id=None, protocol_id=None, body=None):
"""KeystoneCreateProtocolRequest - a model defined in huaweicloud sdk"""
self._idp_id = None
self._protocol_id = None
self._body = None
self.discriminator = None
self.idp_id = idp_id
self.protocol_id = protocol_id
if body is not None:
self.body = body
@property
def idp_id(self):
"""Gets the idp_id of this KeystoneCreateProtocolRequest.
        Identity provider ID.
:return: The idp_id of this KeystoneCreateProtocolRequest.
:rtype: str
"""
return self._idp_id
@idp_id.setter
def idp_id(self, idp_id):
"""Sets the idp_id of this KeystoneCreateProtocolRequest.
        Identity provider ID.
:param idp_id: The idp_id of this KeystoneCreateProtocolRequest.
:type: str
"""
self._idp_id = idp_id
@property
def protocol_id(self):
"""Gets the protocol_id of this KeystoneCreateProtocolRequest.
        ID of the protocol to be registered.
:return: The protocol_id of this KeystoneCreateProtocolRequest.
:rtype: str
"""
return self._protocol_id
@protocol_id.setter
def protocol_id(self, protocol_id):
"""Sets the protocol_id of this KeystoneCreateProtocolRequest.
        ID of the protocol to be registered.
:param protocol_id: The protocol_id of this KeystoneCreateProtocolRequest.
:type: str
"""
self._protocol_id = protocol_id
@property
def body(self):
"""Gets the body of this KeystoneCreateProtocolRequest.
:return: The body of this KeystoneCreateProtocolRequest.
:rtype: KeystoneCreateProtocolRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this KeystoneCreateProtocolRequest.
:param body: The body of this KeystoneCreateProtocolRequest.
:type: KeystoneCreateProtocolRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KeystoneCreateProtocolRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
1f17b1b6a2c0bd4d5957fdf6884f06dc99a93f8a | 82c562bf2257c248fae968ad27876f840c98d5cc | /python_stack/django/django_orm/firstorm/firstorm/settings.py | f42135421e8b5e24068980b7988b67264548ecc7 | [] | no_license | tbkrft567/CodingDojo | 5175dbf5cae1e7b23ad1b91e6370bbf1467e2c31 | 142974abeffa0aef93f3761ffd3a9e2cc452e990 | refs/heads/master | 2023-02-19T09:03:26.644627 | 2021-02-21T22:06:54 | 2021-02-21T22:06:54 | 230,340,732 | 0 | 0 | null | 2023-01-19T16:39:50 | 2019-12-26T23:24:56 | Python | UTF-8 | Python | false | false | 3,100 | py | """
Django settings for firstorm project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'bstws@=2bx0n1i5@-6uf+jrt9i%bj6m4!^zes)9s4io!jvm2w&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'firstorm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'firstorm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
18772b128b050a7ff3d1dabdbdd5d2091d72921c | 2d94902e0367f364eabd038b4aa49ac34e1ebd47 | /config.py | 7e17e0f65019ab54b69a72e387746c5c258de67a | [
"Apache-2.0"
] | permissive | qitianchan/Patap | 25f642759698391c77527ed3f676a84fcf918023 | e71bd95300d94f7e26f9d87e5bdb9f4c73175383 | refs/heads/master | 2021-01-20T20:03:52.546735 | 2016-07-27T12:01:31 | 2016-07-27T12:01:31 | 64,106,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | # Create dummy secrey key so we can use sessions
SECRET_KEY = '123456790'
# Create in-memory database
DATABASE_FILE = 'patap.sqlite'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_FILE
SQLALCHEMY_ECHO = True
# Flask-Security config
# SECURITY_URL_PREFIX = "/admin"
SECURITY_PASSWORD_HASH = "pbkdf2_sha512"
SECURITY_PASSWORD_SALT = "ATGUOHAELKiubahiughaerGOJAEGj"
# Flask-Security URLs, overridden because they don't put a / at the end
SECURITY_LOGIN_URL = "/login/"
SECURITY_LOGOUT_URL = "/logout/"
SECURITY_REGISTER_URL = "/register/"
SECURITY_POST_LOGIN_VIEW = "/facilitator/"
SECURITY_POST_LOGOUT_VIEW = "/"
SECURITY_POST_REGISTER_VIEW = "/admin/"
# Flask-Security features
SECURITY_REGISTERABLE = True
SECURITY_SEND_REGISTER_EMAIL = False
# Blueprint prefix
FACILITATOR_PREFIX = '/facilitator'
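
# --- Usage note (not part of the original file) --------------------------------
# This module is presumably loaded by the Flask application factory with
# something like app.config.from_object('config') or
# app.config.from_pyfile('config.py'); the exact loading call is not shown here.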
| [
"[email protected]"
] | |
e87ad93a3d3dcdf7cfea8ea52573b0f8fcc1a35a | 54290d468f91a21f131458b136213354a3780a46 | /fabfile.py | 2f37bf95d517421a6878a2eb07b905c48515e92f | [
"WTFPL"
] | permissive | LaPosteSNA/addok | 9435cccd37e936c89c538caed08f5db04e6bd0eb | 22a4e407c5a94d412673d223a25a8a3651801a71 | refs/heads/master | 2021-01-22T13:13:37.465954 | 2016-01-26T17:02:01 | 2016-01-26T17:02:01 | 45,954,224 | 1 | 0 | null | 2015-11-11T02:15:32 | 2015-11-11T02:15:31 | null | UTF-8 | Python | true | false | 3,886 | py | from fabric.api import cd, env, execute, hide, puts, roles, sudo, task
env.project_name = 'addok'
env.repository = 'https://github.com/etalab/addok.git'
env.local_branch = 'master'
env.remote_ref = 'origin/master'
env.requirements_file = 'requirements.txt'
env.use_ssh_config = True
env.shell = "/bin/bash -c" # Default uses -l option that we don't want.
env.virtualenv_dir = '/home/addok/.virtualenvs/addok'
env.project_dir = '/home/addok/src/'
env.restart_command = 'sudo service addok restart'
def run_as_addok(*args, **kwargs):
"""
Run command sudoing user `addok`.
"""
kwargs['user'] = "addok"
return sudo(*args, **kwargs)
# =============================================================================
# Tasks which set up deployment environments
# =============================================================================
@task
def dev():
"""
Use the dev deployment environment on Etalab servers.
You need the "banapidev" server to be referenced in your ~/.ssh/config
file.
"""
server = 'banapidev'
env.roledefs = {
'web': [server],
}
env.system_users = {server: 'addok'}
@task
def live():
"""
Use the live deployment environment on Etalab servers.
You need the "banapi" server to be referenced in your ~/.ssh/config file.
"""
server = 'banapi'
env.roledefs = {
'web': [server],
}
env.system_users = {server: 'addok'}
# Set the default environment.
dev()
# =============================================================================
# Actual tasks
# =============================================================================
@task
@roles('web')
def setup():
"""
Install the service (tested on Ubuntu 14.04).
"""
sudo('apt install redis-server python3.4-dev python-virtualenv python-pip '
'virtualenvwrapper')
# run_as_addok('source /usr/local/bin/virtualenvwrapper.sh')
run_as_addok('mkvirtualenv addok --python=/usr/bin/python3.4')
run_as_addok('git clone {repository} {project_dir}'.format(**env))
with cd(env.project_dir):
run_as_addok('pip install -r {requirements_file}'.format(**env))
@task
@roles('web')
def restart():
"""
Restart the web service.
"""
run_as_addok(env.restart_command)
@task
@roles('web')
def update(action='check'):
"""
Update the repository (server-side).
"""
with cd(env.project_dir):
remote, dest_branch = env.remote_ref.split('/', 1)
run_as_addok('git fetch {remote}'.format(
remote=remote, dest_branch=dest_branch, **env))
with hide('running', 'stdout'):
cmd = 'git diff-index --cached --name-only {remote_ref}'
changed_files = run_as_addok(cmd.format(**env)).splitlines()
if not changed_files and action != 'force':
# No changes, we can exit now.
return
run_as_addok('git merge {remote_ref}'.format(**env))
run_as_addok('find -name "*.pyc" -delete')
if action == "clean":
run_as_addok('git clean -df')
execute(install)
@task
@roles('web')
def install():
"""
Update the requirements.
"""
puts('Installing...')
cmd = '{virtualenv_dir}/bin/python setup.py develop'
run_as_addok(cmd.format(**env))
@task
@roles('web')
def shell():
cmd = "{virtualenv_dir}/bin/python /home/addok/src/run.py shell"
run_as_addok(cmd.format(virtualenv_dir=env.virtualenv_dir))
@task
def deploy(verbosity='normal'):
"""
Full server deploy.
Updates the repository (server-side) and restarts the web service.
"""
if verbosity == 'noisy':
hide_args = []
else:
hide_args = ['running', 'stdout']
with hide(*hide_args):
puts('Updating repository...')
execute(update)
puts('Restarting web server...')
execute(restart)
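
# --- Usage note (not part of the original file) --------------------------------
# With Fabric 1.x these tasks would typically be chained on the command line;
# exact invocations depend on the local Fabric setup, but roughly:
#   fab dev deploy          # deploy to the dev environment ("banapidev")
#   fab live deploy         # deploy to the live environment ("banapi")
#   fab live update:force   # merge and reinstall even if no files changed
#   fab live shell          # open the addok shell on the remote host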
| [
"[email protected]"
] | |
b3b1665fb21f6233aa62577bc888715c6c87326f | e5a044708032b853f1cdf8906da63502716fd410 | /test/test_acs_response.py | 50ae8c13ac7310108d49f9d76f8061799b9c37d7 | [] | no_license | GBSEcom/Python | 4b93bab80476051fc99f379f018ac9fa109a8a6a | 5fa37dba8d0c3853686fdc726f863743376060c9 | refs/heads/master | 2021-12-04T12:55:29.605843 | 2021-11-19T22:01:03 | 2021-11-19T22:01:03 | 136,058,345 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | # coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.5.0.20211029.001
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.acs_response import ACSResponse # noqa: E501
from openapi_client.rest import ApiException
class TestACSResponse(unittest.TestCase):
"""ACSResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testACSResponse(self):
"""Test ACSResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.acs_response.ACSResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
763b3c41f750dca1f47c7fdc416ee621be024e3c | 90cdae33e672b23a3ccb84dec0f281e78d3934ce | /auto_test_leke/web_test/src/common/method.py | 038feba19e8b7600635d53982ce2dd7eead0e753 | [] | no_license | kuangtao94/TestHome | dffdb3737ab60f6db435c770c33f423d814b5594 | 46acedadd225b07fe73f43feebd5c66d19c7eeac | refs/heads/master | 2020-06-29T07:38:37.316844 | 2019-11-24T02:14:25 | 2019-11-24T02:14:25 | 200,475,947 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,918 | py | # coding:utf-8
from selenium import webdriver
import os
from selenium.webdriver.common.by import By  # By locators
from selenium.webdriver.support.ui import WebDriverWait  # explicit-wait support
from selenium.webdriver.support import expected_conditions as EC  # expected conditions
from logging import log  # the log class from the logging module
# driver=webdriver.Chrome()
class public():
    def __init__(self,driver): # initialization parameters are supplied when the object is created and run on every instantiation
        self.driver=driver
        self.log=log('execute.log') # create the log object and initialise the log file

    def login(self,username,password):
        self.input_text(By.NAME,"name",username)
        self.input_text(By.NAME,"password",password)
        self.click_element(By.NAME,"submit")

    def logout(self):
        self.click_element(By.XPATH,'')

    def locat_element(self,*loc): # locate an element with an explicit wait
        try:
            element = WebDriverWait(self.driver,5,0.5).until(
                EC.presence_of_element_located(loc)
            )
            return element
        except:
            self.log.error(u"Element not found: "+str(loc))

    def input_text(self,a,b,text,clear=True): # type text into an input box
        if clear:
            try:
                self.locat_element(a,b).clear()
                self.locat_element(a,b).send_keys(text)
            except:
                self.log.error(u'Text input failed: '+str(text))
        else:
            try:
                self.locat_element(a,b).send_keys(text)
            except:
                self.log.error(u'Text input failed: '+str(text))

    def click_element(self,a,b): # click an element
        try:
            self.locat_element(a,b).click()
        except:
            self.log.error(u'Click failed: '+str(b))
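

# --- Hedged usage sketch (not part of the original file) ----------------------
# A minimal illustration of how this wrapper might be driven. The URL and
# credentials are placeholders, and a local ChromeDriver on PATH is assumed.
if __name__ == '__main__':
    driver = webdriver.Chrome()
    driver.get('http://example.com/login')  # placeholder URL
    page = public(driver)
    page.login('test_user', 'test_password')
    # equivalently:
    # page.input_text(By.NAME, 'name', 'test_user')
    # page.input_text(By.NAME, 'password', 'test_password')
    # page.click_element(By.NAME, 'submit')
    driver.quit()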
| [
"[email protected]"
] | |
94be3ed0169b5b1a099858c6d26cca996a1e3f6c | ee7ca0fed1620c3426fdfd22e5a82bba2a515983 | /dsn_qc_pbsa/models/qc.py | 49236e76f3e03ea571e2b17105d32c21ef6826d9 | [] | no_license | disna-sistemas/odoo | 318d0e38d9b43bea56978fe85fc72850d597f033 | 0826091462cc10c9edc3cc29ea59c417f8e66c33 | refs/heads/8.0 | 2022-03-08T19:01:21.162717 | 2022-02-15T13:06:26 | 2022-02-15T13:06:26 | 99,210,381 | 0 | 5 | null | 2019-07-24T08:49:58 | 2017-08-03T08:36:55 | Python | UTF-8 | Python | false | false | 1,399 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from datetime import datetime
class dsnQcInspection(models.Model):
_inherit = "qc.inspection"
buf_date_analysis = fields.Date('Date of Analysis', readonly=True)
@api.multi
def action_confirm(self):
for inspection in self:
inspection.buf_date_analysis = datetime.now()
result = super(dsnQcInspection, self).action_confirm()
return result | [
"[email protected]"
] | |
f2c256c323ba1cec97493ee8de823784aeb287cc | ac8779998b00a35b2fced864b007334a32873bf1 | /manage.py | cf1caf56512e01322d1767d71e4b7fce84a389b2 | [] | no_license | openmaker-eu/dsp-explorer | 5cb5ad4e7190dc8559b69a3154da9ae6383ef536 | 8a8b007ac6095ac6864e030641545724a6872f2e | refs/heads/master | 2022-12-09T13:50:47.218076 | 2019-02-14T20:21:53 | 2019-02-14T20:21:53 | 90,641,109 | 5 | 8 | null | 2022-12-08T02:06:16 | 2017-05-08T15:02:38 | Python | UTF-8 | Python | false | false | 809 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dspexplorer.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
46823e51aef1425db664261b1bf8807eda1cf97f | ff21dd1b906db472584aa92a32c22fb9351c9ffd | /NOTE/02_PythonBase/day20/exercise/mycopy.py | 84c9355038bbc272ced2e384557d1676f5064f61 | [] | no_license | Bertram-Liu/Note | 0e176b2c9625f02e463b8f6be3587f1f0b873e9b | 60a30b03ff5d41ab6233e6fd30074de396703b68 | refs/heads/master | 2020-07-18T18:14:24.920528 | 2019-09-04T11:55:59 | 2019-09-04T11:55:59 | 206,290,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | # 1. 写程序,实现复制文件的功能
# 要求:
# 1. 要考虑关闭文件的问题
# 2. 要考虑超大文件的问题
# 3. 要能复制二进制文件
def copy(src_file, dst_file):
'''src_file 源文件
dst_file 目标文件'''
# 以下实现复制
try:
with open(src_file, 'rb') as fr, \
open(dst_file, 'wb') as fw:
while True:
b = fr.read(4096)
if not b: # 到达文件尾
break
fw.write(b)
except OSError:
print("复制失败!")
src = input("请输入源文件名: ")
dst = input('请输入目标文件名: ')
copy(src, dst)
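
# Note (not part of the original exercise): the with-statement above already
# guarantees both files are closed even on errors, and reading in 4096-byte
# chunks keeps memory use flat for very large files. The standard library's
# shutil.copyfileobj(fr, fw) implements the same chunked-copy loop.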
| [
"[email protected]"
] | |
1125be93d6bf6ec5f578a754b518db5452606a9d | 4d8f0c491943e0c688c11561c7f57d815b014b76 | /experiments/tf_vae_pixel/resnet_16.py | a289791f5c82669c58bc4a02e9871acbdb7e5406 | [] | no_license | kundan2510/nn | d5e434e9d6275a82a04c12df4d8e4be855c39ab5 | 39d247f3b64f9ac9400835dd72d3af7a84578449 | refs/heads/master | 2020-12-26T03:10:23.949984 | 2016-12-27T20:23:58 | 2016-12-27T20:23:58 | 64,513,518 | 0 | 1 | null | 2016-07-29T22:19:40 | 2016-07-29T22:19:39 | null | UTF-8 | Python | false | false | 42,905 | py | """
Multilayer VAE + Pixel CNN
Ishaan Gulrajani
"""
import os, sys
sys.path.append(os.getcwd())
N_GPUS = 1
try: # This only matters on Ishaan's computer
import experiment_tools
experiment_tools.wait_for_gpu(tf=True, n_gpus=N_GPUS)
except ImportError:
pass
import tflib as lib
import tflib.debug
import tflib.train_loop
import tflib.ops.kl_unit_gaussian
import tflib.ops.kl_gaussian_gaussian
import tflib.ops.conv2d
import tflib.ops.deconv2d
import tflib.ops.linear
import tflib.ops.batchnorm
import tflib.ops.embedding
import tflib.lsun_bedrooms
import tflib.mnist_256
# import tflib.small_imagenet
import numpy as np
import tensorflow as tf
import scipy.misc
from scipy.misc import imsave
import time
import functools
import argparse
parser = argparse.ArgumentParser(description='Generating images pixel by pixel')
add_arg = parser.add_argument
add_arg('--model_weights', default=None, help = 'Pretrained model weights to load')
add_arg('--algo', default='1', help = '"1" or "RMB"')
args = parser.parse_args()
out_paths = ['/Tmp/kumarkun/pixel_vae', '/scratch/jvb-000-aa/kundan/pixel_vae']
for p in out_paths:
    if os.path.exists(p):
        OUT_DIR_PREFIX = p
        break
else:
    raise RuntimeError("None of the candidate output paths exist: {}".format(out_paths))
DATASET = 'lsun_64' # mnist_256, lsun_32, lsun_64, imagenet_64
SETTINGS = '64px' # mnist_256, 32px_small, 32px_big, 64px
OUT_DIR = '{}/{}/{}/samples'.format(OUT_DIR_PREFIX, DATASET, SETTINGS)
PARAM_DIR = '{}/{}/{}/params'.format(OUT_DIR_PREFIX, DATASET, SETTINGS)
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
print "Created {}".format(OUT_DIR)
if not os.path.exists(PARAM_DIR):
os.makedirs(PARAM_DIR)
print "Created {}".format(PARAM_DIR)
if SETTINGS == 'mnist_256':
# two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level
# one_level uses EncFull/DecFull for the bottom (and only) level
MODE = 'one_level'
EMBED_INPUTS = True
# Turn on/off the bottom-level PixelCNN in Dec1/DecFull
PIXEL_LEVEL_PIXCNN = True
HIGHER_LEVEL_PIXCNN = True
PIXCNN_ONLY = False
# These settings are good for a 'smaller' model that trains (up to 200K iters)
# in ~1 day on a GTX 1080 (probably equivalent to 2 K40s).
DIM_PIX_1 = 32
PIX1_FILT_SIZE = 5
DIM_1 = 16
DIM_2 = 32
DIM_3 = 32
# LATENT_DIM_1 = 32
# DIM_PIX_2 = 32
DIM_4 = 64
DIM_5 = 128
LATENT_DIM_2 = 2
ALPHA1_ITERS = 10000
# ALPHA2_ITERS = 5000
KL_PENALTY = 1.05
BETA_ITERS = 1000
# In Dec2, we break each spatial location into N blocks (analogous to channels
# in the original PixelCNN) and model each spatial location autoregressively
# as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1
# actually hurt performance. Unsure why; might be a bug.
PIX_2_N_BLOCKS = 1
TIMES = {
'mode': 'iters',
'print_every': 2*500,
'test_every': 2*500,
'stop_after': 500*500,
'callback_every': 10*500
}
VANILLA = False
LR = 1e-3
LR_DECAY_AFTER = TIMES['stop_after']
LR_DECAY_FACTOR = 1.
BATCH_SIZE = 100
N_CHANNELS = 1
HEIGHT = 28
WIDTH = 28
LATENTS1_HEIGHT = 7
LATENTS1_WIDTH = 7
elif SETTINGS == '32px_small':
# two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level
# one_level uses EncFull/DecFull for the bottom (and only) level
MODE = 'two_level'
EMBED_INPUTS = False
# Turn on/off the bottom-level PixelCNN in Dec1/DecFull
PIXEL_LEVEL_PIXCNN = True
HIGHER_LEVEL_PIXCNN = True
PIXCNN_ONLY = False
# These settings are good for a 'smaller' model that trains (up to 200K iters)
# in ~1 day on a GTX 1080 (probably equivalent to 2 K40s).
DIM_PIX_1 = 128
PIX1_FILT_SIZE = 3
DIM_1 = 64
DIM_2 = 128
DIM_3 = 256
LATENT_DIM_1 = 64
DIM_PIX_2 = 512
DIM_4 = 512
DIM_5 = 2048
LATENT_DIM_2 = 512
ALPHA1_ITERS = 5000
ALPHA2_ITERS = 5000
KL_PENALTY = 1.00
SQUARE_ALPHA = False
BETA_ITERS = 1000
# In Dec2, we break each spatial location into N blocks (analogous to channels
# in the original PixelCNN) and model each spatial location autoregressively
# as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1
# actually hurt performance. Unsure why; might be a bug.
PIX_2_N_BLOCKS = 1
TIMES = {
'mode': 'iters',
'print_every': 1000,
'test_every': 1000,
'stop_after': 200000,
'callback_every': 20000
}
VANILLA = False
LR = 1e-3
LR_DECAY_AFTER = 180000
LR_DECAY_FACTOR = 1e-1
BATCH_SIZE = 64
N_CHANNELS = 3
HEIGHT = 32
WIDTH = 32
LATENTS1_HEIGHT = 8
LATENTS1_WIDTH = 8
elif SETTINGS == '32px_big':
# two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level
# one_level uses EncFull/DecFull for the bottom (and only) level
MODE = 'two_level'
EMBED_INPUTS = False
# Turn on/off the bottom-level PixelCNN in Dec1/DecFull
PIXEL_LEVEL_PIXCNN = True
HIGHER_LEVEL_PIXCNN = True
PIXCNN_ONLY = False
# These settings are good for a 'smaller' model that trains (up to 200K iters)
# in ~1 day on a GTX 1080 (probably equivalent to 2 K40s).
DIM_PIX_1 = 256
PIX1_FILT_SIZE = 3
DIM_1 = 128
DIM_2 = 256
DIM_3 = 512
LATENT_DIM_1 = 128
DIM_PIX_2 = 1024
DIM_4 = 1024
DIM_5 = 2048
LATENT_DIM_2 = 512
ALPHA1_ITERS = 5000
ALPHA2_ITERS = 5000
KL_PENALTY = 1.00
SQUARE_ALPHA = False
BETA_ITERS = 1000
# In Dec2, we break each spatial location into N blocks (analogous to channels
# in the original PixelCNN) and model each spatial location autoregressively
# as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1
# actually hurt performance. Unsure why; might be a bug.
PIX_2_N_BLOCKS = 1
TIMES = {
'mode': 'iters',
'print_every': 1000,
'test_every': 1000,
'stop_after': 300000,
'callback_every': 20000
}
VANILLA = False
LR = 5e-4
LR_DECAY_AFTER = 250000
LR_DECAY_FACTOR = 2e-1
BATCH_SIZE = 64
N_CHANNELS = 3
HEIGHT = 32
WIDTH = 32
LATENTS1_HEIGHT = 8
LATENTS1_WIDTH = 8
elif SETTINGS == '64px':
# WARNING! Some parts of the network architecture have hardcoded checks for
    # (SETTINGS == '64px'), so if you just copy these settings under a new
# label things will be different! TODO maybe fix this eventually.
# two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level
# one_level uses EncFull/DecFull for the bottom (and only) level
MODE = 'two_level'
EMBED_INPUTS = False
# Turn on/off the bottom-level PixelCNN in Dec1/DecFull
PIXEL_LEVEL_PIXCNN = True
HIGHER_LEVEL_PIXCNN = True
DIM_PIX_1 = 128
PIX1_FILT_SIZE = 7
DIM_1 = 64
DIM_2 = 128
DIM_3 = 256
LATENT_DIM_1 = 64
DIM_PIX_2 = 512
DIM_4 = 512
DIM_5 = 2048
LATENT_DIM_2 = 512
PIXCNN_ONLY = False
# Uncomment for PixelCNN only (NO VAE)
# print "WARNING PIXCNN ONLY"
# PIXCNN_ONLY = True
# DIM_PIX_1 = 128
# PIX1_FILT_SIZE = 3
# In Dec2, we break each spatial location into N blocks (analogous to channels
# in the original PixelCNN) and model each spatial location autoregressively
# as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1
# actually hurt performance. Unsure why; might be a bug.
PIX_2_N_BLOCKS = 1
TIMES = {
'mode': 'iters',
'print_every': 1,
'test_every': 10000,
'stop_after': 200000,
'callback_every': 25000
}
VANILLA = False
LR = 5e-4
LR_DECAY_AFTER = 180000
LR_DECAY_FACTOR = 2e-1
ALPHA1_ITERS = 5000
ALPHA2_ITERS = 20000
KL_PENALTY = 1.01
BETA_ITERS = 1000
BATCH_SIZE = 64
N_CHANNELS = 3
HEIGHT = 64
WIDTH = 64
LATENTS1_WIDTH = 8
LATENTS1_HEIGHT = 8
if DATASET == 'mnist_256':
train_data, dev_data, test_data = lib.mnist_256.load(BATCH_SIZE, BATCH_SIZE)
elif DATASET == 'lsun_32':
train_data, dev_data = lib.lsun_bedrooms.load(BATCH_SIZE, downsample=True)
elif DATASET == 'lsun_64':
train_data, dev_data = lib.lsun_bedrooms.load(BATCH_SIZE, downsample=False)
elif DATASET == 'imagenet_64':
train_data, dev_data = lib.small_imagenet.load(BATCH_SIZE)
lib.print_model_settings(locals().copy())
DEVICES = ['/gpu:{}'.format(i) for i in xrange(N_GPUS)]
DEVICES = ['/gpu:3']
lib.ops.conv2d.enable_default_weightnorm()
lib.ops.deconv2d.enable_default_weightnorm()
lib.ops.linear.enable_default_weightnorm()
def nonlinearity(x):
return tf.nn.elu(x)
def pixcnn_gated_nonlinearity(a, b):
return tf.sigmoid(a) * tf.tanh(b)
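
# Note (not in the original source): this is the gated activation unit
# sigmoid(W*x) * tanh(V*x) from the Conditional PixelCNN paper
# (van den Oord et al., 2016), applied here to two separate convolution outputs.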
def ResidualBlock(name, input_dim, output_dim, inputs, inputs_stdev, filter_size, mask_type=None, resample=None, he_init=True):
"""
resample: None, 'down', or 'up'
"""
if mask_type != None and resample != None:
raise Exception('Unsupported configuration')
if resample=='down':
conv_shortcut = functools.partial(lib.ops.conv2d.Conv2D, stride=2)
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim, stride=2)
elif resample=='up':
def conv_shortcut(*args, **kwargs):
kwargs['output_dim'] = 4*kwargs['output_dim']
output = lib.ops.conv2d.Conv2D(*args, **kwargs)
output = tf.transpose(output, [0,2,3,1])
old_shape = tf.shape(output)
output = tf.reshape(output, tf.pack([old_shape[0], 2*old_shape[1], 2*old_shape[2], old_shape[3]/4]))
output = tf.transpose(output, [0,3,1,2])
return output
conv_1 = functools.partial(lib.ops.deconv2d.Deconv2D, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
elif resample==None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim==input_dim and resample==None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1, mask_type=mask_type, he_init=False, biases=False, inputs=inputs)
output = inputs
if mask_type == None:
output = nonlinearity(output)
output = conv_1(name+'.Conv1', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output = nonlinearity(output)
output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
else:
output = nonlinearity(output)
output_a = conv_1(name+'.Conv1A', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output_b = conv_1(name+'.Conv1B', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output = pixcnn_gated_nonlinearity(output_a, output_b)
output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
return shortcut + (0.3 * output)
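
# Note (not in the original source): in the 'up' branch above, the shortcut
# convolution produces 4*output_dim channels and the transpose/reshape pair then
# trades those extra channels for a 2x larger spatial map,
# (N, 4*C, H, W) -> (N, C, 2*H, 2*W) -- a subpixel / depth-to-space style
# upsample -- while the residual branch upsamples with a Deconv2D.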
def RMB(name, input_dim, output_dim, inputs, inputs_stdev, filter_size, mask_type=None, resample=None, he_init=True):
"""
resample: None, 'down', or 'up'
"""
if mask_type != None and resample != None:
raise Exception('Unsupported configuration')
if resample=='down':
conv_shortcut = functools.partial(lib.ops.conv2d.Conv2D, stride=2)
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim, stride=2)
elif resample=='up':
def conv_shortcut(*args, **kwargs):
kwargs['output_dim'] = 4*kwargs['output_dim']
output = lib.ops.conv2d.Conv2D(*args, **kwargs)
output = tf.transpose(output, [0,2,3,1])
old_shape = tf.shape(output)
output = tf.reshape(output, tf.pack([old_shape[0], 2*old_shape[1], 2*old_shape[2], old_shape[3]/4]))
output = tf.transpose(output, [0,3,1,2])
return output
conv_1 = functools.partial(lib.ops.deconv2d.Deconv2D, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
elif resample==None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim==input_dim and resample==None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1, mask_type=mask_type, he_init=False, biases=False, inputs=inputs)
output = inputs
if mask_type == None:
output = nonlinearity(output)
output = conv_1(name+'.Conv1', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output = nonlinearity(output)
output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
else:
output = nonlinearity(output)
output_a = conv_1(name+'.Conv1A', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output_b = conv_1(name+'.Conv1B', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output = pixcnn_gated_nonlinearity(output_a, output_b)
output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output = nonlinearity(output)
output = lib.ops.conv2d.Conv2D(name+'.Conv3_1x1', input_dim=output_dim, output_dim=output_dim, filter_size=1, mask_type=None, inputs=output, he_init=he_init)
return shortcut + output
# args.algo selects the residual block implementation. This is done once at
# module level: assigning to ResidualBlock inside each network function made
# the name function-local and raised UnboundLocalError whenever --algo was not
# "RMB".
if args.algo == "RMB":
    ResidualBlock = RMB

def Enc1(images):
if PIXCNN_ONLY:
batch_size = tf.shape(images)[0]
return tf.zeros(tf.pack([batch_size, 2*LATENT_DIM_1, LATENTS1_WIDTH, LATENTS1_HEIGHT]), tf.float16)
output = images
if SETTINGS == '64px':
output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=5, inputs=output, he_init=False, stride=2)
else:
output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('Enc1.Res1', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs_stdev=1, inputs=output)
output = ResidualBlock('Enc1.Res2', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs_stdev=np.sqrt(2), inputs=output)
output = ResidualBlock('Enc1.Res3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs_stdev=np.sqrt(3), inputs=output)
output = lib.ops.conv2d.Conv2D('Enc1.Out', input_dim=DIM_3, output_dim=2*LATENT_DIM_1, filter_size=1, inputs=output, he_init=False)
return output
def Dec1(latents, images):
    # ResidualBlock is selected at module level from args.algo (see above).
if PIXCNN_ONLY:
batch_size = tf.shape(latents)[0]
output = tf.zeros(tf.pack([batch_size, DIM_1, HEIGHT, WIDTH]), tf.float16)
else:
output = tf.clip_by_value(latents, -50., 50.)
output = lib.ops.conv2d.Conv2D('Dec1.Input', input_dim=LATENT_DIM_1, output_dim=DIM_3, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('Dec1.Res1', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs_stdev=1, inputs=output)
output = ResidualBlock('Dec1.Res2', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', inputs_stdev=np.sqrt(2), inputs=output)
output = ResidualBlock('Dec1.Res3', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', inputs_stdev=np.sqrt(3), inputs=output)
if SETTINGS == '64px':
output = ResidualBlock('Dec1.Res4', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample='up', inputs_stdev=np.sqrt(3), inputs=output)
if PIXEL_LEVEL_PIXCNN:
masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=7, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
# Make the stdev of output and masked_images match
output /= np.sqrt(4)
# Warning! Because of the masked convolutions it's very important that masked_images comes first in this concat
output = tf.concat(1, [masked_images, output])
output = ResidualBlock('Dec1.Pix2Res', input_dim=2*DIM_1, output_dim=DIM_PIX_1, filter_size=PIX1_FILT_SIZE, mask_type=('b', N_CHANNELS), inputs_stdev=1, inputs=output)
if PIXCNN_ONLY:
for i in xrange(9):
output = ResidualBlock('Dec1.Pix2Res_'+str(i), input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=PIX1_FILT_SIZE, mask_type=('b', N_CHANNELS), inputs_stdev=1, inputs=output)
output = ResidualBlock('Dec1.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=1, mask_type=('b', N_CHANNELS), inputs_stdev=np.sqrt(2), inputs=output)
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_PIX_1, output_dim=256*N_CHANNELS, filter_size=1, mask_type=('b', N_CHANNELS), he_init=False, inputs=output)
else:
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_1, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output)
return tf.transpose(
tf.reshape(output, [-1, 256, N_CHANNELS, HEIGHT, WIDTH]),
[0,2,3,4,1]
)
def DecRMB(latents, images):
    # ResidualBlock is selected at module level from args.algo (see above).
if PIXCNN_ONLY:
batch_size = tf.shape(latents)[0]
output = tf.zeros(tf.pack([batch_size, DIM_1, HEIGHT, WIDTH]), tf.float16)
else:
output = tf.clip_by_value(latents, -50., 50.)
output = lib.ops.conv2d.Conv2D('Dec1.Input', input_dim=LATENT_DIM_1, output_dim=DIM_3, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('Dec1.Res1', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs_stdev=1, inputs=output)
output = ResidualBlock('Dec1.Res2', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', inputs_stdev=np.sqrt(2), inputs=output)
output = ResidualBlock('Dec1.Res3', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', inputs_stdev=np.sqrt(3), inputs=output)
if SETTINGS == '64px':
output = ResidualBlock('Dec1.Res4', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample='up', inputs_stdev=np.sqrt(3), inputs=output)
if PIXEL_LEVEL_PIXCNN:
masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=7, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
# Make the stdev of output and masked_images match
output /= np.sqrt(4)
# Warning! Because of the masked convolutions it's very important that masked_images comes first in this concat
output = tf.concat(1, [masked_images, output])
output = ResidualBlock('Dec1.Pix2Res', input_dim=2*DIM_1, output_dim=DIM_PIX_1, filter_size=PIX1_FILT_SIZE, mask_type=('b', N_CHANNELS), inputs_stdev=1, inputs=output)
if PIXCNN_ONLY:
for i in xrange(9):
output = ResidualBlock('Dec1.Pix2Res_'+str(i), input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=PIX1_FILT_SIZE, mask_type=('b', N_CHANNELS), inputs_stdev=1, inputs=output)
output = ResidualBlock('Dec1.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=1, mask_type=('b', N_CHANNELS), inputs_stdev=np.sqrt(2), inputs=output)
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_PIX_1, output_dim=256*N_CHANNELS, filter_size=1, mask_type=('b', N_CHANNELS), he_init=False, inputs=output)
else:
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_1, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output)
return tf.transpose(
tf.reshape(output, [-1, 256, N_CHANNELS, HEIGHT, WIDTH]),
[0,2,3,4,1]
)
def Enc2(latents):
    # ResidualBlock is selected at module level from args.algo (see above).
if PIXCNN_ONLY:
batch_size = tf.shape(latents)[0]
return tf.zeros(tf.pack([batch_size, 2*LATENT_DIM_2]), tf.float16)
output = tf.clip_by_value(latents, -50., 50.)
output = lib.ops.conv2d.Conv2D('Enc2.Input', input_dim=LATENT_DIM_1, output_dim=DIM_3, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('Enc2.Res1', input_dim=DIM_3, output_dim=DIM_4, filter_size=3, resample='down', inputs_stdev=1, he_init=True, inputs=output)
# output = ResidualBlock('Enc2.Res2', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs_stdev=np.sqrt(2), he_init=True, inputs=output)
output = tf.reshape(output, [-1, 4*4*DIM_4])
output = tf.nn.elu(output)
output = lib.ops.linear.Linear('Enc2.ConvToFC', input_dim=4*4*DIM_4, output_dim=DIM_5, inputs=output)
output = tf.nn.elu(output)
# We implement an FC residual block as a conv over a 1x1 featuremap
# output = tf.reshape(output, [-1, DIM_5, 1, 1])
# output = ResidualBlock('Enc2.Res3', input_dim=DIM_5, output_dim=DIM_5, filter_size=1, inputs_stdev=np.sqrt(3), he_init=True, inputs=output)
# output = tf.reshape(output, [-1, DIM_5])
output = lib.ops.linear.Linear('Enc2.Output', input_dim=DIM_5, output_dim=2*LATENT_DIM_2, inputs=output, initialization='glorot')
return output
def Dec2(latents, targets):
    # ResidualBlock is selected at module level from args.algo (see above).
if PIXCNN_ONLY:
batch_size = tf.shape(latents)[0]
return tf.zeros(tf.pack([batch_size, 2*LATENT_DIM_1, LATENTS1_HEIGHT, LATENTS1_WIDTH]), tf.float16)
output = tf.clip_by_value(latents, -50., 50.)
output = lib.ops.linear.Linear('Dec2.Input', input_dim=LATENT_DIM_2, output_dim=DIM_5, inputs=output)
output = tf.nn.elu(output)
# output = tf.reshape(output, [-1, DIM_5, 1, 1])
# output = ResidualBlock('Dec2.Res1', input_dim=DIM_5, output_dim=DIM_5, filter_size=1, inputs_stdev=1, he_init=True, inputs=output)
# output = tf.reshape(output, [-1, DIM_5])
output = lib.ops.linear.Linear('Dec2.FCToConv', input_dim=DIM_5, output_dim=4*4*DIM_4, inputs=output)
output = tf.reshape(output, [-1, DIM_4, 4, 4])
# output = ResidualBlock('Dec2.Res2', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs_stdev=np.sqrt(2), he_init=True, inputs=output)
output = ResidualBlock('Dec2.Res3', input_dim=DIM_4, output_dim=DIM_3, filter_size=3, resample='up', inputs_stdev=np.sqrt(3), he_init=True, inputs=output)
if WIDTH == 28:
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, 7, 7])
if HIGHER_LEVEL_PIXCNN:
masked_targets = lib.ops.conv2d.Conv2D('Dec2.Pix1', input_dim=LATENT_DIM_1, output_dim=DIM_3, filter_size=7, mask_type=('a', PIX_2_N_BLOCKS), he_init=False, inputs=targets)
# Make the stdev of output and masked_targets match
output /= np.sqrt(4)
output = tf.concat(1, [masked_targets, output])
output = ResidualBlock('Dec2.Pix2Res', input_dim=2*DIM_3, output_dim=DIM_PIX_2, filter_size=3, mask_type=('b', PIX_2_N_BLOCKS), inputs_stdev=1, he_init=True, inputs=output)
output = ResidualBlock('Dec2.Pix3Res', input_dim=DIM_PIX_2, output_dim=DIM_PIX_2, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), inputs_stdev=np.sqrt(2), he_init=True, inputs=output)
output = lib.ops.conv2d.Conv2D('Dec2.Out', input_dim=DIM_PIX_2, output_dim=2*LATENT_DIM_1, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=False, inputs=output)
else:
output = lib.ops.conv2d.Conv2D('Dec2.Out', input_dim=DIM_3, output_dim=2*LATENT_DIM_1, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=False, inputs=output)
return output
def EncFull(images):
    # ResidualBlock is selected at module level from args.algo (see above).
output = images
if EMBED_INPUTS:
output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS*DIM_1, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
else:
output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('EncFull.Res1', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs_stdev=1, inputs=output)
output = ResidualBlock('EncFull.Res2', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs_stdev=np.sqrt(2), inputs=output)
output = ResidualBlock('EncFull.Res3', input_dim=DIM_3, output_dim=DIM_4, filter_size=3, resample='down', inputs_stdev=np.sqrt(3), inputs=output)
# output = ResidualBlock('EncFull.Res4', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs_stdev=np.sqrt(4), inputs=output)
output = tf.reshape(output, [-1, 4*4*DIM_4])
output = lib.ops.linear.Linear('EncFull.ConvToFC', input_dim=4*4*DIM_4, output_dim=DIM_5, initialization='glorot', inputs=output)
# We implement an FC residual block as a conv over a 1x1 featuremap
output = tf.reshape(output, [-1, DIM_5, 1, 1])
output = ResidualBlock('EncFull.Res5', input_dim=DIM_5, output_dim=DIM_5, filter_size=1, inputs_stdev=np.sqrt(5), he_init=True, inputs=output)
output = tf.reshape(output, [-1, DIM_5])
output = lib.ops.linear.Linear('EncFull.Output', input_dim=DIM_5, output_dim=2*LATENT_DIM_2, inputs=output, initialization='glorot')
return output
def DecFull(latents, images):
    # ResidualBlock is selected at module level from args.algo (see above).
output = tf.clip_by_value(latents, -50., 50.)
output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=DIM_5, initialization='glorot', inputs=output)
output = tf.reshape(output, [-1, DIM_5, 1, 1])
output = ResidualBlock('DecFull.Res1', input_dim=DIM_5, output_dim=DIM_5, filter_size=1, inputs_stdev=1, he_init=True, inputs=output)
output = tf.reshape(output, [-1, DIM_5])
output = lib.ops.linear.Linear('DecFull.FCToConv', input_dim=DIM_5, output_dim=4*4*DIM_4, initialization='glorot', inputs=output)
output = tf.reshape(output, [-1, DIM_4, 4, 4])
# output = ResidualBlock('DecFull.Res2', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs_stdev=np.sqrt(2), he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res3', input_dim=DIM_4, output_dim=DIM_3, filter_size=3, resample='up', inputs_stdev=np.sqrt(3), he_init=True, inputs=output)
if WIDTH == 28:
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, 7, 7])
output = ResidualBlock('DecFull.Res4', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', inputs_stdev=np.sqrt(4), he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res5', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', inputs_stdev=np.sqrt(5), he_init=True, inputs=output)
# position-invariant latent projection
# output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=DIM_1, initialization='glorot', inputs=output)
# output = tf.tile(output, [1, HEIGHT*WIDTH])
# output = tf.reshape(output, [-1, DIM_1, HEIGHT, WIDTH])
if PIXEL_LEVEL_PIXCNN:
if EMBED_INPUTS:
masked_images = lib.ops.conv2d.Conv2D('DecFull.Pix1', input_dim=N_CHANNELS*DIM_1, output_dim=DIM_1, filter_size=7, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
else:
masked_images = lib.ops.conv2d.Conv2D('DecFull.Pix1', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=7, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
# Make the stdev of output and masked_images match
# output /= np.sqrt(6)
# Warning! Because of the masked convolutions it's very important that masked_images comes first in this concat
output = tf.concat(1, [masked_images, output])
output = ResidualBlock('DecFull.Pix2Res', input_dim=2*DIM_1, output_dim=DIM_PIX_1, filter_size=5, mask_type=('b', N_CHANNELS), inputs_stdev=1, inputs=output)
# output = ResidualBlock('DecFull.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=5, mask_type=('b', N_CHANNELS), inputs_stdev=1, inputs=output)
# output = ResidualBlock('DecFull.Pix4Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=5, mask_type=('b', N_CHANNELS), inputs_stdev=1, inputs=output)
output = ResidualBlock('DecFull.Pix5Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=1, mask_type=('b', N_CHANNELS), inputs_stdev=np.sqrt(2), inputs=output)
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_PIX_1, output_dim=256*N_CHANNELS, filter_size=1, mask_type=('b', N_CHANNELS), he_init=False, inputs=output)
else:
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_1, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output)
return tf.transpose(
tf.reshape(output, [-1, 256, N_CHANNELS, HEIGHT, WIDTH]),
[0,2,3,4,1]
)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
total_iters = tf.placeholder(tf.int32, shape=None, name='total_iters')
all_images = tf.placeholder(tf.int32, shape=[None, N_CHANNELS, HEIGHT, WIDTH], name='all_images')
def split(mu_and_logsig):
mu, logsig = tf.split(1, 2, mu_and_logsig)
sig = tf.nn.softsign(logsig)+1
logsig = tf.log(sig)
return mu, logsig, sig
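
    # Note (not in the original source): softsign(x) = x / (1 + |x|) lies in
    # (-1, 1), so sig = softsign(logsig) + 1 is constrained to (0, 2), and the
    # returned logsig is recomputed as log(sig) for the KL terms below.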
def clamp_logsig_and_sig(logsig, sig):
floor = 1. - tf.minimum(1., tf.cast(total_iters, 'float16') / BETA_ITERS)
log_floor = tf.log(floor)
return tf.maximum(logsig, log_floor), tf.maximum(sig, floor)
split_images = tf.split(0, len(DEVICES), all_images)
tower_cost = []
for device, images in zip(DEVICES, split_images):
with tf.device(device):
scaled_images = (tf.cast(images, 'float16') - 128.) / 64.
if EMBED_INPUTS:
embedded_images = lib.ops.embedding.Embedding('Embedding', 256, DIM_1, images)
embedded_images = tf.transpose(embedded_images, [0,4,1,2,3])
embedded_images = tf.reshape(embedded_images, [-1, N_CHANNELS*DIM_1, HEIGHT, WIDTH])
if MODE == 'one_level':
# Layer 1
if EMBED_INPUTS:
mu_and_logsig1 = EncFull(embedded_images)
else:
mu_and_logsig1 = EncFull(scaled_images)
mu1, logsig1, sig1 = split(mu_and_logsig1)
if VANILLA:
latents1 = mu1
else:
eps = tf.random_normal(tf.shape(mu1))
latents1 = mu1 + (eps * sig1)
if EMBED_INPUTS:
outputs1 = DecFull(latents1, embedded_images)
else:
outputs1 = DecFull(latents1, scaled_images)
reconst_cost = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
tf.reshape(outputs1, [-1, 256]),
tf.reshape(images, [-1])
)
)
# Assembly
# An alpha of exactly 0 can sometimes cause inf/nan values, so we're
# careful to avoid it.
alpha = tf.minimum(1., tf.cast(total_iters+1, 'float16') / ALPHA1_ITERS) * KL_PENALTY
kl_cost_1 = tf.reduce_mean(
lib.ops.kl_unit_gaussian.kl_unit_gaussian(
mu1,
logsig1,
sig1
)
)
kl_cost_1 *= float(LATENT_DIM_2) / (N_CHANNELS * WIDTH * HEIGHT)
if VANILLA:
cost = reconst_cost
else:
cost = reconst_cost + (alpha * kl_cost_1)
elif MODE == 'two_level':
# Layer 1
mu_and_logsig1 = Enc1(scaled_images)
mu1, logsig1, sig1 = split(mu_and_logsig1)
if VANILLA:
latents1 = mu1
else:
eps = tf.random_normal(tf.shape(mu1))
latents1 = mu1 + (eps * sig1)
outputs1 = Dec1(latents1, scaled_images)
reconst_cost = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
tf.reshape(outputs1, [-1, 256]),
tf.reshape(images, [-1])
)
)
# Layer 2
# No need to inject noise into the encoder, so I pass mu1
# instead of latents1 to Enc2
mu_and_logsig2 = Enc2(mu1)
mu2, logsig2, sig2 = split(mu_and_logsig2)
if VANILLA:
latents2 = mu2
else:
eps = tf.random_normal(tf.shape(mu2))
latents2 = mu2 + (eps * sig2)
outputs2 = Dec2(latents2, latents1)
mu1_prior, logsig1_prior, sig1_prior = split(outputs2)
logsig1_prior, sig1_prior = clamp_logsig_and_sig(logsig1_prior, sig1_prior)
# Assembly
# An alpha of exactly 0 can sometimes cause inf/nan values, so we're
# careful to avoid it.
alpha1 = tf.minimum(1., tf.cast(total_iters+1, 'float16') / ALPHA1_ITERS) * KL_PENALTY
alpha2 = tf.minimum(1., tf.cast(total_iters+1, 'float16') / ALPHA2_ITERS) * alpha1# * KL_PENALTY
kl_cost_1 = tf.reduce_mean(
lib.ops.kl_gaussian_gaussian.kl_gaussian_gaussian(
mu1,
logsig1,
sig1,
mu1_prior,
logsig1_prior,
sig1_prior
)
)
kl_cost_2 = tf.reduce_mean(
lib.ops.kl_unit_gaussian.kl_unit_gaussian(
mu2,
logsig2,
sig2
)
)
kl_cost_1 *= float(LATENT_DIM_1 * LATENTS1_WIDTH * LATENTS1_HEIGHT) / (N_CHANNELS * WIDTH * HEIGHT)
kl_cost_2 *= float(LATENT_DIM_2) / (N_CHANNELS * WIDTH * HEIGHT)
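
                # Note (not in the original source): reconst_cost is an average over the
                # N_CHANNELS*WIDTH*HEIGHT softmax terms, and these factors rescale the
                # averaged KL terms onto a comparable per-subpixel scale. With the 64px
                # settings, kl_cost_1 is multiplied by (64*8*8)/(3*64*64) = 1/3 and
                # kl_cost_2 by 512/(3*64*64) ~= 0.042.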
if VANILLA:
cost = reconst_cost
else:
cost = reconst_cost + (alpha1 * kl_cost_1) + (alpha2 * kl_cost_2)
tower_cost.append(cost)
full_cost = tf.reduce_mean(
tf.concat(0, [tf.expand_dims(x, 0) for x in tower_cost]), 0
)
# Sampling
if MODE == 'one_level':
ch_sym = tf.placeholder(tf.int32, shape=None)
y_sym = tf.placeholder(tf.int32, shape=None)
x_sym = tf.placeholder(tf.int32, shape=None)
logits = tf.reshape(tf.slice(outputs1, tf.pack([0, ch_sym, y_sym, x_sym, 0]), tf.pack([-1, 1, 1, 1, -1])), [-1, 256])
dec1_fn_out = tf.multinomial(logits, 1)[:, 0]
def dec1_fn(_latents, _targets, _ch, _y, _x):
return session.run(dec1_fn_out, feed_dict={latents1: _latents, images: _targets, ch_sym: _ch, y_sym: _y, x_sym: _x, total_iters: 99999})
def enc_fn(_images):
return session.run(latents1, feed_dict={images: _images, total_iters: 99999})
sample_fn_latents1 = np.random.normal(size=(8, LATENT_DIM_2)).astype('float16')
def generate_and_save_samples(tag):
def color_grid_vis(X, nh, nw, save_path):
# from github.com/Newmu
X = X.transpose(0,2,3,1)
h, w = X[0].shape[:2]
img = np.zeros((h*nh, w*nw, 3))
for n, x in enumerate(X):
j = n/nw
i = n%nw
img[j*h:j*h+h, i*w:i*w+w, :] = x
imsave(save_path, img)
print "Generating latents1"
latents1_copied = np.zeros((64, LATENT_DIM_2), dtype='float16')
for i in xrange(8):
latents1_copied[i::8] = sample_fn_latents1
samples = np.zeros(
(64, N_CHANNELS, HEIGHT, WIDTH),
dtype='int32'
)
print "Generating samples"
for y in xrange(HEIGHT):
for x in xrange(WIDTH):
for ch in xrange(N_CHANNELS):
next_sample = dec1_fn(latents1_copied, samples, ch, y, x)
samples[:,ch,y,x] = next_sample
print "Saving samples"
color_grid_vis(
samples,
8,
8,
'{}/samples_{}.png'.format(OUT_DIR, tag)
)
elif MODE == 'two_level':
def dec2_fn(_latents, _targets):
return session.run([mu1_prior, logsig1_prior], feed_dict={latents2: _latents, latents1: _targets, total_iters: 99999})
ch_sym = tf.placeholder(tf.int32, shape=None)
y_sym = tf.placeholder(tf.int32, shape=None)
x_sym = tf.placeholder(tf.int32, shape=None)
logits = tf.reshape(tf.slice(outputs1, tf.pack([0, ch_sym, y_sym, x_sym, 0]), tf.pack([-1, 1, 1, 1, -1])), [-1, 256])
dec1_fn_out = tf.multinomial(logits, 1)[:, 0]
def dec1_fn(_latents, _targets, _ch, _y, _x):
return session.run(dec1_fn_out, feed_dict={latents1: _latents, images: _targets, ch_sym: _ch, y_sym: _y, x_sym: _x, total_iters: 99999})
def enc_fn(_images):
return session.run([latents1, latents2], feed_dict={images: _images, total_iters: 99999})
sample_fn_latents2 = np.random.normal(size=(32, LATENT_DIM_2)).astype('float16')
sample_fn_latents2[1::2] = sample_fn_latents2[0::2]
sample_fn_latent1_randn = np.random.normal(size=(LATENTS1_HEIGHT,LATENTS1_WIDTH,32,LATENT_DIM_1))
def generate_and_save_samples(tag):
def color_grid_vis(X, nh, nw, save_path):
# from github.com/Newmu
X = X.transpose(0,2,3,1)
h, w = X[0].shape[:2]
img = np.zeros((h*nh, w*nw, 3))
for n, x in enumerate(X):
j = n/nw
i = n%nw
img[j*h:j*h+h, i*w:i*w+w, :] = x
imsave(save_path, img)
print "Generating latents1"
latents1 = np.zeros(
(32, LATENT_DIM_1, LATENTS1_HEIGHT, LATENTS1_WIDTH),
dtype='float16'
)
for y in xrange(8):
for x in xrange(8):
for block in xrange(PIX_2_N_BLOCKS):
mu, logsig = dec2_fn(sample_fn_latents2, latents1)
mu = mu[:,:,y,x]
logsig = logsig[:,:,y,x]
z = mu + ( np.exp(logsig) * sample_fn_latent1_randn[y,x] )
latents1[:,block::PIX_2_N_BLOCKS,y,x] = z[:,block::PIX_2_N_BLOCKS]
latents1_copied = np.zeros(
(64, LATENT_DIM_1, LATENTS1_HEIGHT, LATENTS1_WIDTH),
dtype='float16'
)
latents1_copied[0::2] = latents1
latents1_copied[1::2] = latents1
samples = np.zeros(
(64, N_CHANNELS, HEIGHT, WIDTH),
dtype='int32'
)
print "Generating samples"
for y in xrange(HEIGHT):
for x in xrange(WIDTH):
for ch in xrange(N_CHANNELS):
next_sample = dec1_fn(latents1_copied, samples, ch, y, x)
samples[:,ch,y,x] = next_sample
print "Saving samples"
color_grid_vis(
samples,
8,
8,
'samples_{}.png'.format(tag)
)
# Train!
if MODE == 'one_level':
prints=[
('alpha', alpha),
('reconst', reconst_cost),
('kl1', kl_cost_1)
]
elif MODE == 'two_level':
prints=[
('alpha1', alpha1),
('alpha2', alpha2),
('reconst', reconst_cost),
('kl1', kl_cost_1),
('kl2', kl_cost_2),
]
decayed_lr = tf.train.exponential_decay(
LR,
total_iters,
LR_DECAY_AFTER,
LR_DECAY_FACTOR,
staircase=True
)
if args.model_weights is not None:
tf.train.Saver().restore(session, args.model_weights)
print "Model restored with file {}".format(args.model_weights)
lib.train_loop.train_loop(
session=session,
inputs=[total_iters, all_images],
inject_total_iters=True,
cost=full_cost,
prints=prints,
optimizer=tf.train.AdamOptimizer(decayed_lr),
train_data=train_data,
test_data=dev_data,
callback=generate_and_save_samples,
times=TIMES,
save_params=True,
param_dir = PARAM_DIR,
# profile=True
# debug_mode=True
)
# all_zs = []
# targets = []
# for (_images,_targets) in test_data():
# _z = enc_fn(_images)
# all_zs.append(_z)
# targets.append(_targets)
# _z = np.concatenate(all_zs, axis=0)
# targets = np.concatenate(targets, axis=0)
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# x, y = _z.T
# # x = np.random.rand(N)
# # y = np.random.rand(N)
# colors = targets
# print colors[:50]
# area = 5 #np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses
# plt.scatter(x, y, s=area, c=colors, alpha=0.5)
# plt.savefig('plot.png')
| [
"[email protected]"
] | |
b529d4e2ef137a416b1a7794a47c6e9eebffab3b | 01ac9e40052a468dd472a296df0003c4e629e2c9 | /news_all/spiders_ydyl/cppcc_all.py | 4081ec07efd2853fce653816d12c6835d575b706 | [] | no_license | Pintrue/news_all | b5cee16584ed92e6574edd825b574214df65d917 | eb8c32c79bdacd8e2f76b88f27871c3cd0118006 | refs/heads/master | 2022-03-23T13:34:10.354029 | 2019-11-22T07:40:50 | 2019-11-22T07:40:50 | 223,058,997 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | # -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from news_all.spider_models import NewsRCSpider
class Zgzxw_allSpider(NewsRCSpider):
"""中国政协网"""
name = 'zgzxw'
mystart_urls = {
        'http://www.cppcc.gov.cn/zxww/newcppcc/zxyw/index.shtml': 7642,  # National Committee of the Chinese People's Political Consultative Conference
}
rules = (
# http://www.cppcc.gov.cn/zxww/2019/06/25/ARTI1561421709036136.shtml
Rule(LinkExtractor(allow=(r'cppcc.gov.cn.*?/\d{4}/\d{2}/\d{2}/\w+\d+.shtml'),
), callback='parse_item',
follow=False),
)
def parse_item(self, response):
xp = response.xpath
try:
title = xp("//div[@class='cnt_box']/h3/text()").extract_first()
content_div = xp("//div[@class='cnt_box']/div[@class='con']")[0]
pubtime = xp("//span[@class='info']/i").re(r'\d{2,4}-\d{1,2}-\d{1,2}')[0]
origin_name = xp("//span[@class='info']/em/text()[2]").extract_first()
content, media, _, _ = self.content_clean(content_div)
except:
return self.produce_debugitem(response, "xpath error")
return self.produce_item(
response=response,
title=title,
pubtime=pubtime,
origin_name=origin_name,
content=content,
media=media
)
| [
"[email protected]"
] | |
22fe6252c1d33a331415ffaf644c9dbdb687b865 | 99b062cb9f5f3ff10c9f1fa00e43f6e8151a43a6 | /algorithm/IM/반나누기.py | 0f16cc1db1f921f7c87b424d4ce293b0641550ca | [] | no_license | HSx3/TIL | 92acc90758015c2e31660617bd927f7f100f5f64 | 981c9aaaf09c930d980205f68a28f2fc8006efcb | refs/heads/master | 2020-04-11T21:13:36.239246 | 2019-05-08T08:18:03 | 2019-05-08T08:18:03 | 162,099,042 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | import sys
sys.stdin = open("반나누기.txt")
T = int(input())
for tc in range(1, T+1):
N, Kmin, Kmax = map(int, input().split())
score = list(map(int, input().split()))
div_class = []
for T1 in range(1, 101):
for T2 in range(T1+1, 101):
A = []
B = []
C = []
for i in score:
if i >= T2:
A.append(i)
elif i < T1:
C.append(i)
else:
B.append(i)
num = [len(A), len(B), len(C)]
if max(num) <= Kmax and min(num) >= Kmin:
ans = max(num)-min(num)
div_class.append(ans)
if div_class:
print(min(div_class))
else:
print(-1)
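# Worked example (hypothetical input): with Kmin=1, Kmax=3 and scores
# [10, 50, 90], the thresholds T1=40 and T2=80 give A=[90] (>= T2),
# B=[50] (T1 <= s < T2) and C=[10] (< T1). All three group sizes lie in
# [Kmin, Kmax] and max(sizes) - min(sizes) = 0, so 0 would be printed.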
| [
"[email protected]"
] | |
2294bcc3211c94ad16f0784191f9eb000b41fb76 | 23805cffc86ac4dfb5bcce672b8c7070b4616e41 | /Apprendre-Python/sum-1-n/scripts/feedback.py | 638e0fc434e034cca499389e17e3f7f853abd000 | [] | no_license | ukonline/pythia-tasks | f90ff90299fe0eedd0e2787bcf666df07c709a00 | 81a3731eb0cdfe16b26a4e75a165a5071fb48ff5 | refs/heads/master | 2021-01-25T03:26:33.915795 | 2016-01-04T20:03:24 | 2016-01-04T20:03:24 | 40,974,655 | 0 | 2 | null | 2016-12-21T13:12:14 | 2015-08-18T13:49:39 | Python | UTF-8 | Python | false | false | 1,247 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Course: Apprendre Python
# Problem: Somme de 1 à n
# Feedback script
import ast
import csv
import json
import os
import sys
sys.path.append('/task/static')
from lib import pythia
import math
def computesum(n):
result = 0
i = 1
while i <= n:
result += i
i += 1
return result
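# The loop above is the straightforward O(n) version; the same value is given in
# closed form by Gauss' formula n * (n + 1) // 2, e.g. computesum(100) == 5050.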
class TaskFeedbackSuite(pythia.FeedbackSuite):
def __init__(self, config):
pythia.FeedbackSuite.__init__(self, '/tmp/work/output/stderr', None, '/tmp/work/input/data.csv', '/tmp/work/output/data.res', config)
def teacherCode(self, data):
return computesum(data)
def parseTestData(self, data):
return int(data[0])
# Retrieve task id
with open('/tmp/work/tid', 'r', encoding='utf-8') as file:
tid = file.read()
output = {'tid': tid, 'status': 'failed', 'feedback': {'score': 0}}
# Read test configuration
config = []
with open('/task/config/test.json', 'r', encoding='utf-8') as file:
content = file.read()
config = json.loads(content)
config = config['predefined']
(verdict, feedback) = TaskFeedbackSuite(config).generate()
output['feedback'] = feedback
output['status'] = 'success' if verdict else 'failed'
print(json.dumps(output))
| [
"[email protected]"
] | |
d7db13e9901dfdb2541b150c96b70055368e00ee | cf720b69d428b92186e84e52ff4f7eb39b8dd723 | /Probablity and Statistics/3. WAP to find the probability of drawing an ace after drawing an ace on the first draw.py | d831885a30876fdd223cd5f274b1fb19dbec87e6 | [] | no_license | bl-deepakchawla/ML-Followship-Program | b0fd2232f6dd2ea4356e4402be86cca84a5fbd60 | 41d88172ea226c42c1f56fd9e59769142575734c | refs/heads/master | 2020-04-01T13:56:11.595143 | 2018-10-31T10:28:19 | 2018-10-31T10:28:19 | 153,273,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | def pr_ace_after_ace_draw(l_ace_cards, l_total_cards):
l_pr_ace_card = (l_ace_cards/l_total_cards) * 100
return l_pr_ace_card
g_total_cards = 52
g_ace_draw = 1
g_total_cards = g_total_cards - g_ace_draw
g_ace_cards = 4 - g_ace_draw
g_pr_ace_card = pr_ace_after_ace_draw(g_ace_cards, g_total_cards)
print("Probability of the ace cards after drawing a ace from the packed card is", g_pr_ace_card, "%")
| [
"[email protected]"
] | |
af54c2b60779c31d1d0d83a753f2c43b38f14d77 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /mDzheHpwtqyXePEBE_1.py | ab0f5df7676101d833a8e557fffe7d8f9cbc98e4 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py |
def sum_polygon(n):
return (n - 2) * 180
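# Quick check: sum_polygon(3) == 180 (triangle), sum_polygon(4) == 360
# (quadrilateral), sum_polygon(5) == 540 (pentagon).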
| [
"[email protected]"
] | |
ce2c39c3477019baaa00ce42f360612cc208f501 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev2283-2409/base-trunk-2283/exe/engine/path.py | 8c18ee779da2843b3377442bd070d39038cb31c2 | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 29,655 | py | """ path.py - An object representing a path to a file or directory.
Example:
from path import Path
d = Path('/home/guido/bin')
for file_ in d.files('*.py'):
file_.chmod(0755)
This module requires Python 2.2 or later.
Licensed under GPL.
URL: http://www.jorendorff.com/articles/python/path
Author: Jason Orendorff <[email protected]> (and others - see the url!)
Date: 7 Mar 2004
"""
from __future__ import generators
import sys, os, fnmatch, glob, shutil, codecs, md5
from tempfile import mkdtemp
__version__ = '2.0.4'
__all__ = ['Path', 'TempDirPath']
_textmode = 'r'
if hasattr(file, 'newlines'):
_textmode = 'U'
def getFileSystemEncoding():
"""
Returns file system default encoding name,
eg. Ascii, mbcs, utf-8, etc...
"""
encoding = sys.getfilesystemencoding()
if encoding is None:
return 'utf-8'
else:
return encoding
class Path(unicode):
""" Represents a filesystem Path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
fileSystemEncoding = getFileSystemEncoding()
def __new__(cls, filename=u'', encoding=None):
"""
Gently converts the filename to unicode
"""
if encoding is None:
encoding = Path.fileSystemEncoding
return unicode.__new__(cls, toUnicode(filename, encoding))
def __repr__(self):
return 'Path(%s)' % unicode.__repr__(self)
def __str__(self):
return self.encode(Path.fileSystemEncoding)
def __add__(self, more):
return Path(toUnicode(self) + toUnicode(more))
def __radd__(self, other):
return Path(toUnicode(other) + toUnicode(self))
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two Path components, adding a separator character if
needed.
"""
return Path(os.path.join(toUnicode(self), toUnicode(rel)))
__truediv__ = __div__
@staticmethod
def getcwd():
""" Return the current working directory as a path object. """
return Path(os.getcwd())
def abspath(self):
"""Wraps os.path.abspath"""
return Path(os.path.abspath(self))
def normcase(self):
"""Wraps os.path.normcase"""
return Path(os.path.normcase(self))
def normpath(self):
"""Wraps os.path.normpath"""
return Path(os.path.normpath(self))
def realpath(self):
"""Wraps os.path.realpath"""
return Path(os.path.realpath(self))
def expanduser(self):
"""Wraps os.path.expanduser"""
return Path(os.path.expanduser(self))
def expandvars(self):
"""Wraps os.path.expandvars"""
return Path(os.path.expandvars(self))
def dirname(self):
"""Wraps os.path.dirname"""
return Path(os.path.dirname(self))
def basename(self):
"""Wraps os.path.basename"""
return Path(os.path.basename(self))
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
"""Returns everything before the . in the extension"""
return Path(os.path.splitext(self.name)[0])
def _get_ext(self):
"""Returns the extension only (including the dot)"""
return os.path.splitext(toUnicode(self))[1]
def _get_drive(self):
"""Returns the drive letter (in dos & win)"""
drive = os.path.splitdrive(self)[0]
return Path(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example,
Path('/usr/local/lib/libpython.so').parent == Path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, Path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example,
Path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but Path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return Path(parent), Path(child)
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (Path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return Path(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return Path(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, Path('/home/guido/python.tar.gz').stripext()
returns Path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
"""NT Only: Split a pathname into UNC mount point and relative path
specifiers.
eg. Path(r'\\dbserver\homes\matthew\work\stuff.py').splitunc() == \
                (Path(r'\\dbserver\homes'), Path(r'\matthew\work\stuff.py'))"""
unc, rest = os.path.splitunc(self)
return Path(unc), Path(rest)
def _get_uncshare(self):
"""NT Only: Returns only the server and share name from a unc path
name.
eg. Path(r'\\dbserver\homes\matthew\work\stuff.py').uncshare() == \
Path(r'\\dbserver\homes')"""
return Path(os.path.splitunc(self)[0])
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return Path(os.path.join(toUnicode(self), *args))
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, '/' or 'C:\\'). The other items in
the list will be strings.
path.path.joinpath(*result) will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
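    # Usage sketch (hypothetical path): Path('/usr/local/lib').splitall()
    # returns the root ('/') followed by the components 'usr', 'local' and
    # 'lib'; joining the pieces back with os.path.join reproduces the
    # original path.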
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = Path(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = Path(dest).abspath()
orig_list = origin.normcase().splitall()
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
return dest
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
segments = [os.pardir] * (len(orig_list) - i)
segments += dest_list[i:]
if len(segments) == 0:
return Path(os.curdir)
else:
return Path(os.path.join(*segments))
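    # Usage sketch (hypothetical paths): on a POSIX system,
    # Path('/home/guido/bin').relpathto('/home/guido/src/python') gives
    # Path('../src/python'); a destination on a different Windows drive has no
    # relative form and falls back to dest.abspath().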
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see path.walkdirs).
With the optional 'pattern' argument, this only lists
directories whose names match the given pattern. For
example, d.dirs('build-*').
"""
return [pth for pth in self.listdir(pattern) if pth.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see path.walkfiles).
With the optional 'pattern' argument, this only lists files
whose names match the given pattern. For example,
d.files('*.pyc').
"""
return [pth for pth in self.listdir(pattern) if pth.isfile()]
def walk(self, pattern=None):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
"""
for child in self.listdir():
if pattern is None or child.fnmatch(pattern):
yield child
if child.isdir():
for item in child.walk(pattern):
yield item
def walkdirs(self, pattern=None):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional 'pattern' argument, this yields only
directories whose names match the given pattern. For
example, mydir.walkdirs('*test') yields only directories
with names ending in 'test'.
"""
for child in self.dirs():
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern):
yield subsubdir
def walkfiles(self, pattern=None):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
for child in self.listdir():
if child.isfile():
if pattern is None or child.fnmatch(pattern):
yield child
elif child.isdir():
for pth in child.walkfiles(pattern):
yield pth
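    # Usage sketch (hypothetical directory tree): total size of all Python
    # sources below a project root, assuming such files exist:
    #
    #     total = sum(f.size for f in Path('/my/project').walkfiles('*.py'))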
def fnmatch(self, pattern):
""" Return True if self.name matches the given pattern.
pattern - A filename pattern with wildcards,
for example '*.py'.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, Path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
return map(Path, glob.glob(toUnicode(self / pattern)))
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
file_ = self.open('rb')
try:
return file_.read()
finally:
file_.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call this with write_bytes(bytes, append=True) to append instead.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
file_ = self.open(mode)
try:
file_.write(bytes)
finally:
file_.close()
def text(self, encoding=None, errors='strict'):
""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
file_ = self.open(_textmode)
try:
return file_.read()
finally:
file_.close()
else:
file_ = codecs.open(self, 'r', encoding, errors)
try:
data = file_.read()
finally:
file_.close()
return (data.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None,
errors='strict', linesep=os.linesep,
append=False):
""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
file_ = self.open(_textmode)
try:
return file_.readlines()
finally:
file_.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
file_ = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
file_.write(line)
finally:
file_.close()
exists = os.path.exists
isabs = os.path.isabs
isdir = os.path.isdir
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
getatime = os.path.getatime
atime = property(
getatime, None, None,
""" Last access time of the file. """)
getmtime = os.path.getmtime
mtime = property(
getmtime, None, None,
""" Last-modified time of the file. """)
if hasattr(os.path, 'getctime'):
getctime = os.path.getctime
ctime = property(
getctime, None, None,
""" Creation time of the file. """)
getsize = os.path.getsize
size = property(
getsize, None, None,
""" Size of the file, in bytes. """)
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
"""Wraps os.pathconf"""
return os.pathconf(self, name)
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
def chmod(self, mode):
"""Change the permissions of the file"""
os.chmod(self, mode)
def chdir(self):
"""Change the current working directory
to self"""
os.chdir(toUnicode(self))
if hasattr(os, 'chown'):
def chown(self, uid, gid):
"""Change the owner (uid) and owning group
(gid) of the file"""
os.chown(self, uid, gid)
def rename(self, new):
"""Rename the file.
Returns a new path object with the new name"""
os.rename(self, new)
return Path(new)
def renames(self, new):
"""Renames creating directories if necessary.
Returns a new path object with the new name"""
os.renames(self, new)
return Path(new)
def mkdir(self, mode=0777):
"""Make a new directory with
this pathname"""
os.mkdir(self, mode)
def makedirs(self, mode=0777):
"""Make directories with this pathname
will create multiple dirs as necessary"""
os.makedirs(self, mode)
def rmdir(self):
"""Remove the directory with this pathname"""
os.rmdir(self)
def removedirs(self):
"""Remove all the empty dirs mentioned in this pathname"""
os.removedirs(self)
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
def remove(self):
"""Delete this file"""
os.remove(self)
def unlink(self):
"""Unlink this symlink"""
os.unlink(self)
if hasattr(os, 'link'):
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return Path(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
pth = self.readlink()
if pth.isabs():
return pth
else:
return (self.parent / pth).abspath()
def copyfile(self, dst):
"""Wraps shutil.copyfile"""
return shutil.copyfile(toUnicode(self), toUnicode(dst))
def copymode(self, dst):
"""Wraps shutil.copymode"""
return shutil.copymode(toUnicode(self), toUnicode(dst))
def copystat(self, dst):
"""Wraps shutil.copystat"""
return shutil.copystat(toUnicode(self), toUnicode(dst))
def copy(self, dst):
"""Wraps shutil.copy"""
return shutil.copy(toUnicode(self), toUnicode(dst))
def copy2(self, dst):
"""Wraps shutil.copy2"""
return shutil.copy2(toUnicode(self), toUnicode(dst))
def copytree(self, dst):
"""Wraps shutil.copytree"""
return shutil.copytree(toUnicode(self), toUnicode(dst))
if hasattr(shutil, 'move'):
def move(self, dst):
"""Wraps shutil.move"""
return shutil.move(toUnicode(self), toUnicode(dst))
def rmtree(self):
"""Wraps shutil.rmtree"""
return shutil.rmtree(toUnicode(self))
if hasattr(os, 'chroot'):
def chroot(self):
"""Change the root dir to this path name"""
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
"""Run this file with the appropriate program"""
os.startfile(self)
def copyglob(self, globStr, destination):
"""
>>> x = Path('~')
>>> Path('~/tmp').mkdir()
>>> x.copyglob('.*', '~/tmp')
"""
for fn in self.glob(globStr):
fn.copy(destination)
def copylist(self, fnlist, destination):
"""
Given a sequence of relative file names ('fnlist')
copies them to 'destination'
"""
for fn in fnlist:
(self / fn).copy(destination)
def copyfiles(self, destination):
"""
Copies the content of files to 'destination' directory
"""
for fn in self.files():
fn.copy(destination)
def getMd5(self):
"""Returns an md5 hash for an object with read() method."""
try:
file_ = file(self, 'rb')
except:
raise Exception("Could not open %s" % self)
hasher = md5.new()
while True:
block = file_.read(8096)
if not block:
break
hasher.update(block)
file_.close()
return hasher.hexdigest()
md5 = property(getMd5)
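    # Usage sketch (hypothetical file): Path('/etc/hostname').md5 returns the
    # hex digest of the file contents. The deprecated 'md5' module is used
    # here; hashlib.md5 would produce the same digest and could be swapped in.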
class TempDirPath(Path):
"""
This object, when created gives you a Path object
pointing to a newly created temporary directory.
When this object goes out of scope, the directory
is obliterated.
You can call 'rmtree' yourself if you want before
dropping the object to make sure.
"""
def __new__(cls):
return Path.__new__(cls, mkdtemp())
def __del__(self):
"""Destroy the temporary directory"""
if self.exists():
self.rmtree()
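    # Usage sketch: the directory lives only as long as the object does.
    #
    #     tmp = TempDirPath()
    #     (tmp / 'scratch.txt').write_text('hello')
    #     del tmp   # the directory tree is removed here (or at garbage collection)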
def toUnicode(string, encoding='utf8'):
"""
Turns everything passed to it to unicode.
"""
if isinstance(string, str):
return unicode(string, encoding)
elif isinstance(string, unicode):
return unicode(string)
elif string is None:
return u''
else:
return unicode(str(string), encoding)
| [
"[email protected]"
] | |
964f0f6d8e473b5059787827e5796aee63a0f3bf | 5e6ee5d0f840bf031bab13df120987dc31491034 | /restfulapicrud/restfulapicrud/urls.py | 31852b1d2f0d2de7f58a9906e25779311f8ea86f | [] | no_license | cal1log/heroku_api_rest | 9027d3b24c704ed8f15c6badd7dd8fca62894cfa | a800706ac5e24a26ef7c68dc039b6bba0448fce3 | refs/heads/main | 2023-06-28T00:11:14.578647 | 2021-07-26T21:03:12 | 2021-07-26T21:03:12 | 388,584,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | """restfulapicrud URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from .router import router
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include(router.urls))
]
| [
"[email protected]"
] | |
7d4e6bffda2aa0fba7162dcdddd61002522e4a74 | 5f408e5c43cf39a0ae6abf1c1309004f997cc7ee | /extras/benchmark/format_bench_results.py | 45065d4b7c14abb9180393e80434e2d0bf098081 | [
"Apache-2.0"
] | permissive | fran6co/fruit | 408e24385ded3c8d49e23f0c572e4ce4d7569622 | b34b703b4d50b49a658aeab227765bc3260d7d89 | refs/heads/master | 2021-01-19T10:34:32.890334 | 2017-05-09T08:42:15 | 2017-05-09T08:42:15 | 82,211,310 | 0 | 0 | null | 2017-02-16T18:14:59 | 2017-02-16T18:14:59 | null | UTF-8 | Python | false | false | 13,384 | py | #!/usr/bin/env python3
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import yaml
from collections import defaultdict
def extract_results(bench_results, fixed_benchmark_params, column_dimension, row_dimension, result_dimension):
table_data = defaultdict(lambda: dict())
remaining_dimensions_by_row_column = dict()
for bench_result in bench_results:
try:
params = {dimension_name: make_immutable(dimension_value)
for dimension_name, dimension_value in bench_result['benchmark'].items()}
results = bench_result['results']
for param_name, param_value in fixed_benchmark_params.items():
if params.get(param_name) != param_value:
# fixed_benchmark_params not satisfied by this result, skip
break
if result_dimension not in results:
# result_dimension not found in this result, skip
break
params.pop(param_name)
else:
# fixed_benchmark_params were satisfied by these params (and were removed)
assert row_dimension in params.keys(), '%s not in %s' % (row_dimension, params.keys())
assert column_dimension in params.keys(), '%s not in %s' % (column_dimension, params.keys())
assert result_dimension in results, '%s not in %s' % (result_dimension, results)
row_value = params[row_dimension]
column_value = params[column_dimension]
remaining_dimensions = params.copy()
remaining_dimensions.pop(row_dimension)
remaining_dimensions.pop(column_dimension)
if column_value in table_data[row_value]:
previous_remaining_dimensions = remaining_dimensions_by_row_column[(row_value, column_value)]
raise Exception(
'Found multiple benchmark results with the same fixed benchmark params, benchmark param for row and benchmark param for column, so a result can\'t be uniquely determined. '
+ 'Consider adding additional values in fixed_benchmark_params. Remaining dimensions: %s vs %s' % (
remaining_dimensions, previous_remaining_dimensions))
table_data[row_value][column_value] = results[result_dimension]
remaining_dimensions_by_row_column[(row_value, column_value)] = remaining_dimensions
except Exception as e:
raise Exception('While processing %s' % bench_result) from e
return table_data
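# Shape sketch of the returned value (hypothetical dimension names): with
# row_dimension='compiler', column_dimension='num_classes' and
# result_dimension='compile_time', table_data would look like
#     {'g++-5': {100: [1.2, 1.4], 1000: [10.1, 10.9]}, ...}
# i.e. a dict keyed by row value whose inner dicts map column values to the
# [lower, upper] confidence interval taken from the benchmark results.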
def identity(x):
return x
# Takes a 2-dimensional array (list of lists) and prints a markdown table with that content.
def print_markdown_table(table_data):
max_content_length_by_column = [max([len(str(row[column_index])) for row in table_data])
for column_index in range(len(table_data[0]))]
for row_index in range(len(table_data)):
row = table_data[row_index]
cell_strings = []
for column_index in range(len(row)):
value = str(row[column_index])
# E.g. if max_content_length_by_column=20, table_cell_format='%20s'
table_cell_format = '%%%ss' % max_content_length_by_column[column_index]
cell_strings += [table_cell_format % value]
print('| ' + ' | '.join(cell_strings) + ' |')
if row_index == 0:
# Print the separator line, e.g. |---|-----|---|
print('|-'
+ '-|-'.join(['-' * max_content_length_by_column[column_index]
for column_index in range(len(row))])
+ '-|')
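# Example (hypothetical data):
#     print_markdown_table([['compiler', 'time'], ['g++', '1.2 s']])
# prints a two-column markdown table with a |---|---| separator line after the
# header row; every cell is right-aligned to the widest entry of its column.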
# Takes a table as a dict of dicts (where each table_data[row_key][column_key] is a confidence interval) and prints it as a markdown table using
# the specified pretty print functions for column keys, row keys and values respectively.
# column_header_pretty_printer and row_header_pretty_printer must be functions taking a single value and returning the pretty-printed version.
# value_pretty_printer must be a function taking (value_confidence_interval, min_in_table, max_in_table).
def print_confidence_intervals_table(table_name,
table_data,
column_header_pretty_printer=identity,
row_header_pretty_printer=identity,
value_pretty_printer=identity):
if table_data == {}:
print('%s: (no data)' % table_name)
return
row_headers = sorted(list(table_data.keys()))
# We need to compute the union of the headers of all rows; some rows might be missing values for certain columns.
column_headers = sorted(set().union(*[list(row_values.keys()) for row_values in table_data.values()]))
values_by_row = {row_header: [table_data[row_header][column_header]
for column_header in column_headers
if column_header in table_data[row_header]]
for row_header in row_headers}
# We compute min and max and pass it to the value pretty-printer, so that it can determine a unit that works well for all values in the table.
min_in_table = min([min([interval[0] for interval in values_by_row[row_header]])
for row_header in row_headers])
max_in_table = max([max([interval[1] for interval in values_by_row[row_header]])
for row_header in row_headers])
table_content = []
table_content += [[table_name] + [column_header_pretty_printer(column_header) for column_header in column_headers]]
for row_header in row_headers:
table_content += [[row_header_pretty_printer(row_header)]
+ [value_pretty_printer(table_data[row_header][column_header], min_in_table, max_in_table) if column_header in table_data[
row_header]
else 'N/A'
for column_header in column_headers]]
print_markdown_table(table_content)
def format_string_pretty_printer(format_string):
def pretty_print(s):
return format_string % s
return pretty_print
def interval_pretty_printer(interval, unit, multiplier):
interval = interval.copy()
interval[0] *= multiplier
interval[1] *= multiplier
# This prevents the format strings below from printing '.0' for numbers that already have 2 digits:
# 23.0 -> 23
# 2.0 -> 2.0 (here we don't remove the '.0' because printing just '2' might suggest a lower precision)
if int(interval[0]) == interval[0] and interval[0] >= 10:
interval[0] = int(interval[0])
else:
interval[0] = '%.3g' % interval[0]
if int(interval[1]) == interval[1] and interval[1] >= 10:
interval[1] = int(interval[1])
else:
interval[1] = '%.3g' % interval[1]
if interval[0] == interval[1]:
return '%s %s' % (interval[0], unit)
else:
return '%s-%s %s' % (interval[0], interval[1], unit)
# Finds the best unit to represent values in the range [min_value, max_value].
# The units must be specified as an ordered list [multiplier1, ..., multiplierN]
def find_best_unit(units, min_value, max_value):
assert min_value <= max_value
if max_value <= units[0]:
return units[0]
for i in range(len(units) - 1):
if min_value > units[i] and max_value < units[i + 1]:
return units[i]
if min_value > units[-1]:
return units[-1]
# There is no unit that works very well for all values, first let's try relaxing the min constraint
for i in range(len(units) - 1):
if min_value > units[i] * 0.2 and max_value < units[i + 1]:
return units[i]
if min_value > units[-1] * 0.2:
return units[-1]
# That didn't work either, just use a unit that works well for the min values then
for i in reversed(range(len(units))):
if min_value > units[i]:
return units[i]
assert min_value <= min(units)
# Pick the smallest unit
return units[0]
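# Example (hypothetical values): with units=[1e-6, 1e-3, 1] (us/ms/s), a table
# whose values span 0.002..0.9 seconds picks 1e-3, so everything is printed in
# milliseconds; when no single unit fits, the lower bound is relaxed and the
# function finally falls back to the smallest unit.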
def time_interval_pretty_printer(time_interval, min_in_table, max_in_table):
sec = 1
milli = 0.001
micro = milli * milli
units = [micro, milli, sec]
unit_name_by_unit = {micro: 'us', milli: 'ms', sec: 's'}
unit = find_best_unit(units, min_in_table, max_in_table)
unit_name = unit_name_by_unit[unit]
return interval_pretty_printer(time_interval, unit=unit_name, multiplier=1 / unit)
def file_size_interval_pretty_printer(file_size_interval, min_in_table, max_in_table):
byte = 1
kb = 1024
mb = kb * kb
units = [byte, kb, mb]
unit_name_by_unit = {byte: 'bytes', kb: 'KB', mb: 'MB'}
unit = find_best_unit(units, min_in_table, max_in_table)
unit_name = unit_name_by_unit[unit]
return interval_pretty_printer(file_size_interval, unit=unit_name, multiplier=1 / unit)
def make_immutable(x):
if isinstance(x, list):
return tuple(make_immutable(elem) for elem in x)
return x
def dict_pretty_printer(dict_data):
if isinstance(dict_data, list):
dict_data = {make_immutable(mapping['from']): mapping['to'] for mapping in dict_data}
def pretty_print(s):
if s in dict_data:
return dict_data[s]
else:
raise Exception('dict_pretty_printer(%s) can\'t handle the value %s' % (dict_data, s))
return pretty_print
def determine_column_pretty_printer(pretty_printer_definition):
if 'format_string' in pretty_printer_definition:
return format_string_pretty_printer(pretty_printer_definition['format_string'])
if 'fixed_map' in pretty_printer_definition:
return dict_pretty_printer(pretty_printer_definition['fixed_map'])
raise Exception("Unrecognized pretty printer description: %s" % pretty_printer_definition)
def determine_row_pretty_printer(pretty_printer_definition):
return determine_column_pretty_printer(pretty_printer_definition)
def determine_value_pretty_printer(unit):
if unit == "seconds":
return time_interval_pretty_printer
if unit == "bytes":
return file_size_interval_pretty_printer
raise Exception("Unrecognized unit: %s" % unit)
def main():
parser = argparse.ArgumentParser(description='Runs all the benchmarks whose results are on the Fruit website.')
parser.add_argument('--benchmark-results',
help='The input file where benchmark results will be read from (1 per line, with each line in JSON format). You can use the run_benchmarks.py to run a benchmark and generate results in this format.')
parser.add_argument('--benchmark-tables-definition', help='The YAML file that defines the benchmark tables (e.g. fruit_wiki_bench_tables.yaml).')
args = parser.parse_args()
if args.benchmark_results is None:
raise Exception("You must specify a benchmark results file using --benchmark-results.")
if args.benchmark_tables_definition is None:
raise Exception("You must specify a benchmark tables definition file using --benchmark-tables-definition.")
with open(args.benchmark_results, 'r') as f:
bench_results = [json.loads(line) for line in f.readlines()]
with open(args.benchmark_tables_definition, 'r') as f:
for table_definition in yaml.load(f)["tables"]:
fixed_benchmark_params = {dimension_name: make_immutable(dimension_value) for dimension_name, dimension_value in table_definition['benchmark_filter'].items()}
table_data = extract_results(
bench_results,
fixed_benchmark_params=fixed_benchmark_params,
column_dimension=table_definition['columns']['dimension'],
row_dimension=table_definition['rows']['dimension'],
result_dimension=table_definition['results']['dimension'])
rows_pretty_printer_definition = table_definition['rows']['pretty_printer']
columns_pretty_printer_definition = table_definition['columns']['pretty_printer']
results_unit = table_definition['results']['unit']
print_confidence_intervals_table(table_definition['name'],
table_data,
column_header_pretty_printer=determine_column_pretty_printer(columns_pretty_printer_definition),
row_header_pretty_printer=determine_row_pretty_printer(rows_pretty_printer_definition),
value_pretty_printer=determine_value_pretty_printer(results_unit))
print()
print()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
cc93233f0b52c7d0783f65cc3716005438cc0fd3 | 164ffe077dde59373ad9fadcfd727f279a1cfe93 | /jni_build/jni/include/tensorflow/python/client/timeline.py | 03e07a27ad5cafa21dfe51e752b2a4f8cbfac7b7 | [] | no_license | Basofe/Community_Based_Repository_Traffic_Signs | 524a4cfc77dc6ed3b279556e4201ba63ee8cf6bd | a20da440a21ed5160baae4d283c5880b8ba8e83c | refs/heads/master | 2021-01-22T21:17:37.392145 | 2017-09-28T21:35:58 | 2017-09-28T21:35:58 | 85,407,197 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 24,412 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeline visualization for TensorFlow using Chrome Trace Format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import six # pylint: disable=unused-import
# The timeline target is usually imported as part of BUILD target
# "platform_test", which includes also includes the "platform"
# dependency. This is why the logging import here is okay.
from tensorflow.python.platform import tf_logging as logging
class AllocationMaximum(collections.namedtuple(
'AllocationMaximum', ('timestamp', 'num_bytes', 'tensors'))):
"""Stores the maximum allocation for a given allocator within the timelne.
Parameters:
timestamp: `tensorflow::Env::NowMicros()` when this maximum was reached.
num_bytes: the total memory used at this time.
tensors: the set of tensors allocated at this time.
"""
pass
class StepStatsAnalysis(collections.namedtuple(
'StepStatsAnalysis', ('chrome_trace', 'allocator_maximums'))):
"""Stores the step stats analysis output.
Parameters:
chrome_trace: A dict containing the chrome trace analysis.
allocator_maximums: A dict mapping allocator names to AllocationMaximum.
"""
pass
class _ChromeTraceFormatter(object):
"""A helper class for generating traces in Chrome Trace Format."""
def __init__(self, show_memory=False):
"""Constructs a new Chrome Trace formatter."""
self._show_memory = show_memory
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_tid(self, name, pid, tid):
"""Adds a thread metadata event to the trace.
Args:
name: The thread name as a string.
pid: Identifier of the process as an integer.
tid: Identifier of the thread as an integer.
"""
event = {}
event['name'] = 'thread_name'
event['ph'] = 'M'
event['pid'] = pid
event['tid'] = tid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def emit_obj_create(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('N', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object deletion event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('D', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id,
snapshot):
"""Adds an object snapshot event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
snapshot: A JSON-compatible representation of the object.
"""
event = self._create_event('O', category, name, pid, tid, timestamp)
event['id'] = object_id
event['args'] = {'snapshot': snapshot}
self._events.append(event)
def emit_flow_start(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow start event to the trace.
When matched with a flow end event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_flow_end(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow end event to the trace.
When matched with a flow start event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_counter(self, category, name, pid, timestamp, counter, value):
"""Emits a record for a single counter.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counter: Name of the counter as a string.
value: Value of the counter as an integer.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = {counter: value}
self._events.append(event)
def emit_counters(self, category, name, pid, timestamp, counters):
"""Emits a counter record for the dictionary 'counters'.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counters: Dictionary of counter values.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = counters.copy()
self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
class _TensorTracker(object):
"""An internal class to track the lifetime of a Tensor."""
def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes):
"""Creates an object to track tensor references.
This class is not thread safe and is intended only for internal use by
the 'Timeline' class in this file.
Args:
name: The name of the Tensor as a string.
object_id: Chrome Trace object identifier assigned for this Tensor.
timestamp: The creation timestamp of this event as a long integer.
      pid: Process identifier of the associated device, as an integer.
allocator: Name of the allocator used to create the Tensor.
num_bytes: Number of bytes allocated (long integer).
Returns:
A 'TensorTracker' object.
"""
self._name = name
self._pid = pid
self._object_id = object_id
self._create_time = timestamp
self._allocator = allocator
self._num_bytes = num_bytes
self._ref_times = []
self._unref_times = []
@property
def name(self):
"""Name of this tensor."""
return self._name
@property
def pid(self):
"""ID of the process which created this tensor (an integer)."""
return self._pid
@property
def create_time(self):
"""Timestamp when this tensor was created (long integer)."""
return self._create_time
@property
def object_id(self):
"""Returns the object identifier of this tensor (integer)."""
return self._object_id
@property
def num_bytes(self):
"""Size of this tensor in bytes (long integer)."""
return self._num_bytes
@property
def allocator(self):
"""Name of the allocator used to create this tensor (string)."""
return self._allocator
@property
def last_unref(self):
"""Last unreference timestamp of this tensor (long integer)."""
return max(self._unref_times)
def add_ref(self, timestamp):
"""Adds a reference to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object reference as an integer.
"""
self._ref_times.append(timestamp)
def add_unref(self, timestamp):
"""Adds an unref to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object unreference as an integer.
"""
self._unref_times.append(timestamp)
class Timeline(object):
"""A class for visualizing execution timelines of TensorFlow steps."""
def __init__(self, step_stats, graph=None):
"""Constructs a new Timeline.
A 'Timeline' is used for visualizing the execution of a TensorFlow
computation. It shows the timings and concurrency of execution at
the granularity of TensorFlow Ops.
This class is not thread safe.
Args:
step_stats: The 'StepStats' proto recording execution times.
graph: (Optional) The 'Graph' that was executed.
"""
self._step_stats = step_stats
self._graph = graph
self._chrome_trace = _ChromeTraceFormatter()
self._next_pid = 0
self._device_pids = {} # device name -> pid for compute activity.
self._tensor_pids = {} # device name -> pid for tensors.
self._tensors = {} # tensor_name -> TensorTracker
self._next_flow_id = 0
self._flow_starts = {} # tensor_name -> (timestamp, pid, tid)
self._alloc_times = {} # tensor_name -> ( time, allocator, size )
    self._allocator_maximums = {}  # allocator name -> AllocationMaximum.
def _alloc_pid(self):
"""Allocate a process Id."""
pid = self._next_pid
self._next_pid += 1
return pid
def _alloc_flow_id(self):
"""Allocate a flow Id."""
flow_id = self._next_flow_id
self._next_flow_id += 1
return flow_id
def _parse_op_label(self, label):
"""Parses the fields in a node timeline label."""
nn, rest = label.split(' = ')
op, rest = rest.split('(')
if rest == ')':
inputs = []
else:
inputs = rest[:-1].split(', ')
return nn, op, inputs
def _assign_lanes(self):
"""Assigns non-overlapping lanes for the activities on each device."""
for device_stats in self._step_stats.dev_stats:
# TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful.
lanes = [0]
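      # Greedy interval packing: each activity is placed on the first lane
      # that is already free at its start time; a new lane is opened only
      # when every existing lane is still busy.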
for ns in device_stats.node_stats:
l = -1
for (i, lts) in enumerate(lanes):
if ns.all_start_micros > lts:
l = i
lanes[l] = ns.all_start_micros + ns.all_end_rel_micros
break
if l < 0:
l = len(lanes)
lanes.append(ns.all_start_micros + ns.all_end_rel_micros)
ns.thread_id = l
def _emit_op(self, nodestats, pid, is_gputrace):
"""Generates a Chrome Trace event to show Op execution.
Args:
nodestats: The 'NodeExecStats' proto recording op execution.
pid: The pid assigned for the device where this op ran.
is_gputrace: If True then this op came from the GPUTracer.
"""
node_name = nodestats.node_name
start = nodestats.all_start_micros
duration = nodestats.all_end_rel_micros
tid = nodestats.thread_id
if is_gputrace:
# Node names should always have the form 'name:op'.
fields = node_name.split(':') + ['unknown']
node_name, op = fields[:2]
inputs = []
else:
_, op, inputs = self._parse_op_label(nodestats.timeline_label)
args = {'name': node_name, 'op': op}
for i, iname in enumerate(inputs):
args['input%d' % i] = iname
self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)
def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value):
"""Generate Chrome Trace snapshot event for a computed Tensor.
Args:
tensor: A 'TensorTracker' object.
timestamp: The timestamp of this snapshot as a long integer.
pid: The pid assigned for showing the device where this op ran.
tid: The tid of the thread computing the tensor snapshot.
value: A JSON-compliant snapshot of the object.
"""
desc = str(value.tensor_description).replace('"', '')
snapshot = {'tensor_description': desc}
self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid,
tid, tensor.object_id, snapshot)
def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes):
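    """Creates and registers a _TensorTracker for one op output.
    The object id is simply the insertion index into self._tensors; it only
    needs to be unique so that the Chrome Trace object create/snapshot/delete
    events emitted for the same tensor can be correlated.
    """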
object_id = len(self._tensors)
tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator,
num_bytes)
self._tensors[name] = tensor
return tensor
def _is_gputrace_device(self, device_name):
"""Returns true if this device is part of the GPUTracer logging."""
return '/stream:' in device_name or '/memcpy' in device_name
def _allocate_pids(self):
"""Allocate fake process ids for each device in the StepStats."""
self._allocators_pid = self._alloc_pid()
self._chrome_trace.emit_pid('Allocators', self._allocators_pid)
# Add processes in the Chrome trace to show compute and data activity.
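    # Each device therefore appears as two synthetic trace processes: a
    # '<device> Compute' process holding op execution slices and a
    # '<device> Tensors' process holding tensor object lifetimes, alongside
    # the single shared 'Allocators' process used for the memory counters.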
for dev_stats in self._step_stats.dev_stats:
device_pid = self._alloc_pid()
self._device_pids[dev_stats.device] = device_pid
tensors_pid = self._alloc_pid()
self._tensor_pids[dev_stats.device] = tensors_pid
self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid)
self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid)
def _analyze_tensors(self, show_memory):
"""Analyze tensor references to track dataflow."""
for dev_stats in self._step_stats.dev_stats:
device_pid = self._device_pids[dev_stats.device]
tensors_pid = self._tensor_pids[dev_stats.device]
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
node_name = node_stats.node_name
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
for index, output in enumerate(node_stats.output):
if index:
output_name = '%s:%d' % (node_name, index)
else:
output_name = node_name
allocation = output.tensor_description.allocation_description
num_bytes = allocation.requested_bytes
allocator_name = allocation.allocator_name
tensor = self._produce_tensor(output_name, start_time, tensors_pid,
allocator_name, num_bytes)
tensor.add_ref(start_time)
tensor.add_unref(end_time)
self._flow_starts[output_name] = (end_time, device_pid, tid)
if show_memory:
self._chrome_trace.emit_obj_create('Tensor', output_name,
start_time, tensors_pid, tid,
tensor.object_id)
self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid,
output)
def _show_compute(self, show_dataflow):
"""Visualize the computation activity."""
for dev_stats in self._step_stats.dev_stats:
device_name = dev_stats.device
device_pid = self._device_pids[device_name]
is_gputrace = self._is_gputrace_device(device_name)
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
self._emit_op(node_stats, device_pid, is_gputrace)
if is_gputrace:
continue
_, _, inputs = self._parse_op_label(node_stats.timeline_label)
for input_name in inputs:
if input_name not in self._tensors:
# This can happen when partitioning has inserted a Send/Recv.
# We remove the numeric suffix so that the dataflow appears to
# come from the original node. Ideally, the StepStats would
# contain logging for the Send and Recv nodes.
index = input_name.rfind('/_')
if index > 0:
input_name = input_name[:index]
if input_name in self._tensors:
tensor = self._tensors[input_name]
tensor.add_ref(start_time)
tensor.add_unref(end_time - 1)
if show_dataflow:
# We use a different flow ID for every graph edge.
create_time, create_pid, create_tid = self._flow_starts[
input_name]
# Don't add flows when producer and consumer ops are on the same
# pid/tid since the horizontal arrows clutter the visualization.
if create_pid != device_pid or create_tid != tid:
flow_id = self._alloc_flow_id()
self._chrome_trace.emit_flow_start(input_name, create_time,
create_pid, create_tid,
flow_id)
self._chrome_trace.emit_flow_end(input_name, start_time,
device_pid, tid, flow_id)
else:
logging.vlog(1, 'Can\'t find tensor %s - removed by CSE?',
input_name)
def _show_memory_counters(self):
"""Produce a counter series for each memory allocator."""
# Iterate over all tensor trackers to build a list of allocations and
# frees for each allocator. Then sort the lists and emit a cumulative
# counter series for each allocator.
allocations = {}
for name in self._tensors:
tensor = self._tensors[name]
self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref,
tensor.pid, 0, tensor.object_id)
allocator = tensor.allocator
if allocator not in allocations:
allocations[allocator] = []
num_bytes = tensor.num_bytes
allocations[allocator].append((tensor.create_time, num_bytes, name))
allocations[allocator].append((tensor.last_unref, -num_bytes, name))
alloc_maxes = {}
# Generate a counter series showing total allocations for each allocator.
for allocator in allocations:
alloc_list = allocations[allocator]
alloc_list.sort()
total_bytes = 0
alloc_tensor_set = set()
alloc_maxes[allocator] = AllocationMaximum(
timestamp=0, num_bytes=0, tensors=set())
for time, num_bytes, name in alloc_list:
total_bytes += num_bytes
if num_bytes < 0:
alloc_tensor_set.discard(name)
else:
alloc_tensor_set.add(name)
if total_bytes > alloc_maxes[allocator].num_bytes:
alloc_maxes[allocator] = AllocationMaximum(
timestamp=time,
num_bytes=total_bytes,
tensors=copy.deepcopy(alloc_tensor_set))
self._chrome_trace.emit_counter('Memory', allocator,
self._allocators_pid, time, allocator,
total_bytes)
self._allocator_maximums = alloc_maxes
def analyze_step_stats(self, show_dataflow=True, show_memory=True):
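    """Analyzes the step stats and constructs the Chrome trace.
    Args:
      show_dataflow: (Optional.) If True, add flow events to the trace
        connecting producers and consumers of tensors.
      show_memory: (Optional.) If True, add object snapshot events to the
        trace showing the sizes and lifetimes of tensors.
    Returns:
      A 'StepStatsAnalysis' object.
    """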
self._allocate_pids()
self._assign_lanes()
self._analyze_tensors(show_memory)
self._show_compute(show_dataflow)
if show_memory:
self._show_memory_counters()
return StepStatsAnalysis(
chrome_trace=self._chrome_trace,
allocator_maximums=self._allocator_maximums)
def generate_chrome_trace_format(self, show_dataflow=True, show_memory=False):
"""Produces a trace in Chrome Trace Format.
Args:
show_dataflow: (Optional.) If True, add flow events to the trace
connecting producers and consumers of tensors.
show_memory: (Optional.) If True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
Returns:
A JSON formatted string in Chrome Trace format.
"""
step_stats_analysis = self.analyze_step_stats(
show_dataflow=show_dataflow, show_memory=show_memory)
return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
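# Typical client usage (a minimal sketch, not part of this module's API; it
# assumes the TF 1.x Session API, that this file is importable as
# tensorflow.python.client.timeline, and 'fetches' stands in for whatever the
# caller wants to run):
#
#   import tensorflow as tf
#   from tensorflow.python.client import timeline
#
#   run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
#   run_metadata = tf.RunMetadata()
#   with tf.Session() as sess:
#     sess.run(fetches, options=run_options, run_metadata=run_metadata)
#   tl = timeline.Timeline(step_stats=run_metadata.step_stats)
#   with open('timeline.json', 'w') as f:
#     f.write(tl.generate_chrome_trace_format(show_memory=True))
#
# The resulting JSON can then be loaded into chrome://tracing for inspection.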
| [
"[email protected]"
] | |
8138e1872ba83e8b4e5232c1d3cc450e30f9a153 | 527cc44efaa5a2d738d638d76cf4737b37a0e27d | /fiepipe.py | 3302504191367767067a54043f89092e1dc52f4e | [
"MIT"
] | permissive | leith-bartrich/fiepipe | c98a978d81a24013a98bbae97c65ca053e9af481 | 2f48054a349059ec5919ff9402a02c03b27b5915 | refs/heads/master | 2021-04-06T13:52:51.391039 | 2019-06-25T21:01:39 | 2019-06-25T21:01:39 | 125,394,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | #!/usr/local/bin/python
import fiepipelib.localplatform.routines.localplatform
import fiepipelib.localuser.routines.localuser
import fiepipedesktoplib.shells.fiepipe
def main():
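    """Entry point: builds the local platform and user context, then runs
    the interactive fiepipe shell."""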
# TODO register fie.us and populate the public key from somewhere authoritative.
platform = fiepipelib.localplatform.routines.localplatform.get_local_platform_routines()
localuser = fiepipelib.localuser.routines.localuser.LocalUserRoutines(platform)
shell = fiepipedesktoplib.shells.fiepipe.Shell(localuser)
shell.cmdloop()
if __name__ == "__main__":
main()
| [
"[email protected]"
] |