the-stack_0_2988 | """Summary
"""
from PyQt5.QtWidgets import QGraphicsRectItem
from . import slicestyles as styles
from .sliceextras import PreXoverItemGroup, WEDGE_RECT
_RADIUS = styles.SLICE_HELIX_RADIUS
class PreXoverManager(QGraphicsRectItem):
"""Summary
Attributes:
active_group (TYPE): Description
active_neighbor_group (TYPE): Description
groups (dict): Description
neighbor_pairs (tuple): Description
neighbor_prexover_items (dict): Description
part_item (TYPE): Description
prexover_item_map (dict): Description
virtual_helix_item (cadnano.views.sliceview.virtualhelixitem.VirtualHelixItem): Description
"""
def __init__(self, part_item):
"""Summary
Args:
part_item (TYPE): Description
"""
super(PreXoverManager, self).__init__(part_item)
self.part_item = part_item
self.virtual_helix_item = None
self.active_group = None
self.active_neighbor_group = None
self.groups = {}
# dictionary of tuple of a
# (PreXoverItemGroup, PreXoverItemGroup, List[PreXoverItem])
# tracks connections between prexovers
self.prexover_item_map = {}
self.neighbor_prexover_items = {} # just a dictionary of neighbors
self.neighbor_pairs = () # accounting for neighbor pairing
self._active_items = []
# end def
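    # Illustrative sketch (not from the original source) of how the maps above
    # are keyed, based on how activateVirtualHelix() fills them in below:
    #
    #   prexover_item_map[(id_num, is_fwd, idx)] ->
    #       (active PreXoverItem, neighbor PreXoverItemGroup, [neighbor PreXoverItems])
    #   neighbor_prexover_items[(neighbor_id, is_fwd, idx)] -> neighbor PreXoverItem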
def partItem(self):
"""Summary
Returns:
TYPE: Description
"""
return self.part_item
# end def
def clearPreXoverItemGroups(self):
"""Summary
Returns:
TYPE: Description
"""
groups = self.groups
while groups:
k, item = groups.popitem()
item.remove()
if self.active_group is not None:
self.active_group.remove()
self.active_group = None
self._active_items = []
self.prexover_item_map = {}
self.neighbor_prexover_items = {}
if self.virtual_helix_item is not None:
self.virtual_helix_item.setZValue(styles.ZSLICEHELIX)
# end def
def hideGroups(self):
"""Summary
Returns:
TYPE: Description
"""
self.clearPreXoverItemGroups()
if self.active_group is not None:
self.active_group.hide()
for group in self.groups.values():
group.hide()
self.virtual_helix_item = None
# end def
def activateVirtualHelix(self, virtual_helix_item, idx, per_neighbor_hits, pairs):
"""Create PreXoverItemGroups for the active virtual_helix_item and its
neighbors and connect the neighboring bases
Args:
virtual_helix_item (cadnano.views.sliceview.virtualhelixitem.VirtualHelixItem): Description
idx (int): the base index within the virtual helix
per_neighbor_hits (TYPE): Description
pairs (TYPE): Description
"""
self.clearPreXoverItemGroups()
pxis = self.prexover_item_map
        neighbor_pxis_dict = self.neighbor_prexover_items # for avoiding duplicates
self.neighbor_pairs = pairs
self.virtual_helix_item = virtual_helix_item
part_item = self.part_item
groups = self.groups
self.active_group = agroup = PreXoverItemGroup(_RADIUS, WEDGE_RECT,
virtual_helix_item, True)
id_num = virtual_helix_item.idNum()
virtual_helix_item.setZValue(styles.ZSLICEHELIX + 10)
fwd_st_type, rev_st_type = True, False # for clarity in the call to constructors
for neighbor_id, hits in per_neighbor_hits.items():
nvhi = part_item.idToVirtualHelixItem(neighbor_id)
ngroup = PreXoverItemGroup(_RADIUS, WEDGE_RECT, nvhi, False)
groups[neighbor_id] = ngroup
fwd_axis_hits, rev_axis_hits = hits
# n_step_size = nvhi.getProperty('bases_per_repeat')
for idx, fwd_idxs, rev_idxs in fwd_axis_hits:
neighbor_pxis = []
# print((id_num, fwd_st_type, idx))
pxis[(id_num, fwd_st_type, idx)] = (agroup.getItemIdx(fwd_st_type, idx),
ngroup,
neighbor_pxis
)
for j in fwd_idxs:
nkey = (neighbor_id, fwd_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
npxi = ngroup.getItemIdx(fwd_st_type, j)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
for j in rev_idxs:
nkey = (neighbor_id, rev_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
npxi = ngroup.getItemIdx(rev_st_type, j)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
for idx, fwd_idxs, rev_idxs in rev_axis_hits:
neighbor_pxis = []
# print((id_num, rev_st_type, idx))
pxis[(id_num, rev_st_type, idx)] = (agroup.getItemIdx(rev_st_type, idx),
ngroup,
neighbor_pxis
)
for j in fwd_idxs:
nkey = (neighbor_id, fwd_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
npxi = ngroup.getItemIdx(fwd_st_type, j)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
for j in rev_idxs:
nkey = (neighbor_id, rev_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
npxi = ngroup.getItemIdx(rev_st_type, j)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
# end for per_neighbor_hits
# end def
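    # Hedged sketch of the per_neighbor_hits layout, inferred from the unpacking
    # above (this structure is an assumption, not documented in the original source):
    #
    #   per_neighbor_hits = {
    #       neighbor_id: (fwd_axis_hits, rev_axis_hits),
    #   }
    #   # where each *_axis_hits iterates as (idx, fwd_idxs, rev_idxs) tuples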
def activateNeighbors(self, id_num, is_fwd, idx):
"""Summary
Args:
id_num (int): VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
is_fwd (bool): True if fwd (top) strand, False if rev (bottom) strand
idx (int): the base index within the virtual helix
Returns:
TYPE: Description
Raises:
ValueError: Description
"""
# print("ACTIVATING neighbors", id_num, idx)
if self.active_group is None:
return
agroup = self.active_group
if id_num != agroup.id_num:
raise ValueError("not active id_num {} != {}".format(id_num, agroup.id_num))
active_items = self._active_items
item = self.prexover_item_map.get((id_num, is_fwd, idx))
if item is None:
apxi = agroup.getItemIdx(is_fwd, idx)
apxi.setActive5p(True) if is_fwd else apxi.setActive3p(True)
agroup.active_wedge_gizmo.pointToPreXoverItem(apxi)
active_items.append(apxi)
else:
apxi, npxig, neighbor_list = item
pairs = self.neighbor_pairs[0] if is_fwd else self.neighbor_pairs[1]
check_5prime = pairs.get(idx)
is_5prime_strand = None
if check_5prime is not None:
is_5prime_strand = check_5prime[0]
else:
if is_fwd and idx == 0:
is_5prime_strand = False
elif not is_5prime_strand and self.virtual_helix_item.getProperty('length') == idx + 1:
is_5prime_strand = False
else:
is_5prime_strand = True
agroup.active_wedge_gizmo.pointToPreXoverItem(apxi)
active_items.append(apxi)
self.active_neighbor_group = npxig
# print("Should have {} neighbors".format(len(neighbor_list)))
# color = neighbor_list[0].color if neighbor_list else '#aaaaa'
# angle = 0
for npxi in neighbor_list:
npxi.setActive3p(True, apxi) if is_5prime_strand else npxi.setActive5p(True, apxi)
active_items.append(npxi)
apxi.setActive5p(True, npxi) if is_5prime_strand else apxi.setActive3p(True, npxi)
# end def
def deactivateNeighbors(self):
"""Summary
Returns:
TYPE: Description
"""
while self._active_items:
npxi = self._active_items.pop()
npxi.setActive3p(False)
npxi.setActive5p(False)
if self.active_neighbor_group is None:
return
wg = self.active_neighbor_group.active_wedge_gizmo
if wg is not None:
wg.deactivate()
self.active_neighbor_group = None
# end def
# end class
the-stack_0_2989 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: main.py
from .sequiturpython.grammar import Grammar, Symbol
from .sequiturpython.symbol import RuleIndex, RULE_INDEX_STR
# Few constants for presentation logics
#RULE_INDEX_STR = "^%s"
SEQUENCE_KEY = "S"
ARROW = "→"
NEWLINE_REPLACEMENT = "↵"
SPACE_REPLACEMENT = "_"
TAB_REPLACEMENT = "↹"
class AlphabetsTransformer:
def __init__(self):
self.alphabets_encoder = [chr(num) for num in range(1000)]
self.alphabets_decoder = {key: idx for idx, key in enumerate(self.alphabets_encoder)}
def list_ids2alphabets(self, one_list):
return [self.alphabets_encoder[cur_ele] for cur_ele in one_list]
def list_alphabets2ids(self, one_list):
return [self.alphabets_decoder[cur_ele] for cur_ele in one_list]
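# Minimal usage sketch for AlphabetsTransformer (illustrative, not part of the
# original module): integer ids below 1000 round-trip through the lookup tables.
#
#     t = AlphabetsTransformer()
#     chars = t.list_ids2alphabets([104, 105])   # ['h', 'i']
#     ids = t.list_alphabets2ids(chars)          # [104, 105]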
class Rule(list):
""" Rule class keeps track of digrams on a list """
def __new__(cls, v=[]):
obj = list.__new__(cls, [v])
obj.c = 0 # set default counter value
obj.i = RuleIndex(0) # set default index value
return obj
def ind(self, i=None):
""" Set and get index """
if i is not None:
self.i = RuleIndex(i)
return self.i
def inc(self, n=1):
""" Increase counter """
self.c += n
return self.c
def dec(self, n=1):
""" Decrease counter """
self.c -= n
return self.c
def cnt(self):
""" Get counter """
return self.c
def replace(self, rule):
"""
        Replace rule digram values with the digrams of another rule. This is not used by Sequencer2!
        If this rule is [[1, 2], [2, 3]] and index 1 is replaced with the argument rule [['a', 'b'], ['b', 'c']],
        it becomes: [['a', 'b'], ['b', 'c'], ['c', 2], [2, 3]]
"""
for ind, digram in enumerate(self):
# Digram has two values, potentially rule indexes
# both of them must be compared with the given rule index
for j, el in enumerate(digram):
ind += j # j = 0 or 1
if isinstance(el, RuleIndex) and el == rule.ind():
if ind > 0:
self[ind-1][1] = rule[0][0]
if ind < len(self):
self[ind][0] = rule[-1][1]
self[ind:ind] = rule[:]
class Sequencer(list):
""" Main class to use algorithm. This implements the digram based approach for the algo. """
def __init__(self, seq=[], utilize=True):
self.first = None
if seq:
for c in seq:
self.stream(c, utilize)
def utilize(self):
""" Remove redundant rules i.e. if rule is used only once on rules """
rules = self[1:]
for rule1 in rules:
# only rules with count = 1
if rule1 is None or rule1.cnt() != 1:
continue
for rule2 in rules:
# iterate over all rules except the excluded rule and None
if rule2 is None or rule2 is rule1:
continue
rule2.replace(rule1)
# free up the slot for the next reoccurring rule
self[rule1.ind()] = None
def find(self, digram):
""" Find given digram from main rule / sequence and rest of the rules """
for i, rule in enumerate(self):
if rule is None:
continue
# main rule
if i == 0:
j = rule.index(digram) if digram in rule[:-1] else None
if j is not None:
return 0, j, -1
# rules with one digram
elif len(rule) == 1:
if rule[0] == digram:
return i, 0, -1
# rules with multiple digrams
else:
j = rule.index(digram) if digram in rule else None
if j is not None:
return i, j, 1
return (-1, -1, -1)
def new_rule(self, rule):
""" New rule creator helper """
# get new index from empty slots if available
if None in self:
c = rule.ind(self.index(None))
self[c] = rule
# else get new index from total length of the sequence
else:
c = rule.ind(len(self))
self.append(rule)
return c
def stream(self, c, utilize=True):
""" Main sequence handler / algorithm """
# create first item, if not exists yet
if self.first is None:
self.first = c
r = [[None, c]]
self.append(Rule(r))
return
main = self[0]
util = False
# loop as many times as there are no more repeating digrams
while True:
# create a new digram from previous digram last item and coming item c
digram = [main[-1][1], c]
# search if main sequence of rest of the rules has the digram
ind, j, k = self.find(digram)
# rule is a list of digrams, the first digram is instantiated here
rule = Rule([digram])
# digram found from main rule
if ind == 0:
# increase potential previous rule index
if isinstance(c, RuleIndex):
self[c].inc()
# get a new item by rule creation
c = self.new_rule(rule)
# every new rule will get counter increased by two
self[c].inc(2)
# decrease counter of the replaced rules
if isinstance(main[j-1][1], RuleIndex):
self[main[j-1][1]].dec()
util = True
if isinstance(main[j+1][0], RuleIndex):
self[main[j+1][0]].dec()
util = True
# replace certain items with a new rule item: c
main[-1][1] = main[j+1][0] = main[j-1][1] = c
del main[j]
# break while loop
break
else:
# digram was not found from the main sequence, but is found from the other rules
if ind > 0:
# digram was found especially from longer rules, i.e. rules that are longer than one digram long
if k > 0:
# get a new item by rule creation
c = self.new_rule(rule)
# increase counter
rule.inc()
# change rule content by adding new index
if j < len(self[ind])-1:
self[ind][j+1][0] = c
if j-1 > -1:
self[ind][j-1][1] = c
# delete old rule digram
del self[ind][j]
else:
# create index for the next digram
c = RuleIndex(ind)
# remove last item from the main sequence
l = main.pop()
# if the rightmost value of the removed rule is a RuleIndex, decrease counter
if isinstance(l[1], RuleIndex):
self[l[1]].dec()
util = True
# digram was not found from the main sequence or from the rules
else:
# append new object to the main sequence
main.append(digram)
# if character is an index, increment counter
if isinstance(c, RuleIndex):
self[c].inc()
# break while loop
break
# if rule utility is on (as it is recommended by default), remove redundant rules
if utilize and util:
self.utilize()
def grammar_recursive(self, rule, recursive=False):
""" Grammar helper function """
if not isinstance(rule, list):
return str(rule)
s = ''
for i, r in enumerate(rule):
if isinstance(r, list):
if i == 0:
s += str(self.grammar_recursive(r, recursive))
elif isinstance(r[1], RuleIndex):
s += "%s" % (self.grammar_recursive(self[r[1]], recursive) if recursive else RULE_INDEX_STR % r[1])
else:
s += str(self.grammar_recursive(r[1], recursive))
elif isinstance(r, RuleIndex):
s += "%s" % (self.grammar_recursive(self[r], recursive) if recursive else RULE_INDEX_STR % r)
else:
s += str(r).replace("\r\n", NEWLINE_REPLACEMENT).\
replace("\n", NEWLINE_REPLACEMENT).\
replace("\r", "").\
replace("\t", TAB_REPLACEMENT).\
replace(" ", SPACE_REPLACEMENT)
return s
def grammar_sequence(self, join=False):
""" Retrieve the main sequence / rule from the sequencer """
x = [item[1] for item in self[0]]
return {SEQUENCE_KEY: self.grammar_recursive(x, False) if join else x}
def grammar_rules(self, join=False, recursive=False):
""" Retrieve rest of the rules from the sequencer """
return {x.ind(): self.grammar_recursive(x, recursive) if join else x for x in self[1:] if x}
def resolve(self, flatten=True):
"""
        When the sequencer has successfully created rules from the given input,
        the resolve method can be used to decode the compressed sequence back to the original input.
        The flatten argument controls whether the hierarchical structure is flattened or kept in the returned list.
"""
def _recur(ind):
if isinstance(ind, RuleIndex) and self[ind] is not None:
b = []
l = len(self[ind])-1
for i, item in enumerate(self[ind]):
if item is None:
continue
if i == 0:
b.append(_recur(item[0]))
b.append(_recur(item[1]))
elif i == l:
b.append(_recur(item[1]))
else:
b.append(_recur(item[1]))
return b
else:
return ind
# start from main sequence / first rule
items = [_recur(item[1]) for item in self[0]]
# should we flatten the result?
return flatten_list(items) if flatten else items
def get(self):
""" Getter for sequence """
return list(self)
def __str__(self):
"""
String representation of the sequencer.
This merges only the first of the rules i.e. the main sequence
"""
return ''.join([(RULE_INDEX_STR % i) if isinstance(i, RuleIndex) else str(i) for i in [item[1] for item in self[0]]])
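# Hedged usage sketch for Sequencer (illustrative, not part of the original
# module): a repetitive input produces rules for the repeats, and resolve() is
# documented to reconstruct the original input.
#
#     s = Sequencer("abcabcabc")
#     print(s)                     # compressed main sequence with rule references
#     print(''.join(s.resolve()))  # expected to print "abcabcabc"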
class Sequencer2(list):
""" Main class to use algorithm. This implements the array slice based approach for the algo. """
def __init__(self, seq=[], utilize=True):
self += [Rule([])]
if seq:
for c in seq:
self.stream(c, utilize)
def find(self, rule):
ind, x, j = (-1, -1, -1)
i = 0
for x in self:
if x:
j = self.digram_index(x, rule)
if j > -1:
x = len(x)
ind = i
break
i += 1
return (ind, x, j)
def digram_index(self, target, digram):
l = len(target)-1
# target list length smaller than 2
if l < 1:
return -1
# if target and digram are equal in length, we can compare them directly
if l == 1:
return 0 if target == digram else -1
i = 0
while i < l:
# find "digrams" from target list and match with passed digram argument
if target[i:i+2] == digram:
return i
i += 1
return -1
def stream(self, c, utilize = True):
""" Main sequence handler / algorithm """
s = self
main = s[0]
if len(main) < 2:
main.append(c)
else:
util = False
# loop as many times as there are no more repeating digrams
while True:
# create new digram
rule = Rule(main[-1:]+[c])
# find digram from main sequence or other rules
ind, x, j = self.find(rule)
# if main sequence has digram
if ind == 0:
# reuse temporarily disabled index?
if None in s:
i = rule.ind(s.index(None))
s[i] = rule
else:
# create new unique index
i = rule.ind(len(s))
s.append(rule)
# increment rule counter
s[i].inc()
# replace digram left item
main[j] = i
# remove digram right item
del main[j+1]
else:
                    # main sequence didn't have the digram, how about the other rules?
if ind > 0:
# digram is found from long rules
if x > 2:
c = rule.ind(len(s))
s.append(rule)
rule.inc()
# change rule content by adding new index
c1 = s[ind][j+2:]
del s[ind][j:]
s[ind] += [c] + c1
else:
# lets try to retrieve index from all rules for the next digram
c = RuleIndex(s.index(rule))
# remove last item from main sequence
l = main.pop()
# if removed object is an index, decrease count
if isinstance(l, RuleIndex) and s[l] is not None:
s[l].dec()
util = True
else:
# append new object to the main sequence
main.append(c)
# if character is an index, increment count
if isinstance(c, RuleIndex):
s[c].inc()
break
if utilize and util:
self.utilize()
def utilize(self):
        # remove redundant rules, i.e. rules that are used only once on the right-hand side of the rules list
for rule in self:
# only rules with count = 1
if rule is None or rule.cnt() != 1:
continue
self[rule.ind()] = None
for r in self:
# all rules except the excluded rule
if r is None or r is rule:
continue
ind = 0
l = len(r)
while ind < l:
if isinstance(r[ind], RuleIndex) and r[ind] == rule.ind():
c = r[ind+1:]
del r[ind:]
r += rule + c
ind += 1
def grammar_recursive(self, rule, recursive=False):
s = ''
for r in rule:
if isinstance(r, list):
s += str(self.grammar_recursive(r, recursive))
elif isinstance(r, RuleIndex):
s += "%s" % (self.grammar_recursive(self[r], recursive) if recursive else RULE_INDEX_STR % r)
else:
s += str(r).replace("\r\n", NEWLINE_REPLACEMENT).\
replace("\n", NEWLINE_REPLACEMENT).\
replace("\r", "").\
replace("\t", TAB_REPLACEMENT).\
replace(" ", SPACE_REPLACEMENT)
return s
def grammar_sequence(self, join=False):
""" Retrieve the main sequence / rule from the sequencer """
return {SEQUENCE_KEY: self.grammar_recursive(self[0]) if join else self[0]}
def grammar_rules(self, join=False, recursive=False):
""" Retrieve rest of the rules from the sequencer """
return {x.ind(): self.grammar_recursive(x, recursive) if join else x for x in self[1:] if x}
def resolve(self, flatten=True):
"""
        When the sequencer has successfully created rules from the given input,
        the resolve method can be used to decode the compressed sequence back to the original input.
        The flatten argument controls whether the hierarchical structure is flattened or kept in the returned list.
"""
def _recur(i):
if not isinstance(i, RuleIndex):
return i
return [_recur(x) for x in self[i]]
# start from main sequence / first rule
items = [_recur(item) for item in self[0]]
# should we flatten the result?
return flatten_list(items) if flatten else items
def get(self):
""" Getter for sequence """
return list(self)
def __str__(self):
"""
String representation of the sequencer.
This merges only the first of the rules i.e. the main sequence
"""
return ''.join([(RULE_INDEX_STR % i) if isinstance(i, RuleIndex) else str(i) for i in self[0]])
class Sequencer3():
"""
    Main class for running the algorithm.
    This implementation builds on a Python port of the JavaScript Sequitur version:
https://github.com/mspandit/sequitur-python
"""
def __init__(self, seq = None, utilize = True):
self.first = None
self.grammar_cache = None
self.g = Grammar()
self.production = self.g.root_production
if seq:
for c in seq:
self.stream(c, utilize)
def stream(self, c, utilize = True):
self.production.last().insert_after(Symbol.factory(self.g, c))
if self.first is None:
self.first = True
return
match = self.g.get_index(self.production.last().prev)
if not match:
self.g.add_index(self.production.last().prev)
elif match.next != self.production.last().prev:
self.production.last().prev.process_match(match)
def grammar_recursive(self, rule, recursive=False):
s = ''
for r in rule:
if isinstance(r, list):
s += str(self.grammar_recursive(r, recursive))
elif isinstance(r, RuleIndex):
s += "%s" % (self.grammar_recursive(self.get(True)[r], recursive) if recursive else RULE_INDEX_STR % r)
else:
s += str(r).replace("\r\n", NEWLINE_REPLACEMENT).\
replace("\n", NEWLINE_REPLACEMENT).\
replace("\r", "").\
replace("\t", TAB_REPLACEMENT).\
replace(" ", SPACE_REPLACEMENT)
return s
def grammar_sequence(self, join=False):
""" Retrieve the main sequence / rule from the sequencer """
x = self.get(False)[0]
return {SEQUENCE_KEY: self.grammar_recursive(x, False) if join else x}
def grammar_rules(self, join=False, recursive=False):
""" Retrieve rest of the rules from the sequencer """
rules = self.get(False)[1:]
return {(i+1): self.grammar_recursive(x, recursive) if join else x for i, x in enumerate(rules)}
def resolve(self, flatten=True):
"""
        When the sequencer has successfully created rules from the given input,
        the resolve method can be used to decode the compressed sequence back to the original input.
        The flatten argument controls whether the hierarchical structure is flattened or kept in the returned list.
"""
def _recur(i):
if not isinstance(i, RuleIndex):
return i
return [_recur(x) for x in self.get()[i]]
# start from main sequence / first rule
items = [_recur(item) for item in self.get()[0]]
# should we flatten the result?
return flatten_list(items) if flatten else items
def get(self, cache=True):
if not self.grammar_cache or not cache:
self.grammar_cache = self.g.get_grammar()
return self.grammar_cache
def __str__(self):
"""
String representation of the sequencer.
This merges only the first of the rules i.e. the main sequence
"""
return ''.join([(RULE_INDEX_STR % i) if isinstance(i, RuleIndex) else str(i) for i in self.get(False)[0]])
def flatten_list(items):
""" List flattener helper function """
for i, x in enumerate(items):
while isinstance(items[i], list):
items[i:i+1] = items[i]
return items
def print_grammar(sequencer, join=True, recursive=False):
    """ Nicely output grammar of the sequencer """
    # main sequence only
    for i, item in sequencer.grammar_sequence(join).items():
        print("%s%s" % ("%s " % i, ARROW), item)
    # rules only
    for i, item in sequencer.grammar_rules(join, recursive).items():
        print("%s%s" % ("%s " % i, ARROW), item)
the-stack_0_2992 | #!/usr/bin/env python
"""
Perform cleanup actions
"""
import time
import random
import threading
from Utils.Timers import timeFunction
from WMCore.WorkerThreads.BaseWorkerThread import BaseWorkerThread
from WMCore.Services.ReqMgr.ReqMgr import ReqMgr
from WMCore.DAOFactory import DAOFactory
class WorkQueueManagerCleaner(BaseWorkerThread):
"""
Cleans expired items, updates element status.
"""
def __init__(self, queue, config):
"""
Initialise class members
"""
BaseWorkerThread.__init__(self)
self.forbiddenStatus = ["aborted", "aborted-completed", "force-complete", "completed"]
self.queue = queue
self.config = config
self.reqmgr2Svc = ReqMgr(self.config.General.ReqMgr2ServiceURL)
myThread = threading.currentThread()
daoFactory = DAOFactory(package="WMCore.WMBS",
logger=myThread.logger,
dbinterface=myThread.dbi)
self.finishedWorflowCheck = daoFactory(classname="Subscriptions.CountFinishedSubscriptionsByWorkflow")
def setup(self, parameters):
"""
Called at startup - introduce random delay
to avoid workers all starting at once
"""
t = random.randrange(self.idleTime)
self.logger.info('Sleeping for %d seconds before 1st loop' % t)
time.sleep(t)
@timeFunction
def algorithm(self, parameters):
"""
Check & expire negotiation failures
"""
self.queue.logger.info("Start updating & cleaning...")
try:
self.queue.performQueueCleanupActions()
            # this will clean up whatever is left over from the above clean up.
            # also, if the workqueue replication has a problem, it won't delay killing jobs in condor
            # and updating wmbs status
            # state list which shouldn't be populated in wmbs (to prevent creating work before the WQE status is updated)
            # the completed status was added to the list due to the race condition
requests = self.reqmgr2Svc.getRequestByStatusFromMemoryCache(self.forbiddenStatus).getData()
results = self.finishedWorflowCheck.execute(workflowNames=requests)
requestsToKill = [reqInfo["workflow"] for reqInfo in results if reqInfo["open"] > 0]
self.queue.logger.info("Killing %d requests in WMBS ...", len(requestsToKill))
self.queue.killWMBSWorkflows(requestsToKill)
except Exception as ex:
self.queue.logger.exception("Error cleaning queue: %s", str(ex))
self.queue.logger.info("Finished updating & cleaning.")
the-stack_0_2993 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import copy
import logging
import os
import subprocess
import threading
import time
from .constants import PIPE, STDOUT, DEVNULL
from .exceptions import TimeoutExpired
logger = logging.getLogger(__name__)
class Popen:
'''
    It wraps multiple subprocess.Popen instances and provides an interface like subprocess.Popen.
'''
polling_interval = 0.1
'''
Parameters
----------
popen_args_list
The list of pipechildren.PopenArgs
stderr
Specify One of pipechildren.DEVNULL, pipechildren.STDOUT, or file-like object
'''
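    # Hedged usage sketch (PopenArgs comes from this package; the exact arguments
    # passed to it below are assumptions, not shown in this file):
    #
    #     p = Popen([PopenArgs(...), PopenArgs(...)], stdout=PIPE, stderr=PIPE,
    #               text=True)
    #     outs, errs = p.communicate(timeout=10)
    #     print(p.poll())   # list of per-child return codes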
def __init__(self, popen_args_list, stdin=None, stdout=None, stderr=None, universal_newlines=None, encoding=None, errors=None, text=None, _debug_communicate_io=False):
self.text = universal_newlines or encoding or errors or text
self.encoding = encoding
self.popen_args_list = popen_args_list
self.processes = []
self.stdin = None
self.stdout = None
self.stderr = None
self.stderr_write_end = None
self.outs = None
self.errs = None
self.pids = []
self.returncodes = []
self._debug_communicate_io = _debug_communicate_io
self._communicate_called = False
self._workers = {
"stderr_drainer": None,
"close_stderr_write_end_worker": None,
"waiter": None,
"stdin_worker": None,
"stdout_worker": None,
"stderr_worker": None
}
self._stop_workers = False
'''
Call popen with each popen_args and connect stdout -> stdin
between subprocesses.
'''
# previous stdout goes into current stdin
prev_out = stdin
for i in range(len(self.popen_args_list)):
pa = self.popen_args_list[i]
if i == len(self.popen_args_list) - 1:
# Last
_stdout = stdout
else:
_stdout = subprocess.PIPE
_stderr = pa.stderr if pa.stderr else stderr
p = subprocess.Popen(stdout=_stdout,
stdin=prev_out,
stderr=_stderr,
text=self.text,
encoding=self.encoding,
**pa.popen_kwargs)
setattr(p, "name", pa.name)
logger.info(f"Popening({pa.fullname})")
if i > 0:
"""
piped stdout/stdin is connected between subprocesses and used in
forked sub-processes. We should release them not to prevent pipe close.
"""
self.processes[-1].stdout.close()
self.processes[-1].stdout = None
self.processes.append(p)
self.pids.append(p.pid)
prev_out = p.stdout
#self._start_pipe_closer()
if stdin is PIPE:
self.stdin = self.processes[0].stdin
else:
self.stdin = None
if stdout is PIPE:
self.stdout = self.processes[-1].stdout
else:
self.stdout = None
if stderr is PIPE:
logger.debug("stderr is PIPE")
if len(self.processes) == 1:
self.stderr = self.processes[0].stderr
else:
r, w = os.pipe()
if self.text:
self.stderr = os.fdopen(r, 'r')
self.stderr_write_end = os.fdopen(w, 'w')
else:
self.stderr = os.fdopen(r, 'rb')
self.stderr_write_end = os.fdopen(w, 'wb')
self._start_stderr_drainer()
else:
self.stderr = None
self.stderr_write_end = stderr
if stderr:
self._start_stderr_drainer()
@staticmethod
def _work_text_drainer(_self, name, reader, data_writer):
'''
Generic thread reader to read data from <reader> and write
data to callback data_writer(data).
NOTE: It is STATIC method.
Called like self._work_text_drainer(self)
        data_writer() gets a line of text as its 1st argument and needs to return
        False if the writer is no longer available.
'''
logger.debug(f"_work_text_drainer {name} started")
while (not _self._stop_workers):
line = reader.readline()
if not line:
break
if _self._debug_communicate_io:
logger.debug(f"{name} -> {line}")
if not data_writer(line):
break
logger.debug(f"_work_text_drainer {name} finished.")
@staticmethod
def _work_binary_drainer(_self, name, reader, data_writer):
'''
Generic thread reader to read data from <reader> and write
data to callback data_writer(data).
NOTE: It is STATIC method.
Called like self._work_binary_drainer(self)
        data_writer() gets binary data as its 1st argument and needs to return
        False if the writer is no longer available.
'''
logger.debug(f"_work_binary_drainer {name} started")
while (not _self._stop_workers):
data = reader.read(4096)
if not data:
break
if _self._debug_communicate_io:
logger.debug(f"{name} -> {data}")
if not data_writer(data):
logger.debug(f"{name} -> EOF")
break
logger.debug(f"_work_binary_drainer {name} finished.")
def _start_stderr_drainer(self):
'''
drain stderr from all sub-processes and gather to one piped stderr
'''
stderr_drainer = []
def stderr_write_end_writer(data):
if self.stderr_write_end.closed:
return False
else:
self.stderr_write_end.write(data)
return True
for p in self.processes:
name=f"{p.name}_stderr_drainer"
if self.text:
drainer = lambda: self._work_text_drainer(self,
name,
p.stderr,
stderr_write_end_writer)
else:
drainer = lambda: self._work_binary_drainer(self,
name,
p.stderr,
stderr_write_end_writer)
t = threading.Thread(name=name, target=drainer)
t.start()
stderr_drainer.append(t)
self._workers["stderr_drainer"] = stderr_drainer
if self.stderr:
# We need close worker otherwise reader cannot finish reading.
def work_close_stderr_write_end():
logger.debug(f"work_close_stderr_write_end started")
drainers = self._workers["stderr_drainer"]
while not self._stop_workers:
alive = False
for t in drainers:
if t.is_alive():
alive = True
break
if not alive:
break
logger.debug(f"work_close_stderr_write_end finished")
self.stderr_write_end.close()
close_stderr_write_end_worker = threading.Thread(
target=work_close_stderr_write_end,
name=name)
close_stderr_write_end_worker.start()
self._workers["close_stderr_write_end_worker"] = close_stderr_write_end_worker
def __enter__(self):
return self
def __exit__(self):
# To support "with pipechildren.Popen() as p:"
self.wait()
def poll(self):
'''
Check if child process has terminated. Set and return returncode list attribute. Otherwise, returns None.
Returns
----------
returncode
list of returncode of subprocesses.
'''
self.returncodes = [p.poll() for p in self.processes]
if None in self.returncodes:
return None
return self.returncodes
def wait(self, timeout=None):
'''
Wait for child processes to terminate. Set and return returncode attribute.
If the process does not terminate after timeout seconds,
raise a TimeoutExpired exception.
It is safe to catch this exception and retry the wait.
Returns
----------
returncodes
list of returncodes of subprocesses.
'''
logger.debug("wait started")
def work_wait(name, p, timeout):
logger.debug(f"waiter {name} started")
ret = None
try:
ret = p.wait(timeout=timeout)
except subprocess.TimeoutExpired:
logger.debug(f"waiter {name} timed out.")
else:
logger.debug(f"waiter {name} finished")
return ret
waiter = []
for p in self.processes:
name = f"{p.name}_waiter"
t = threading.Thread(
target=lambda: work_wait(name, p, timeout),
name=name)
t.start()
waiter.append(t)
self._workers["waiter"] = waiter
for t in waiter:
t.join()
self._workers["waiter"] = None
returncodes = self.poll()
if returncodes is None:
raise TimeoutExpired(self.popen_args_list, timeout, stdout=self.outs, stderr=self.errs)
logger.debug("wait finished")
return returncodes
def _time_left_sec(self, timeout_at):
if timeout_at:
time_left_sec = (timeout_at - datetime.now()).total_seconds()
if time_left_sec < 0:
return 0
else:
return time_left_sec
return None
def get_timeout_at(self, timeout):
return datetime.now() + timedelta(seconds=timeout)
    def _start_communicate_pipes(self, input=None):
'''
Start threads below. It's called only once when communicate is called first time.
- Thread1: write <input> to stdin if stdin is PIPE and <input> is given.
- Thread2: read stdout to outs if stdout is PIPE
- Thread3: read stderr to errs if stderr is PIPE
'''
logger.debug("_start_communicate_pipes called")
def work_stdin(input=None):
'''
Thread worker to write <input> to stdin
'''
logger.debug("stdin_worker started")
start = 0
step = 4096
end = start + step
while not self._stop_workers and not self.stdin.closed:
if len(input) > end:
if self._debug_communicate_io:
logger.debug(f"->stdin {input[start:end]}")
self.stdin.write(input[start:end])
else:
if self._debug_communicate_io:
logger.debug(f"->stdin {input[start:]}")
self.stdin.write(input[start:])
break
start += step
end += step
self.stdin.close()
logger.debug("stdin_worker finished")
def add_to_outs_writer(data):
'''
Writer used by stdout drainer thread
'''
self.outs += data
return True
def add_to_errs_writer(data):
'''
Writer used by stderr drainer thread
'''
self.errs += data
return True
if input and self.stdin:
stdin_worker = threading.Thread(
target=lambda: work_stdin(input=input),
name="stdin_worker")
stdin_worker.start()
self._workers["stdin_worker"] = stdin_worker
elif self.stdin:
self.stdin.close()
if self.stdout:
if self.text:
self.outs = ''
drainer = lambda: self._work_text_drainer(self,
'stdout_drainer',
self.stdout,
add_to_outs_writer)
else:
self.outs = b''
drainer = lambda: self._work_binary_drainer(self,
'stdout_drainer',
self.stdout,
add_to_outs_writer)
stdout_worker = threading.Thread(
target=drainer,
name="stdout_worker")
stdout_worker.start()
self._workers["stdout_worker"] = stdout_worker
if self.stderr:
if self.text:
self.errs = ''
drainer = lambda: self._work_text_drainer(self,
'stderr_drainer',
self.stderr,
add_to_errs_writer)
else:
self.errs = b''
drainer = lambda: self._work_binary_drainer(self,
'stderr_drainer',
self.stderr,
add_to_errs_writer)
stderr_worker = threading.Thread(
target=drainer,
name="stderr_worker")
stderr_worker.start()
self._workers["stderr_worker"] = stderr_worker
def communicate(self, input=None, timeout=None):
'''
Send data to stdin. Read data from stdout and stderr, until end-of-file is reached.
        Wait for the processes to terminate. The optional input argument should be data to be sent
        to the most upstream (first) child process, or None, if no data should be sent to the child.
If streams were opened in text mode, input must be a string. Otherwise, it must be bytes.
Returns
----------
stdout_data
stdout of down most process
stderr_data
stderr of whole process if pipechildren.PIPE is specified.
The data will be strings if streams were opened in text mode; otherwise, bytes.
'''
logger.debug("communicate called")
if len(self.processes) == 1:
# In this case, just call subprocess.communicate
self.outs, self.errs = self.processes[0].communicate(input=input, timeout=timeout)
return self.outs, self.errs
firsttime = True
if self._communicate_called:
firsttime = False
self._communicate_called = True
if firsttime:
self._start_communicate_pipes(input=input)
timeout_at = None
if timeout:
timeout_at = self.get_timeout_at(timeout)
self.wait(timeout=timeout)
# If self.wait() timedout, it raises to caller out of thie method.
# If we reach here, all processes have finished.
# Close stdin first then wait for the end of output workers.
if self.stdin:
self.stdin.close()
timedout = False
if self._workers["stdin_worker"]:
timeout_left = self._time_left_sec(timeout_at)
self._workers["stdin_worker"].join(timeout=timeout_left)
timedout = self._workers["stdin_worker"].is_alive()
if self._workers["stdout_worker"] and not timedout:
timeout_left = self._time_left_sec(timeout_at)
self._workers["stdout_worker"].join(timeout=timeout_left)
timedout = self._workers["stdout_worker"].is_alive()
if self._workers["stderr_worker"] and not timedout:
timeout_left = self._time_left_sec(timeout_at)
self._workers["stderr_worker"].join(timeout=timeout_left)
if not timedout:
timedout = self._workers["stderr_worker"].is_alive()
if timedout:
raise TimeoutExpired(self.popen_args_list, timeout, stdout=self.outs, stderr=self.errs)
# Guard all workers from running just in case.
self._stop_workers = True
# Close up pipes
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
for p in self.processes:
if p.stderr:
p.stderr.close()
return self.outs, self.errs
def kill(self, *args):
if args and isinstance(args[0], list):
for i in args[0]:
self.processes[i].kill()
else:
for p in self.processes:
p.kill()
def terminate(self, *args):
if args and isinstance(args[0], list):
for i in args[0]:
self.processes[i].terminate()
else:
for p in self.processes:
p.terminate()
def send_signal(self, signal, *args):
if args and isinstance(args[0], list):
for i in args[0]:
self.processes[i].send_signal(signal)
else:
for p in self.processes:
p.send_signal(signal)
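# Usage note (illustrative, not part of the original module): kill(), terminate()
# and send_signal() optionally take a list of child indexes, e.g. p.kill([0])
# signals only the first process in the pipeline, while p.kill() signals them all.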
the-stack_0_2995 | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import tempfile
import unittest
from unittest.mock import patch
import click.testing
import cpo.config
import cpo.utils.file
import cpo.utils.operating_system
from cpo.cpo import cli
from cpo.lib.dependency_manager.plugins.ibm_cloud_terraform_provider_plugin import (
IBMCloudTerraformProviderPlugIn,
)
class TestDownloadDependencies(unittest.TestCase):
def add_os_specific_executable_extension(self, executable_name: str) -> str:
operating_system = cpo.utils.operating_system.get_operating_system()
if operating_system == cpo.utils.operating_system.OperatingSystem.WINDOWS:
executable_name += ".exe"
return executable_name
def check_executable_exists(self, bin_directory_path: pathlib.Path, executable_name: str):
self.assertTrue(
(pathlib.Path(bin_directory_path) / self.add_os_specific_executable_extension(executable_name)).exists()
)
@patch(
"cpo.config.binaries_manager.configuration_manager.get_home_directory_path",
return_value=pathlib.Path(tempfile.gettempdir()),
)
def test_command(self, test_mock):
"""Tests that cpo adm download-dependencies downloads
dependencies"""
bin_directory_path = cpo.config.configuration_manager.get_bin_directory_path()
terraform_plugins_directory_path = IBMCloudTerraformProviderPlugIn().get_terraform_plugins_directory_path()
for entry in bin_directory_path.glob("*"):
if entry.is_file():
os.remove(entry)
runner = click.testing.CliRunner()
result = runner.invoke(cli, ["adm", "download-dependencies"])
self.assertEqual(result.exit_code, 0)
self.assertGreaterEqual(
len(list(terraform_plugins_directory_path.glob("terraform-provider-ibm*"))),
1,
)
self.check_executable_exists(bin_directory_path, "ibmcloud")
self.check_executable_exists(bin_directory_path, "oc")
self.check_executable_exists(bin_directory_path, "terraform")
if __name__ == "__main__":
unittest.main()
the-stack_0_2996 | # -*- coding: utf-8 -*-
# Copyright (c) The python-semanticversion project
# This code is distributed under the two-clause BSD License.
from __future__ import unicode_literals
import functools
import re
from .compat import base_cmp
def _to_int(value):
try:
return int(value), True
except ValueError:
return value, False
def _has_leading_zero(value):
return (value
and value[0] == '0'
and value.isdigit()
and value != '0')
def identifier_cmp(a, b):
"""Compare two identifier (for pre-release/build components)."""
a_cmp, a_is_int = _to_int(a)
b_cmp, b_is_int = _to_int(b)
if a_is_int and b_is_int:
# Numeric identifiers are compared as integers
return base_cmp(a_cmp, b_cmp)
elif a_is_int:
# Numeric identifiers have lower precedence
return -1
elif b_is_int:
return 1
else:
# Non-numeric identifiers are compared by a natural comparison
# adapted from https://stackoverflow.com/questions/8408125/python-natural-comparison-between-strings
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return base_cmp(alphanum_key(a_cmp), alphanum_key(b_cmp))
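# Worked examples for identifier_cmp (illustrative, following the rules
# implemented above):
#
#     identifier_cmp('2', '11')     # -1: both numeric, compared as integers
#     identifier_cmp('1', 'alpha')  # -1: numeric identifiers have lower precedence
#     identifier_cmp('rc1', 'rc2')  # -1: natural comparison of mixed identifiers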
def identifier_list_cmp(a, b):
"""Compare two identifier list (pre-release/build components).
The rule is:
- Identifiers are paired between lists
- They are compared from left to right
- If all first identifiers match, the longest list is greater.
>>> identifier_list_cmp(['1', '2'], ['1', '2'])
0
>>> identifier_list_cmp(['1', '2a'], ['1', '2b'])
-1
>>> identifier_list_cmp(['1'], ['1', '2'])
-1
"""
identifier_pairs = zip(a, b)
for id_a, id_b in identifier_pairs:
cmp_res = identifier_cmp(id_a, id_b)
if cmp_res != 0:
return cmp_res
# alpha1.3 < alpha1.3.1
return base_cmp(len(a), len(b))
class Version(object):
version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(?:-([0-9a-zA-Z.-]+))?(?:\+([0-9a-zA-Z.-]+))?$')
partial_version_re = re.compile(r'^(\d+)(?:\.(\d+)(?:\.(\d+))?)?(?:-([0-9a-zA-Z.-]*))?(?:\+([0-9a-zA-Z.-]*))?$')
def __init__(self, version_string, partial=False):
major, minor, patch, prerelease, build = self.parse(version_string, partial)
self.major = major
self.minor = minor
self.patch = patch
self.prerelease = prerelease
self.build = build
self.partial = partial
@classmethod
def _coerce(cls, value, allow_none=False):
if value is None and allow_none:
return value
return int(value)
def next_major(self):
if self.prerelease and self.minor == 0 and self.patch == 0:
return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
else:
return Version('.'.join(str(x) for x in [self.major + 1, 0, 0]))
def next_minor(self):
if self.prerelease and self.patch == 0:
return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
else:
return Version(
'.'.join(str(x) for x in [self.major, self.minor + 1, 0]))
def next_patch(self):
if self.prerelease:
return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
else:
return Version(
'.'.join(str(x) for x in [self.major, self.minor, self.patch + 1]))
@classmethod
def coerce(cls, version_string, partial=False):
"""Coerce an arbitrary version string into a semver-compatible one.
The rule is:
- If not enough components, fill minor/patch with zeroes; unless
partial=True
- If more than 3 dot-separated components, extra components are "build"
data. If some "build" data already appeared, append it to the
extra components
Examples:
>>> Version.coerce('0.1')
Version(0, 1, 0)
>>> Version.coerce('0.1.2.3')
Version(0, 1, 2, (), ('3',))
>>> Version.coerce('0.1.2.3+4')
Version(0, 1, 2, (), ('3', '4'))
>>> Version.coerce('0.1+2-3+4_5')
Version(0, 1, 0, (), ('2-3', '4-5'))
"""
base_re = re.compile(r'^\d+(?:\.\d+(?:\.\d+)?)?')
match = base_re.match(version_string)
if not match:
raise ValueError(
"Version string lacks a numerical component: %r"
% version_string
)
version = version_string[:match.end()]
if not partial:
# We need a not-partial version.
while version.count('.') < 2:
version += '.0'
if match.end() == len(version_string):
return Version(version, partial=partial)
rest = version_string[match.end():]
# Cleanup the 'rest'
rest = re.sub(r'[^a-zA-Z0-9+.-]', '-', rest)
if rest[0] == '+':
# A 'build' component
prerelease = ''
build = rest[1:]
elif rest[0] == '.':
# An extra version component, probably 'build'
prerelease = ''
build = rest[1:]
elif rest[0] == '-':
rest = rest[1:]
if '+' in rest:
prerelease, build = rest.split('+', 1)
else:
prerelease, build = rest, ''
elif '+' in rest:
prerelease, build = rest.split('+', 1)
else:
prerelease, build = rest, ''
build = build.replace('+', '.')
if prerelease:
version = '%s-%s' % (version, prerelease)
if build:
version = '%s+%s' % (version, build)
return cls(version, partial=partial)
@classmethod
def parse(cls, version_string, partial=False, coerce=False):
"""Parse a version string into a Version() object.
Args:
version_string (str), the version string to parse
partial (bool), whether to accept incomplete input
coerce (bool), whether to try to map the passed in string into a
valid Version.
"""
if not version_string:
raise ValueError('Invalid empty version string: %r' % version_string)
if partial:
version_re = cls.partial_version_re
else:
version_re = cls.version_re
match = version_re.match(version_string)
if not match:
raise ValueError('Invalid version string: %r' % version_string)
major, minor, patch, prerelease, build = match.groups()
if _has_leading_zero(major):
raise ValueError("Invalid leading zero in major: %r" % version_string)
if _has_leading_zero(minor):
raise ValueError("Invalid leading zero in minor: %r" % version_string)
if _has_leading_zero(patch):
raise ValueError("Invalid leading zero in patch: %r" % version_string)
major = int(major)
minor = cls._coerce(minor, partial)
patch = cls._coerce(patch, partial)
if prerelease is None:
if partial and (build is None):
# No build info, strip here
return (major, minor, patch, None, None)
else:
prerelease = ()
elif prerelease == '':
prerelease = ()
else:
prerelease = tuple(prerelease.split('.'))
cls._validate_identifiers(prerelease, allow_leading_zeroes=False)
if build is None:
if partial:
build = None
else:
build = ()
elif build == '':
build = ()
else:
build = tuple(build.split('.'))
cls._validate_identifiers(build, allow_leading_zeroes=True)
return (major, minor, patch, prerelease, build)
@classmethod
def _validate_identifiers(cls, identifiers, allow_leading_zeroes=False):
for item in identifiers:
if not item:
raise ValueError(
"Invalid empty identifier %r in %r"
% (item, '.'.join(identifiers))
)
if item[0] == '0' and item.isdigit() and item != '0' and not allow_leading_zeroes:
raise ValueError("Invalid leading zero in identifier %r" % item)
def __iter__(self):
return iter((self.major, self.minor, self.patch, self.prerelease, self.build))
def __str__(self):
version = '%d' % self.major
if self.minor is not None:
version = '%s.%d' % (version, self.minor)
if self.patch is not None:
version = '%s.%d' % (version, self.patch)
if self.prerelease or (self.partial and self.prerelease == () and self.build is None):
version = '%s-%s' % (version, '.'.join(self.prerelease))
if self.build or (self.partial and self.build == ()):
version = '%s+%s' % (version, '.'.join(self.build))
return version
def __repr__(self):
return '%s(%r%s)' % (
self.__class__.__name__,
str(self),
', partial=True' if self.partial else '',
)
@classmethod
def _comparison_functions(cls, partial=False):
"""Retrieve comparison methods to apply on version components.
This is a private API.
Args:
partial (bool): whether to provide 'partial' or 'strict' matching.
Returns:
5-tuple of cmp-like functions.
"""
def prerelease_cmp(a, b):
"""Compare prerelease components.
Special rule: a version without prerelease component has higher
precedence than one with a prerelease component.
"""
if a and b:
return identifier_list_cmp(a, b)
elif a:
# Versions with prerelease field have lower precedence
return -1
elif b:
return 1
else:
return 0
def build_cmp(a, b):
"""Compare build metadata.
Special rule: there is no ordering on build metadata.
"""
if a == b:
return 0
else:
return NotImplemented
def make_optional(orig_cmp_fun):
"""Convert a cmp-like function to consider 'None == *'."""
@functools.wraps(orig_cmp_fun)
def alt_cmp_fun(a, b):
if a is None or b is None:
return 0
return orig_cmp_fun(a, b)
return alt_cmp_fun
if partial:
return [
base_cmp, # Major is still mandatory
make_optional(base_cmp),
make_optional(base_cmp),
make_optional(prerelease_cmp),
make_optional(build_cmp),
]
else:
return [
base_cmp,
base_cmp,
base_cmp,
prerelease_cmp,
build_cmp,
]
def __compare(self, other):
comparison_functions = self._comparison_functions(partial=self.partial or other.partial)
comparisons = zip(comparison_functions, self, other)
for cmp_fun, self_field, other_field in comparisons:
cmp_res = cmp_fun(self_field, other_field)
if cmp_res != 0:
return cmp_res
return 0
def __hash__(self):
return hash((self.major, self.minor, self.patch, self.prerelease, self.build))
def __cmp__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.__compare(other)
def __compare_helper(self, other, condition, notimpl_target):
"""Helper for comparison.
Allows the caller to provide:
- The condition
- The return value if the comparison is meaningless (ie versions with
build metadata).
"""
if not isinstance(other, self.__class__):
return NotImplemented
cmp_res = self.__cmp__(other)
if cmp_res is NotImplemented:
return notimpl_target
return condition(cmp_res)
def __eq__(self, other):
return self.__compare_helper(other, lambda x: x == 0, notimpl_target=False)
def __ne__(self, other):
return self.__compare_helper(other, lambda x: x != 0, notimpl_target=True)
def __lt__(self, other):
return self.__compare_helper(other, lambda x: x < 0, notimpl_target=False)
def __le__(self, other):
return self.__compare_helper(other, lambda x: x <= 0, notimpl_target=False)
def __gt__(self, other):
return self.__compare_helper(other, lambda x: x > 0, notimpl_target=False)
def __ge__(self, other):
return self.__compare_helper(other, lambda x: x >= 0, notimpl_target=False)
class SpecItem(object):
"""A requirement specification."""
KIND_ANY = '*'
KIND_LT = '<'
KIND_LTE = '<='
KIND_EQUAL = '=='
KIND_SHORTEQ = '='
KIND_EMPTY = ''
KIND_GTE = '>='
KIND_GT = '>'
KIND_NEQ = '!='
KIND_CARET = '^'
KIND_TILDE = '~'
KIND_COMPATIBLE = '~='
# Map a kind alias to its full version
KIND_ALIASES = {
KIND_SHORTEQ: KIND_EQUAL,
KIND_EMPTY: KIND_EQUAL,
}
re_spec = re.compile(r'^(<|<=||=|==|>=|>|!=|\^|~|~=)(\d.*)$')
def __init__(self, requirement_string):
kind, spec = self.parse(requirement_string)
self.kind = kind
self.spec = spec
@classmethod
def parse(cls, requirement_string):
if not requirement_string:
raise ValueError("Invalid empty requirement specification: %r" % requirement_string)
# Special case: the 'any' version spec.
if requirement_string == '*':
return (cls.KIND_ANY, '')
match = cls.re_spec.match(requirement_string)
if not match:
raise ValueError("Invalid requirement specification: %r" % requirement_string)
kind, version = match.groups()
if kind in cls.KIND_ALIASES:
kind = cls.KIND_ALIASES[kind]
spec = Version(version, partial=True)
if spec.build is not None and kind not in (cls.KIND_EQUAL, cls.KIND_NEQ):
raise ValueError(
"Invalid requirement specification %r: build numbers have no ordering."
% requirement_string
)
return (kind, spec)
def match(self, version):
if self.kind == self.KIND_ANY:
return True
elif self.kind == self.KIND_LT:
return version < self.spec
elif self.kind == self.KIND_LTE:
return version <= self.spec
elif self.kind == self.KIND_EQUAL:
return version == self.spec
elif self.kind == self.KIND_GTE:
return version >= self.spec
elif self.kind == self.KIND_GT:
return version > self.spec
elif self.kind == self.KIND_NEQ:
return version != self.spec
elif self.kind == self.KIND_CARET:
if self.spec.major != 0:
upper = self.spec.next_major()
elif self.spec.minor != 0:
upper = self.spec.next_minor()
else:
upper = self.spec.next_patch()
return self.spec <= version < upper
elif self.kind == self.KIND_TILDE:
return self.spec <= version < self.spec.next_minor()
elif self.kind == self.KIND_COMPATIBLE:
if self.spec.patch is not None:
upper = self.spec.next_minor()
else:
upper = self.spec.next_major()
return self.spec <= version < upper
else: # pragma: no cover
raise ValueError('Unexpected match kind: %r' % self.kind)
def __str__(self):
return '%s%s' % (self.kind, self.spec)
def __repr__(self):
return '<SpecItem: %s %r>' % (self.kind, self.spec)
def __eq__(self, other):
if not isinstance(other, SpecItem):
return NotImplemented
return self.kind == other.kind and self.spec == other.spec
def __hash__(self):
return hash((self.kind, self.spec))
class Spec(object):
def __init__(self, *specs_strings):
subspecs = [self.parse(spec) for spec in specs_strings]
self.specs = sum(subspecs, ())
@classmethod
def parse(self, specs_string):
spec_texts = specs_string.split(',')
return tuple(SpecItem(spec_text) for spec_text in spec_texts)
def match(self, version):
"""Check whether a Version satisfies the Spec."""
return all(spec.match(version) for spec in self.specs)
def filter(self, versions):
"""Filter an iterable of versions satisfying the Spec."""
for version in versions:
if self.match(version):
yield version
def select(self, versions):
"""Select the best compatible version among an iterable of options."""
options = list(self.filter(versions))
if options:
return max(options)
return None
def __contains__(self, version):
if isinstance(version, Version):
return self.match(version)
return False
def __iter__(self):
return iter(self.specs)
def __str__(self):
return ','.join(str(spec) for spec in self.specs)
def __repr__(self):
return '<Spec: %r>' % (self.specs,)
def __eq__(self, other):
if not isinstance(other, Spec):
return NotImplemented
return set(self.specs) == set(other.specs)
def __hash__(self):
return hash(self.specs)
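# Illustrative usage of Spec (consistent with the matching rules above):
#
#     spec = Spec('>=0.1.0,<0.4.0')
#     spec.match(Version('0.2.5'))                       # True
#     spec.select([Version('0.1.0'), Version('0.3.0')])  # Version('0.3.0')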
def compare(v1, v2):
return base_cmp(Version(v1), Version(v2))
def match(spec, version):
return Spec(spec).match(Version(version))
def validate(version_string):
"""Validates a version string againt the SemVer specification."""
try:
Version.parse(version_string)
return True
except ValueError:
return False
the-stack_0_2998 | """Fully connected layer."""
import numpy as np
import theano
import theano.tensor as T
from athenet.layers import WeightedLayer
class FullyConnectedLayer(WeightedLayer):
"""Fully connected layer."""
def __init__(self, n_out, n_in=None, input_layer_name=None, name='fc'):
"""Create fully connected layer.
:param integer n_out: Number of output neurons.
:param integer n_in: Number of input neurons.
"""
super(FullyConnectedLayer, self).__init__(input_layer_name, name)
self._n_in = None
self.W_shared = None
self.n_out = n_out
self.n_in = n_in
@property
def n_in(self):
"""Number of input neurons."""
return self._n_in
@n_in.setter
def n_in(self, value):
if not value or self._n_in == value:
return
self._n_in = value
W_value = np.asarray(
np.random.normal(
loc=0.,
scale=np.sqrt(1. / self.n_out),
size=(self.n_in, self.n_out)
),
dtype=theano.config.floatX
)
self.W_shared = theano.shared(W_value, borrow=True)
b_value = np.zeros((self.n_out,), dtype=theano.config.floatX)
self.b_shared = theano.shared(b_value, borrow=True)
@property
def input_shape(self):
return self.n_in
@input_shape.setter
def input_shape(self, value):
self.n_in = np.prod(value)
@property
def output_shape(self):
return self.n_out
def _reshape_input(self, raw_layer_input):
"""Return input in the correct format for fully connected layer.
:param raw_layer_input: Input in the format (n_batches, n_in) or
compatible.
:type raw_layer_input: pair of integers
"""
return raw_layer_input.flatten(2)
def _get_output(self, layer_input):
"""Return layer's output.
:param layer_input: Input in the format (n_batches, n_in).
:return: Layer output.
"""
return T.dot(self.input, self.W_shared) + self.b_shared
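# Hedged construction sketch (the surrounding athenet machinery is assumed to
# set layer.input before _get_output is called; not shown in this file):
#
#     layer = FullyConnectedLayer(n_out=10, n_in=784)
#     layer.W_shared.get_value().shape   # (784, 10)
#     layer.output_shape                 # 10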
the-stack_0_2999 | #!/usr/bin/env python3
"""
TODO
USAGE:
yb_create_log_query_history.py [options]
PURPOSE:
Build/update long term history db table/views sourced from the sys.log_query view.
OPTIONS:
See the command line help message for all options.
(yb_create_log_query_history.py --help)
Output:
Action taken, like:
--created log_query_history table, log_query_history_text table and log_query_history_v view
--inserted X queries into log_query_history and log_query_history_text
"""
import getpass, re
from yb_common import Common, DBConnect, Text, Util
class create_log_query_history(Util):
"""Build/update long term history db table/views sourced from the sys.log_query view.
"""
config = {
'description': (
'Build/update long term history db table/views sourced from the sys.log_query view.'
'\n'
'\nnote:'
'\n On the first execution the create_log_query_history will;'
'\n 1. request super user credentials to create supporting stored procs.'
'\n 2. create the history query table, query_text table and query view.'
'\n Every run inserts new log queries into the history query and query_text tables.')
, 'optional_args_single': []
, 'usage_example': {
'cmd_line_args': """@$HOME/conn.args --log_table_name user_log_query_hist --where_clause "username NOT LIKE 'sys_ybd_%'" """
, 'file_args': [Util.conn_args_file] } }
def additional_args(self):
log_query_hist_grp = self.args_handler.args_parser.add_argument_group(
'log query history arguments')
log_query_hist_grp.add_argument("--log_table_name", default="log_query_history"
, help="the object name prefix used for the 2 log tables and view, defaults to 'log_query_history'")
log_query_hist_grp.add_argument("--where_clause", default="TRUE"
, help=("where clause applied to sys.log_query to limit the queries for which history is maintained,"
" defaults to 'TRUE' meaning all queries") )
def complete_db_conn(self):
if self.db_conn.ybdb['is_super_user']:
            self.args_handler.args_parser.error("dbuser '%s' must not be a db super user..." % self.db_conn.ybdb['user'])
return
def create_log_query_history(self):
result = self.db_conn.ybsql_query("""
SELECT create_log_query_history_p(
'{log_table_name}'
, $${where_clause}$$);""".format(
log_table_name=Common.quote_object_paths(self.args_handler.args.log_table_name)
, where_clause=self.args_handler.args.where_clause) )
return(result)
def create_su_db_conn(self):
su_env = self.db_conn.env.copy()
su_env['conn_db'] = self.db_conn.database
su_env['dbuser'] = input("Enter the super user name to create required stored procs with: ")
prompt = ("Enter the password for cluster %s, user %s: "
% (Text.color(su_env['host'], fg='cyan')
, Text.color(su_env['dbuser'], fg='cyan')))
su_env['pwd'] = getpass.getpass(prompt)
DBConnect.set_env(su_env)
self.su_db_conn = DBConnect(env=su_env, conn_type='su')
DBConnect.set_env(self.db_conn.env_pre)
if not self.su_db_conn.ybdb['is_super_user']:
Common.error("dbuser '%s' is not a super user..." % su_env['dbuser'])
def create_stored_procs(self):
filename = '%s/sql/log_query_history/materialize_sys_log_query_p.sql' % Common.util_dir_path
sql = open(filename).read()
sql = ("""SET SCHEMA '%s';
%s;
GRANT EXECUTE ON PROCEDURE materialize_sys_log_query_p(VARCHAR, VARCHAR, VARCHAR, BOOLEAN) TO %s;"""
% (self.db_conn.schema, sql, self.db_conn.env['dbuser']) )
result = self.su_db_conn.ybsql_query(sql)
result.on_error_exit()
filename = '%s/sql/log_query_history/create_log_query_history_p.sql' % Common.util_dir_path
sql = open(filename).read()
result = self.db_conn.ybsql_query(sql)
result.on_error_exit()
def fix_stored_proc_stdout(self, result):
"""stored procs print everything to stderr. This routine moves all stderr
lines starting with 'INFO: --' to stdout."""
matches = re.finditer(r"^(INFO:\s*)?(--.*)$", result.stderr, re.MULTILINE)
stdout = ''
stderr = ''
for matchNum, match in enumerate(matches, start=1):
if (match.group(1)):
stdout = ('%s\n%s' % (stdout, match.group(2))) if len(stdout) else match.group(2)
else:
stderr = ('%s\n%s' % (stderr, match.group(2))) if len(stderr) else match.group(2)
result.proc_return = result.stdout
result.stdout = stdout if len(stdout.strip()) else ''
result.stderr = stderr if len(stderr.strip()) else ''
def execute(self):
self.complete_db_conn()
result = self.create_log_query_history()
if re.search(r"create_log_query_history_p.*does not exist", result.stderr):
self.create_su_db_conn()
self.create_stored_procs()
result = self.create_log_query_history()
self.fix_stored_proc_stdout(result)
result.on_error_exit()
result.write()
exit(result.exit_code)
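# --- Hedged illustration (not part of the original source) -----------------
# fix_stored_proc_stdout() relies on the regex used above: stderr lines of the
# form 'INFO: --...' are routed to stdout (without the 'INFO:' prefix) while
# bare '--...' lines stay on stderr. The sample text is made up for this demo.
def _demo_info_line_split():
    sample = "INFO: --created table\n--still an error note\nNOTICE: dropped"
    stdout_lines, stderr_lines = [], []
    for m in re.finditer(r"^(INFO:\s*)?(--.*)$", sample, re.MULTILINE):
        (stdout_lines if m.group(1) else stderr_lines).append(m.group(2))
    return stdout_lines, stderr_lines  # (['--created table'], ['--still an error note'])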
def main():
clqh = create_log_query_history()
clqh.execute()
if __name__ == "__main__":
main() |
the-stack_0_3000 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""RAMSES RF - a RAMSES-II protocol decoder & analyser."""
import logging
from inspect import getmembers, isclass
from sys import modules
from typing import List
from .const import Discover, __dev_mode__
from .protocol import I_, RP, RQ, W_ # noqa: F401, isort: skip
from .protocol import ( # noqa: F401, isort: skip
_0001,
_0002,
_0004,
_0005,
_0006,
_0008,
_0009,
_000A,
_000C,
_000E,
_0016,
_0100,
_0150,
_01D0,
_01E9,
_0404,
_0418,
_042F,
_0B04,
_1030,
_1060,
_1081,
_1090,
_1098,
_10A0,
_10B0,
_10E0,
_10E1,
_1100,
_1260,
_1280,
_1290,
_1298,
_12A0,
_12B0,
_12C0,
_12C8,
_12F0,
_1300,
_1F09,
_1F41,
_1FC9,
_1FD0,
_1FD4,
_2249,
_22C9,
_22D0,
_22D9,
_22F1,
_22F3,
_2309,
_2349,
_2389,
_2400,
_2401,
_2410,
_2420,
_2D49,
_2E04,
_30C9,
_3120,
_313F,
_3150,
_31D9,
_31DA,
_31E0,
_3200,
_3210,
_3220,
_3221,
_3223,
_3B00,
_3EF0,
_3EF1,
_PUZZ,
)
DEFAULT_BDR_ID = "13:000730"
DEFAULT_EXT_ID = "17:000730"
DEFAULT_THM_ID = "03:000730"
_QOS_TX_LIMIT = 12
DEV_MODE = __dev_mode__ and False
_LOGGER = logging.getLogger(__name__)
if DEV_MODE:
_LOGGER.setLevel(logging.DEBUG)
def class_by_attr(name: str, attr: str) -> dict:
"""Return a mapping of a (unique) attr of classes in a module to that class.
For example:
{"OTB": OtbGateway, "CTL": Controller}
{"RAD": RadZone, "UFH": UfhZone}
{"evohome": Evohome}
"""
return {
getattr(c[1], attr): c[1]
for c in getmembers(
modules[name],
lambda m: isclass(m) and m.__module__ == name and hasattr(m, attr),
)
}
def discover_decorator(fnc):
# NOTE: only need to Wrap top-level entities
def wrapper(self, discover_flag=Discover.ALL) -> None:
if self._gwy.config.disable_discovery:
return
if not discover_flag:
return
return fnc(self, discover_flag=discover_flag)
return wrapper
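# --- Hedged illustration (not part of the original source) -----------------
# discover_decorator() suppresses discovery when it is disabled in the gateway
# config or when no flag bits are set. The Fake* names below are invented for
# this sketch only; they are not part of the RAMSES RF API.
def _demo_discover_decorator():
    class FakeConfig:
        disable_discovery = True

    class FakeGwy:
        config = FakeConfig()

    class FakeEntity:
        _gwy = FakeGwy()

        @discover_decorator
        def _discover(self, discover_flag=Discover.ALL) -> None:
            raise AssertionError("never reached while discovery is disabled")

    return FakeEntity()._discover()  # None, the decorated body never runs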
class Entity:
"""The Device/Zone base class.
This class is mainly concerned with the entity's state database.
"""
def __init__(self, gwy) -> None:
self._loop = gwy._loop
self._gwy = gwy
self.id = None
self._msgs = {}
self._msgz = {}
self._qos_tx_count = 0 # the number of pkts Tx'd with no matching Rx
def _qos_function(self, pkt, reset=False) -> None:
if reset:
self._qos_tx_count = 0
return
self._qos_tx_count += 1
if self._qos_tx_count == _QOS_TX_LIMIT:
_LOGGER.warning(
f"{pkt} < Sending now deprecated for {self} "
"(consider adjusting device_id filters)"
) # TODO: take whitelist into account
def _discover(self, discover_flag=Discover.ALL) -> None:
pass
def _handle_msg(self, msg) -> None: # TODO: beware, this is a mess
if (
self._gwy.pkt_protocol is None
or msg.src.id != self._gwy.pkt_protocol._hgi80.get("device_id")
):
self._qos_function(msg._pkt, reset=True)
if msg.verb in (I_, RP):
self._msgs[msg.code] = msg
if msg.code not in self._msgz:
self._msgz[msg.code] = {msg.verb: {msg._pkt._ctx: msg}}
elif msg.verb not in self._msgz[msg.code]:
self._msgz[msg.code][msg.verb] = {msg._pkt._ctx: msg}
else:
self._msgz[msg.code][msg.verb][msg._pkt._ctx] = msg
# TODO:
# if msg.verb == RP and msg._pkt._idx in self._msgz[msg.code].get(I_, []):
# assert msg.raw_payload == self._msgz[msg.code][I_][msg._pkt._idx].raw_payload, (
# f"\r\n{msg._pkt} ({msg._pkt._idx}),"
# f"\r\n{self._msgz[msg.code][I_][msg._pkt._idx]._pkt} ({msg._pkt._idx})"
# )
# del self._msgz[msg.code][I_][msg._pkt._idx]
# elif msg.verb == I_ and msg._pkt._idx in self._msgz[msg.code].get(RP, []):
# assert msg.raw_payload == self._msgz[msg.code][RP][msg._pkt._idx].raw_payload, (
# f"\r\n{msg._pkt} ({msg._pkt._idx}),"
# f"\r\n{self._msgz[msg.code][RP][msg._pkt._idx]._pkt} ({msg._pkt._idx})"
# )
# del self._msgz[msg.code][RP][msg._pkt._idx]
@property
def _msg_db(self) -> List: # a flattened version of _msgz[code][verb][indx]
"""Return a flattened version of _msgz[code][verb][indx]."""
return [m for c in self._msgz.values() for v in c.values() for m in v.values()]
# @property
# def _pkt_db(self) -> Dict:
# """Return a flattened version of ..."""
# return {msg.dtm: msg._pkt for msg in self._msgs_db}
def _make_cmd(self, code, dest_id, payload, verb=RQ, **kwargs) -> None:
self._send_cmd(self._gwy.create_cmd(verb, dest_id, code, payload, **kwargs))
def _send_cmd(self, cmd, **kwargs) -> None:
if self._gwy.config.disable_sending:
_LOGGER.info(f"{cmd} < Sending is disabled")
return
if self._qos_tx_count > _QOS_TX_LIMIT:
_LOGGER.info(f"{cmd} < Sending is deprecated for {self}")
return
if getattr(self, "has_battery", None) and cmd.dst.id == self.id:
_LOGGER.info(f"{cmd} < Sending inadvisable for {self} (has a battery)")
cmd._source_entity = self
# self._msgs.pop(cmd.code, None) # NOTE: Cause of DHW bug
self._gwy.send_cmd(cmd)
def _msg_value(self, code, *args, **kwargs) -> dict:
if isinstance(code, (str, tuple)): # a code or a tuple of codes
return self._msg_value_code(code, *args, **kwargs)
return self._msg_value_msg(code, *args, **kwargs) # assume is a Message
def _msg_value_code(self, code, verb=None, key=None, **kwargs) -> dict:
assert (
not isinstance(code, tuple) or verb is None
), f"Unsupported: using a tuple ({code}) with a verb ({verb})"
if verb:
try:
msgs = self._msgz[code][verb]
except KeyError:
msg = None
else:
msg = max(msgs.values()) if msgs else None
elif isinstance(code, tuple):
msgs = [m for m in self._msgs.values() if m.code in code]
msg = max(msgs) if msgs else None
else:
msg = self._msgs.get(code)
return self._msg_value_msg(msg, key=key, **kwargs)
def _msg_value_msg(self, msg, key=None, zone_idx=None, domain_id=None) -> dict:
if msg is None:
return
elif msg._expired:
delete_msg(msg)
if domain_id:
idx, val = "domain_id", domain_id
elif zone_idx:
idx, val = "zone_idx", zone_idx
else:
idx = val = None
if isinstance(msg.payload, list) and idx:
msg_dict = {
k: v for d in msg.payload for k, v in d.items() if d[idx] == val
}
elif isinstance(msg.payload, list):
# TODO: this isn't ideal: e.g. a controller is being treated like a 'stat
# I 101 --:------ --:------ 12:126457 2309 006 0107D0-0207D0 # is a CTL
msg_dict = msg.payload[0]
else:
msg_dict = msg.payload
assert (
not domain_id and not zone_idx or msg_dict.get(idx) == val
), f"{msg_dict} < Coding error: key={idx}, val={val}"
if key:
return msg_dict.get(key)
return {
k: v
for k, v in msg_dict.items()
if k not in ("dhw_idx", "domain_id", "zone_idx") and k[:1] != "_"
}
@property
def _codes(self) -> dict:
return {
"codes": sorted([k for k, v in self._msgs.items()]),
}
@property
def controller(self): # -> Optional[Controller]:
"""Return the entity's controller, if known."""
return self._ctl # TODO: if the controller is not known, try to find it?
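# --- Hedged illustration (not part of the original source) -----------------
# Each entity keeps its state in the nested dict _msgz[code][verb][ctx] -> msg,
# and the _msg_db property simply flattens it. The plain-dict sketch below
# mirrors that flattening with dummy message values.
def _demo_msgz_flatten():
    msgz = {
        "30C9": {I_: {"00": "msg-a"}, RP: {"00": "msg-b"}},
        "1F09": {I_: {None: "msg-c"}},
    }
    return [m for c in msgz.values() for v in c.values() for m in v.values()]
    # -> ["msg-a", "msg-b", "msg-c"]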
def delete_msg(msg) -> None:
"""Remove the msg from all state databases."""
entities = [msg.src]
if hasattr(msg.src, "_evo"):
entities.append(msg.src._evo)
if msg.src._evo._dhw:
entities.append(msg.src._evo._dhw)
entities.extend(msg.src._evo.zones)
# remove the msg from all the state DBs
for obj in entities:
if msg in obj._msgs.values():
del obj._msgs[msg.code]
try:
del obj._msgz[msg.code][msg.verb][msg._pkt._ctx]
except KeyError:
pass
|
the-stack_0_3003 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ........................................ NOTICE
#
# This file has been derived and modified from a source licensed under Apache Version 2.0.
# See files NOTICE and README.md for more details.
#
# ........................................ ******
"""Unit-test code for logtools"""
import os
import sys
import unittest
import logging
from tempfile import mkstemp
from datetime import datetime
from io import StringIO
from operator import itemgetter
from logtools import (filterbots, logfilter, geoip, logsample, logsample_weighted,
logparse, urlparse, logmerge, logplot, qps, sumstat)
from logtools.parsers import *
from logtools import logtools_config, interpolate_config, AttrDict
logging.basicConfig(level=logging.INFO)
class ConfigurationTestCase(unittest.TestCase):
def testInterpolation(self):
self.assertEqual(1, interpolate_config(1, 'bogus_sec', 'bogus_key'))
self.assertRaises(KeyError, interpolate_config, None, 'bogus_sec', 'bogus_key')
class URLParseTestCase(unittest.TestCase):
def setUp(self):
self.rows = [
"http://www.mydomain.com/my/path/myfile?myparam1=myval1&myparam2=myval2",
"http://www.mydomain2.com",
"http://www.mydomain3.com/home",
"http://fun.com/index.php?home"
]
def testUrlParse(self):
i=0
for row in urlparse(StringIO('\n'.join(self.rows)+'\n'), part='netloc'):
i+=1
self.assertEqual(i, len(self.rows), \
"Number of rows output is not equal to input size")
def testMultipleQueryParams(self):
url = "http://www.mydomain.com/my/path/myfile?myparam1=myval1&myparam2=myval2"
for row in urlparse(StringIO(url+"\n"), part='query', query_params='myparam1,myparam2'):
self.assertEqual(row[0], 'myval1', "Returned query param value was not as expected: %s" % \
row)
class ParsingTestCase(unittest.TestCase):
def setUp(self):
self.clf_rows = [
'127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326',
'127.0.0.2 - jay [10/Oct/2000:13:56:12 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326'
]
self.json_rows = [
'{"key1":"val1","key2":true,"key3":31337,"key4":null,"nested_key":[{"nested_key_1":"2"}]}'
]
self.uwsgi_rows = [
"[pid: 11216|app: 0|req: 2680/5864] 24.218.159.119 () {40 vars in 957 bytes} [Thu Jun 13 22:29:59 2013] GET /my/uri/path/?param_id=52&token=s61048gkje_l001z => generated 1813 bytes in 11 msecs (HTTP/1.1 200) 2 headers in 73 bytes (1 switches on core 0)",
"[pid: 11217|app: 0|req: 3064/5865] 10.18.50.145 () {34 vars in 382 bytes} [Thu Jun 13 22:30:00 2013] GET / => generated 8264 bytes in 9 msecs (HTTP/1.1 200) 2 headers in 73 bytes (1 switches on core 0)"
]
def testJSONParser(self):
parser = JSONParser()
for logrow in self.json_rows:
parsed = parser(logrow)
self.assertNotEqual(parsed, None, "Could not parse line: %s" % str(logrow))
def testAccessLog(self):
parser = AccessLog()
parser.set_format(format='%h %l %u %t "%r" %>s %b')
self.assertRaises(ValueError, parser, 'example for invalid format')
for logrow in self.clf_rows:
parsed = parser(logrow)
self.assertNotEqual(parsed, None, "Could not parse line: %s" % str(logrow))
def testCommonLogFormat(self):
parser = CommonLogFormat()
self.assertRaises(ValueError, parser, 'example for invalid format')
for logrow in self.clf_rows:
parsed = parser(logrow)
self.assertNotEqual(parsed, None, "Could not parse line: %s" % str(logrow))
def testuWSGIParser(self):
parser = uWSGIParser()
for logrow in self.uwsgi_rows:
parsed = parser(logrow)
self.assertNotEqual(parsed, None, "Could not parse line: %s" % logrow)
def testLogParse(self):
options = AttrDict({'parser': 'CommonLogFormat', 'field': 4, 'header': False})
fh = StringIO('\n'.join(self.clf_rows))
output = [l for l in logparse(options, None, fh)]
self.assertEqual(len(output), len(self.clf_rows), "Output size was not equal to input size!")
def testMultiKeyGetter(self):
parser = CommonLogFormat()
func = multikey_getter_gen(parser, keys=(1,2), is_indices=True)
fh = StringIO('\n'.join(self.clf_rows))
output = [func(l) for l in fh]
self.assertEqual(len(output), len(self.clf_rows), "Output size was not equal to input size!")
class FilterBotsTestCase(unittest.TestCase):
def setUp(self):
self.options = AttrDict({
"reverse": False,
"unescape": False,
"printlines": False,
"ip_ua_re": "^(?P<ip>.*?) - USER_AGENT:'(?P<ua>.*?)'",
"bots_ips": StringIO("\n".join([
"6.6.6.6"
]) + "\n"),
"bots_ua": StringIO("\n".join([
"## Example comment ##",
"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"ssearch_bot/Nutch-1.0 (sSearch Crawler; http://www.semantissimo.de)",
"r'.*crawler'",
"s'MSIECrawler)'",
"p'DotSpotsBot'",
"p'Java/'"
]) + "\n")
})
self.fh = StringIO(
"127.0.0.1 - USER_AGENT:'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)' - ...\n" \
"255.255.255.255 - USER_AGENT:'Mozilla' - ...\n" \
"1.1.1.1 - USER_AGENT:'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; MSIECrawler)'\n" \
"2.2.2.2 - USER_AGENT:'Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Win 9x 4.90; .NET CLR 1.1.4322; MSIECrawler)'\n" \
"3.3.3.3 - USER_AGENT:'DotSpotsBot/0.2 (crawler; support at dotspots.com)'\n" \
"4.4.4.4 - USER_AGENT:'inagist.com url crawler'\n" \
"5.5.5.5 - USER_AGENT:'Java/1.6.0_18'\n" \
"6.6.6.6 - USER_AGENT:'ssearch_bot/Nutch-1.0 (sSearch Crawler; http://www.semantissimo.de)'\n"
)
self.json_fh = StringIO(
'''{"timestamp":"2010\/09\/01 00:00:01","user_agent":"Mozilla\/5.0 (compatible; Googlebot\/2.1; +http:\/\/www.google.com\/bot.html)","user_ip":"66.249.71.108"}\n''' \
'''{"timestamp":"2010\/10\/01 11:00:01","user_agent":"Mozilla\/5.0 (compatible; Googlebot\/2.1; +http:\/\/www.google.com\/bot.html)","user_ip":"66.249.71.109"}\n''' \
'''{"timestamp":"2010\/09\/01 00:00:01","user_agent":"Mozilla\/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.11) Gecko\/20100701 Firefox\/3.5.11 (.NET CLR 3.5.30729)","user_ip":"100.100.1.100"}\n''' \
'''{"timestamp":"2010\/10\/01 00:00:01","user_agent":"Mozilla\/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.11) Gecko\/20100701 Firefox\/3.5.11 (.NET CLR 3.5.30729)","user_ip":"6.6.6.6"}\n''' \
)
def testParserFiltering(self):
json_options = self.options
json_options['parser'] = 'JSONParser'
json_options['ip_ua_fields'] = 'ua:user_agent,ip:user_ip'
i=0
for l in filterbots(fh=self.json_fh, **json_options):
i+=1
self.assertEqual(i, 1, "filterbots output size different than expected: %s" % str(i))
def testRegExpFiltering(self):
i=0
for l in filterbots(fh=self.fh, **self.options):
i+=1
self.assertEqual(i, 1, "filterbots output size different than expected: %s" % str(i))
class GeoIPTestCase(unittest.TestCase):
def setUp(self):
self.options = AttrDict({ 'ip_re': '^(.*?) -' })
self.fh = StringIO(
"127.0.0.1 - USER_AGENT:'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)' - ...\n" \
"255.255.255.255 - USER_AGENT:'Mozilla' - ...\n" \
"74.125.225.48 - USER_AGENT:'IE' - ...\n" \
"65.55.175.254 - USER_AGENT:'IE' - ...\n"
)
def testGeoIP(self):
try:
import GeoIP
except ImportError:
print( "GeoIP Python package not available - skipping geoip unittest.",
file = sys.stderr)
return
output = [(geocode, ip, line) for geocode, ip, line in geoip(fh=self.fh, **self.options)]
self.assertEqual(len(output), 2, "Output size was different than expected: %s" % str(len(output)))
def testFilter(self):
"""Test GeoIP filtering functionality"""
try:
import GeoIP
except ImportError:
print ("GeoIP Python package not available - skipping geoip unittest.",
file = sys.stderr)
return
# Check positive filter
self.options['filter'] = 'United States'
output = [(geocode, ip, line) for geocode, ip, line in geoip(fh=self.fh, **self.options)]
self.assertEqual(len(output), 2, "Output size was different than expected: %s" % str(len(output)))
# Check negative filter
self.options['filter'] = 'India'
output = [(geocode, ip, line) for geocode, ip, line in geoip(fh=self.fh, **self.options)]
self.assertEqual(len(output), 0, "Output size was different than expected: %s" % str(len(output)))
class SamplingTestCase(unittest.TestCase):
def setUp(self):
self.options = AttrDict({ 'num_samples': 1 })
self.weighted_opts = AttrDict({
'num_samples': 5,
'field': 1,
'delimiter': ' '
})
self.fh = StringIO("\n".join([
'5 five', '1 one', '300 threehundred', '500 fivehundred',
'0 zero', '-1 minusone', '670 sixhundredseventy', '1000 thousand',
'22 twentytwo', '80 eighty', '3 three'
]))
def testUniformSampling(self):
output = [r for r in logsample(fh=self.fh, **self.options)]
self.assertEqual(len(output), self.options.num_samples,
"logsample output size different than expected: %s" % len(output))
def testWeightedSampling(self):
output = [(k, r) for k, r in logsample_weighted(fh=self.fh, **self.weighted_opts)]
self.assertEqual(len(output), self.weighted_opts.num_samples,
"logsample output size different than expected: %s" % len(output))
class FilterTestCase(unittest.TestCase):
"""Unit-test for the logfilter functionality"""
def setUp(self):
self.testset = StringIO("\n".join([
"AA word",
"word AA word",
"word AA",
"AA",
"aa word",
"wordAA",
"AAword",
"wordAAword",
"CC DD word"
])+"\n")
self.exp_emitted_wb = 4
self.exp_emitted = 1
self.blacklist = StringIO("\n".join([
'AA',
'bb',
'CC DD'
])+"\n")
def testACWB(self):
"""Aho-Corasick-based matching with Word Boundaries"""
lines = 0
for l in logfilter(self.testset, blacklist=self.blacklist, field=1, delimiter="\t",
with_acora=True, ignorecase=False,
word_boundaries=True):
#print(l)
lines += 1
self.assertEqual(lines, self.exp_emitted_wb, "Number of lines emitted was not as expected: %s (Expected: %s)" %
(lines, self.exp_emitted_wb))
def testAC(self):
"""Aho-Corasick-based matching"""
lines = 0
for l in logfilter(self.testset, blacklist=self.blacklist, field=1, delimiter="\t",
with_acora=True, ignorecase=False,
word_boundaries=False):
#print(l)
lines += 1
self.assertEqual(lines, self.exp_emitted, "Number of lines emitted was not as expected: %s (Expected: %s)" %
(lines, self.exp_emitted))
def testRE(self):
"""Regular Expression-based matching"""
lines = 0
for l in logfilter(self.testset, blacklist=self.blacklist, field=1, delimiter="\t",
with_acora=False, ignorecase=False,
word_boundaries=False):
#print( l)
lines += 1
self.assertEqual(lines, self.exp_emitted, "Number of lines emitted was not as expected: %s (Expected: %s)" %
(lines, self.exp_emitted))
def testREWB(self):
"""Regular Expression-based matching with Word Boundaries"""
lines = 0
for l in logfilter(self.testset, blacklist=self.blacklist, field=1, delimiter="\t",
with_acora=False, ignorecase=False,
word_boundaries=True):
#print( l)
lines += 1
self.assertEqual(lines, self.exp_emitted_wb, "Number of lines emitted was not as expected: %s (Expected: %s)" %
(lines, self.exp_emitted_wb))
class MergeTestCase(unittest.TestCase):
def setUp(self):
self.tempfiles = [mkstemp(), mkstemp(), mkstemp()]
self.args = [fname for fh, fname in self.tempfiles]
def tearDown(self):
"""Cleanup temporary files created by test"""
for fh, fname in self.tempfiles:
os.remove(fname)
def testNumericMerge(self):
t1 =['1 one', '5 five', '300 threehundred',
'500 fivehundred']
os.write(self.tempfiles[0][0], "\n".join(t1).encode())
t2 = ['-1 minusone', '0 zero',
'670 sixhundredseventy' ,'1000 thousand']
os.write(self.tempfiles[1][0], "\n".join(t2).encode())
t3= ['3 three', '22 twentytwo', '80 eighty']
os.write(self.tempfiles[2][0], "\n".join(t3).encode())
options = AttrDict({'delimiter': ' ', 'field': 1, 'numeric': True })
output = [(k, l) for k, l in logmerge(options, self.args)]
self.assertEqual(len(output), 11, "Output size was not equal to input size!")
self.assertEqual( list( map(itemgetter(0), output)),
sorted ( list( map( lambda x: int(x[0]), output))),
"Output was not numerically sorted!")
def testDateMerge(self):
t1 = ['2010/01/12 07:00:00,one', '2010/01/12 08:00:00,five',
'2010/01/13 10:00:00,threehundred']
os.write(self.tempfiles[0][0], "\n".join(t1).encode())
t2 =['2010/01/12 07:30:00,one', '2010/01/12 08:10:00,five',
'2010/01/12 21:00:00,threehundred']
os.write(self.tempfiles[1][0], "\n".join(t2).encode())
t3 = ['2010/01/11 05:33:03,one', '2010/01/12 03:10:00,five',
'2010/01/21 22:00:00,threehundred']
os.write(self.tempfiles[2][0], "\n".join(t3).encode())
dateformat = '%Y/%m/%d %H:%M:%S'
options = AttrDict({'delimiter': ',', 'field': 1, 'datetime': True, 'dateformat': dateformat })
output = [(k, l) for k, l in logmerge(options, self.args)]
self.assertEqual(len(output), 9, "Output size was not equal to input size!")
self.assertEqual( list( map(itemgetter(0), output)),
sorted( list( map(itemgetter(0), output))),
"Output was not time sorted!")
def testLexicalMerge(self):
t1 = ['1 one', '300 threehundred', '5 five',
'500 fivehundred']
os.write(self.tempfiles[0][0], "\n".join(t1).encode())
t2 = ['-1 minusone', '0 zero', '1000 thousand',
'670 sixhundredseventy']
os.write(self.tempfiles[1][0], "\n".join(t2).encode())
t3 = ['22 twentytwo', '3 three',
'80 eighty']
os.write(self.tempfiles[2][0], "\n".join(t3).encode())
options = AttrDict({ 'delimiter': ' ', 'field': 1, 'numeric': False })
output = [(k, l) for k, l in logmerge(options, self.args)]
self.assertEqual(len(output), 11, "Output size was not equal to input size!")
self.assertEqual( list( map(itemgetter(0), output)),
sorted( list( map(itemgetter(0), output))),
"Output was not lexically sorted!")
#
# QPS: Queries Per Second
#
class QPSTestCase(unittest.TestCase):
def setUp(self):
self.options = AttrDict({
"ignore": True,
"dt_re": r'^\[(.*?)\]',
"dateformat": "%d/%b/%Y:%H:%M:%S -0700",
"window_size": 15
})
self.fh = StringIO(
'[10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[10/Oct/2000:13:55:38 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[10/Oct/2000:13:56:59 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[10/Oct/2000:13:57:01 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[11/Oct/2000:14:01:00 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[11/Oct/2000:14:01:13 -0700] "GET /apache_pb.gif HTTP/1.0" \n' \
'[11/Oct/2000:14:01:14 -0700] "GET /apache_pb.gif HTTP/1.0" \n'
)
def testQps(self):
blocks=0
qs=[]
qpsVal = list( qps(fh=self.fh, **self.options))
sys.stderr.write(f"In testQps, qpsVal ({type(qpsVal)}):\t{qpsVal}\n")
for q in qpsVal:
blocks+=1
qs.append(q)
self.assertEqual(blocks, 3,
"qps output size different than expected: %s" % str(blocks))
class PlotTestCase(unittest.TestCase):
def setUp(self):
self.fh = StringIO("\n".join([
'5 five', '1 one', '300 threehundred', '500 fivehundred',
'0 zero', '-1 minusone', '670 sixhundredseventy', '1000 thousand',
'22 twentytwo', '80 eighty', '3 three'
]))
def testGChart(self):
try:
import pygooglechart
except ImportError:
print( "pygooglechart Python package not available - skipping logplot gchart unittest.",
file = sys.stderr)
return
options = AttrDict({
'backend': 'gchart',
'output': False,
'limit': 10,
'field': 1,
'delimiter': ' ',
'legend': True,
'width': 600,
'height': 300
})
chart = None
for plot_type in ('pie', 'line'):
self.fh.seek(0)
options['type'] = plot_type
chart = logplot(options, None, self.fh)
self.assertNotEqual(chart, None, "logplot returned None. Expected a Plot object")
# Should raise ValueError here due to fh being at EOF
self.assertRaises(ValueError, logplot, options, None, self.fh)
tmp_fh, tmp_fname = mkstemp()
chart.download(tmp_fname)
os.remove(tmp_fname)
class SumstatTestCase(unittest.TestCase):
def setUp(self):
self.data = StringIO('\n'.join([
'500 val1',
'440 val2',
'320 val3',
'85 val4',
'13 val5'
]))
self.avg = 271.6
self.N = 1358
self.M = 5
def testSumstat(self):
stat = sumstat(fh=self.data, delimiter=' ', reverse=True)
self.assertEqual(stat['M'], self.M)
self.assertEqual(stat['N'], self.N)
self.assertEqual(stat['avg'], self.avg)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_3004 | from model.contact import Contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def open_contacts_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("index.php") and len(wd.find_elements_by_link_text("Last name")) > 0
and len(wd.find_elements_by_link_text("All phones")) > 0):
wd.find_element_by_link_text("home").click()
def fill_form(self, contact):
# fill in form
self.change_field_value("firstname", contact.first_name)
self.change_field_value("middlename", contact.middle_name)
self.change_field_value("lastname", contact.last_name)
self.change_field_value("nickname", contact.nick_name)
self.change_field_value("address", contact.address)
self.change_field_value("email", contact.email)
self.change_field_value("email2", contact.email2)
self.change_field_value("email3", contact.email3)
self.change_field_value("home", contact.home_phone)
self.change_field_value("work", contact.work_phone)
self.change_field_value("mobile", contact.mobile_phone)
self.change_field_value("fax", contact.fax_phone)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def create(self, contact):
wd = self.app.wd
self.open_contacts_page()
# click on add new
wd.find_element_by_link_text("add new").click()
self.fill_form(contact)
# submit form
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
self.contact_cache = None
def delete_contact_by_index(self, index):
wd = self.app.wd
self.open_contacts_page()
self.select_contact_by_index(index)
wd.find_element_by_xpath("//input[@value='Delete']").click()
# confirm deletion
wd.switch_to_alert().accept()
self.open_contacts_page()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.open_contacts_page()
self.select_contact_by_id(id)
wd.find_element_by_xpath("//input[@value='Delete']").click()
# confirm deletion
wd.switch_to_alert().accept()
self.open_contacts_page()
self.contact_cache = None
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def delete_first_contact(self):
self.delete_contact_by_index(0)
def edit_contact_by_index(self, index, contact):
wd = self.app.wd
self.open_contacts_page()
self.open_contact_to_edit_by_index(index)
self.fill_form(contact)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.open_contacts_page()
self.contact_cache = None
def edit_contact_by_id(self, id, contact):
wd = self.app.wd
self.open_contacts_page()
self.open_contact_to_edit_by_id(id)
self.fill_form(contact)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.open_contacts_page()
self.contact_cache = None
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_xpath("//img[@alt='Edit']")[index].click()
def open_contact_to_edit_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("a[href='edit.php?id=%s']" % id).click()
def edit_first_contact(self, contact):
wd = self.app.wd
self.open_contacts_page()
# select first contact
self.open_contact_to_edit_by_index(0)
self.fill_form(contact)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.open_contacts_page()
self.contact_cache = None
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_contacts_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
last_name = cells[1].text
first_name = cells[2].text
id = row.find_element_by_name("selected[]").get_attribute("value")
all_phones = cells[5].text
address = cells[3].text
all_emails = cells[4].text
self.contact_cache.append(Contact(first_name=first_name, last_name=last_name, contact_id=id,
all_phones_from_home_page=all_phones, address=address,
all_emails_from_home_page=all_emails))
return list(self.contact_cache)
def get_simple_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.open_contacts_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
last_name = cells[1].text
first_name = cells[2].text
id = row.find_element_by_name("selected[]").get_attribute("value")
self.contact_cache.append(Contact(first_name=first_name, last_name=last_name, contact_id=id))
return list(self.contact_cache)
def count(self):
wd = self.app.wd
self.open_contacts_page()
return len(wd.find_elements_by_xpath("//img[@alt='Edit']"))
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.open_contacts_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contacts_page()
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
middlename = wd.find_element_by_name("middlename").get_attribute("value")
nickname = wd.find_element_by_name("nickname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
homephone = wd.find_element_by_name("home").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
faxphone = wd.find_element_by_name("fax").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return Contact(first_name=firstname, last_name=lastname, contact_id=id, home_phone=homephone, mobile_phone=mobilephone,
work_phone=workphone, fax_phone=faxphone, middle_name=middlename, nick_name=nickname,
email=email, email2=email2, email3=email3, address=address)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
faxphone = re.search("F: (.*)", text).group(1)
return Contact(home_phone=homephone, mobile_phone=mobilephone, work_phone=workphone, fax_phone=faxphone)
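# --- Hedged illustration (not part of the original source) -----------------
# get_contact_from_view_page() scrapes phone numbers from the view-page text
# using the 'H:/W:/M:/F:' prefixes matched above. The sample text below is
# invented purely to show what that parsing yields.
def _demo_view_page_phone_parsing():
    text = "Some Name\nH: 111\nW: 222\nM: 333\nF: 444"
    return Contact(home_phone=re.search("H: (.*)", text).group(1),
                   work_phone=re.search("W: (.*)", text).group(1),
                   mobile_phone=re.search("M: (.*)", text).group(1),
                   fax_phone=re.search("F: (.*)", text).group(1))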
|
the-stack_0_3007 | #-*-coding:utf-8-*-
import torch.utils.data
from data.base_data_loader import BaseDataLoader
def CreateDataset(opt):
dataset = None
if opt.dataset_mode == 'aligned':
from data.aligned_dataset import AlignedDataset
dataset = AlignedDataset()
elif opt.dataset_mode == 'single':
from data.single_dataset import SingleDataset
dataset = SingleDataset()
else:
raise ValueError("Dataset [%s] not recognized." % opt.dataset_mode)
print("dataset [%s] was created" % (dataset.name()))
dataset.initialize(opt)
return dataset
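# --- Hedged illustration (not part of the original source) -----------------
# CreateDataset() and CustomDatasetDataLoader only touch a handful of option
# attributes (dataset_mode, batchSize, serial_batches, nThreads,
# max_dataset_size). The namespace below is a minimal stand-in for the options
# object normally produced by the project's option parser.
def _example_options():
    from argparse import Namespace
    return Namespace(dataset_mode="aligned", batchSize=1, serial_batches=False,
                     nThreads=4, max_dataset_size=float("inf"))
    # usage sketch (needs real data on disk and the remaining dataset options):
    # loader = CustomDatasetDataLoader(); loader.initialize(_example_options())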
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, opt):
BaseDataLoader.initialize(self, opt)
self.dataset = CreateDataset(opt)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads))
def load_data(self):
return self
def __len__(self):
return min(len(self.dataset), self.opt.max_dataset_size)
def __iter__(self):
for i, data in enumerate(self.dataloader):
if i >= self.opt.max_dataset_size:
break
yield data |
the-stack_0_3008 | import math
import os
import random
import time
import albumentations as A
import cv2
import numpy as np
import pandas as pd
import tifffile
import torch
from torch.utils.data import Dataset
# VV mean: -15.830463789539426
# VV std: 6.510123043441801
# VH mean: -24.66130160959856
# VH std: 6.684547156770566
def normalize_band(band, ignored_mask=0):
band[band < -32760] = -100
ignored_idx = band == -100
if np.count_nonzero(band != -100) == 0:
band[:, :] = ignored_mask
else:
band = (band + 40) / 15
band[ignored_idx] = ignored_mask
return band
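# --- Hedged illustration (not part of the original source) -----------------
# normalize_band() rescales SAR backscatter with (x + 40) / 15 and writes
# `ignored_mask` into nodata pixels (raw values below -32760). The tiny array
# below just exercises those two paths.
def _demo_normalize_band():
    band = np.array([[-25.0, -40.0], [-32768.0, -10.0]], dtype=np.float32)
    return normalize_band(band.copy(), ignored_mask=0)
    # -> [[1.0, 0.0], [0.0, 2.0]]: valid pixels rescaled, nodata forced to 0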
train_transforms = A.Compose([
#A.Rotate(limit=30, border_mode=cv2.BORDER_CONSTANT, p=0.3),
# A.HorizontalFlip(),
# A.VerticalFlip()
], additional_targets={
'conf_mask': 'mask',
'length_mask': 'mask',
'vessel_mask': 'mask',
'fishing_mask': 'mask',
'center_mask': 'mask'})
class XviewValDataset(Dataset):
def __init__(
self,
mode: str,
dataset_dir: str,
annotation_csv: str,
folds_csv: str,
multiplier: int = 1,
fold: int = 0,
crop_size: int = 1024,
sigma: int = 2,
radius: int = 4,
transforms: A.Compose = train_transforms
):
df = pd.read_csv(folds_csv)
self.radius = radius
if mode == "train":
self.names = df[df.fold != fold].scene_id.tolist()
else:
self.names = df[df.fold == fold].scene_id.tolist()
self.mode = mode
self.dataset_dir = dataset_dir
self.transforms = transforms
self.df = pd.read_csv(annotation_csv)
self.crop_size = crop_size
self.sigma = sigma
self.names = multiplier * self.names
if self.mode == "train":
random.shuffle(self.names)
def __getitem__(self, i):
if self.mode == "val":
return {
"name": self.names[i],
}
rm = random.Random()
rm.seed(time.time_ns())
name = self.names[i]
crop_size = self.crop_size
vv_full = tifffile.memmap(os.path.join(self.dataset_dir, "validation", name, "VV_dB.tif"), mode="r")
vh_full = tifffile.memmap(os.path.join(self.dataset_dir, "validation", name, "VH_dB.tif"), mode="r")
h, w = vv_full.shape
df = self.df
df = df[df.scene_id == name]
points = [row for _, row in df.iterrows()]
if len(points) > 1 and random.random() > 0.5:
point_idx = rm.randint(0, len(points) - 1)
point = points[point_idx]
y, x = point.detect_scene_row, point.detect_scene_column
max_shift_pad = 32
min_x_start = min(max(x - crop_size + max_shift_pad, 0), w - crop_size - 32)
min_y_start = min(max(y - crop_size + max_shift_pad, 0), h - crop_size - 32)
max_x_start = max(min(x - max_shift_pad, w - crop_size - 1), 0)
max_y_start = max(min(y - max_shift_pad, h - crop_size - 1), 0)
if max_x_start < min_x_start:
min_x_start, max_x_start = max_x_start, min_x_start
if max_y_start < min_y_start:
min_y_start, max_y_start = max_y_start, min_y_start
h_start = rm.randint(int(min_y_start), int(max_y_start))
w_start = rm.randint(int(min_x_start), int(max_x_start))
h_end = h_start + crop_size
w_end = w_start + crop_size
vh = vh_full[h_start: h_end, w_start: w_end].astype(np.float32)
vv = vv_full[h_start: h_end, w_start: w_end].astype(np.float32)
else:
for i in range(5):
h_start = rm.randint(0, h - crop_size - 1)
w_start = rm.randint(0, w - crop_size - 1)
h_end = h_start + crop_size
w_end = w_start + crop_size
vh = vh_full[h_start: h_end, w_start: w_end].astype(np.float32)
known_pixels = np.count_nonzero(vh > -1000)
vv = vv_full[h_start: h_end, w_start: w_end].astype(np.float32)
if known_pixels / (crop_size * crop_size) > 0.05:
break
object_mask = np.zeros_like(vv, dtype=np.float32)
vessel_mask = np.zeros_like(vv, dtype=np.float32)
fishing_mask = np.zeros_like(vv, dtype=np.float32)
conf_mask = np.zeros_like(vv, dtype=np.float32)
length_mask = np.zeros_like(vv)
length_mask[:, :] = -1
center_mask = np.zeros_like(vv)
size = 6 * self.sigma + 3
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0, y0 = 3 * self.sigma + 1, 3 * self.sigma + 1
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))
crop_coords = np.zeros((1024, 4))
crop_coords_idx = 0
for _, row in df.iterrows():
if h_start < row.detect_scene_row < h_end and w_start < row.detect_scene_column < w_end:
x = row.detect_scene_column - w_start
y = row.detect_scene_row - h_start
# CENTER MASK
# upper left
ul = int(np.round(x - 3 * self.sigma - 1)), int(np.round(y - 3 * self.sigma - 1))
# bottom right
br = int(np.round(x + 3 * self.sigma + 2)), int(np.round(y + 3 * self.sigma + 2))
c, d = max(0, -ul[0]), min(br[0], self.crop_size) - ul[0]
a, b = max(0, -ul[1]), min(br[1], self.crop_size) - ul[1]
cc, dd = max(0, ul[0]), min(br[0], self.crop_size)
aa, bb = max(0, ul[1]), min(br[1], self.crop_size)
center_mask[aa:bb, cc:dd] = np.maximum(
center_mask[aa:bb, cc:dd], g[a:b, c:d])
# DEFINE VESSELS
# man-made maritime object
object_cls = 1
vessel_cls = 0
fishing_cls = 0
if math.isnan(row.is_vessel):
vessel_cls = 255
elif row.is_vessel:
vessel_cls = 1
if vessel_cls == 0:
fishing_cls = 0
elif math.isnan(row.is_fishing):
fishing_cls = 255
elif row.is_fishing:
fishing_cls = 1
confs = ['none', 'LOW', 'MEDIUM', 'HIGH']
conf_idx = confs.index(row.confidence)
if conf_idx > 1:
conf_idx = 2
cv2.circle(conf_mask, center=(x, y), radius=self.radius, color=conf_idx, thickness=-1)
cv2.circle(object_mask, center=(x, y), radius=self.radius if object_cls < 200 else 7, color=object_cls,
thickness=-1)
cv2.circle(vessel_mask, center=(x, y), radius=self.radius if vessel_cls < 200 else 7, color=vessel_cls,
thickness=-1)
cv2.circle(fishing_mask, center=(x, y), radius=self.radius if fishing_cls < 200 else 7,
color=fishing_cls,
thickness=-1)
# length MASK
vessel_length = -1
if not math.isnan(row.vessel_length_m):
vessel_length = row.vessel_length_m
cv2.circle(length_mask, center=(x, y), radius=self.radius if vessel_length > 0 else 7,
color=vessel_length,
thickness=-1)
if conf_idx > 1:
pad = 9
y1, y2 = y - pad, y + pad
x1, x2 = x - pad, x + pad
if x1 > 32 and x2 < self.crop_size - 32 and y1 > 32 and y2 < self.crop_size - 32:
crop_coords[crop_coords_idx] = np.array([x1, y1, x2, y2])
crop_coords_idx += 1
vv = normalize_band(band=vv, ignored_mask=0)
vh = normalize_band(band=vh, ignored_mask=0)
image = np.stack([vv, vh], axis=-1).astype(np.float32)
sample = self.transforms(image=image, mask=object_mask, center_mask=center_mask, length_mask=length_mask,
conf_mask=conf_mask, fishing_mask=fishing_mask, vessel_mask=vessel_mask)
image = sample["image"]
object_mask = sample["mask"]
center_mask = sample["center_mask"]
length_mask = sample["length_mask"]
vessel_mask = sample["vessel_mask"]
fishing_mask = sample["fishing_mask"]
conf_mask = sample["conf_mask"]
image = torch.from_numpy(image).float().moveaxis(-1, 0)
center_mask = torch.from_numpy(center_mask).float().unsqueeze(0) * 255
length_mask = torch.from_numpy(length_mask).float().unsqueeze(0)
conf_mask = torch.from_numpy(conf_mask).long()
object_mask = torch.from_numpy(object_mask).float().unsqueeze(0)
vessel_mask = torch.from_numpy(vessel_mask).float().unsqueeze(0)
fishing_mask = torch.from_numpy(fishing_mask).float().unsqueeze(0)
if random.random() < 0.5:
# 180 rotate to handle different sar orientation
image = torch.rot90(image, 2, dims=(1, 2))
center_mask = torch.rot90(center_mask, 2, dims=(1, 2))
length_mask = torch.rot90(length_mask, 2, dims=(1, 2))
conf_mask = torch.rot90(conf_mask, 2, dims=(0, 1))
object_mask = torch.rot90(object_mask, 2, dims=(1, 2))
vessel_mask = torch.rot90(vessel_mask, 2, dims=(1, 2))
fishing_mask = torch.rot90(fishing_mask, 2, dims=(1, 2))
ori_crops = crop_coords.copy()
crop_coords = self.crop_size - crop_coords
crop_coords[ori_crops == 0] = 0
crop_coords = crop_coords[:, [2, 3, 0, 1]]
crop_coords = torch.from_numpy(crop_coords).long()
return {
"image": image,
"object_mask": object_mask,
"crop_coords": crop_coords,
"conf_mask": conf_mask,
"vessel_mask": vessel_mask,
"fishing_mask": fishing_mask,
"center_mask": center_mask,
"length_mask": length_mask,
"name": name,
}
def __len__(self):
return len(self.names)
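# --- Hedged illustration (not part of the original source) -----------------
# The centre heat-map above stamps a (6 * sigma + 3)^2 Gaussian around every
# annotated point. The helper below rebuilds just that kernel so its shape and
# peak value can be inspected in isolation.
def _demo_center_kernel(sigma=2):
    size = 6 * sigma + 3
    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]
    x0 = y0 = 3 * sigma + 1
    return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
    # shape (15, 15) for sigma=2, with the peak value 1.0 at [7, 7]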
|
the-stack_0_3009 | # -*- coding: utf-8 -*-
# Copyright(C) 2017 Phyks (Lucas Verney)
#
# This file is part of a woob module.
#
# This woob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This woob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this woob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import datetime
from woob.browser.pages import JsonPage, HTMLPage, pagination
from woob.browser.filters.standard import (
CleanDecimal, CleanText, Currency, Date, Env, Format, Regexp, RegexpError
)
from woob.browser.filters.html import AbsoluteLink, Attr, Link, XPathNotFound
from woob.browser.elements import ItemElement, ListElement, method
from woob.capabilities.base import NotAvailable, NotLoaded
from woob.capabilities.housing import (
City, Housing, HousingPhoto,
UTILITIES, ENERGY_CLASS, POSTS_TYPES, ADVERT_TYPES
)
from woob.tools.capabilities.housing.housing import PricePerMeterFilter
from .constants import AVAILABLE_TYPES, QUERY_TYPES, QUERY_HOUSE_TYPES
class CitiesPage(JsonPage):
def iter_cities(self):
cities_list = self.doc
if isinstance(self.doc, dict):
cities_list = self.doc.values()
for city in cities_list:
city_obj = City()
city_obj.id = city
city_obj.name = city
yield city_obj
class HousingPage(HTMLPage):
@method
class get_housing(ItemElement):
klass = Housing
obj_id = Format(
'%s:%s',
Env('type'),
Attr('//div[boolean(@data-property-reference)]', 'data-property-reference')
)
obj_advert_type = ADVERT_TYPES.PROFESSIONAL
def obj_type(self):
type = Env('type')(self)
if type == 'location':
if 'appartement-meuble' in self.page.url:
return POSTS_TYPES.FURNISHED_RENT
else:
return POSTS_TYPES.RENT
elif type == 'achat':
return POSTS_TYPES.SALE
else:
return NotAvailable
def obj_url(self):
return self.page.url
def obj_house_type(self):
url = self.obj_url()
for house_type, types in QUERY_HOUSE_TYPES.items():
for type in types:
if ('/%s/' % type) in url:
return house_type
return NotAvailable
obj_title = CleanText('//h1[has-class("OfferTop-title")]')
obj_area = CleanDecimal(
Regexp(
CleanText(
'//div[has-class("MiniData")]//p[has-class("MiniData-item")][1]'
),
r'(\d*\.*\d*) .*',
default=NotAvailable
),
default=NotAvailable
)
obj_cost = CleanDecimal(
'//span[has-class("OfferTop-price")]',
default=NotAvailable
)
obj_price_per_meter = PricePerMeterFilter()
obj_currency = Currency(
'//span[has-class("OfferTop-price")]'
)
obj_location = Format(
'%s - %s',
CleanText('//p[@data-behat="adresseBien"]'),
CleanText('//p[has-class("OfferTop-loc")]')
)
obj_text = CleanText('//div[has-class("OfferDetails-content")]/p[1]')
obj_phone = Regexp(
Link(
'//a[has-class("OfferContact-btn--tel")]'
),
r'tel:(.*)'
)
def obj_photos(self):
photos = []
for photo in self.xpath('//div[has-class("OfferSlider")]//img'):
photo_url = Attr('.', 'src')(photo)
photo_url = photo_url.replace('640/480', '800/600')
photos.append(HousingPhoto(photo_url))
return photos
obj_date = datetime.date.today()
def obj_utilities(self):
price = CleanText(
'//p[has-class("OfferTop-price")]'
)(self)
if "charges comprises" in price.lower():
return UTILITIES.INCLUDED
else:
return UTILITIES.EXCLUDED
obj_rooms = CleanDecimal(
'//div[has-class("MiniData")]//p[has-class("MiniData-item")][2]',
default=NotAvailable
)
obj_bedrooms = CleanDecimal(
'//div[has-class("MiniData")]//p[has-class("MiniData-item")][3]',
default=NotAvailable
)
def obj_DPE(self):
try:
electric_consumption = CleanDecimal(Regexp(
Attr('//div[has-class("OfferDetails-content")]//img', 'src'),
r'https://dpe.foncia.net\/(\d+)\/.*'
))(self)
except (RegexpError, XPathNotFound):
electric_consumption = None
DPE = ""
if electric_consumption is not None:
if electric_consumption <= 50:
DPE = "A"
elif 50 < electric_consumption <= 90:
DPE = "B"
elif 90 < electric_consumption <= 150:
DPE = "C"
elif 150 < electric_consumption <= 230:
DPE = "D"
elif 230 < electric_consumption <= 330:
DPE = "E"
elif 330 < electric_consumption <= 450:
DPE = "F"
else:
DPE = "G"
return getattr(ENERGY_CLASS, DPE, NotAvailable)
return NotAvailable
def obj_details(self):
details = {}
dispo = Date(
Regexp(
CleanText('//p[has-class("OfferTop-dispo")]'),
r'.* (\d\d\/\d\d\/\d\d\d\d)',
default=datetime.date.today().isoformat()
)
)(self)
if dispo is not None:
details["dispo"] = dispo
priceMentions = CleanText(
'//p[has-class("OfferTop-mentions")]',
default=None
)(self)
if priceMentions is not None:
details["priceMentions"] = priceMentions
agency = CleanText(
'//p[has-class("OfferContact-address")]',
default=None
)(self)
if agency is not None:
details["agency"] = agency
for item in self.xpath('//div[has-class("OfferDetails-columnize")]/div'):
category = CleanText(
'./h3[has-class("OfferDetails-title--2")]',
default=None
)(item)
if not category:
continue
details[category] = {}
for detail_item in item.xpath('.//ul[has-class("List--data")]/li'):
detail_title = CleanText('.//span[has-class("List-data")]')(detail_item)
detail_value = CleanText('.//*[has-class("List-value")]')(detail_item)
details[category][detail_title] = detail_value
for detail_item in item.xpath('.//ul[has-class("List--bullet")]/li'):
detail_title = CleanText('.')(detail_item)
details[category][detail_title] = True
try:
electric_consumption = CleanDecimal(Regexp(
Attr('//div[has-class("OfferDetails-content")]//img', 'src'),
r'https://dpe.foncia.net\/(\d+)\/.*'
))(self)
details["electric_consumption"] = (
'{} kWhEP/m².an'.format(electric_consumption)
)
except (RegexpError, XPathNotFound):
pass
return details
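# --- Hedged illustration (not part of the original source) -----------------
# obj_DPE above derives the energy class from the consumption figure embedded
# in the dpe.foncia.net image URL. The helper below restates that banding on
# its own, using the same thresholds as the page parser.
def _energy_class_from_consumption(kwh_per_m2_year):
    thresholds = [(50, "A"), (90, "B"), (150, "C"), (230, "D"),
                  (330, "E"), (450, "F")]
    letter = "G"
    for limit, candidate in thresholds:
        if kwh_per_m2_year <= limit:
            letter = candidate
            break
    return getattr(ENERGY_CLASS, letter, NotAvailable)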
class SearchPage(HTMLPage):
def do_search(self, query, cities):
form = self.get_form('//form[@name="searchForm"]')
form['searchForm[type]'] = QUERY_TYPES.get(query.type, None)
form['searchForm[localisation]'] = cities
form['searchForm[type_bien][]'] = []
for house_type in query.house_types:
try:
form['searchForm[type_bien][]'].extend(
QUERY_HOUSE_TYPES[house_type]
)
except KeyError:
pass
form['searchForm[type_bien][]'] = [
x for x in form['searchForm[type_bien][]']
if x in AVAILABLE_TYPES.get(query.type, [])
]
if query.area_min:
form['searchForm[surface_min]'] = query.area_min
if query.area_max:
form['searchForm[surface_max]'] = query.area_max
if query.cost_min:
form['searchForm[prix_min]'] = query.cost_min
if query.cost_max:
form['searchForm[prix_max]'] = query.cost_max
if query.nb_rooms:
form['searchForm[pieces]'] = [i for i in range(1, query.nb_rooms + 1)]
form.submit()
def find_housing(self, query_type, housing):
form = self.get_form('//form[@name="searchForm"]')
form['searchForm[type]'] = query_type
form['searchForm[reference]'] = housing
form.submit()
class SearchResultsPage(HTMLPage):
@pagination
@method
class iter_housings(ListElement):
item_xpath = '//article[has-class("TeaserOffer")]'
next_page = Link('//div[has-class("Pagination--more")]/a[contains(text(), "Suivant")]')
class item(ItemElement):
klass = Housing
obj_id = Format(
'%s:%s',
Env('type'),
Attr('.//span[boolean(@data-reference)]', 'data-reference')
)
obj_url = AbsoluteLink('.//h3[has-class("TeaserOffer-title")]/a')
obj_type = Env('query_type')
obj_advert_type = ADVERT_TYPES.PROFESSIONAL
def obj_house_type(self):
url = self.obj_url(self)
for house_type, types in QUERY_HOUSE_TYPES.items():
for type in types:
if ('/%s/' % type) in url:
return house_type
return NotLoaded
obj_url = AbsoluteLink('.//h3[has-class("TeaserOffer-title")]/a')
obj_title = CleanText('.//h3[has-class("TeaserOffer-title")]')
obj_area = CleanDecimal(
Regexp(
CleanText(
'.//div[has-class("MiniData")]//p[@data-behat="surfaceDesBiens"]'
),
r'(\d*\.*\d*) .*',
default=NotAvailable
),
default=NotAvailable
)
obj_cost = CleanDecimal(
'.//strong[has-class("TeaserOffer-price-num")]',
default=NotAvailable
)
obj_price_per_meter = PricePerMeterFilter()
obj_currency = Currency(
'.//strong[has-class("TeaserOffer-price-num")]'
)
obj_location = CleanText('.//p[has-class("TeaserOffer-loc")]')
obj_text = CleanText('.//p[has-class("TeaserOffer-description")]')
def obj_photos(self):
url = CleanText(Attr('.//a[has-class("TeaserOffer-ill")]/img', 'src'))(self)
# If the used photo is a default no photo, the src is on the same domain.
if url[0] == '/':
return []
else:
return [HousingPhoto(url)]
obj_date = datetime.date.today()
def obj_utilities(self):
price = CleanText(
'.//strong[has-class("TeaserOffer-price-num")]'
)(self)
if "charges comprises" in price.lower():
return UTILITIES.INCLUDED
else:
return UTILITIES.EXCLUDED
obj_rooms = CleanDecimal(
'.//div[has-class("MiniData")]//p[@data-behat="nbPiecesDesBiens"]',
default=NotLoaded
)
obj_bedrooms = CleanDecimal(
'.//div[has-class("MiniData")]//p[@data-behat="nbChambresDesBiens"]',
default=NotLoaded
)
def obj_details(self):
return {
"dispo": Date(
Attr('.//span[boolean(@data-dispo)]', 'data-dispo',
default=datetime.date.today().isoformat())
)(self),
"priceMentions": CleanText('.//span[has-class("TeaserOffer-price-mentions")]')(self)
}
|
the-stack_0_3012 | from nose.tools import eq_, ok_
import wtforms
from flask import Flask
from flask_superadmin import Admin
from flask_superadmin.model import base
import flask_wtf as wtf
class Model(object):
def __init__(self, id=None, c1=1, c2=2, c3=3):
self.id = id
self.col1 = c1
self.col2 = c2
self.col3 = c3
DoesNotExist = "dummy"
class Form(wtf.Form):
col1 = wtforms.TextField()
col2 = wtforms.TextField()
col3 = wtforms.TextField()
class MockModelView(base.BaseModelAdmin):
fields = ("col1", "col2", "col3")
def __init__(
self, model, name=None, category=None, endpoint=None, url=None, **kwargs
):
# Allow to set any attributes from parameters
for k, v in list(kwargs.items()):
setattr(self, k, v)
super(MockModelView, self).__init__(model, name, category, endpoint, url)
self.created_models = []
self.updated_models = []
self.deleted_models = []
self.search_arguments = []
self.all_models = {1: Model(1), 2: Model(2)}
self.last_id = 3
# Scaffolding
def get_pk(self, instance):
return instance.id
def get_object(self, pk):
return self.all_models.get(int(pk))
def get_objects(self, *pks):
ret = []
for pk in pks:
ret.append(self.all_models.get(int(pk)))
return ret
def get_model_form(self):
def fake_model_form(*args, **kwargs):
return Form
return fake_model_form
def get_converter(self):
pass
def scaffold_list_columns(self):
columns = ["col1", "col2", "col3"]
if self.excluded_list_columns:
return [x for x in columns if x not in self.excluded_list_columns]
return columns
def init_search(self):
return bool(self.searchable_columns)
def scaffold_sortable_columns(self):
return ["col1", "col2", "col3"]
def scaffold_form(self):
return Form
# Data
def get_list(self, page, sort, sort_desc, search_query, **kwargs):
self.search_arguments.append((page, sort, sort_desc, search_query))
return len(self.all_models), iter(self.all_models.values())
def save_model(self, instance, form, adding=False):
if adding:
model = Model(self.last_id)
self.last_id += 1
form.populate_obj(model)
self.created_models.append(model)
self.all_models[model.id] = model
else:
form.populate_obj(instance)
self.updated_models.append(instance)
return True
def update_model(self, form, model):
return True
def delete_models(self, *pks):
for pk in pks:
self.deleted_models.append(self.all_models.get(int(pk)))
return True
def setup():
app = Flask(__name__)
app.config["WTF_CSRF_ENABLED"] = False
app.secret_key = "1"
admin = Admin(app)
return app, admin
def test_mockview():
app, admin = setup()
view = MockModelView(Model)
admin.add_view(view)
eq_(view.model, Model)
eq_(view.name, "Model")
eq_(view.url, "/admin/model")
eq_(view.endpoint, "model")
ok_(view.blueprint is not None)
client = app.test_client()
# Make model view requests
rv = client.get("/admin/model/")
eq_(rv.status_code, 200)
# Test model creation view
rv = client.get("/admin/model/add/")
eq_(rv.status_code, 200)
rv = client.post(
"/admin/model/add/", data=dict(col1="test1", col2="test2", col3="test3")
)
eq_(rv.status_code, 302)
eq_(len(view.created_models), 1)
model = view.created_models.pop()
eq_(model.id, 3)
eq_(model.col1, "test1")
eq_(model.col2, "test2")
eq_(model.col3, "test3")
# Try model edit view
rv = client.get("/admin/model/3/")
eq_(rv.status_code, 200)
ok_("test1" in rv.data.decode())
rv = client.post(
"/admin/model/3/", data=dict(col1="test!", col2="test@", col3="test#")
)
eq_(rv.status_code, 302)
eq_(len(view.updated_models), 1)
model = view.updated_models.pop()
eq_(model.col1, "test!")
eq_(model.col2, "test@")
eq_(model.col3, "test#")
rv = client.get("/admin/modelview/4/")
eq_(rv.status_code, 404)
# Attempt to delete model
rv = client.post("/admin/model/3/delete/", data=dict(confirm_delete=True))
eq_(rv.status_code, 302)
eq_(rv.headers["location"], "http://localhost/admin/model/")
def test_permissions():
app, admin = setup()
view = MockModelView(Model)
admin.add_view(view)
client = app.test_client()
view.can_create = False
rv = client.get("/admin/model/add/")
eq_(rv.status_code, 403)
view.can_edit = False
rv = client.get("/admin/model/1/")
# 200 resp, but readonly fields
eq_(rv.status_code, 200)
eq_(rv.data.decode().count('<div class="readonly-value">'), 3)
view.can_delete = False
rv = client.post("/admin/model/1/delete/")
eq_(rv.status_code, 403)
def test_permissions_and_add_delete_buttons():
app, admin = setup()
view = MockModelView(Model)
admin.add_view(view)
client = app.test_client()
resp = client.get("/admin/model/")
eq_(resp.status_code, 200)
ok_("Add Model" in resp.data.decode())
view.can_create = False
resp = client.get("/admin/model/")
eq_(resp.status_code, 200)
ok_("Add Model" not in resp.data.decode())
view.can_edit = False
view.can_delete = False
resp = client.get("/admin/model/1/")
eq_(resp.status_code, 200)
ok_("Submit" not in resp.data.decode())
ok_("Save and stay on page" not in resp.data.decode())
ok_("Delete" not in resp.data.decode())
view.can_edit = False
view.can_delete = True
resp = client.get("/admin/model/1/")
eq_(resp.status_code, 200)
ok_("Submit" not in resp.data.decode())
ok_("Save and stay on page" not in resp.data.decode())
ok_("Delete" in resp.data.decode())
view.can_edit = True
view.can_delete = False
resp = client.get("/admin/model/1/")
eq_(resp.status_code, 200)
ok_("Submit" in resp.data.decode())
ok_("Save and stay on page" in resp.data.decode())
ok_("Delete" not in resp.data.decode())
def test_templates():
app, admin = setup()
view = MockModelView(Model)
admin.add_view(view)
client = app.test_client()
view.list_template = "mock.html"
view.add_template = "mock.html"
view.edit_template = "mock.html"
rv = client.get("/admin/model/")
eq_(rv.data.decode(), "Success!")
rv = client.get("/admin/model/add/")
eq_(rv.data.decode(), "Success!")
rv = client.get("/admin/model/1/")
eq_(rv.data.decode(), "Success!")
def test_list_display_header():
app, admin = setup()
view = MockModelView(Model, list_display=["test_header"])
admin.add_view(view)
eq_(len(view.list_display), 1)
client = app.test_client()
rv = client.get("/admin/model/")
ok_("Test Header" in rv.data.decode())
def test_search_fields():
app, admin = setup()
view = MockModelView(Model, search_fields=["col1", "col2"])
admin.add_view(view)
eq_(view.search_fields, ["col1", "col2"])
client = app.test_client()
rv = client.get("/admin/model/")
ok_('<div class="search">' in rv.data.decode())
|
the-stack_0_3015 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple, extendable nlp engine that can extract data based on provided conditions.
"""
__version__ = "0.0.7"
import os
import os.path
import sys
import re
import collections
from .model import *
from .normalizer import *
# -------------
# Stemmer
# -------------
class SuffixStemmer:
"""
Base class for stemming words
Return tuple of stemmed outputs
"""
def __init__(self, language, path=None):
self.language = language
self.stemwords = ()
if path is None:
subpath = os.path.dirname(__file__)
path = subpath+"/data/{}/suffix.txt".format(language)
with open(path) as f:
            # read file, strip newlines, sort by length and save as a tuple
w = [w.strip() for w in f.readlines()]
w.sort(key=len)
self.stemwords = tuple(w)
def stem(self, word):
stem_list = []
for s in self.stemwords:
if word.endswith(s):
stem_list.append(word[:-len(s)])
return tuple(stem_list)
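# Usage sketch for SuffixStemmer (illustrative; assumes an English suffix list is
# shipped at data/en/suffix.txt and contains entries such as "ing"):
#
#   stemmer = SuffixStemmer("en")
#   stemmer.stem("walking")   # -> ("walk",) if "ing" is listed in suffix.txt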
# -------------
# Tokenizer
# -------------
class LanguageTokenizer:
"""Tokenize string of data to array of tokens"""
def __init__(self, charset):
self.charset = charset
self.charset_counter = collections.Counter()
self.char_counter = collections.Counter()
self.tokens = []
def tokenize(self, text):
partial = ""
for i, character in enumerate(text):
            # characters from the charset accumulate into the current token
if character.lower() in self.charset:
partial += character
else:
if len(partial) > 0:
self.append(partial, i)
partial = ""
self.append(character, i, False)
if len(partial) > 0:
self.append(partial, i)
return self.tokens
def append(self, data, index, charset=True):
if charset:
self.charset_counter[data.lower()] += 1
else:
self.char_counter[data.lower()] += 1
self.tokens.append(Token(token=data, end=index-1))
def most_common(self, n, charset=True):
if charset:
return self.charset_counter.most_common(n)
return self.char_counter.most_common(n)
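# Usage sketch for LanguageTokenizer (illustrative; Charset.EN is assumed to be a
# collection of lowercase characters, as in the Prosecco defaults below):
#
#   tokenizer = LanguageTokenizer(Charset.EN)
#   tokens = tokenizer.tokenize("Hello world!")
#   tokenizer.most_common(2)   # most frequent charset tokens seen so far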
# -------------
# Lexer / Visitor
# -------------
class Visitor:
"""
    Utility class for Lexer that uses the Condition class to check whether
    we add a Lemma to the Lexer output or keep processing the list of tokens
"""
def __init__(self, conditions, empty=True, auto_space=False, num_words=10):
self.conditions = conditions
# empty lexer token list
self.empty = empty
self.auto_space = auto_space
self.lemma = None
self.prev = None
self.num_words = num_words
def __contains__(self, item):
# get items of size num_words
token_list, next_token = item
item_copy = token_list[-self.num_words:]
while len(item_copy) > 0:
# make sentence from list of item
if self.auto_space:
data = Lemma.filter_space(item_copy)
sentence = Lemma.build_sentence(data, separator=" ")
else:
sentence = Lemma.build_sentence(item_copy)
# check sentence against conditions
for condition in self.conditions:
if (sentence, next_token) in condition:
self.lemma = Lemma(type=condition.lemma_type,
data=item_copy[:],
condition=condition.found,
sentence=sentence,
prev=self.prev)
if self.prev is not None:
self.prev.next = self.lemma
self.prev = self.lemma
return True
item_copy.pop(0)
return False
class Lexer:
"""
Converts list of tokens based on conditions in LexVisitor
"""
def __init__(self, tokens, visitor):
self.tokens = tokens
self.visitor = visitor
def lex(self, progress=False):
lemma_list = []
token_list = []
last_index = len(self.tokens) - 1
for i, token in enumerate(self.tokens):
token_list.append(token)
if i != last_index:
next_token = self.tokens[i+1]
else:
next_token = None
if (token_list, next_token) in self.visitor:
lemma_list.append(self.visitor.lemma)
if self.visitor.empty:
token_list = []
if progress:
sys.stdout.write("\r{}%".format(int(i/len(self.tokens)*100)))
return lemma_list
# -------------
# Prosecco
# -------------
class Prosecco:
"""Let's drink"""
def __init__(self, charset=Charset.EN, conditions=None, num_words=10):
conditions = conditions or [Condition(compare=r".*")]
# custom
self.lemmas = None
self.tokenizer = LanguageTokenizer(charset)
self.visitor = Visitor(conditions=conditions, num_words=num_words)
def drink(self, text, progress=False):
self.tokenizer.tokenize(text)
self.lexer = Lexer(tokens=self.tokenizer.tokens, visitor=self.visitor)
self.lemmas = self.lexer.lex(progress=progress)
return self.lemmas[:]
def get_lemmas(self, type):
return [l for l in self.lemmas if re.match(type, l.type)]
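# Minimal end-to-end sketch (illustrative; the module uses relative imports, so it is
# meant to be imported from its package -- the package name below is an assumption):
#
#   from prosecco import Prosecco
#   p = Prosecco(num_words=5)
#   lemmas = p.drink("Simple text to analyse with the engine.")
#   print([lemma.type for lemma in lemmas])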
|
the-stack_0_3017 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation for CIFAR-10/100.
Accuracy:
cifar_train.py achieves 83.0% accuracy after 100K steps (256 epochs
of data) as judged by cifar_eval.py.
Speed:
On a single Tesla K40, cifar_train.py processes a single batch of 128 images
in 0.25-0.35 sec (i.e. 350 - 600 images/sec). The model reaches ~86%
accuracy after 100K steps in 8 hours of training time.
Usage:
Please see the tutorial and website for how to download the CIFAR-10/100
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import numpy as np
import tensorflow as tf
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_name = dir_path + "/inception"
print(dir_name)
sys.path.append(dir_name)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', '/tmp/cifar_eval',
"""Directory where to write event logs.""")
tf.app.flags.DEFINE_string('eval_data', 'test',
"""Either 'test' or 'train_eval'.""")
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/cifar_train',
"""Directory where to read model checkpoints.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
"""How often to run the eval.""")
tf.app.flags.DEFINE_integer('num_examples', 10000,
"""Number of examples to run.""")
tf.app.flags.DEFINE_boolean('run_once', False,
"""Whether to run eval only once.""")
import cifar_common
import cifar_resnet_tf as cifar
def eval_once(saver, summary_writer, top_k_op, summary_op):
"""Run Eval once.
Args:
saver: Saver.
summary_writer: Summary writer.
top_k_op: Top K op.
summary_op: Summary op.
"""
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/cifar_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
print('Loaded checkpoint: ', ckpt.model_checkpoint_path)
else:
print('No checkpoint file found')
return
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
start=True))
num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * FLAGS.batch_size
step = 0
while step < num_iter and not coord.should_stop():
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
step += 1
# Compute precision @ 1.
precision = true_count / total_sample_count
print('Precision for ', FLAGS.eval_data)
print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
summary = tf.Summary()
summary.ParseFromString(sess.run(summary_op))
summary.value.add(tag='Precision @ 1', simple_value=precision)
summary_writer.add_summary(summary, global_step)
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def evaluate():
"""Eval CIFAR-10/100 for a number of steps."""
with tf.Graph().as_default() as g:
# Get images and labels for CIFAR-10.
eval_data = FLAGS.eval_data == 'test'
images, labels = cifar_common.inputs(eval_data=eval_data)
# Build a Graph that computes the logits predictions from the
# inference model.
num_classes = 10
if FLAGS.dataset == 'cifar100':
num_classes = 100
logits = cifar.inference(images, num_classes=num_classes, for_training=False)
# Calculate predictions.
top_k_op = tf.nn.in_top_k(logits, labels, 1)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
cifar.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)
while True:
eval_once(saver, summary_writer, top_k_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
def main(argv=None): # pylint: disable=unused-argument
cifar_common.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.eval_dir):
tf.gfile.DeleteRecursively(FLAGS.eval_dir)
tf.gfile.MakeDirs(FLAGS.eval_dir)
evaluate()
if __name__ == '__main__':
tf.app.run()
|
the-stack_0_3019 | """
Sets the config parameters for the flask app object.
These are accessible in a dictionary, with each line defining a key.
"""
import os
from tempfile import TemporaryDirectory
import torch
_TEMP_FOLDER_OBJECT = TemporaryDirectory()
DEFAULT_USER_ID = 1
ROOT_FOLDER = os.path.dirname(os.path.realpath(__file__))
DATA_FOLDER = os.path.join(ROOT_FOLDER, 'app/web_data')
CHECKPOINT_FOLDER = os.path.join(ROOT_FOLDER, 'app/web_checkpoints')
TEMP_FOLDER = os.path.join(ROOT_FOLDER, _TEMP_FOLDER_OBJECT.name)
SMILES_FILENAME = 'smiles.csv'
PREDICTIONS_FILENAME = 'predictions.csv'
DB_FILENAME = 'chemprop.sqlite3'
CUDA = torch.cuda.is_available()
GPUS = list(range(torch.cuda.device_count()))
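# Typical use (illustrative; the import path is an assumption): the Flask app loads this
# module with something like app.config.from_object("app.config"), after which the
# uppercase values above are available as app.config["TEMP_FOLDER"], app.config["CUDA"], etc.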
|
the-stack_0_3020 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from exam import fixture
from sentry.models import Organization
from sentry.testutils import APITestCase
class OrganizationsListTest(APITestCase):
@fixture
def path(self):
return reverse('sentry-api-0-organizations')
def test_simple(self):
org = self.create_organization(owner=self.user)
self.login_as(user=self.user)
response = self.client.get(self.path)
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(org.id)
class OrganizationsCreateTest(APITestCase):
@fixture
def path(self):
return reverse('sentry-api-0-organizations')
def test_missing_params(self):
self.login_as(user=self.user)
resp = self.client.post(self.path)
assert resp.status_code == 400
def test_valid_params(self):
self.login_as(user=self.user)
resp = self.client.post(self.path, data={
'name': 'hello world',
'slug': 'foobar',
})
assert resp.status_code == 201, resp.content
org = Organization.objects.get(id=resp.data['id'])
assert org.name == 'hello world'
assert org.slug == 'foobar'
def test_without_slug(self):
self.login_as(user=self.user)
resp = self.client.post(self.path, data={
'name': 'hello world',
})
assert resp.status_code == 201, resp.content
org = Organization.objects.get(id=resp.data['id'])
assert org.slug == 'hello-world'
|
the-stack_0_3023 | from flask import Flask, render_template, g
from flask_socketio import SocketIO, emit
from flask_cors import CORS
from models import Database
import settings
app = Flask(__name__, template_folder="./templates",
static_folder="./assets/", static_url_path="")
with app.app_context():
DB = Database()
app.config['SECRET_KEY'] = getattr(settings, 'SECRET_KEY', 'mySecretKey')
cors = CORS(app, resources={r"/*": {"origins": "*"}})
socketio = SocketIO(app, logger=True, engineio_logger=True)
@socketio.on('connect')
def on_connect():
res = DB.query_db("""
SELECT *
FROM comments
ORDER BY id DESC
LIMIT 20;
""")
emit("load_comments", res, broadcast=True)
@socketio.on('add comment event')
def on_add_comment(data=None):
comment_id = DB.insert_query("""INSERT INTO comments(
parent_id,
content,
nbr_vote,
added,
author_lastname,
author_firstname)
VALUES(?,?,?,?,?,?)
""", (
data.get('parent_id', None),
data['content'],
data['nbr_vote'],
data['date_added'],
'DOE',
'John',
))
data['id'] = comment_id
print(">>>> Add", data)
emit('add_handler_comment', data, broadcast=True)
@socketio.on('delete comment event')
def on_delete_comment(data=None):
print(">>>> Delete", data)
result = DB.delete_query(
"DELETE FROM comments WHERE id = ?",
(data['comment_id'],)
)
data['message'] = result
emit('delete_handler_comment', data, broadcast=True)
@socketio.on('vote comment event')
def on_vote_comment(data=None):
print("DEBUG UPDATE", "data", data)
DB.update_query(
"UPDATE comments SET nbr_vote = ? WHERE id = ?", (
data['nbr_vote'],
data['comment_id'],
)
)
print(">>>> Vote", data)
emit('vote_handler_comment', data, broadcast=True)
@app.route("/")
def indexRoute():
# db = Database()
return render_template('index.html')
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
if __name__ == '__main__':
socketio.run(app, port=5000, host='127.0.0.1')
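# Running the app (illustrative; the filename app.py is an assumption): `python app.py`
# starts the Socket.IO server on http://127.0.0.1:5000, and the browser client is expected
# to emit the events handled above ('add comment event', 'delete comment event',
# 'vote comment event').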
|
the-stack_0_3025 | """user
Revision ID: ba4c10cd1c2e
Revises:
Create Date: 2021-02-14 17:14:42.063643
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ba4c10cd1c2e'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('username', sa.String(length=50), nullable=False),
sa.Column('password', sa.String(length=32), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table('session',
sa.Column('key', sa.String(length=32), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('expires', sa.DateTime(timezone=True), nullable=False),
sa.Column('created', sa.DateTime(timezone=True), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('key')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('session')
op.drop_table('user')
# ### end Alembic commands ###
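# Applying this migration (illustrative commands, assuming a standard alembic.ini setup):
#   alembic upgrade head      # creates the user and session tables
#   alembic downgrade -1      # drops them again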
|
the-stack_0_3027 | #!/usr/bin/env python3.7
# Copyright: Ismael Narvaez Berenjeno
import asyncio
import json
from typing import Union, List
from nats.aio.client import Client as NatsClient
from async_nats_connection.data_model.async_nats_params import AsyncNatsParams
from async_nats_connection.util.time import get_time_isoformat
class AsyncNatsConnection:
def __init__(self, params: AsyncNatsParams, logger):
self.name = self.__class__.__name__
self._params = params
self.nats_client = None
self.input_messages_queue = asyncio.Queue()
self.subscribed_subjects = {}
self.logger = logger
self.logger.debug(f"Instantiated class {self.name}")
def __repr__(self):
net_connection_info = None
if self.nats_client:
net_connection_info = self.nats_client.connected_url.netloc
if net_connection_info:
return f"<AsyncNatsConnection with {net_connection_info}>"
else:
return "<AsyncNatsConnection not connected>"
async def _disconnected_cb(self):
self.logger.warning("Disconnected from NATS server.")
async def _reconnected_cb(self):
self.logger.info(f"Got reconnected to {self.nats_client.connected_url.netloc}")
async def _error_cb(self, error):
self.logger.error(f"Error with NATS connection: {error}")
async def _closed_cb(self):
self.logger.info("Connection closed with NATS server.")
async def _connect(self):
result = False
if self.nats_client is None:
self.nats_client = NatsClient()
try:
await self.nats_client.connect(
**self._params.dict(),
io_loop=asyncio.get_running_loop(),
disconnected_cb=self._disconnected_cb,
reconnected_cb=self._reconnected_cb,
error_cb=self._error_cb,
closed_cb=self._closed_cb
)
result = self.nats_client.is_connected
except Exception as exception:
self.logger.exception(exception)
self.nats_client = None
return result
async def _disconnect(self):
await self.nats_client.drain()
await self.nats_client.close()
self.nats_client = None
async def _message_handler(self, message):
"""Execute this function when we received a message.
Put in a queue.
"""
output_message = {
"subject": message.subject,
"payload": message.data.decode(),
"timestamp": get_time_isoformat()
}
await self.input_messages_queue.put(output_message)
async def publish(self, subject: str, data: Union[str, dict, list]) -> List[bool]:
"""Publish messages in a subject.
        :param subject: Subject to publish to
:param data: Data to publish
:return: Operation result
"""
if self.nats_client is None:
await self._connect()
result = [False]
if isinstance(data, str):
try:
await self.nats_client.publish(subject, data.encode())
result = [True]
except Exception as exception:
self.logger.exception(exception)
result = [False]
elif isinstance(data, dict):
data = json.dumps(data)
result = await self.publish(subject, data)
elif isinstance(data, list):
tasks = [self.publish(subject, element) for element in data]
result = await asyncio.gather(
*tasks
)
else:
self.logger.error(f"Data sent must be str, dict or list, not {type(data)}")
result = [False]
return result
async def subscribe(self, subject: str) -> bool:
"""Subscribe to a subject.
:param subject: Subject to subscribe
:return: Operation result
"""
result = False
if self.nats_client is None:
await self._connect()
try:
if subject in self.subscribed_subjects:
raise ValueError(f"Duplicated subject: {subject}. Unsubscribe first.")
subscribe_id = await self.nats_client.subscribe(subject, cb=self._message_handler)
self.subscribed_subjects[subject] = {
"id": subscribe_id,
"timestamp": get_time_isoformat()
}
result = True
except Exception as exception:
self.logger.exception(exception)
result = False
return result
async def unsubscribe(self, subject: str) -> bool:
"""Unsubscribe from a subject.
:param subject: Subject to unsubscribe
:return: Operation result
"""
result = False
if subject in self.subscribed_subjects:
subscribe_id = self.subscribed_subjects[subject]['id']
try:
await self.nats_client.unsubscribe(subscribe_id)
del self.subscribed_subjects[subject]
result = True
except Exception as exception:
self.logger.exception(exception)
result = False
else:
self.logger.error(f"Subject {subject} doesn't exist.")
result = False
return result
def get_subscribed_subjects(self) -> dict:
"""
Return dict with info about subscribed subjects.
:return: Subscribed subjects.
"""
return self.subscribed_subjects
def exist_queued_messages(self) -> bool:
"""Check if exists elements to read.
:return: Operation result.
"""
is_emptied_queue = self.input_messages_queue.empty()
exist_element = not is_emptied_queue
return exist_element
async def get_message(self) -> dict:
"""Return a received message.
:return: Message with subject, payload and received timestamp.
"""
if self.exist_queued_messages():
return await self.input_messages_queue.get()
else:
self.logger.info("Not messages queued.")
async def connect(self):
"""Connect to NATS server."""
await self._connect()
async def disconnect(self):
"""Disconnect from NATS server."""
await self._disconnect()
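# Usage sketch (illustrative; the AsyncNatsParams fields and the logger object are
# assumptions based on how they are used above):
#
#   async def main(params, logger):
#       conn = AsyncNatsConnection(params, logger)
#       await conn.connect()
#       await conn.subscribe("updates")
#       await conn.publish("updates", {"hello": "world"})
#       while conn.exist_queued_messages():
#           print(await conn.get_message())
#       await conn.disconnect()
#
#   # asyncio.run(main(AsyncNatsParams(...), logging.getLogger(__name__)))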
|
the-stack_0_3029 | import copy
import json
import furl
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.providers.github import settings
from waterbutler.providers.github.path import GitHubPath
from waterbutler.providers.github.metadata import GitHubRevision
from waterbutler.providers.github.metadata import GitHubFileContentMetadata
from waterbutler.providers.github.metadata import GitHubFolderContentMetadata
from waterbutler.providers.github.metadata import GitHubFileTreeMetadata
from waterbutler.providers.github.metadata import GitHubFolderTreeMetadata
from waterbutler.providers.github.exceptions import GitHubUnsupportedRepoError
GIT_EMPTY_SHA = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'
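# (This is the sha1 of git's canonical empty tree object; it is used below to represent a
# tree with no contents when emptying a folder or the repository root.)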
class GitHubProvider(provider.BaseProvider):
"""Provider for GitHub repositories.
**On paths:** WB and GH use slightly different default conventions for their paths, so we
often have to munge our WB paths before comparison. Here is a quick overview::
WB (dirs): wb_dir.path == 'foo/bar/' str(wb_dir) == '/foo/bar/'
WB (file): wb_file.path = 'foo/bar.txt' str(wb_file) == '/foo/bar.txt'
GH (dir): 'foo/bar'
GH (file): 'foo/bar.txt'
API docs: https://developer.github.com/v3/
Quirks:
* git doesn't have a concept of empty folders, so this provider creates 0-byte ``.gitkeep``
files in the requested folder.
* The ``contents`` endpoint cannot be used to fetch metadata reliably for all files. Requesting
      a file that is larger than 1 MB will result in an error response directing you to the ``blob``
endpoint. A recursive tree fetch may be used instead.
* The tree endpoint truncates results after a large number of files. It does not provide a way
to page through the tree. Since move, copy, and folder delete operations rely on whole-tree
replacement, they cannot be reliably supported for large repos. Attempting to use them will
throw a 501 Not Implemented error.
"""
NAME = 'github'
BASE_URL = settings.BASE_URL
VIEW_URL = settings.VIEW_URL
@staticmethod
def is_sha(ref):
# sha1 is always 40 characters in length
try:
if len(ref) != 40:
return False
# sha1 is always base 16 (hex)
int(ref, 16)
except (TypeError, ValueError, ):
return False
return True
def __init__(self, auth, credentials, settings):
super().__init__(auth, credentials, settings)
self.name = self.auth.get('name', None)
self.email = self.auth.get('email', None)
self.token = self.credentials['token']
self.owner = self.settings['owner']
self.repo = self.settings['repo']
self.metrics.add('repo', {'repo': self.repo, 'owner': self.owner})
async def validate_v1_path(self, path, **kwargs):
if not getattr(self, '_repo', None):
self._repo = await self._fetch_repo()
self.default_branch = self._repo['default_branch']
branch_ref, ref_from = None, None
if kwargs.get('ref'):
branch_ref = kwargs.get('ref')
ref_from = 'query_ref'
elif kwargs.get('branch'):
branch_ref = kwargs.get('branch')
ref_from = 'query_branch'
else:
branch_ref = self.default_branch
ref_from = 'default_branch'
if isinstance(branch_ref, list):
raise exceptions.InvalidParameters('Only one ref or branch may be given.')
self.metrics.add('branch_ref_from', ref_from)
if path == '/':
return GitHubPath(path, _ids=[(branch_ref, '')])
branch_data = await self._fetch_branch(branch_ref)
# throws Not Found if path not in tree
await self._search_tree_for_path(path, branch_data['commit']['commit']['tree']['sha'])
path = GitHubPath(path)
for part in path.parts:
part._id = (branch_ref, None)
# TODO Validate that filesha is a valid sha
path.parts[-1]._id = (branch_ref, kwargs.get('fileSha'))
self.metrics.add('file_sha_given', True if kwargs.get('fileSha') else False)
return path
async def validate_path(self, path, **kwargs):
if not getattr(self, '_repo', None):
self._repo = await self._fetch_repo()
self.default_branch = self._repo['default_branch']
path = GitHubPath(path)
branch_ref, ref_from = None, None
if kwargs.get('ref'):
branch_ref = kwargs.get('ref')
ref_from = 'query_ref'
elif kwargs.get('branch'):
branch_ref = kwargs.get('branch')
ref_from = 'query_branch'
else:
branch_ref = self.default_branch
ref_from = 'default_branch'
if isinstance(branch_ref, list):
raise exceptions.InvalidParameters('Only one ref or branch may be given.')
self.metrics.add('branch_ref_from', ref_from)
for part in path.parts:
part._id = (branch_ref, None)
# TODO Validate that filesha is a valid sha
path.parts[-1]._id = (branch_ref, kwargs.get('fileSha'))
self.metrics.add('file_sha_given', True if kwargs.get('fileSha') else False)
return path
async def revalidate_path(self, base, path, folder=False):
return base.child(path, _id=((base.branch_ref, None)), folder=folder)
def path_from_metadata(self, parent_path, metadata):
"""Build a path from a parent path and a metadata object. Will correctly set the _id
Used for building zip archives."""
file_sha = metadata.extra.get('fileSha', None)
return parent_path.child(metadata.name, _id=(metadata.ref, file_sha), folder=metadata.is_folder, )
def can_duplicate_names(self):
return False
@property
def default_headers(self):
return {'Authorization': 'token {}'.format(self.token)}
@property
def committer(self):
return {
'name': self.name,
'email': self.email,
}
def build_repo_url(self, *segments, **query):
segments = ('repos', self.owner, self.repo) + segments
return self.build_url(*segments, **query)
def can_intra_move(self, other, path=None):
return self.can_intra_copy(other, path=path)
def can_intra_copy(self, other, path=None):
return (
type(self) == type(other) and
self.repo == other.repo and
self.owner == other.owner
)
# do these need async?
async def intra_copy(self, dest_provider, src_path, dest_path):
return (await self._do_intra_move_or_copy(src_path, dest_path, True))
async def intra_move(self, dest_provider, src_path, dest_path):
return (await self._do_intra_move_or_copy(src_path, dest_path, False))
async def download(self, path, revision=None, **kwargs):
'''Get the stream to the specified file on github
:param str path: The path to the file on github
        :param str ref: The git 'ref' (a branch or commit sha) from which to get the file
        :param str fileSha: The sha of the file to be downloaded; if specified, path is ignored
:param dict kwargs: Ignored
'''
data = await self.metadata(path, revision=revision)
file_sha = path.file_sha or data.extra['fileSha']
resp = await self.make_request(
'GET',
self.build_repo_url('git', 'blobs', file_sha),
headers={'Accept': 'application/vnd.github.v3.raw'},
expects=(200, ),
throws=exceptions.DownloadError,
)
return streams.ResponseStreamReader(resp, size=data.size)
async def upload(self, stream, path, message=None, branch=None, **kwargs):
assert self.name is not None
assert self.email is not None
try:
exists = await self.exists(path)
except exceptions.ProviderError as e:
if e.data.get('message') == 'Git Repository is empty.':
self.metrics.add('upload.initialized_empty_repo', True)
exists = False
resp = await self.make_request(
'PUT',
self.build_repo_url('contents', '.gitkeep'),
data=json.dumps({
'content': '',
'path': '.gitkeep',
'committer': self.committer,
'branch': path.branch_ref,
'message': 'Initial commit'
}),
expects=(201,),
throws=exceptions.CreateFolderError
)
data = await resp.json()
latest_sha = data['commit']['sha']
else:
latest_sha = await self._get_latest_sha(ref=path.branch_ref)
blob = await self._create_blob(stream)
tree = await self._create_tree({
'base_tree': latest_sha,
'tree': [{
'path': path.path,
'mode': '100644',
'type': 'blob',
'sha': blob['sha']
}]
})
if exists and await self._is_blob_in_tree(blob, path): # Avoids empty commits
return GitHubFileTreeMetadata({
'path': path.path,
'sha': blob['sha'],
'size': stream.size,
}, ref=path.branch_ref), not exists
commit = await self._create_commit({
'tree': tree['sha'],
'parents': [latest_sha],
'committer': self.committer,
'message': message or (settings.UPDATE_FILE_MESSAGE if exists else settings.UPLOAD_FILE_MESSAGE),
})
# Doesn't return anything useful
await self._update_ref(commit['sha'], ref=path.branch_ref)
# You're hacky
return GitHubFileTreeMetadata({
'path': path.path,
'sha': blob['sha'],
'size': stream.size,
}, commit=commit, ref=path.branch_ref), not exists
async def delete(self, path, sha=None, message=None, branch=None,
confirm_delete=0, **kwargs):
"""Delete file, folder, or provider root contents
:param GitHubPath path: GitHubPath path object for file, folder, or root
:param str sha: SHA-1 checksum of file/folder object
:param str message: Commit message
:param str branch: Repository branch
:param int confirm_delete: Must be 1 to confirm root folder delete
"""
assert self.name is not None
assert self.email is not None
if path.is_root:
if confirm_delete == 1:
await self._delete_root_folder_contents(path)
else:
raise exceptions.DeleteError(
'confirm_delete=1 is required for deleting root provider folder',
code=400,
)
elif path.is_dir:
await self._delete_folder(path, message, **kwargs)
else:
await self._delete_file(path, message, **kwargs)
async def metadata(self, path, **kwargs):
"""Get Metadata about the requested file or folder
:param str path: The path to a file or folder
:rtype dict: if file, metadata object describing the file
:rtype list: if folder, array of metadata objects describing contents
"""
if path.is_dir:
return (await self._metadata_folder(path, **kwargs))
else:
return (await self._metadata_file(path, **kwargs))
async def revisions(self, path, sha=None, **kwargs):
resp = await self.make_request(
'GET',
self.build_repo_url('commits', path=path.path, sha=sha or path.file_sha),
expects=(200, ),
throws=exceptions.RevisionsError
)
return [
GitHubRevision(item)
for item in (await resp.json())
]
async def create_folder(self, path, branch=None, message=None, **kwargs):
GitHubPath.validate_folder(path)
assert self.name is not None
assert self.email is not None
message = message or settings.UPLOAD_FILE_MESSAGE
keep_path = path.child('.gitkeep')
data = {
'content': '',
'path': keep_path.path,
'committer': self.committer,
'branch': path.branch_ref,
'message': message or settings.UPLOAD_FILE_MESSAGE
}
resp = await self.make_request(
'PUT',
self.build_repo_url('contents', keep_path.path),
data=json.dumps(data),
expects=(201, 422, 409),
throws=exceptions.CreateFolderError
)
data = await resp.json()
if resp.status in (422, 409):
if resp.status == 409 or data.get('message') == 'Invalid request.\n\n"sha" wasn\'t supplied.':
raise exceptions.FolderNamingConflict(str(path))
raise exceptions.CreateFolderError(data, code=resp.status)
data['content']['name'] = path.name
data['content']['path'] = data['content']['path'].replace('.gitkeep', '')
return GitHubFolderContentMetadata(data['content'], commit=data['commit'], ref=path.branch_ref)
async def _delete_file(self, path, message=None, **kwargs):
if path.file_sha:
sha = path.file_sha
else:
sha = (await self.metadata(path)).extra['fileSha']
if not sha:
raise exceptions.MetadataError('A sha is required for deleting')
data = {
'sha': sha,
'branch': path.branch_ref,
'committer': self.committer,
'message': message or settings.DELETE_FILE_MESSAGE,
}
resp = await self.make_request(
'DELETE',
self.build_repo_url('contents', path.path),
headers={'Content-Type': 'application/json'},
data=json.dumps(data),
expects=(200, ),
throws=exceptions.DeleteError,
)
await resp.release()
async def _delete_folder(self, path, message=None, **kwargs):
branch_data = await self._fetch_branch(path.branch_ref)
old_commit_sha = branch_data['commit']['sha']
old_commit_tree_sha = branch_data['commit']['commit']['tree']['sha']
# e.g. 'level1', 'level2', or ''
tree_paths = path.parts[1:]
trees = [{
'target': tree_paths[0].value,
'tree': [
{
'path': item['path'],
'mode': item['mode'],
'type': item['type'],
'sha': item['sha'],
}
for item in (await self._fetch_tree(old_commit_tree_sha))['tree']
]
}]
for idx, tree_path in enumerate(tree_paths[:-1]):
try:
tree_sha = next(x for x in trees[-1]['tree'] if x['path'] == tree_path.value)['sha']
except StopIteration:
raise exceptions.MetadataError(
'Could not delete folder \'{0}\''.format(path),
code=404,
)
trees.append({
'target': tree_paths[idx + 1].value,
'tree': [
{
'path': item['path'],
'mode': item['mode'],
'type': item['type'],
'sha': item['sha'],
}
for item in (await self._fetch_tree(tree_sha))['tree']
]
})
# The last tree's structure is rewritten w/o the target folder, all others
# in the hierarchy are simply updated to reflect this change.
tree = trees.pop()
if tree['target'] == '':
# Git Empty SHA
tree_sha = GIT_EMPTY_SHA
else:
# Delete the folder from the tree cast to list iterator over all values
current_tree = tree['tree']
tree['tree'] = list(filter(lambda x: x['path'] != tree['target'], tree['tree']))
if current_tree == tree['tree']:
raise exceptions.NotFoundError(str(path))
tree_data = await self._create_tree({'tree': tree['tree']})
tree_sha = tree_data['sha']
# Update parent tree(s)
for tree in reversed(trees):
for item in tree['tree']:
if item['path'] == tree['target']:
item['sha'] = tree_sha
break
tree_data = await self._create_tree({'tree': tree['tree']})
tree_sha = tree_data['sha']
# Create a new commit which references our top most tree change.
message = message or settings.DELETE_FOLDER_MESSAGE
commit_resp = await self.make_request(
'POST',
self.build_repo_url('git', 'commits'),
headers={'Content-Type': 'application/json'},
data=json.dumps({
'message': message,
'committer': self.committer,
'tree': tree_sha,
'parents': [
old_commit_sha,
],
}),
expects=(201, ),
throws=exceptions.DeleteError,
)
commit_data = await commit_resp.json()
commit_sha = commit_data['sha']
# Update repository reference, point to the newly created commit.
# No need to store data, rely on expects to raise exceptions
resp = await self.make_request(
'PATCH',
self.build_repo_url('git', 'refs', 'heads', path.branch_ref),
headers={'Content-Type': 'application/json'},
data=json.dumps({'sha': commit_sha}),
expects=(200, ),
throws=exceptions.DeleteError,
)
await resp.release()
async def _delete_root_folder_contents(self, path, message=None, **kwargs):
"""Delete the contents of the root folder.
:param GitHubPath path: GitHubPath path object for folder
:param str message: Commit message
"""
branch_data = await self._fetch_branch(path.branch_ref)
old_commit_sha = branch_data['commit']['sha']
tree_sha = GIT_EMPTY_SHA
message = message or settings.DELETE_FOLDER_MESSAGE
commit_resp = await self.make_request(
'POST',
self.build_repo_url('git', 'commits'),
headers={'Content-Type': 'application/json'},
data=json.dumps({
'message': message,
'committer': self.committer,
'tree': tree_sha,
'parents': [
old_commit_sha,
],
}),
expects=(201, ),
throws=exceptions.DeleteError,
)
commit_data = await commit_resp.json()
commit_sha = commit_data['sha']
# Update repository reference, point to the newly created commit.
# No need to store data, rely on expects to raise exceptions
await self.make_request(
'PATCH',
self.build_repo_url('git', 'refs', 'heads', path.branch_ref),
headers={'Content-Type': 'application/json'},
data=json.dumps({'sha': commit_sha}),
expects=(200, ),
throws=exceptions.DeleteError,
)
async def _fetch_branch(self, branch):
resp = await self.make_request(
'GET',
self.build_repo_url('branches', branch)
)
if resp.status == 404:
await resp.release()
raise exceptions.NotFoundError('. No such branch \'{}\''.format(branch))
return (await resp.json())
async def _fetch_contents(self, path, ref=None):
url = furl.furl(self.build_repo_url('contents', path.path))
if ref:
url.args.update({'ref': ref})
resp = await self.make_request(
'GET',
url.url,
expects=(200, ),
throws=exceptions.MetadataError
)
return (await resp.json())
async def _fetch_repo(self):
resp = await self.make_request(
'GET',
self.build_repo_url(),
expects=(200, ),
throws=exceptions.MetadataError
)
return (await resp.json())
async def _fetch_tree(self, sha, recursive=False):
url = furl.furl(self.build_repo_url('git', 'trees', sha))
if recursive:
url.args.update({'recursive': 1})
resp = await self.make_request(
'GET',
url.url,
expects=(200, ),
throws=exceptions.MetadataError
)
tree = await resp.json()
if tree['truncated']:
raise GitHubUnsupportedRepoError
return tree
async def _search_tree_for_path(self, path, tree_sha, recursive=True):
"""Search through the given tree for an entity matching the name and type of `path`.
"""
tree = await self._fetch_tree(tree_sha, recursive=True)
if tree['truncated']:
raise GitHubUnsupportedRepoError
implicit_type = 'tree' if path.endswith('/') else 'blob'
for entity in tree['tree']:
if entity['path'] == path.strip('/') and entity['type'] == implicit_type:
return entity
raise exceptions.NotFoundError(str(path))
async def _create_tree(self, tree):
resp = await self.make_request(
'POST',
self.build_repo_url('git', 'trees'),
headers={'Content-Type': 'application/json'},
data=json.dumps(tree),
expects=(201, ),
throws=exceptions.ProviderError,
)
return (await resp.json())
async def _create_commit(self, commit):
resp = await self.make_request(
'POST',
self.build_repo_url('git', 'commits'),
headers={'Content-Type': 'application/json'},
data=json.dumps(commit),
expects=(201, ),
throws=exceptions.ProviderError,
)
return (await resp.json())
async def _create_blob(self, stream):
blob_stream = streams.JSONStream({
'encoding': 'base64',
'content': streams.Base64EncodeStream(stream),
})
resp = await self.make_request(
'POST',
self.build_repo_url('git', 'blobs'),
data=blob_stream,
headers={
'Content-Type': 'application/json',
'Content-Length': str(blob_stream.size),
},
expects=(201, ),
throws=exceptions.UploadError,
)
return (await resp.json())
def _is_sha(self, ref):
# sha1 is always 40 characters in length
try:
if len(ref) != 40:
return False
# sha1 is always base 16 (hex)
int(ref, 16)
except (TypeError, ValueError, ):
return False
return True
def _web_view(self, path):
segments = (self.owner, self.repo, 'blob', path.branch_ref, path.path)
return provider.build_url(settings.VIEW_URL, *segments)
async def _metadata_folder(self, path, **kwargs):
ref = path.branch_ref
try:
# it's cool to use the contents API here because we know path is a dir and won't hit
# the 1mb size limit
data = await self._fetch_contents(path, ref=ref)
except exceptions.MetadataError as e:
if e.data.get('message') == 'This repository is empty.':
data = []
else:
raise
if isinstance(data, dict):
raise exceptions.MetadataError(
'Could not retrieve folder "{0}"'.format(str(path)),
code=404,
)
ret = []
for item in data:
if item['type'] == 'dir':
ret.append(GitHubFolderContentMetadata(item, ref=ref))
else:
ret.append(GitHubFileContentMetadata(item, ref=ref, web_view=item['html_url']))
return ret
async def _metadata_file(self, path, revision=None, **kwargs):
resp = await self.make_request(
'GET',
self.build_repo_url('commits', path=path.path, sha=revision or path.branch_ref),
expects=(200, ),
throws=exceptions.MetadataError,
)
commits = await resp.json()
if not commits:
raise exceptions.NotFoundError(str(path))
latest = commits[0]
tree = await self._fetch_tree(latest['commit']['tree']['sha'], recursive=True)
try:
data = next(
x for x in tree['tree']
if x['path'] == path.path
)
except StopIteration:
raise exceptions.NotFoundError(str(path))
if isinstance(data, list):
raise exceptions.MetadataError(
'Could not retrieve file "{0}"'.format(str(path)),
code=404,
)
return GitHubFileTreeMetadata(
data, commit=latest['commit'], web_view=self._web_view(path),
ref=path.branch_ref
)
async def _get_latest_sha(self, ref='master'):
resp = await self.make_request(
'GET',
self.build_repo_url('git', 'refs', 'heads', ref),
expects=(200, ),
throws=exceptions.ProviderError
)
data = await resp.json()
return data['object']['sha']
async def _update_ref(self, sha, ref='master'):
resp = await self.make_request(
'POST',
self.build_repo_url('git', 'refs', 'heads', ref),
data=json.dumps({
'sha': sha,
}),
expects=(200, ),
throws=exceptions.ProviderError
)
return (await resp.json())
async def _do_intra_move_or_copy(self, src_path, dest_path, is_copy):
# ON PATHS:
# WB and GH use slightly different default conventions for their paths, so we often
# have to munge our WB paths before comparison. Here is a quick overview:
# WB (dirs): wb_dir.path == 'foo/bar/' str(wb_dir) == '/foo/bar/'
# WB (file): wb_file.path = 'foo/bar.txt' str(wb_file) == '/foo/bar.txt'
# GH (dir): 'foo/bar'
# GH (file): 'foo/bar.txt'
src_tree, src_head = await self._get_tree_and_head(src_path.branch_ref)
# these are the blobs to copy/move
blobs = [
item
for item in src_tree['tree']
if src_path.is_dir and item['path'].startswith(src_path.path) or
src_path.is_file and item['path'] == src_path.path
]
if len(blobs) == 0:
raise exceptions.NotFoundError(str(src_path))
if src_path.is_file:
assert len(blobs) == 1, 'Found multiple targets'
commit_msg = settings.COPY_MESSAGE if is_copy else settings.MOVE_MESSAGE
commit = None
if src_path.branch_ref == dest_path.branch_ref:
exists = self._path_exists_in_tree(src_tree['tree'], dest_path)
# if we're overwriting an existing dir, we must remove its blobs from the tree
if dest_path.is_dir:
src_tree['tree'] = self._remove_path_from_tree(src_tree['tree'], dest_path)
# if this is a copy, duplicate and append our source blobs. The originals will be updated
# with the new destination path.
if is_copy:
src_tree['tree'].extend(copy.deepcopy(blobs))
# see, I told you they'd be overwritten
self._reparent_blobs(blobs, src_path, dest_path)
src_tree['tree'] = self._prune_subtrees(src_tree['tree'])
commit = await self._commit_tree_and_advance_branch(src_tree, {'sha': src_head},
commit_msg, src_path.branch_ref)
else:
dest_tree, dest_head = await self._get_tree_and_head(dest_path.branch_ref)
exists = self._path_exists_in_tree(dest_tree['tree'], dest_path)
dest_tree['tree'] = self._remove_path_from_tree(dest_tree['tree'], dest_path)
new_blobs = copy.deepcopy(blobs)
self._reparent_blobs(new_blobs, src_path, dest_path)
dest_tree['tree'].extend(new_blobs)
dest_tree['tree'] = self._prune_subtrees(dest_tree['tree'])
commit = await self._commit_tree_and_advance_branch(dest_tree, {'sha': dest_head},
commit_msg, dest_path.branch_ref)
if not is_copy:
src_tree['tree'] = self._remove_path_from_tree(src_tree['tree'], src_path)
src_tree['tree'] = self._prune_subtrees(src_tree['tree'])
await self._commit_tree_and_advance_branch(src_tree, {'sha': src_head},
commit_msg, src_path.branch_ref)
blobs = new_blobs # for the metadata
if dest_path.is_file:
assert len(blobs) == 1, 'Destination file should have exactly one candidate'
return GitHubFileTreeMetadata(
blobs[0], commit=commit, ref=dest_path.branch_ref
), not exists
folder = GitHubFolderTreeMetadata({
'path': dest_path.path.strip('/')
}, commit=commit, ref=dest_path.branch_ref)
folder.children = []
for item in blobs:
if item['path'] == dest_path.path.rstrip('/'):
continue
if item['type'] == 'tree':
folder.children.append(GitHubFolderTreeMetadata(item, ref=dest_path.branch_ref))
else:
folder.children.append(GitHubFileTreeMetadata(item, ref=dest_path.branch_ref))
return folder, not exists
async def _get_blobs_and_trees(self, branch_ref):
"""This method takes a branch ref (usually the branch name) to call the github api and
returns a flat list of a repo's blobs and trees (with no commits).
:param str branch_ref: The reference which leads to the branch, that the blobs and trees
are gathered from.
:returns dict response json: This is a JSON dict with the flattened list of blobs and trees
            included in the dict.
"""
resp = await self.make_request(
'GET',
self.build_repo_url('git', 'trees') + '/{}:?recursive=99999'.format(branch_ref),
expects=(200,)
)
return await resp.json()
async def _is_blob_in_tree(self, new_blob, path):
"""This method checks to see if a branch's tree already contains a blob with the same sha
        and at the path provided, i.e. whether the new blob has an identical path and identical
        content to a blob already in the tree. This lets us avoid committing a blob when doing
        so would serve no purpose.
:param dict new_blob: a dict with data and metadata of the newly created blob which is not
yet committed.
:param GitHubPath path: The path where the newly created blob is to be committed.
:returns: bool: True if new_blob is in the tree, False if no blob or a different blob
exists at the path given
"""
blob_tree = await self._get_blobs_and_trees(path.branch_ref)
return any(new_blob['sha'] == blob['sha'] and
path.path == blob['path'] for blob in blob_tree['tree'])
async def _get_tree_and_head(self, branch):
"""Fetch the head commit and tree for the given branch.
:param str branch: The branch to fetch
:returns dict: A GitHub tree object. Contents are under the ``tree`` key.
:returns dict: A GitHub commit object. The SHA is under the ``sha`` key.
"""
branch_data = await self._fetch_branch(branch)
head = branch_data['commit']['sha']
tree_sha = branch_data['commit']['commit']['tree']['sha']
tree = await self._fetch_tree(tree_sha, recursive=True)
return tree, head
def _path_exists_in_tree(self, tree, path):
"""Search through a tree and return true if the given path is found.
:param list tree: A list of blobs in a git tree.
:param GitHubPath path: The path to search for.
:returns bool: true if ``path`` is found in ``tree``
"""
return any(x['path'] == path.path.rstrip('/') for x in tree)
def _remove_path_from_tree(self, tree, path):
"""Search through a tree and remove any blobs or trees that match ``path`` or are a child of
``path``.
:param list tree: A list of blobs in a git tree.
:param GitHubPath path: The path to exclude.
:returns list: A new list containing the filtered tree contents.
"""
return [
item
for item in tree
if (path.is_file and not item['path'] == path.path) or # file != path
(path.is_dir and not
(item['path'].startswith(path.path) or # file/folder != child of path
(item['type'] == 'tree' and item['path'] == path.path.rstrip('/')))) # folder != path
]
def _reparent_blobs(self, blobs, src_path, dest_path):
"""Take a list of blobs and replace the source path with the dest path.
Two caveats:
* This method operates on the list of blobs in place. This is intentional. Anything you pass
as the ``blobs`` arg will be mutated back in the calling scope.
* This method assumes that the list of blobs all begin with ``src_path``, since its purpose
          is to rewrite all the blobs found at or under ``src_path`` to be at or under ``dest_path``.
If you pass it something that is not located under ``src_path``, a later part of the path
may be updated.
:param list blobs: A list of blobs whose paths should be updated.
:param GitHubPath src_path: The original path.
:param GitHubPath dest_path: The new path.
:returns None: This methods returns **nothing**. It operates on the blobs in-place.
"""
for blob in blobs:
if blob['path'] == src_path.path.rstrip('/') and blob['type'] == 'tree':
                # Renaming the parent folder is not necessary. Trees are pruned before uploading
                # to GH. This is only here because at some point someone will use it without pruning
# and wonder why on earth the parent folder isn't renamed.
blob['path'] = dest_path.path.rstrip('/')
else:
blob['path'] = blob['path'].replace(src_path.path, dest_path.path, 1)
return
def _prune_subtrees(self, tree):
"""Takes in a list representing a git tree and remove all the entries that are also trees.
Only blobs should remain. GitHub infers tree structure from blob paths. Deleting a blob
without removing its parent tree will result in the blob *NOT* being deleted. See:
http://www.levibotelho.com/development/commit-a-file-with-the-github-api/
:param list tree: A list representing a git tree. May contain trees, in addition to blobs.
:returns list: A new list containing just the blobs.
"""
return [item for item in tree if item['type'] != 'tree']
async def _commit_tree_and_advance_branch(self, old_tree, old_head, commit_msg, branch_ref):
"""Utilty method to bundle several commands into one. Takes a tree, head commit, a message,
and a branch, creates a new commit pointing to tree, then advances branch to point to the
new commit. Basically the same thing as ``git commit -am "foo message"`` on the command
line. Returns the new commit.
:param list old_tree: A list of blobs representing the new file tree.
        :param dict old_head: The commit object that will be the parent of the new commit. Must have a 'sha' key.
:param str commit_msg: The commit message for the new commit.
:param str branch_ref: The branch that will be advanced to the new commit.
:returns dict new_head: The commit object returned by GitHub.
"""
new_tree = await self._create_tree({'tree': old_tree['tree']})
# Create a new commit which references our top most tree change.
if new_tree['sha'] == old_tree['sha']: # prevents empty commits
return None
else:
new_head = await self._create_commit({
'tree': new_tree['sha'],
'parents': [old_head['sha']],
'committer': self.committer,
'message': commit_msg,
})
# Update repository reference, point to the newly created commit.
# No need to store data, rely on expects to raise exceptions
await self._update_ref(new_head['sha'], ref=branch_ref)
return new_head
|
the-stack_0_3030 | # Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from synapse.events import make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.federation.transport.client import StateRequestResponse
from synapse.logging.context import LoggingContext
from synapse.rest import admin
from synapse.rest.client import login, room
from tests import unittest
from tests.test_utils import event_injection, make_awaitable
class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
servlets = [
admin.register_servlets,
login.register_servlets,
room.register_servlets,
]
def make_homeserver(self, reactor, clock):
# mock out the federation transport client
self.mock_federation_transport_client = mock.Mock(
spec=["get_room_state_ids", "get_room_state", "get_event"]
)
return super().setup_test_homeserver(
federation_transport_client=self.mock_federation_transport_client
)
def test_process_pulled_event_with_missing_state(self) -> None:
"""Ensure that we correctly handle pulled events with lots of missing state
In this test, we pretend we are processing a "pulled" event (eg, via backfill
or get_missing_events). The pulled event has a prev_event we haven't previously
seen, so the server requests the state at that prev_event. There is a lot
of state we don't have, so we expect the server to make a /state request.
We check that the pulled event is correctly persisted, and that the state is
as we expect.
"""
return self._test_process_pulled_event_with_missing_state(False)
def test_process_pulled_event_with_missing_state_where_prev_is_outlier(
self,
) -> None:
"""Ensure that we correctly handle pulled events with lots of missing state
A slight modification to test_process_pulled_event_with_missing_state. Again
we have a "pulled" event which refers to a prev_event with lots of state,
but in this case we already have the prev_event (as an outlier, obviously -
if it were a regular event, we wouldn't need to request the state).
"""
return self._test_process_pulled_event_with_missing_state(True)
def _test_process_pulled_event_with_missing_state(
self, prev_exists_as_outlier: bool
) -> None:
OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
main_store = self.hs.get_datastores().main
state_storage_controller = self.hs.get_storage_controllers().state
# create the room
user_id = self.register_user("kermit", "test")
tok = self.login("kermit", "test")
room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
room_version = self.get_success(main_store.get_room_version(room_id))
# allow the remote user to send state events
self.helper.send_state(
room_id,
"m.room.power_levels",
{"events_default": 0, "state_default": 0},
tok=tok,
)
# add the remote user to the room
member_event = self.get_success(
event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join")
)
initial_state_map = self.get_success(
main_store.get_partial_current_state_ids(room_id)
)
auth_event_ids = [
initial_state_map[("m.room.create", "")],
initial_state_map[("m.room.power_levels", "")],
initial_state_map[("m.room.join_rules", "")],
member_event.event_id,
]
# mock up a load of state events which we are missing
state_events = [
make_event_from_dict(
self.add_hashes_and_signatures(
{
"type": "test_state_type",
"state_key": f"state_{i}",
"room_id": room_id,
"sender": OTHER_USER,
"prev_events": [member_event.event_id],
"auth_events": auth_event_ids,
"origin_server_ts": 1,
"depth": 10,
"content": {"body": f"state_{i}"},
}
),
room_version,
)
for i in range(1, 10)
]
# this is the state that we are going to claim is active at the prev_event.
state_at_prev_event = state_events + self.get_success(
main_store.get_events_as_list(initial_state_map.values())
)
# mock up a prev event.
# Depending on the test, we either persist this upfront (as an outlier),
# or let the server request it.
prev_event = make_event_from_dict(
self.add_hashes_and_signatures(
{
"type": "test_regular_type",
"room_id": room_id,
"sender": OTHER_USER,
"prev_events": [],
"auth_events": auth_event_ids,
"origin_server_ts": 1,
"depth": 11,
"content": {"body": "missing_prev"},
}
),
room_version,
)
if prev_exists_as_outlier:
prev_event.internal_metadata.outlier = True
persistence = self.hs.get_storage_controllers().persistence
self.get_success(
persistence.persist_event(
prev_event,
EventContext.for_outlier(self.hs.get_storage_controllers()),
)
)
else:
async def get_event(destination: str, event_id: str, timeout=None):
self.assertEqual(destination, self.OTHER_SERVER_NAME)
self.assertEqual(event_id, prev_event.event_id)
return {"pdus": [prev_event.get_pdu_json()]}
self.mock_federation_transport_client.get_event.side_effect = get_event
# mock up a regular event to pass into _process_pulled_event
pulled_event = make_event_from_dict(
self.add_hashes_and_signatures(
{
"type": "test_regular_type",
"room_id": room_id,
"sender": OTHER_USER,
"prev_events": [prev_event.event_id],
"auth_events": auth_event_ids,
"origin_server_ts": 1,
"depth": 12,
"content": {"body": "pulled"},
}
),
room_version,
)
# we expect an outbound request to /state_ids, so stub that out
self.mock_federation_transport_client.get_room_state_ids.return_value = (
make_awaitable(
{
"pdu_ids": [e.event_id for e in state_at_prev_event],
"auth_chain_ids": [],
}
)
)
# we also expect an outbound request to /state
self.mock_federation_transport_client.get_room_state.return_value = (
make_awaitable(
StateRequestResponse(auth_events=[], state=state_at_prev_event)
)
)
# we have to bump the clock a bit, to keep the retry logic in
# FederationClient.get_pdu happy
self.reactor.advance(60000)
# Finally, the call under test: send the pulled event into _process_pulled_event
with LoggingContext("test"):
self.get_success(
self.hs.get_federation_event_handler()._process_pulled_event(
self.OTHER_SERVER_NAME, pulled_event, backfilled=False
)
)
# check that the event is correctly persisted
persisted = self.get_success(main_store.get_event(pulled_event.event_id))
self.assertIsNotNone(persisted, "pulled event was not persisted at all")
self.assertFalse(
persisted.internal_metadata.is_outlier(), "pulled event was an outlier"
)
# check that the state at that event is as expected
state = self.get_success(
state_storage_controller.get_state_ids_for_event(pulled_event.event_id)
)
expected_state = {
(e.type, e.state_key): e.event_id for e in state_at_prev_event
}
self.assertEqual(state, expected_state)
if prev_exists_as_outlier:
self.mock_federation_transport_client.get_event.assert_not_called()
|
the-stack_0_3031 | import sys
import matplotlib.pyplot as plt
import numpy as np
# define a function reading the data from file
# File format: arbitrary number of pairs of lines
# 1st line: <key> = <float value> pairs, comma separated
# 2nd line: comma separated float values
# Special keys used here: 'a', 'b', 'T'
def readdat(fname):
params = {} # dictionary for parameters
data = [] # list of arrays for data
# Open the data file
with open(fname) as f:
# Read next line of data file into a string variable
for line in f.readlines():
# Split string into comma-separated substrings and store them in a list
tmp = line.split(sep=",")
# Parameter line: contains '='
if "=" in line: # parameter line
tmp2 = [a.split(sep="=") for a in tmp]
params.update({k.strip() : v.strip() for k, v in tmp2})
else: # data line
try:
# Check whether data are numbers
float(tmp[0])
# Append list of numbers to list of data arrays
data += [[float(v) for v in tmp]]
except:
pass
return params, data
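# Example of the expected file layout (illustrative values):
#
#   a = 0.0, b = 1.0, T = 0.5
#   0.1, 0.4, 0.9, 0.4, 0.1
#   a = 0.0, b = 1.0, T = 0.5
#   0.2, 0.5, 1.0, 0.5, 0.2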
def plotfvsol(filename):
params, datas = readdat(filename)
print("Parameters: ",params)
# Extract relevant parameters
try:
a = float(params['a'])
b = float(params['b'])
T = float(params['T'])
    except (KeyError, ValueError):
        print("Missing parameters a, b, T!")
        return
# Ensure that [a,b] is an interval
if a>b:
a, b = b, a
# Plot data
fig, ax = plt.subplots() # Create a figure containing a single axis
for i, data in enumerate(datas):
print("|data[", i, "]| = ",len(data))
# Number of cell values
N = len(data)
h = (b-a)/N
x = np.linspace(a+h/2,b-h/2,N)
plt.plot(x,data,label=str('N={:d}'.format(N)),linewidth=1)
    plt.title(filename + ': solution at T = ' + str(T))
plt.xlabel('x')
plt.ylabel('u(x,t)')
plt.legend()
plt.show()
# Save figure
outfnname = filename.split(sep='.')
plt.savefig(outfnname[0] + ".eps")
print("Figure saved in" + outfnname[0] + ".eps")
plt.close()
if __name__ == "__main__":
filename = sys.argv[1]
print ("Reading data from ", filename)
plotfvsol(filename)
|
the-stack_0_3032 | """
*******
GraphML
*******
Read and write graphs in GraphML format.
This implementation does not support mixed graphs (directed and undirected
edges together), hyperedges, nested graphs, or ports.
"GraphML is a comprehensive and easy-to-use file format for graphs. It
consists of a language core to describe the structural properties of a
graph and a flexible extension mechanism to add application-specific
data. Its main features include support of
* directed, undirected, and mixed graphs,
* hypergraphs,
* hierarchical graphs,
* graphical representations,
* references to external data,
* application-specific attribute data, and
* light-weight parsers.
Unlike many other file formats for graphs, GraphML does not use a
custom syntax. Instead, it is based on XML and hence ideally suited as
a common denominator for all kinds of services generating, archiving,
or processing graphs."
http://graphml.graphdrawing.org/
Format
------
GraphML is an XML format. See
http://graphml.graphdrawing.org/specification.html for the specification and
http://graphml.graphdrawing.org/primer/graphml-primer.html
for examples.
"""
__author__ = """\n""".join(['Salim Fadhley',
'Aric Hagberg ([email protected])'
])
__all__ = ['write_graphml', 'read_graphml', 'generate_graphml',
'GraphMLWriter', 'GraphMLReader']
import networkx as nx
from networkx.utils import _get_fh, make_str
import warnings
try:
from xml.etree.cElementTree import Element, ElementTree, tostring
except ImportError:
try:
from xml.etree.ElementTree import Element, ElementTree, tostring
except ImportError:
pass
def write_graphml(G, path, encoding='utf-8',prettyprint=True):
"""Write G in GraphML XML format to path
Parameters
----------
G : graph
A networkx graph
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_graphml(G, "test.graphml")
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
    edges together), hyperedges, nested graphs, or ports.
"""
fh = _get_fh(path, mode='wb')
writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint)
writer.add_graph_element(G)
writer.dump(fh)
def generate_graphml(G, encoding='utf-8',prettyprint=True):
"""Generate GraphML lines for G
Parameters
----------
G : graph
A networkx graph
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> linefeed=chr(10) # linefeed=\n
>>> s=linefeed.join(nx.generate_graphml(G)) # a string
>>> for line in nx.generate_graphml(G): # doctest: +SKIP
... print(line)
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
    edges together), hyperedges, nested graphs, or ports.
"""
writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint)
writer.add_graph_element(G)
for line in str(writer).splitlines():
yield line
def read_graphml(path,node_type=str):
"""Read graph in GraphML format from path.
Parameters
----------
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
node_type: Python type (default: str)
Convert node ids to this type
Returns
-------
graph: NetworkX graph
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
    edges together), hypergraphs, nested graphs, or ports.
    For multigraphs the GraphML edge "id" will be used as the edge
    key. If not specified then the "key" attribute will be used. If
    there is no "key" attribute a default NetworkX multigraph edge key
    will be provided.
    Files with the yEd "yfiles" extension can be read but the graphics
    information is discarded.
yEd compressed files ("file.graphmlz" extension) can be read by renaming
the file to "file.graphml.gz".
"""
fh=_get_fh(path,mode='rb')
reader = GraphMLReader(node_type=node_type)
# need to check for multiple graphs
glist=list(reader(fh))
return glist[0]
class GraphML(object):
NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
#xmlns:y="http://www.yworks.com/xml/graphml"
NS_Y = "http://www.yworks.com/xml/graphml"
SCHEMALOCATION = \
' '.join(['http://graphml.graphdrawing.org/xmlns',
'http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd'])
try:
chr(12345) # Fails on Py!=3.
unicode = str # Py3k's str is our unicode type
except ValueError:
pass
types=((str,"yfiles"),(str,"string"), (unicode,"string"),
(int,"int"), (int,"integer"), (float,"float"), (float,"double"),
(bool, "boolean"))
xml_type = dict(types)
python_type = dict(reversed(a) for a in types)
class GraphMLWriter(GraphML):
def __init__(self, graph=None, encoding="utf-8",prettyprint=True):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GraphML writer requires '
                              'xml.etree.ElementTree')
self.prettyprint=prettyprint
self.encoding = encoding
self.xml = Element("graphml",
{'xmlns':self.NS_GRAPHML,
'xmlns:xsi':self.NS_XSI,
'xsi:schemaLocation':self.SCHEMALOCATION}
)
self.keys={}
if graph is not None:
self.add_graph_element(graph)
def __str__(self):
if self.prettyprint:
self.indent(self.xml)
s=tostring(self.xml).decode(self.encoding)
return s
def get_key(self, name, attr_type, scope, default):
keys_key = (name, attr_type, scope)
try:
return self.keys[keys_key]
except KeyError:
new_id = "d%i" % len(list(self.keys))
self.keys[keys_key] = new_id
key_kwargs = {"id":new_id,
"for":scope,
"attr.name":name,
"attr.type":attr_type}
key_element=Element("key",**key_kwargs)
# add subelement for data default value if present
if default is not None:
default_element=Element("default")
default_element.text=make_str(default)
key_element.append(default_element)
self.xml.insert(0,key_element)
return new_id
def add_data(self, name, element_type, value,
scope="all",
default=None):
"""
Make a data element for an edge or a node. Keep a log of the
type in the keys table.
"""
if element_type not in self.xml_type:
raise nx.NetworkXError('GraphML writer does not support '
'dict types as data values.')
key_id = self.get_key(name, self.xml_type[element_type], scope, default)
data_element = Element("data", key=key_id)
data_element.text = make_str(value)
return data_element
def add_attributes(self, scope, xml_obj, data, default):
"""Appends attributes to edges or nodes.
"""
for k,v in data.items():
default_value=default.get(k)
obj=self.add_data(make_str(k), type(v), make_str(v),
scope=scope, default=default_value)
xml_obj.append(obj)
def add_nodes(self, G, graph_element):
for node,data in G.nodes_iter(data=True):
node_element = Element("node", id = make_str(node))
default=G.graph.get('node_default',{})
self.add_attributes("node", node_element, data, default)
graph_element.append(node_element)
def add_edges(self, G, graph_element):
if G.is_multigraph():
for u,v,key,data in G.edges_iter(data=True,keys=True):
edge_element = Element("edge",source=make_str(u),
target=make_str(v))
default=G.graph.get('edge_default',{})
self.add_attributes("edge", edge_element, data, default)
self.add_attributes("edge", edge_element,
{'key':key}, default)
graph_element.append(edge_element)
else:
for u,v,data in G.edges_iter(data=True):
edge_element = Element("edge",source=make_str(u),
target=make_str(v))
default=G.graph.get('edge_default',{})
self.add_attributes("edge", edge_element, data, default)
graph_element.append(edge_element)
def add_graph_element(self, G):
"""
Serialize graph G in GraphML to the stream.
"""
if G.is_directed():
default_edge_type='directed'
else:
default_edge_type='undirected'
graphid=G.graph.pop('id',None)
if graphid is None:
graph_element = Element("graph",
edgedefault = default_edge_type)
else:
graph_element = Element("graph",
edgedefault = default_edge_type,
id=graphid)
default={}
data=dict((k,v) for (k,v) in G.graph.items()
if k not in ['node_default','edge_default'])
self.add_attributes("graph", graph_element, data, default)
self.add_nodes(G,graph_element)
self.add_edges(G,graph_element)
self.xml.append(graph_element)
def add_graphs(self, graph_list):
"""
Add many graphs to this GraphML document.
"""
for G in graph_list:
self.add_graph_element(G)
def dump(self, stream):
if self.prettyprint:
self.indent(self.xml)
document = ElementTree(self.xml)
header='<?xml version="1.0" encoding="%s"?>'%self.encoding
stream.write(header.encode(self.encoding))
document.write(stream, encoding=self.encoding)
def indent(self, elem, level=0):
# in-place prettyprint formatter
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class GraphMLReader(GraphML):
"""Read a GraphML document. Produces NetworkX graph objects.
"""
def __init__(self, node_type=str):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GraphML reader requires '
                              'xml.etree.ElementTree')
self.node_type=node_type
self.multigraph=False # assume multigraph and test for parallel edges
def __call__(self, stream):
self.xml = ElementTree(file=stream)
(keys,defaults) = self.find_graphml_keys(self.xml)
for g in self.xml.findall("{%s}graph" % self.NS_GRAPHML):
yield self.make_graph(g, keys, defaults)
def make_graph(self, graph_xml, graphml_keys, defaults):
# set default graph type
edgedefault = graph_xml.get("edgedefault", None)
if edgedefault=='directed':
G=nx.MultiDiGraph()
else:
G=nx.MultiGraph()
# set defaults for graph attributes
for key_id,value in defaults.items():
key_for=graphml_keys[key_id]['for']
name=graphml_keys[key_id]['name']
python_type=graphml_keys[key_id]['type']
if key_for=='node':
G.graph['node_default']={name:python_type(value)}
if key_for=='edge':
G.graph['edge_default']={name:python_type(value)}
# hyperedges are not supported
hyperedge=graph_xml.find("{%s}hyperedge" % self.NS_GRAPHML)
if hyperedge is not None:
raise nx.NetworkXError("GraphML reader does not support hyperedges")
# add nodes
for node_xml in graph_xml.findall("{%s}node" % self.NS_GRAPHML):
self.add_node(G, node_xml, graphml_keys)
# add edges
for edge_xml in graph_xml.findall("{%s}edge" % self.NS_GRAPHML):
self.add_edge(G, edge_xml, graphml_keys)
# add graph data
data = self.decode_data_elements(graphml_keys, graph_xml)
G.graph.update(data)
# switch to Graph or DiGraph if no parallel edges were found.
if not self.multigraph:
if G.is_directed():
return nx.DiGraph(G)
else:
return nx.Graph(G)
else:
return G
def add_node(self, G, node_xml, graphml_keys):
"""Add a node to the graph.
"""
# warn on finding unsupported ports tag
ports=node_xml.find("{%s}port" % self.NS_GRAPHML)
if ports is not None:
warnings.warn("GraphML port tag not supported.")
# find the node by id and cast it to the appropriate type
node_id = self.node_type(node_xml.get("id"))
# get data/attributes for node
data = self.decode_data_elements(graphml_keys, node_xml)
G.add_node(node_id, data)
def add_edge(self, G, edge_element, graphml_keys):
"""Add an edge to the graph.
"""
# warn on finding unsupported ports tag
ports=edge_element.find("{%s}port" % self.NS_GRAPHML)
if ports is not None:
warnings.warn("GraphML port tag not supported.")
# raise error if we find mixed directed and undirected edges
directed = edge_element.get("directed")
if G.is_directed() and directed=='false':
raise nx.NetworkXError(\
"directed=false edge found in directed graph.")
if (not G.is_directed()) and directed=='true':
raise nx.NetworkXError(\
"directed=true edge found in undirected graph.")
source = self.node_type(edge_element.get("source"))
target = self.node_type(edge_element.get("target"))
data = self.decode_data_elements(graphml_keys, edge_element)
# GraphML stores edge ids as an attribute
# NetworkX uses them as keys in multigraphs too if no key
# attribute is specified
edge_id = edge_element.get("id")
if edge_id:
data["id"] = edge_id
if G.has_edge(source,target):
# mark this as a multigraph
self.multigraph=True
if edge_id is None:
# no id specified, try using 'key' attribute as id
edge_id=data.pop('key',None)
G.add_edge(source, target, key=edge_id, **data)
def decode_data_elements(self, graphml_keys, obj_xml):
"""Use the key information to decode the data XML if present."""
data = {}
for data_element in obj_xml.findall("{%s}data" % self.NS_GRAPHML):
key = data_element.get("key")
try:
data_name=graphml_keys[key]['name']
data_type=graphml_keys[key]['type']
except KeyError:
raise nx.NetworkXError("Bad GraphML data: no key %s"%key)
text=data_element.text
# assume anything with subelements is a yfiles extension
if text is not None and len(list(data_element))==0:
data[data_name] = data_type(text)
elif len(list(data_element)) > 0:
# Assume yfiles as subelements, try to extract node_label
node_label = data_element.find("{%s}ShapeNode/{%s}NodeLabel"%
(self.NS_Y, self.NS_Y))
if node_label is not None:
data['label'] = node_label.text
edge_label = data_element.find("{%s}PolyLineEdge/{%s}EdgeLabel"%
(self.NS_Y, (self.NS_Y)))
if edge_label is not None:
data['label'] = edge_label.text
return data
def find_graphml_keys(self, graph_element):
"""Extracts all the keys and key defaults from the xml.
"""
graphml_keys = {}
graphml_key_defaults = {}
for k in graph_element.findall("{%s}key" % self.NS_GRAPHML):
attr_id = k.get("id")
attr_type=k.get('attr.type')
attr_name=k.get("attr.name")
if attr_type is None:
attr_name=k.get('yfiles.type')
attr_type='yfiles'
if attr_name is None:
raise nx.NetworkXError("Unknown key type in file.")
graphml_keys[attr_id] = {
"name":attr_name,
"type":self.python_type[attr_type],
"for":k.get("for")}
# check for "default" subelement of key element
default=k.find("{%s}default" % self.NS_GRAPHML)
if default is not None:
graphml_key_defaults[attr_id]=default.text
return graphml_keys,graphml_key_defaults
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import xml.etree.ElementTree
except:
raise SkipTest("xml.etree.ElementTree not available")
# fixture for nose tests
def teardown_module(module):
import os
try:
os.unlink('test.graphml')
except:
pass
|
the-stack_0_3033 | import uvicorn
import os
from diskcache import Cache
from fastapi import FastAPI, File, UploadFile
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import FileResponse
from starlette.requests import Request
from src.helpers.milvus_helpers import MilvusHelper
from src.helpers.mysql_helpers import MySQLHelper
from src.config import UPLOAD_PATH
from src.operations.load import do_load
from src.operations.search import do_search
from src.operations.count import do_count
from src.operations.drop import do_drop
from src.config import TOP_K
from src.logs import LOGGER
from pydantic import BaseModel
from typing import Optional
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"])
MODEL = None
MILVUS_CLI = MilvusHelper()
MYSQL_CLI = MySQLHelper()
# Mkdir 'tmp/mol-data'
if not os.path.exists(UPLOAD_PATH):
os.makedirs(UPLOAD_PATH)
LOGGER.info("mkdir the path:{} ".format(UPLOAD_PATH))
@app.get('/data')
def mols_img(mols_path):
# Get the molecular image file
try:
LOGGER.info(("Successfully load molecular image: {}".format(mols_path)))
return FileResponse(UPLOAD_PATH + '/' + mols_path + '.png')
except Exception as e:
LOGGER.error("upload image error: {}".format(e))
return {'status': False, 'msg': e}, 400
@app.get('/progress')
def get_progress():
# Get the progress of dealing with data
try:
cache = Cache('./tmp')
return "current: {}, total: {}".format(cache['current'], cache['total'])
except Exception as e:
LOGGER.error("upload data error: {}".format(e))
return {'status': False, 'msg': e}, 400
class Item(BaseModel):
Table: Optional[str] = None
File: str
@app.post('/data/load')
async def load_data(item: Item):
# Insert all the data under the file path to Milvus/MySQL
try:
total_num = do_load(item.Table, item.File, MODEL, MILVUS_CLI, MYSQL_CLI)
LOGGER.info("Successfully loaded data, total count: {}".format(total_num))
return {'status': True, 'msg': "Successfully loaded data!"}
except Exception as e:
LOGGER.error(e)
return {'status': False, 'msg': e}, 400
class Item_search(BaseModel):
Table: Optional[str] = None
Mol: str
Num: Optional[int] = TOP_K
@app.post('/data/search')
async def search_data(request: Request, item: Item_search):
# Search the upload image in Milvus/MySQL
try:
# Save the upload data to server.
ids, paths, distances = do_search(item.Table, item.Mol, item.Num, MODEL, MILVUS_CLI, MYSQL_CLI)
host = request.headers['host']
for i in range(len(ids)):
tmp = "http://" + str(host) + "/data?mols_path=" + str(ids[i])
ids[i] = tmp
res = dict(zip(paths, zip(ids, distances)))
res = sorted(res.items(), key=lambda item: item[1][1])
LOGGER.info("Successfully searched similar data!")
return res
except Exception as e:
LOGGER.error(e)
return {'status': False, 'msg': e}, 400
@app.post('/data/count')
async def count_data(table_name: str = None):
# Returns the total number of data in the system
try:
num = do_count(table_name, MILVUS_CLI, MYSQL_CLI)
LOGGER.info("Successfully count the number of data!")
return num
except Exception as e:
LOGGER.error(e)
return {'status': False, 'msg': e}, 400
@app.post('/data/drop')
async def drop_tables(table_name: str = None):
# Delete the collection of Milvus and MySQL
try:
status = do_drop(table_name, MILVUS_CLI, MYSQL_CLI)
LOGGER.info("Successfully drop tables in Milvus and MySQL!")
return status
except Exception as e:
LOGGER.error(e)
return {'status': False, 'msg': e}, 400
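# Example requests against a locally running instance (a sketch only; assumes the
# service is reachable on 127.0.0.1:5000, the collection already exists, and that
# "Mol" is given as a SMILES string):
#   curl -X POST "http://127.0.0.1:5000/data/count"
#   curl -X POST "http://127.0.0.1:5000/data/search" \
#        -H "Content-Type: application/json" \
#        -d '{"Mol": "Cc1ccccc1", "Num": 5}'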
if __name__ == '__main__':
uvicorn.run(app=app, host='0.0.0.0', port=5000)
|
the-stack_0_3034 | #!/usr/bin/env python3
"""
How to Run
-s keepItSimple -a 1 -a 2 -c -A -B
"""
__author__ = "Your Name"
__version__ = "0.1.0"
__license__ = "MIT"
import argparse
from logzero import logger
def main(args):
""" Main entry point of the app """
# logger.info("hello world")
# logger.info(args)
print( 'simple_value =', args.simple_value )
print( 'constant_value =', args.constant_value )
print( 'boolean_switch =', args.boolean_switch )
print( 'collection =', args.collection )
print( 'const_collection =', args.const_collection )
if __name__ == "__main__":
""" This is executed when run from the command line """
parser = argparse.ArgumentParser()
## Required positional argument
#parser.add_argument("arg", help="Required positional argument")
# Optional argument which requires a parameter (eg. -s keepItSimple)
parser.add_argument('-s', action='store', dest='simple_value',
help='Store a simple value')
parser.add_argument('-c', action='store_const', dest='constant_value',
const='value-to-store',
help='Store a constant value')
    # Optional argument flag that sets the switch to true
parser.add_argument('-t', action='store_true', default=False,
dest='boolean_switch',
help='Set a switch to true')
    # Optional argument flag that sets the switch to false
parser.add_argument('-f', action='store_false', default=False,
dest='boolean_switch',
help='Set a switch to false')
parser.add_argument('-a', action='append', dest='collection',
default=[],
help='Add repeated values to a list',
)
parser.add_argument('-A', action='append_const', dest='const_collection',
const='value-1-to-append',
default=[],
help='Add different values to list')
parser.add_argument('-B', action='append_const', dest='const_collection',
const='value-2-to-append',
help='Add different values to list')
# Optional verbosity counter (eg. -v, -vv, -vvv, etc.)
parser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
help="Verbosity (-v, -vv, etc)")
# Specify output of "--version"
parser.add_argument(
"--version",
action="version",
version="%(prog)s (version {version})".format(version=__version__))
args = parser.parse_args()
main(args)
|
the-stack_0_3035 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Two-qubit XX-rotation gate.
"""
from qiskit.circuit import Gate
from qiskit.circuit import QuantumCircuit
from qiskit.circuit import QuantumRegister
class RXXGate(Gate):
"""Two-qubit XX-rotation gate.
This gate corresponds to the rotation U(θ) = exp(-1j * θ * X⊗X / 2)
up to the phase exp(-1j * θ/2).
"""
def __init__(self, theta):
"""Create new rxx gate."""
super().__init__('rxx', 2, [theta])
def _define(self):
"""Calculate a subcircuit that implements this unitary."""
from qiskit.extensions.standard.x import CXGate
from qiskit.extensions.standard.u1 import U1Gate
from qiskit.extensions.standard.h import HGate
definition = []
q = QuantumRegister(2, 'q')
theta = self.params[0]
rule = [
(HGate(), [q[0]], []),
(HGate(), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U1Gate(theta), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(HGate(), [q[1]], []),
(HGate(), [q[0]], []),
]
for inst in rule:
definition.append(inst)
self.definition = definition
def inverse(self):
"""Invert this gate."""
return RXXGate(-self.params[0])
# NOTE: we should use the following as the canonical matrix
# definition but we don't include it yet since it differs from
# the circuit decomposition matrix by a global phase
# def to_matrix(self):
# """Return a Numpy.array for the RXX gate."""
# theta = float(self.params[0])
# return np.array([
# [np.cos(theta / 2), 0, 0, -1j * np.sin(theta / 2)],
# [0, np.cos(theta / 2), -1j * np.sin(theta / 2), 0],
# [0, -1j * np.sin(theta / 2), np.cos(theta / 2), 0],
# [-1j * np.sin(theta / 2), 0, 0, np.cos(theta / 2)]], dtype=complex)
def rxx(self, theta, qubit1, qubit2):
"""Apply RXX to circuit."""
return self.append(RXXGate(theta), [qubit1, qubit2], [])
QuantumCircuit.rxx = rxx
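# Usage sketch (assumes the standard QuantumCircuit API of this Qiskit era):
#
#   from math import pi
#   qc = QuantumCircuit(2)
#   qc.rxx(pi / 2, 0, 1)   # XX-rotation by pi/2 between qubits 0 and 1
#   print(qc.decompose())  # expands into the H/CX/U1 rule defined above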
|
the-stack_0_3036 | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from .kmeans import MultiKMeans
from .kmeans import KMeans
from .kernels import PQDecodeCUDA
from .PQ import PQ
from .CustomModule import CustomModule
class MPQ(CustomModule):
def __init__(
self,
d_vector,
n_subvectors=8,
n_clusters=256,
distance="euclidean",
verbose=0,
n_codebooks=64,
):
super(MPQ, self).__init__()
assert d_vector % n_subvectors == 0
self.n_codebooks = n_codebooks
self.d_vector = d_vector
self.n_subvectors = n_subvectors
self.d_subvector = d_vector // n_subvectors
self.n_clusters = n_clusters
self.distance = distance
self.verbose = verbose
self.group_size=512
#codebook: [n_codebooks, n_subvectors, d_subvectors, n_clusters]
self.register_buffer("codebook", None)
self.kmeans = MultiKMeans(
n_clusters = n_clusters,
distance = distance,
max_iter = 25,
verbose = verbose,
)
self.codebook_selector = KMeans(
n_clusters = n_codebooks,
distance = distance,
max_iter = 25,
verbose = verbose,
)
self._decode_cuda = PQDecodeCUDA(tm=2, td=8)
def train(self, x):
"""
x: shape: [d_vector, n_data]
"""
labels = self.codebook_selector.fit(x)
# print("labels", labels.shape, labels.unique().shape )
unique_labels = labels.unique()
codebook = torch.zeros(
self.n_codebooks,
self.n_subvectors,
self.d_subvector,
self.n_clusters,
device=x.device,
dtype=torch.float32
)
for label in unique_labels:
mask = labels == label
sub_x = (
x[:, mask]
.reshape(self.n_subvectors, self.d_subvector, -1)
.contiguous()
)
self.kmeans.fit(sub_x)
codebook[label] = self.kmeans.centroids
del self.codebook
self.register_buffer("codebook", codebook)
def encode(self, x):
"""
returns code and codebook_index
x: shape: [d_vector, n_data]
"""
n_data = x.shape[1]
labels = self.codebook_selector.predict(x)
unique_labels, counts = labels.unique(return_counts=True)
n_unique = unique_labels.shape[0]
code = torch.zeros(self.n_subvectors, n_data, dtype=torch.uint8, device=self.codebook.device)
for i in range(n_unique):
label = unique_labels[i]
mask = labels == label
sub_x = (
x[:, mask]
.reshape(self.n_subvectors, self.d_subvector, -1)
.contiguous()
)
sub_codebook = self.codebook[label].contiguous()
_, sub_code = self.kmeans.get_labels(sub_x, sub_codebook)
code[:, mask] = sub_code.byte()
return (code, labels)
@staticmethod
def _decode_cpu(codebook, code):
"""
code: torch.Tensor, shape : [n_subvectors, n_data], dtype : uint8
return: torch.Tensor, shape : [d_vector, n_data], dtype : float32
"""
n_subvectors, n_data = code.shape
arange = torch.arange(n_subvectors)[:, None].expand(-1, n_data)
res = codebook[arange, :, code.long()]
res = res.transpose(1, 2).reshape(-1, n_data)
return res
def decode(self, code, codebook_index):
"""
returns reconstruction of code
code: [n_subvectors, n_data]
codebook_index: shape : [n_data], dtype : uint8
"""
n_data = code.shape[1]
unique_labels, counts = codebook_index.unique(return_counts=True)
recon = torch.zeros(
self.d_vector,
n_data,
device=self.codebook.device,
dtype=torch.float32,
)
for label in unique_labels:
mask = codebook_index == label
sub_code = code[:, mask].contiguous()
sub_codebook = self.codebook[label].contiguous()
if self.codebook.device.type == "cpu":
sub_recon = self._decode_cpu(sub_codebook, sub_code)
elif self.codebook.device.type == "cuda":
sub_recon = self._decode_cuda(sub_codebook, sub_code)
recon[:, mask] = sub_recon
return recon
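    # Round-trip usage sketch (hypothetical sizes; assumes the kmeans helpers
    # imported above behave as used in this class and a CUDA device is available):
    #
    #   mpq = MPQ(d_vector=128, n_subvectors=8, n_clusters=256)
    #   x = torch.randn(128, 10000, device="cuda")
    #   mpq.train(x)
    #   code, labels = mpq.encode(x)      # code: [8, 10000] uint8
    #   recon = mpq.decode(code, labels)  # recon: [128, 10000] float32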
def precompute_adc3(self, x, return_labels=False):
d_vector, n_data = x.shape
assert d_vector == self.d_vector
labels = self.codebook_selector.predict(x)
unique_labels, counts = labels.unique(return_counts=True)
n_unique = unique_labels.shape[0]
precomputed = torch.zeros(
self.n_subvectors,
n_data,
self.n_clusters,
device=self.codebook.device
)
mask = labels[:, None] == unique_labels[None]
xs = [ x[:, mask[:, i]].T for i in range(n_unique)]
lens = [i.shape[0] for i in xs]
padded_x = (
pad_sequence(xs, batch_first=True)
.transpose(-1, -2)
.reshape(n_unique * self.n_subvectors, self.d_subvector, -1)
)
codebook = (
self.codebook[unique_labels]
.reshape(n_unique * self.n_subvectors, self.d_subvector, self.n_clusters)
)
pcd = self.kmeans.sim(padded_x, codebook, normalize=False)
pcd = pcd.reshape(n_unique, self.n_subvectors, -1, self.n_clusters)
for i, label in enumerate(unique_labels):
sub_mask = mask[:, i]
precomputed[:, sub_mask] = pcd[i, :, :lens[i] ]
if return_labels:
return precomputed, labels
else:
return precomputed
def precompute_adc2(self, x, return_labels=False):
d_vector, n_data = x.shape
assert d_vector == self.d_vector
labels = self.codebook_selector.predict(x)
unique_labels, counts = labels.unique(return_counts=True)
precomputed = torch.zeros(
self.n_subvectors,
n_data,
self.n_clusters,
device=self.codebook.device
)
mask = labels[:, None] == unique_labels[None]
for i, label in enumerate(unique_labels):
sub_mask = mask[:, i]
sub_x = x[:, sub_mask]
sub_x = sub_x.reshape(self.n_subvectors, self.d_subvector, -1)
sub_codebook = self.codebook[label]
sub_precomputed = self.kmeans.sim(sub_x, sub_codebook, normalize=False)
precomputed[:, sub_mask] = sub_precomputed
if return_labels:
return precomputed, labels
else:
return precomputed
def precompute_adc(self, x, return_labels=False):
"""
x: shape : [d_vector, n_data]
"""
d_vector, n_data = x.shape
assert d_vector == self.d_vector
labels = self.codebook_selector.predict(x) #[n_data]
unique_labels, counts = labels.unique(return_counts=True)
groups = counts // self.group_size
unique_groups = groups.unique()
precomputed = torch.zeros(
self.n_subvectors,
n_data,
self.n_clusters,
device=self.codebook.device
)
for group_index in unique_groups:
group_unique_labels = unique_labels[groups == group_index]
n_gul = group_unique_labels.shape[0]
mask = labels[:, None] == group_unique_labels[None, :] #[n_data, n_gul]
mask2 = mask.sum(dim=1).bool() #[n_data]
sub_x = x[:, mask2]
sub_labels = labels[mask2]
sub_codebook = self.codebook[group_unique_labels] #[n_gul, n_subvectors, d_subvector, n_clusters]
sub_codebook = sub_codebook.reshape(-1, self.d_subvector, self.n_clusters)# [n_gul*n_subvectors, d_subvector, n_clusters]
padded_x = [sub_x[:, sub_labels == lab].T for lab in group_unique_labels]
del sub_x, sub_labels
len_x = [padded_x[i].shape[0] for i in range(n_gul)]
padded_x = (
pad_sequence(padded_x, batch_first=True) #[n_gul, max_n_sub_x, d_vector]
.transpose(-1, -2) #[n_gul, d_vector, max_n_sub_x]
.reshape(n_gul * self.n_subvectors, self.d_subvector, -1)
) #[n_gul* n_subvectors, d_subvector, max_n_sub_x]
sub_precomputed = self.kmeans.sim(padded_x, sub_codebook, normalize=False) #[n_gul*n_subvectors, max_n_sub_x, n_clusters]
del sub_codebook, padded_x
sub_precomputed = sub_precomputed.reshape(n_gul, self.n_subvectors, -1, self.n_clusters) #[n_gul,n_subvectors, max_n_sub_x, n_clusters]
for i in range(n_gul):
lab = group_unique_labels[i]
subsub_precomputed = sub_precomputed[i][:, :len_x[i]] #[n_subvectors, n_subsub_x, n_clusters]
sub_mask = mask[:, i]
precomputed[:, sub_mask] = subsub_precomputed
del sub_precomputed
if return_labels:
return precomputed, labels
else:
return precomputed |
the-stack_0_3037 | import torch
import unittest
from super_gradients.training.datasets.data_augmentation import RandomErase
class RandomEraseTest(unittest.TestCase):
def test_random_erase(self):
dummy_input = torch.randn(1, 3, 32, 32)
one_erase = RandomErase(probability=0, value='1.')
self.assertEqual(one_erase.p, 0)
self.assertEqual(one_erase.value, 1.)
one_erase(dummy_input)
rndm_erase = RandomErase(probability=0, value='random')
self.assertEqual(rndm_erase.value, 'random')
rndm_erase(dummy_input)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_3039 | import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Create a geodesic polygon.
polygon = ee.Geometry.Polygon([
[[-5, 40], [65, 40], [65, 60], [-5, 60], [-5, 60]]
])
# Compute a buffer of the polygon.
buffer = polygon.buffer(0.1)
# Compute the centroid of the polygon.
centroid = polygon.centroid()
Map.addLayer(buffer, {}, 'buffer')
Map.addLayer(centroid, {'color': 'red'}, 'centroid')
# Display the map.
Map
|
the-stack_0_3041 | """Benchmark from Laurent Vaucher.
Source: https://github.com/slowfrog/hexiom : hexiom2.py, level36.txt
(Main function tweaked by Armin Rigo.)
"""
from __future__ import division, print_function
import time
from io import StringIO
import cython
##################################
class Dir(object):
def __init__(self, x, y):
self.x = x
self.y = y
DIRS = [ Dir(1, 0),
Dir(-1, 0),
Dir(0, 1),
Dir(0, -1),
Dir(1, 1),
Dir(-1, -1) ]
EMPTY = 7
##################################
class Done(object):
MIN_CHOICE_STRATEGY = 0
MAX_CHOICE_STRATEGY = 1
HIGHEST_VALUE_STRATEGY = 2
FIRST_STRATEGY = 3
MAX_NEIGHBORS_STRATEGY = 4
MIN_NEIGHBORS_STRATEGY = 5
def __init__(self, count, empty=False):
self.count = count
self.cells = None if empty else [[0, 1, 2, 3, 4, 5, 6, EMPTY] for i in range(count)]
def clone(self):
ret = Done(self.count, True)
ret.cells = [self.cells[i][:] for i in range(self.count)]
return ret
def __getitem__(self, i):
return self.cells[i]
def set_done(self, i, v):
self.cells[i] = [v]
def already_done(self, i):
return len(self.cells[i]) == 1
def remove(self, i, v):
if v in self.cells[i]:
self.cells[i].remove(v)
return True
else:
return False
def remove_all(self, v):
for i in range(self.count):
self.remove(i, v)
def remove_unfixed(self, v):
changed = False
for i in range(self.count):
if not self.already_done(i):
if self.remove(i, v):
changed = True
return changed
def filter_tiles(self, tiles):
for v in range(8):
if tiles[v] == 0:
self.remove_all(v)
@cython.locals(i=cython.int)
def next_cell_min_choice(self):
minlen = 10
mini = -1
for i in range(self.count):
if 1 < len(self.cells[i]) < minlen:
minlen = len(self.cells[i])
mini = i
return mini
@cython.locals(i=cython.int)
def next_cell_max_choice(self):
maxlen = 1
maxi = -1
for i in range(self.count):
if maxlen < len(self.cells[i]):
maxlen = len(self.cells[i])
maxi = i
return maxi
@cython.locals(i=cython.int)
def next_cell_highest_value(self):
maxval = -1
maxi = -1
for i in range(self.count):
if (not self.already_done(i)):
maxvali = max([k for k in self.cells[i] if k != EMPTY])
if maxval < maxvali:
maxval = maxvali
maxi = i
return maxi
@cython.locals(i=cython.int)
def next_cell_first(self):
for i in range(self.count):
if (not self.already_done(i)):
return i
return -1
@cython.locals(i=cython.int)
def next_cell_max_neighbors(self, pos):
maxn = -1
maxi = -1
for i in range(self.count):
if not self.already_done(i):
cells_around = pos.hex.get_by_id(i).links
n = sum([1 if (self.already_done(nid) and (self[nid][0] != EMPTY)) else 0
for nid in cells_around])
if n > maxn:
maxn = n
maxi = i
return maxi
@cython.locals(i=cython.int)
def next_cell_min_neighbors(self, pos):
minn = 7
mini = -1
for i in range(self.count):
if not self.already_done(i):
cells_around = pos.hex.get_by_id(i).links
n = sum([1 if (self.already_done(nid) and (self[nid][0] != EMPTY)) else 0
for nid in cells_around])
if n < minn:
minn = n
mini = i
return mini
def next_cell(self, pos, strategy=HIGHEST_VALUE_STRATEGY):
if strategy == Done.HIGHEST_VALUE_STRATEGY:
return self.next_cell_highest_value()
elif strategy == Done.MIN_CHOICE_STRATEGY:
return self.next_cell_min_choice()
elif strategy == Done.MAX_CHOICE_STRATEGY:
return self.next_cell_max_choice()
elif strategy == Done.FIRST_STRATEGY:
return self.next_cell_first()
elif strategy == Done.MAX_NEIGHBORS_STRATEGY:
return self.next_cell_max_neighbors(pos)
elif strategy == Done.MIN_NEIGHBORS_STRATEGY:
return self.next_cell_min_neighbors(pos)
else:
raise Exception("Wrong strategy: %d" % strategy)
##################################
class Node(object):
def __init__(self, pos, id, links):
self.pos = pos
self.id = id
self.links = links
##################################
class Hex(object):
@cython.locals(size=cython.int, id=cython.int, x=cython.int, y=cython.int)
def __init__(self, size):
self.size = size
self.count = 3 * size * (size - 1) + 1
self.nodes_by_id = self.count * [None]
self.nodes_by_pos = {}
id = 0
for y in range(size):
for x in range(size + y):
pos = (x, y)
node = Node(pos, id, [])
self.nodes_by_pos[pos] = node
self.nodes_by_id[node.id] = node
id += 1
for y in range(1, size):
for x in range(y, size * 2 - 1):
ry = size + y - 1
pos = (x, ry)
node = Node(pos, id, [])
self.nodes_by_pos[pos] = node
self.nodes_by_id[node.id] = node
id += 1
@cython.locals(dir=Dir, x=cython.int, y=cython.int, nx=cython.int, ny=cython.int, node=Node)
def link_nodes(self):
for node in self.nodes_by_id:
(x, y) = node.pos
for dir in DIRS:
nx = x + dir.x
ny = y + dir.y
if self.contains_pos((nx, ny)):
node.links.append(self.nodes_by_pos[(nx, ny)].id)
def contains_pos(self, pos):
return pos in self.nodes_by_pos
def get_by_pos(self, pos):
return self.nodes_by_pos[pos]
def get_by_id(self, id):
return self.nodes_by_id[id]
##################################
class Pos(object):
def __init__(self, hex, tiles, done = None):
self.hex = hex
self.tiles = tiles
self.done = Done(hex.count) if done is None else done
def clone(self):
return Pos(self.hex, self.tiles, self.done.clone())
##################################
@cython.locals(pos=Pos, i=cython.long, v=cython.int,
nid=cython.int, num=cython.int,
empties=cython.int, filled=cython.int,
vmax=cython.int, vmin=cython.int, cell=list, left=cython.int[8])
def constraint_pass(pos, last_move=None):
changed = False
left = pos.tiles[:]
done = pos.done
# Remove impossible values from free cells
free_cells = (range(done.count) if last_move is None
else pos.hex.get_by_id(last_move).links)
for i in free_cells:
if not done.already_done(i):
vmax = 0
vmin = 0
cells_around = pos.hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] != EMPTY:
vmin += 1
vmax += 1
else:
vmax += 1
for num in range(7):
if (num < vmin) or (num > vmax):
if done.remove(i, num):
changed = True
# Computes how many of each value is still free
for cell in done.cells:
if len(cell) == 1:
left[cell[0]] -= 1
for v in range(8):
# If there is none, remove the possibility from all tiles
if (pos.tiles[v] > 0) and (left[v] == 0):
if done.remove_unfixed(v):
changed = True
else:
possible = sum([(1 if v in cell else 0) for cell in done.cells])
# If the number of possible cells for a value is exactly the number of available tiles
# put a tile in each cell
if pos.tiles[v] == possible:
for i in range(done.count):
cell = done.cells[i]
if (not done.already_done(i)) and (v in cell):
done.set_done(i, v)
changed = True
# Force empty or non-empty around filled cells
filled_cells = (range(done.count) if last_move is None
else [last_move])
for i in filled_cells:
if done.already_done(i):
num = done[i][0]
empties = 0
filled = 0
unknown = []
cells_around = pos.hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] == EMPTY:
empties += 1
else:
filled += 1
else:
unknown.append(nid)
if len(unknown) > 0:
if num == filled:
for u in unknown:
if EMPTY in done[u]:
done.set_done(u, EMPTY)
changed = True
#else:
# raise Exception("Houston, we've got a problem")
elif num == filled + len(unknown):
for u in unknown:
if done.remove(u, EMPTY):
changed = True
return changed
ASCENDING = 1
DESCENDING = -1
def find_moves(pos, strategy, order):
done = pos.done
cell_id = done.next_cell(pos, strategy)
if cell_id < 0:
return []
if order == ASCENDING:
return [(cell_id, v) for v in done[cell_id]]
else:
# Try higher values first and EMPTY last
moves = list(reversed([(cell_id, v) for v in done[cell_id] if v != EMPTY]))
if EMPTY in done[cell_id]:
moves.append((cell_id, EMPTY))
return moves
def play_move(pos, move):
(cell_id, i) = move
pos.done.set_done(cell_id, i)
@cython.locals(x=cython.int, y=cython.int, ry=cython.int, id=cython.int)
def print_pos(pos, output):
hex = pos.hex
done = pos.done
size = hex.size
for y in range(size):
print(u" " * (size - y - 1), end=u"", file=output)
for x in range(size + y):
pos2 = (x, y)
id = hex.get_by_pos(pos2).id
if done.already_done(id):
c = str(done[id][0]) if done[id][0] != EMPTY else u"."
else:
c = u"?"
print(u"%s " % c, end=u"", file=output)
print(end=u"\n", file=output)
for y in range(1, size):
print(u" " * y, end=u"", file=output)
for x in range(y, size * 2 - 1):
ry = size + y - 1
pos2 = (x, ry)
id = hex.get_by_pos(pos2).id
if done.already_done(id):
c = str(done[id][0]) if done[id][0] != EMPTY else (u".")
else:
c = u"?"
print(u"%s " % c, end=u"", file=output)
print(end=u"\n", file=output)
OPEN = 0
SOLVED = 1
IMPOSSIBLE = -1
@cython.locals(i=cython.int, num=cython.int, nid=cython.int,
vmin=cython.int, vmax=cython.int, tiles=cython.int[8])
def solved(pos, output, verbose=False):
hex = pos.hex
tiles = pos.tiles[:]
done = pos.done
exact = True
all_done = True
for i in range(hex.count):
if len(done[i]) == 0:
return IMPOSSIBLE
elif done.already_done(i):
num = done[i][0]
tiles[num] -= 1
if (tiles[num] < 0):
return IMPOSSIBLE
vmax = 0
vmin = 0
if num != EMPTY:
cells_around = hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] != EMPTY:
vmin += 1
vmax += 1
else:
vmax += 1
if (num < vmin) or (num > vmax):
return IMPOSSIBLE
if num != vmin:
exact = False
else:
all_done = False
if (not all_done) or (not exact):
return OPEN
print_pos(pos, output)
return SOLVED
@cython.locals(move=tuple)
def solve_step(prev, strategy, order, output, first=False):
if first:
pos = prev.clone()
while constraint_pass(pos):
pass
else:
pos = prev
moves = find_moves(pos, strategy, order)
if len(moves) == 0:
return solved(pos, output)
else:
for move in moves:
#print("Trying (%d, %d)" % (move[0], move[1]))
ret = OPEN
new_pos = pos.clone()
play_move(new_pos, move)
#print_pos(new_pos)
while constraint_pass(new_pos, move[0]):
pass
cur_status = solved(new_pos, output)
if cur_status != OPEN:
ret = cur_status
else:
ret = solve_step(new_pos, strategy, order, output)
if ret == SOLVED:
return SOLVED
return IMPOSSIBLE
@cython.locals(tot=cython.int, tiles=cython.int[8])
def check_valid(pos):
hex = pos.hex
tiles = pos.tiles
done = pos.done
# fill missing entries in tiles
tot = 0
for i in range(8):
if tiles[i] > 0:
tot += tiles[i]
else:
tiles[i] = 0
# check total
if tot != hex.count:
raise Exception("Invalid input. Expected %d tiles, got %d." % (hex.count, tot))
def solve(pos, strategy, order, output):
check_valid(pos)
return solve_step(pos, strategy, order, output, first=True)
# TODO Write an 'iterator' to go over all x,y positions
@cython.locals(x=cython.int, y=cython.int, p=cython.int, tiles=cython.int[8],
size=cython.int, inctile=cython.int, linei=cython.int)
def read_file(file):
lines = [line.strip("\r\n") for line in file.splitlines()]
size = int(lines[0])
hex = Hex(size)
linei = 1
tiles = 8 * [0]
done = Done(hex.count)
for y in range(size):
line = lines[linei][size - y - 1:]
p = 0
for x in range(size + y):
tile = line[p:p + 2]
p += 2
if tile[1] == ".":
inctile = EMPTY
else:
inctile = int(tile)
tiles[inctile] += 1
# Look for locked tiles
if tile[0] == "+":
print("Adding locked tile: %d at pos %d, %d, id=%d" %
(inctile, x, y, hex.get_by_pos((x, y)).id))
done.set_done(hex.get_by_pos((x, y)).id, inctile)
linei += 1
for y in range(1, size):
ry = size - 1 + y
line = lines[linei][y:]
p = 0
for x in range(y, size * 2 - 1):
tile = line[p:p + 2]
p += 2
if tile[1] == ".":
inctile = EMPTY
else:
inctile = int(tile)
tiles[inctile] += 1
# Look for locked tiles
if tile[0] == "+":
print("Adding locked tile: %d at pos %d, %d, id=%d" %
(inctile, x, ry, hex.get_by_pos((x, ry)).id))
done.set_done(hex.get_by_pos((x, ry)).id, inctile)
linei += 1
hex.link_nodes()
done.filter_tiles(tiles)
return Pos(hex, tiles, done)
def solve_file(file, strategy, order, output):
pos = read_file(file)
solve(pos, strategy, order, output)
def run_level36():
f = """\
4
2 1 1 2
3 3 3 . .
2 3 3 . 4 .
. 2 . 2 4 3 2
2 2 . . . 2
4 3 4 . .
3 2 3 3
"""
order = DESCENDING
strategy = Done.FIRST_STRATEGY
output = StringIO()
solve_file(f, strategy, order, output)
expected = """\
3 4 3 2
3 4 4 . 3
2 . . 3 4 3
2 . 1 . 3 . 2
3 3 . 2 . 2
3 . 2 . 2
2 2 . 1
"""
if output.getvalue() != expected:
raise AssertionError("got a wrong answer:\n%s" % output.getvalue())
def main(n):
    # Run the benchmark n times and return the elapsed time of each run.
l = []
for i in range(n):
t0 = time.time()
run_level36()
time_elapsed = time.time() - t0
l.append(time_elapsed)
return l
if __name__ == "__main__":
import util, optparse
parser = optparse.OptionParser(
usage="%prog [options]",
description="Test the performance of the hexiom2 benchmark")
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, main)
|
the-stack_0_3042 | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
## Data scale introduction
"""
import os
import traceback
from absl import logging
from delta.data.datasets.base_dataset import BaseDataSet
from delta.data.datasets.utils import mock_data
from delta.utils.register import registers
@registers.dataset.register('mock_text_nlu_joint_data')
class MockTextNLUJointData(BaseDataSet):
"""mock nlu-joint data class for nlu-joint task."""
def __init__(self, project_dir):
super().__init__(project_dir)
self.train_file = "train.txt"
self.dev_file = "dev.txt"
self.test_file = "test.txt"
self.data_files = [self.train_file, self.dev_file, self.test_file]
self.config_files = ['nlu_joint_mock.yml']
self.download_files = []
self.text_vocab = "text_vocab.txt"
# samples with label
self.samples = [
"0\tO O O O\tmy feeling is low",
"1\tO O O O B-ORG\ti am happy in the kfc"
]
self.text_vocab_list = [
"<unk>\t0", "</s>\t1", "i\t2", "am\t3", "kfc\t4", "my\t5", "feeling\t6",
"happy\t7", "is\t8", "low\t9", "in\t10", "the\t11"
]
def download(self) -> bool:
return True
def after_download(self) -> bool:
try:
train_file_path = os.path.join(self.data_dir, self.train_file)
dev_file_path = os.path.join(self.data_dir, self.dev_file)
test_file_path = os.path.join(self.data_dir, self.test_file)
text_vocab_file = os.path.join(self.data_dir, self.text_vocab)
mock_data(self.samples, train_file_path, dev_file_path, test_file_path,
text_vocab_file, self.text_vocab_list)
except Exception as e:
logging.warning(traceback.format_exc())
return False
return True
|
the-stack_0_3045 | # -*- coding: utf-8 -*-
import cv2
import os
import numpy as np
import sys
print("python version : {version}".format(version=sys.version))
'''
Requirements
1. Image Load & Search in directory
    - print the current folder : done
    - print the images in the current folder (handle extensions flexibly) : done
    - print the number of images in the current folder : done
    - entering a number opens the corresponding image file : done
2. Image Value read & View
    - display in RGB : done
    - display in gray : done
    - button to toggle rgb <-> gray?
3. Auto window size, editing window size
    - autosize flag : done
    - edit window size : done
    - enlarging the window should enlarge the image as well
4. zooming + moving
    - zooming
    - moving
5. bbox size, position check
Extras
6. crop : done
7. refactor into a class
'''
#1
def show_image(img, show_flag='color', size_flag='auto'):
flag = show_type(show_flag)
image = cv2.imread(img, flag)
size_flag = window_size(size_flag)
cv2.namedWindow('image', size_flag)
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def crop_drag(event, x, y, flags, param):
global refPt, cropping, i
if event == cv2.EVENT_MOUSEMOVE:
print('event_mousemove!!!')
# if cropping == False:
# temp = image.copy()
# print(refPt)
# cv2.rectangle(temp, refPt[0], refPt[1], (0, 255, 0), 2)
# cv2.imshow('temp', temp)
def crop_image(img):
global image
image = cv2.imread(img)
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_crop)
# cv2.setMouseCallback("image", crop_drag)
while True:
cv2.imshow('image', image)
key = cv2.waitKey(1) & 0xFF
if key == ord("r"):
image = clone.copy()
elif key == ord("c"):
break
if len(refPt) == 2:
crop = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
cv2.imshow("crop", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
def click_crop(event, x, y, flags, param):
global refPt, cropping
if event == cv2.EVENT_LBUTTONDOWN:
refPt = [(x, y)]
cropping = True
elif event == cv2.EVENT_LBUTTONUP:
refPt.append((x, y))
cropping = False
cv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)
cv2.imshow("image", image)
def show_type(style):
if style == 'color':
return 1
elif style == 'gray':
return 0
else:
return -1
def window_size(size_flag):
if size_flag == 'auto':
return cv2.WINDOW_AUTOSIZE
elif size_flag == 'normal':
return cv2.WINDOW_NORMAL
elif size_flag == 'full':
return cv2.WINDOW_FULLSCREEN
def zoom(img):
'''
2배로 zoom
'''
image = cv2.imread(img)
zoom = cv2.resize(image, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
cv2.imshow('zoom', zoom)
cv2.waitKey(0)
cv2.destroyAllWindows()
def bbox(img):
image = cv2.imread(img)
r = cv2.selectROI(image)
crop = image[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
cv2.imshow("Image", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
def view_move(image_file, now_order, len_image):
'''
    Take the list of images in the current folder and add a mouse callback.
    1. The folder listing is assumed to be provided by the caller.
    image_file : list of image paths
    now_order : index of the current image
    len_image : number of images
:return:
'''
image = cv2.imread(image_file[int(now_order)])
cv2.imshow("viewer", image)
now_order = int(now_order)
while True:
key = cv2.waitKey(1) & 0xFF
cv2.namedWindow("viewer")
if key == 27:
quit()
elif key == 2:
if now_order <= 0:
now_order = now_order + len_image - 1
else:
now_order -= 1
image_path = image_file[now_order]
print(image_path)
image = cv2.imread(image_path)
cv2.imshow("viewer", image)
elif key == 3:
if now_order+1 >= len_image:
now_order = now_order - len_image + 1
else:
now_order += 1
image_path = image_file[now_order]
print(image_path)
image = cv2.imread(image_path)
cv2.imshow("viewer", image)
def drag_zoom(event, x, y, flags, param):
global refPt, cropping, direction
if event == cv2.EVENT_LBUTTONDOWN:
refPt = [(x, y)]
print('aa')
cropping = True
# elif event == cv2.EVENT_MOUSEMOVE:
# if cropping == True:
# direction.append((x, y))
# print(direction)
elif event == cv2.EVENT_LBUTTONUP:
refPt.append((x, y))
cropping = False
print('bb')
cv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)
print(refPt[0], refPt[1])
cv2.imshow("image", image)
def drag_zoom_viewer(img):
'''
    Zoom while dragging.
    img : original image
    copy_img ? this could simply be a copy of the original image
    mouse
    record the coordinates the mouse drags over -> build the displayed size from that region
:return:
'''
global image, copy_img
image = cv2.imread(img)
y, x = image.shape[:2]
copy_img = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", drag_zoom)
print('x, y', x, y)
while True:
cv2.imshow('image', image)
key = cv2.waitKey(1) & 0xFF
if key == ord("r"):
image = copy_img.copy()
print('b')
elif key == ord("c"):
print('c')
if len(refPt) == 2:
copy = copy_img[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
            # try setting fx, fy from the ratio original size / crop size
cv2.resize(copy, (x, y), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
cv2.imshow("image", copy)
print('d')
cv2.waitKey(0)
cv2.destroyAllWindows()
print(refPt)
# if len(refPt) == 2:
# copy = copy_img[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
    #     # try setting fx, fy from the ratio original size / crop size
# cv2.resize(copy, (x, y), fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
# cv2.imshow("image", copy)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
refPt = []
# direction = []
cropping = False
image_extension = ('jpg', 'jpeg', 'png')  # image file extensions
current_folder = os.getcwd()  # current folder
print(current_folder)
image_file = [i for i in os.listdir(current_folder) if i.endswith(image_extension)==True]
print("current folder path : {current_folder}\nimage 개수 : {len_image}\nimage file : {image_file}".format(
current_folder=current_folder, len_image=len(image_file), image_file=image_file
))
input = raw_input("Which image (by number) would you like to view?\n")
now_order = int(input)-1
try:
selected_image = image_file[int(input)-1]
print(selected_image)
except IndexError:
print("1부터 {n}까지의 숫자를 입력해주세요".format(n=len(image_file)))
finally:
if int(input)<=0:
print("양수를 입력해주세요")
# show_image(selected_image)
# zoom(selected_image)
# crop_image(selected_image)
# bbox(selected_image)
# view_move(image_file, now_order, len(image_file))
drag_zoom_viewer(selected_image)
|
the-stack_0_3046 | # Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging.config
import itertools
import platform
import struct
import warnings
import os
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
try:
ElementTreeParseError = getattr(ElementTree, 'ParseError')
except AttributeError:
ElementTreeParseError = getattr(ElementTree, 'XMLParserError')
from unicodedata import east_asian_width
from ..lib import six
PY27 = six.PY2 and sys.version_info[1] == 7
PYPY = platform.python_implementation().lower() == 'pypy'
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Definition of East Asian Width
# http://unicode.org/reports/tr11/
# Ambiguous width can be changed by option
_EAW_MAP = {'Na': 1, 'N': 1, 'W': 2, 'F': 2, 'H': 1}
import decimal
DECIMAL_TYPES = [decimal.Decimal, ]
import json # don't remove
if six.PY3:
lrange = lambda *x: list(range(*x))
lzip = lambda *x: list(zip(*x))
lkeys = lambda x: list(x.keys())
lvalues = lambda x: list(x.values())
litems = lambda x: list(x.items())
lmap = lambda *x: list(map(*x))
irange = range
izip = zip
long_type = int
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
if PY27:
from ..lib import enum
else:
import enum
if PY27:
try:
import cdecimal as decimal
DECIMAL_TYPES.append(decimal.Decimal)
except ImportError:
import decimal
else:
import decimal
from collections import OrderedDict
OrderedDict3 = OrderedDict
def u(s):
return s
def strlen(data, encoding=None):
# encoding is for compat with PY2
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, six.text_type):
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
dictconfig = lambda config: logging.config.dictConfig(config)
import builtins
from concurrent import futures # don't remove
from datetime import timedelta
total_seconds = timedelta.total_seconds
import functools as functools32
def np_getbuffer(n):
return memoryview(n)
BrokenPipeError = BrokenPipeError
ConnectionResetError = ConnectionResetError
TimeoutError = TimeoutError
from itertools import accumulate
else:
lrange = range
lzip = zip
lkeys = lambda x: x.keys()
lvalues = lambda x: x.values()
litems = lambda x: x.items()
lmap = map
irange = xrange # noqa F821
izip = itertools.izip
long_type = long # noqa F821
from ..lib import enum
try:
import cdecimal as decimal
DECIMAL_TYPES.append(decimal.Decimal)
except ImportError:
import decimal
try:
import cStringIO as StringIO
except ImportError:
import StringIO
StringIO = BytesIO = StringIO.StringIO
def u(s):
return unicode(s, "unicode_escape") # noqa F821
def strlen(data, encoding=None):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, six.text_type):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
from collections import OrderedDict
dictconfig = lambda config: logging.config.dictConfig(config)
from datetime import timedelta
total_seconds = timedelta.total_seconds
import __builtin__ as builtins # don't remove
from ..lib import futures # don't remove
from ..lib.functools32.functools32 import OrderedDict as OrderedDict3
from ..lib import functools32 # don't remove
def np_getbuffer(n):
import numpy as np
return np.getbuffer(n)
class TimeoutError(Exception):
pass
class BrokenPipeError(Exception):
pass
class ConnectionResetError(Exception):
pass
def accumulate(iterable, func=lambda a, b: a + b):
'Return running totals'
# accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
it = iter(iterable)
try:
total = next(it)
except StopIteration:
return
yield total
for element in it:
total = func(total, element)
yield total
if six.PY3:
from contextlib import suppress
else:
from contextlib import contextmanager
@contextmanager
def suppress(*exceptions):
try:
yield
except exceptions:
pass
Enum = enum.Enum
DECIMAL_TYPES = tuple(DECIMAL_TYPES)
Decimal = decimal.Decimal
if sys.version_info.major < 3:
# Due to a bug in python 2.7 Queue.get, if a timeout isn't specified then
# `Queue.get` can't be interrupted. A workaround is to specify an extremely
# long timeout, which then allows it to be interrupted.
# For more information see: https://bugs.python.org/issue1360
def queue_get(q):
return q.get(block=True, timeout=(365 * 24 * 60 * 60))
elif os.name == 'nt':
# Python 3 windows Queue.get also doesn't handle interrupts properly. To
# workaround this we poll at a sufficiently large interval that it
# shouldn't affect performance, but small enough that users trying to kill
# an application shouldn't care.
def queue_get(q):
while True:
try:
return q.get(block=True, timeout=0.1)
except Empty:
pass
else:
def queue_get(q):
return q.get()
from ..lib.lib_utils import isvalidattr, dir2, raise_exc, getargspec, getfullargspec
from ..lib.six.moves import reduce, zip_longest
from ..lib.six.moves import reload_module
from ..lib.six.moves.queue import Queue, Empty, PriorityQueue
from ..lib.six.moves.urllib.request import urlretrieve
from ..lib.six.moves import cPickle as pickle
from ..lib.six.moves.urllib.parse import urlencode, urlparse, unquote, quote, quote_plus, parse_qsl
from ..lib.six.moves import configparser as ConfigParser
try:
import pytz
utc = pytz.utc
FixedOffset = pytz._FixedOffset
except ImportError:
import datetime
_ZERO_TIMEDELTA = datetime.timedelta(0)
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(datetime.tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name=None):
self.__offset = datetime.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return _ZERO_TIMEDELTA
utc = FixedOffset(0, 'UTC')
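    # Example (illustrative only): FixedOffset(330, 'IST') builds a tzinfo 5h30m
    # east of UTC, e.g. datetime.datetime(2020, 1, 1, tzinfo=FixedOffset(330, 'IST')).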
try:
from weakref import finalize
except ImportError:
# Backported from Python 3.6
import itertools
from weakref import ref
class finalize:
"""Class for finalization of weakrefable objects
finalize(obj, func, *args, **kwargs) returns a callable finalizer
object which will be called when obj is garbage collected. The
        first time the finalizer is called it evaluates func(*args, **kwargs)
and returns the result. After this the finalizer is dead, and
calling it just returns None.
When the program exits any remaining finalizers for which the
atexit attribute is true will be run in reverse order of creation.
By default atexit is true.
"""
# Finalizer objects don't have any state of their own. They are
# just used as keys to lookup _Info objects in the registry. This
# ensures that they cannot be part of a ref-cycle.
__slots__ = ()
_registry = {}
_shutdown = False
_index_iter = itertools.count()
_dirty = False
_registered_with_atexit = False
class _Info:
__slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")
def __init__(self, obj, func, *args, **kwargs):
if not self._registered_with_atexit:
# We may register the exit function more than once because
# of a thread race, but that is harmless
import atexit
atexit.register(self._exitfunc)
finalize._registered_with_atexit = True
info = self._Info()
info.weakref = ref(obj, self)
info.func = func
info.args = args
info.kwargs = kwargs or None
info.atexit = True
info.index = next(self._index_iter)
self._registry[self] = info
finalize._dirty = True
def __call__(self, _=None):
"""If alive then mark as dead and return func(*args, **kwargs);
otherwise return None"""
info = self._registry.pop(self, None)
if info and not self._shutdown:
return info.func(*info.args, **(info.kwargs or {}))
def detach(self):
"""If alive then mark as dead and return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None and self._registry.pop(self, None):
return (obj, info.func, info.args, info.kwargs or {})
def peek(self):
"""If alive then return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None:
return (obj, info.func, info.args, info.kwargs or {})
@property
def alive(self):
"""Whether finalizer is alive"""
return self in self._registry
@property
def atexit(self):
"""Whether finalizer should be called at exit"""
info = self._registry.get(self)
return bool(info) and info.atexit
@atexit.setter
def atexit(self, value):
info = self._registry.get(self)
if info:
info.atexit = bool(value)
def __repr__(self):
info = self._registry.get(self)
obj = info and info.weakref()
if obj is None:
return '<%s object at %#x; dead>' % (type(self).__name__, id(self))
else:
return '<%s object at %#x; for %r at %#x>' % \
(type(self).__name__, id(self), type(obj).__name__, id(obj))
@classmethod
def _select_for_exit(cls):
# Return live finalizers marked for exit, oldest first
L = [(f,i) for (f,i) in cls._registry.items() if i.atexit]
L.sort(key=lambda item:item[1].index)
return [f for (f,i) in L]
@classmethod
def _exitfunc(cls):
# At shutdown invoke finalizers for which atexit is true.
# This is called once all other non-daemonic threads have been
# joined.
reenable_gc = False
try:
if cls._registry:
import gc
if gc.isenabled():
reenable_gc = True
gc.disable()
pending = None
while True:
if pending is None or finalize._dirty:
pending = cls._select_for_exit()
finalize._dirty = False
if not pending:
break
f = pending.pop()
try:
# gc is disabled, so (assuming no daemonic
# threads) the following is the only line in
# this function which might trigger creation
# of a new finalizer
f()
except Exception:
sys.excepthook(*sys.exc_info())
assert f not in cls._registry
finally:
# prevent any more finalizers from executing during shutdown
finalize._shutdown = True
if reenable_gc:
gc.enable()
__all__ = ['sys', 'builtins', 'logging.config', 'OrderedDict', 'dictconfig', 'suppress',
'reduce', 'reload_module', 'Queue', 'PriorityQueue', 'Empty', 'ElementTree', 'ElementTreeParseError',
'urlretrieve', 'pickle', 'urlencode', 'urlparse', 'unquote', 'quote', 'quote_plus', 'parse_qsl',
'Enum', 'ConfigParser', 'decimal', 'Decimal', 'DECIMAL_TYPES', 'FixedOffset', 'utc', 'finalize',
'functools32', 'zip_longest', 'OrderedDict3', 'BrokenPipeError', 'TimeoutError', 'ConnectionResetError',
'izip', 'accumulate']
|
the-stack_0_3047 | #!/usr/bin/env python3
# coding:utf-8
"""
__title__ = ''
__author__ = 'David Ao'
__mtime__ = '2018/6/26'
#
"""
import json
import os
import pandas as pd
import numpy as np
from tensorflow.contrib import learn
from sklearn.metrics import classification_report, f1_score
from tools.utility import Utility
from works.test.base.estimate_base import EstimateBase
class FineGrainedSentimentInfer(EstimateBase):
def __init__(self):
super().__init__()
self.load_config('aspect_pro')
self.reload_model()
self.log = Utility.get_logger('aspect_pro')
self.content_vocab_processor = learn.preprocessing.VocabularyProcessor.restore(self.config['vocab_pkl'])
def batch_iter_test(self, data, batch_size):
"""
:param data:
:param batch_size:
:return:
"""
assert isinstance(data, pd.core.frame.DataFrame), 'test data should be a DataFrame'
content = data.content_token.values.tolist()
data = np.array(list(self.content_vocab_processor.transform(content))).tolist()
return super().batch_iter_test(data, batch_size)
def estimate(self, df):
"""
        Run inference on a DataFrame.
:param df:
:return:
"""
batches = self.batch_iter_test(df, self.test_batch_size)
        # predict
all_predictions = []
for x_test_batch in batches:
batch_predictions = self.sess.run(self.predictions, {self.input_x: x_test_batch, self.dropout_keep_prob: 1.0})
all_predictions.append(batch_predictions.T)
all_predictions = np.concatenate(all_predictions)
df['y'] = all_predictions.tolist()
return df
def run(self):
"""
:return:
"""
self.run_test()
def test(self, df, out_path, with_pre=False):
"""
:param df:
:param out_path:
:param with_pre:
:return:
"""
# df = df.head(10)
if df is None or len(df) == 0:
self.log.info('parse_dataset is empty')
return
if 'y' in df.columns:
df.rename(columns={'y': 'y_'}, inplace=True)
df = self.estimate(df)
if df is None or len(df) == 0:
self.log.info('estimate result is empty')
return
if 'y_' not in df.columns:
            # test data without labels
return self.test_no_label(df, out_path)
def process(row):
lab = eval(row['y_'])
cl = []
for i in range(0, 80, 4):
lt = lab[i: i + 4]
cl.append(np.argmax(lt))
row['label'] = cl
return row
df = df.apply(process, axis=1)
y_pre = df.y
y_pre = np.array(y_pre.tolist())
y_true = df.label
y_true = np.array(y_true.tolist())
f_scores = []
for i in range(20):
f = f1_score(y_true[:, i], y_pre[:, i], average='macro')
f_scores.append(f)
self.log.info('f1 score : {}'.format(f_scores))
f_avg = np.array(f_scores).mean()
self.log.info('mean f1 score: {}'.format(f_avg))
df.to_csv(out_path, index=False, encoding='utf-8')
def test_no_label(self, ret_df, out_path):
"""
        Test on data without ground-truth labels.
:param ret_df:
:return:
"""
aspect = ['location_traffic_convenience',
'location_distance_from_business_district', 'location_easy_to_find',
'service_wait_time', 'service_waiters_attitude',
'service_parking_convenience', 'service_serving_speed', 'price_level',
'price_cost_effective', 'price_discount', 'environment_decoration',
'environment_noise', 'environment_space', 'environment_cleaness',
'dish_portion', 'dish_taste', 'dish_look', 'dish_recommendation',
'others_overall_experience', 'others_willing_to_consume_again']
lab_dict = {
0: 0,
1: 1,
2: -2,
3: -1
}
df_ret = ret_df[['id', 'content', 'y']]
def process(row):
# y = eval(row['y'])
y = row['y']
for i, a in enumerate(y):
row[aspect[i]] = lab_dict[a]
return row
df_ret = df_ret.apply(process, axis=1)
df_ret = df_ret.drop(['y'], axis=1)
df_ret.to_csv(out_path, index=False, encoding='utf-8')
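# Hypothetical usage sketch -- the CSV paths and the presence of the expected
# columns (`content_token`, plus `id`/`content` for unlabeled data) are
# assumptions, not part of this module:
#
#   infer = FineGrainedSentimentInfer()
#   test_df = pd.read_csv('data/test_set.csv')
#   infer.test(test_df, 'output/predictions.csv')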
|
the-stack_0_3048 |
#!/usr/bin/env python3
import time
import os
import sqlite3
from sqlite3 import Connection
from typing import List
V100_DB_PATH = "../SQLiteDBs/A4v100.db"
V1K_DB_PATH = "../SQLiteDBs/A4v1k.db"
V10K_DB_PATH = "../SQLiteDBs/A4v10k.db"
V100K_DB_PATH = "../SQLiteDBs/A4v100k.db"
V1M_DB_PATH = "../SQLiteDBs/A4v1M.db"
# Q5: Find the quantity of parts that are not used in any other part, your query must use EXISTS.
# select
# count(partNumber)
# from
# Parts p
# where
# not exists (
# select
# 1
# from
# Parts p2
# where
# p.partNumber = p2.needsPart
# );
QUERY_5 = '''
select
count(partNumber)
from
Parts p
where
not exists (
select
1
from
Parts p2
where
p.partNumber = p2.needsPart
);
'''
# Q6: Find the quantity of parts that are not used in any other part, your query must use NOT IN.
# select
# count(partNumber)
# from
# Parts p
# where
# p.partNumber not in (
# select
# needsPart
# from
# Parts p2
# );
QUERY_6 = '''
select
count(partNumber)
from
Parts p
where
p.partNumber not in (
select
needsPart
from
Parts p2
);
'''
# Creates an index for Q6
# CREATE INDEX idxPartNumberNeedsPart on Parts ( needsPart, partNumber );
CREATE_INDEX_QUERY = '''
CREATE INDEX idxPartNumberNeedsPart on Parts ( needsPart, partNumber );
'''
# Drops the index for Q6
# DROP INDEX idxPartNumberNeedsPart;
DROP_INDEX_QUERY = '''
DROP INDEX idxPartNumberNeedsPart;
'''
country_list = None
def main():
options = {"100": V100_DB_PATH, "1K": V1K_DB_PATH,
"10K": V10K_DB_PATH, "100K": V100K_DB_PATH, "1M": V1M_DB_PATH}
print("Executing Part 4\n")
print("Avg times and sizes for Query 5 without index\n")
run_trials(options, QUERY_5)
print("Avg times and sizes for Query 6 without index\n")
run_trials(options, QUERY_6)
print("Creating index for each database")
update_index(options, CREATE_INDEX_QUERY)
print("Avg times and sizes for Query 6 with index\n")
run_trials(options, QUERY_6)
print("Dropping index for each database\n")
update_index(options, DROP_INDEX_QUERY)
print("Done!")
def update_index(options, query):
for option in options:
path = options[option]
connection = connect(path)
cursor = connection.cursor()
cursor.execute(query)
connection.commit()
connection.close()
def run_trials(options, query):
for option in options:
print("Avg time for {} entries".format(option))
avg_time(options[option], query)
print("Size of database {}".format(os.stat(options[option]).st_size))
print("\n")
def connect(path) -> Connection:
# Returns a connection to the database provided at the path.
db_path = exact_path(path)
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
# To enable foreign keys for SQLite
cursor.execute(' PRAGMA foreign_keys=ON; ')
connection.commit()
return connection
def exact_path(path) -> str:
# Used to convert relative path to absolute path.
curr = os.path.dirname(__file__)
load_path = os.path.join(curr, path)
return load_path
def run_query(path, query) -> None:
connection = connect(path)
cursor = connection.cursor()
cursor.execute(query, {})
connection.commit()
connection.close()
def avg_time(path, query) -> None:
total_time = 0
if path in {V100K_DB_PATH, V1M_DB_PATH} and query is QUERY_5:
print("Skipping this Database")
return
for i in range(0, 100):
t_start = time.process_time()
run_query(path, query)
t_taken = time.process_time() - t_start
total_time += t_taken
# to get the average for total_time
total_time = total_time/100
# display in ms
print("Avg time: {} ms".format(total_time*1000))
if __name__ == "__main__":
main()
|
the-stack_0_3049 | #!/bin/python2.7
# -*- coding: utf-8 -*-
"""
Lucas Ou-Yang 2014 -- http://codelucas.com
"""
import sys
import os
import codecs
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = [
'newspaper',
]
if sys.argv[-1] == 'publish':
os.system('python3 setup.py sdist upload -r pypi')
sys.exit()
# This *must* run early. Please see this API limitation on our users:
# https://github.com/codelucas/newspaper/issues/155
if sys.version_info[0] == 2 and sys.argv[-1] not in ['publish', 'upload']:
sys.exit('WARNING! You are attempting to install newspaper3k\'s '
'python3 repository on python2. PLEASE RUN '
'`$ pip3 install newspaper3k` for python3 or '
'`$ pip install newspaper` for python2')
with open('requirements.txt') as f:
required = f.read().splitlines()
with codecs.open('README.rst', 'r', 'utf-8') as f:
readme = f.read()
setup(
name='newspaper3k',
version='0.2.5',
description='Simplified python article discovery & extraction.',
long_description=readme,
author='Lucas Ou-Yang',
author_email='[email protected]',
url='https://github.com/codelucas/newspaper/',
packages=packages,
include_package_data=True,
install_requires=required,
license='MIT',
zip_safe=False,
classifiers=[
'Programming Language :: Python :: 3',
'Natural Language :: English',
'Intended Audience :: Developers',
],
)
|
the-stack_0_3051 | try:
# Try to use setuptools so as to enable support of the special
# "Microsoft Visual C++ Compiler for Python 2.7" (http://aka.ms/vcpython27)
# for building under Windows.
# Note setuptools >= 6.0 is required for this.
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
from distutils.command import build
from distutils.spawn import spawn
from distutils import sysconfig
import sys
import os
import platform
import versioneer
if sys.platform.startswith('linux'):
# Patch for #2555 to make wheels without libpython
sysconfig.get_config_vars()['Py_ENABLE_SHARED'] = 0
class build_doc(build.build):
description = "build documentation"
def run(self):
spawn(['make', '-C', 'docs', 'html'])
versioneer.VCS = 'git'
versioneer.versionfile_source = 'numba/_version.py'
versioneer.versionfile_build = 'numba/_version.py'
versioneer.tag_prefix = ''
versioneer.parentdir_prefix = 'numba-'
cmdclass = versioneer.get_cmdclass()
cmdclass['build_doc'] = build_doc
GCCFLAGS = ["-std=c89", "-Wdeclaration-after-statement", "-Werror"]
if os.environ.get("NUMBA_GCC_FLAGS"):
CFLAGS = GCCFLAGS
else:
CFLAGS = ['-g']
install_name_tool_fixer = []
if sys.platform == 'darwin':
install_name_tool_fixer += ['-headerpad_max_install_names']
def is_building():
"""
Parse the setup.py command and return whether a build is requested.
If False is returned, only an informational command is run.
If True is returned, information about C extensions will have to
be passed to the setup() function.
"""
if len(sys.argv) < 2:
# User forgot to give an argument probably, let setuptools handle that.
return True
info_commands = ['--help-commands', '--name', '--version', '-V',
'--fullname', '--author', '--author-email',
'--maintainer', '--maintainer-email', '--contact',
'--contact-email', '--url', '--license', '--description',
'--long-description', '--platforms', '--classifiers',
'--keywords', '--provides', '--requires', '--obsoletes']
# Add commands that do more than print info, but also don't need
# any build step.
info_commands.extend(['egg_info', 'install_egg_info', 'rotate'])
for command in info_commands:
if command in sys.argv[1:]:
return False
return True
def is_building_wheel():
if len(sys.argv) < 2:
# No command is given.
return False
return 'bdist_wheel' in sys.argv[1:]
def get_ext_modules():
"""
Return a list of Extension instances for the setup() call.
"""
# Note we don't import Numpy at the toplevel, since setup.py
# should be able to run without Numpy for pip to discover the
# build dependencies
import numpy.distutils.misc_util as np_misc
# Inject required options for extensions compiled against the Numpy
# C API (include dirs, library dirs etc.)
np_compile_args = np_misc.get_info('npymath')
ext_dynfunc = Extension(name='numba._dynfunc',
sources=['numba/_dynfuncmod.c'],
extra_compile_args=CFLAGS,
depends=['numba/_pymodule.h',
'numba/_dynfunc.c'])
ext_dispatcher = Extension(name="numba._dispatcher",
sources=['numba/_dispatcher.c',
'numba/_typeof.c',
'numba/_hashtable.c',
'numba/_dispatcherimpl.cpp',
'numba/typeconv/typeconv.cpp'],
depends=["numba/_pymodule.h",
"numba/_dispatcher.h",
"numba/_typeof.h",
"numba/_hashtable.h"],
**np_compile_args)
ext_helperlib = Extension(name="numba._helperlib",
sources=["numba/_helpermod.c",
"numba/_math_c99.c"],
extra_compile_args=CFLAGS,
extra_link_args=install_name_tool_fixer,
depends=["numba/_pymodule.h",
"numba/_math_c99.h",
"numba/_helperlib.c",
"numba/_lapack.c",
"numba/_npymath_exports.c",
"numba/_random.c",
"numba/mathnames.inc"],
**np_compile_args)
ext_typeconv = Extension(name="numba.typeconv._typeconv",
sources=["numba/typeconv/typeconv.cpp",
"numba/typeconv/_typeconv.cpp"],
depends=["numba/_pymodule.h"],
)
ext_npyufunc_ufunc = Extension(name="numba.npyufunc._internal",
sources=["numba/npyufunc/_internal.c"],
depends=["numba/npyufunc/_ufunc.c",
"numba/npyufunc/_internal.h",
"numba/_pymodule.h"],
**np_compile_args)
ext_npyufunc_workqueue_impls = []
def check_file_at_path(path2file):
"""
Takes a list as a path, a single glob (*) is permitted as an entry which
indicates that expansion at this location is required (i.e. version
might not be known).
"""
found = None
path2check = [os.path.split(os.path.split(sys.executable)[0])[0]]
path2check += [os.getenv(n, '') for n in ['CONDA_PREFIX', 'PREFIX']]
if sys.platform.startswith('win'):
path2check += [os.path.join(p, 'Library') for p in path2check]
for p in path2check:
if p:
if '*' in path2file:
globloc = path2file.index('*')
searchroot = os.path.join(*path2file[:globloc])
try:
potential_locs = os.listdir(os.path.join(p, searchroot))
except BaseException:
continue
searchfor = path2file[globloc + 1:]
for x in potential_locs:
potpath = os.path.join(p, searchroot, x, *searchfor)
if os.path.isfile(potpath):
found = p # the latest is used
elif os.path.isfile(os.path.join(p, *path2file)):
found = p # the latest is used
return found
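    # For example, check_file_at_path(['include', 'tbb', 'tbb.h']) checks the
    # prefix two levels above sys.executable plus $CONDA_PREFIX and $PREFIX
    # (and their Library\ variants on Windows), returning the last prefix that
    # contains include/tbb/tbb.h, or None if the header is not found.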
# Search for Intel TBB, first check env var TBBROOT then conda locations
tbb_root = os.getenv('TBBROOT')
if not tbb_root:
tbb_root = check_file_at_path(['include', 'tbb', 'tbb.h'])
# Set various flags for use in TBB and openmp. On OSX, also find OpenMP!
have_openmp = True
if sys.platform.startswith('win'):
cpp11flags = []
ompcompileflags = ['-openmp']
omplinkflags = []
elif sys.platform.startswith('darwin'):
cpp11flags = ['-std=c++11']
# This is a bit unusual but necessary...
# llvm (clang) OpenMP is used for headers etc at compile time
# Intel OpenMP (libiomp5) provides the link library.
# They are binary compatible and may not safely coexist in a process, as
# libiomp5 is more prevalent and often linked in for NumPy it is used
# here!
ompcompileflags = ['-fopenmp']
omplinkflags = ['-fopenmp=libiomp5']
omppath = ['lib', 'clang', '*', 'include', 'omp.h']
have_openmp = check_file_at_path(omppath)
else:
cpp11flags = ['-std=c++11']
ompcompileflags = ['-fopenmp']
if platform.machine() == 'ppc64le':
omplinkflags = ['-fopenmp']
else:
omplinkflags = ['-fopenmp']
if tbb_root:
print("Using Intel TBB from:", tbb_root)
ext_npyufunc_tbb_workqueue = Extension(
name='numba.npyufunc.tbbpool',
sources=['numba/npyufunc/tbbpool.cpp', 'numba/npyufunc/gufunc_scheduler.cpp'],
depends=['numba/npyufunc/workqueue.h'],
include_dirs=[os.path.join(tbb_root, 'include')],
extra_compile_args=cpp11flags,
libraries =['tbb'], # TODO: if --debug or -g, use 'tbb_debug'
library_dirs=[os.path.join(tbb_root, 'lib', 'intel64', 'gcc4.4'), # for Linux
os.path.join(tbb_root, 'lib'), # for MacOS
os.path.join(tbb_root, 'lib', 'intel64', 'vc_mt'), # for Windows
],
)
ext_npyufunc_workqueue_impls.append(ext_npyufunc_tbb_workqueue)
else:
print("TBB not found")
# Disable OpenMP if we are building a wheel or
# forced by user with NUMBA_NO_OPENMP=1
if is_building_wheel() or os.getenv('NUMBA_NO_OPENMP'):
print("OpenMP disabled")
elif have_openmp:
print("Using OpenMP from:", have_openmp)
# OpenMP backed work queue
ext_npyufunc_omppool = Extension( name='numba.npyufunc.omppool',
sources=['numba/npyufunc/omppool.cpp',
'numba/npyufunc/gufunc_scheduler.cpp'],
depends=['numba/npyufunc/workqueue.h'],
extra_compile_args=ompcompileflags + cpp11flags,
extra_link_args = omplinkflags)
ext_npyufunc_workqueue_impls.append(ext_npyufunc_omppool)
else:
print("OpenMP not found")
# Build the Numba workqueue implementation irrespective of whether the TBB
# version is built. Users can select a backend via env vars.
ext_npyufunc_workqueue = Extension(
name='numba.npyufunc.workqueue',
sources=['numba/npyufunc/workqueue.c', 'numba/npyufunc/gufunc_scheduler.cpp'],
depends=['numba/npyufunc/workqueue.h'])
ext_npyufunc_workqueue_impls.append(ext_npyufunc_workqueue)
ext_mviewbuf = Extension(name='numba.mviewbuf',
extra_link_args=install_name_tool_fixer,
sources=['numba/mviewbuf.c'])
ext_nrt_python = Extension(name='numba.runtime._nrt_python',
sources=['numba/runtime/_nrt_pythonmod.c',
'numba/runtime/nrt.c'],
depends=['numba/runtime/nrt.h',
'numba/_pymodule.h',
'numba/runtime/_nrt_python.c'],
**np_compile_args)
ext_jitclass_box = Extension(name='numba.jitclass._box',
sources=['numba/jitclass/_box.c'],
depends=['numba/_pymodule.h'],
)
ext_cuda_extras = Extension(name='numba.cuda.cudadrv._extras',
sources=['numba/cuda/cudadrv/_extras.c'],
depends=['numba/_pymodule.h'],
include_dirs=["numba"])
ext_modules = [ext_dynfunc, ext_dispatcher, ext_helperlib, ext_typeconv,
ext_npyufunc_ufunc, ext_mviewbuf, ext_nrt_python,
ext_jitclass_box, ext_cuda_extras]
ext_modules += ext_npyufunc_workqueue_impls
return ext_modules
def find_packages(root_dir, root_name):
"""
Recursively find packages in *root_dir*.
"""
packages = []
def rec(path, pkg_name):
packages.append(pkg_name)
for fn in sorted(os.listdir(path)):
subpath = os.path.join(path, fn)
if os.path.exists(os.path.join(subpath, "__init__.py")):
subname = "%s.%s" % (pkg_name, fn)
rec(subpath, subname)
rec(root_dir, root_name)
return packages
packages = find_packages("numba", "numba")
build_requires = ['numpy']
install_requires = ['llvmlite>=0.27.0dev0', 'numpy']
if sys.version_info < (3, 4):
install_requires.extend(['enum34', 'singledispatch'])
if sys.version_info < (3, 3):
install_requires.append('funcsigs')
metadata = dict(
name='numba',
description="compiling Python code using LLVM",
version=versioneer.get_version(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Compilers",
],
package_data={
# HTML templates for type annotations
"numba.annotations": ["*.html"],
# Various test data
"numba.cuda.tests.cudadrv.data": ["*.ptx"],
"numba.tests": ["pycc_distutils_usecase/*.py"],
# Some C files are needed by pycc
"numba": ["*.c", "*.h"],
"numba.pycc": ["*.c", "*.h"],
"numba.runtime": ["*.c", "*.h"],
},
scripts=["numba/pycc/pycc", "bin/numba"],
author="Anaconda, Inc.",
author_email="[email protected]",
url="http://numba.github.com",
packages=packages,
setup_requires=build_requires,
install_requires=install_requires,
license="BSD",
cmdclass=cmdclass,
)
with open('README.rst') as f:
metadata['long_description'] = f.read()
if is_building():
metadata['ext_modules'] = get_ext_modules()
setup(**metadata)
|
the-stack_0_3053 | """Base class to manage comms"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
from traitlets.config import LoggingConfigurable
from IPython.core.prompts import LazyEvaluate
from IPython.core.getipython import get_ipython
from ipython_genutils.importstring import import_item
from ipython_genutils.py3compat import string_types
from traitlets import Instance, Unicode, Dict, Any
from .comm import Comm
def lazy_keys(dikt):
"""Return lazy-evaluated string representation of a dictionary's keys
Key list is only constructed if it will actually be used.
Used for debug-logging.
"""
    return LazyEvaluate(lambda d: list(d.keys()), dikt)
class CommManager(LoggingConfigurable):
"""Manager for Comms in the Kernel"""
# If this is instantiated by a non-IPython kernel, shell will be None
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
kernel = Instance('ipykernel.kernelbase.Kernel')
iopub_socket = Any()
def _iopub_socket_default(self):
return self.kernel.iopub_socket
session = Instance('jupyter_client.session.Session')
def _session_default(self):
return self.kernel.session
comms = Dict()
targets = Dict()
# Public APIs
def register_target(self, target_name, f):
"""Register a callable f for a given target name
f will be called with two arguments when a comm_open message is received with `target`:
- the Comm instance
- the `comm_open` message itself.
f can be a Python callable or an import string for one.
"""
if isinstance(f, string_types):
f = import_item(f)
self.targets[target_name] = f
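        # Example target (a sketch, not part of this module): echo incoming
        # comm messages back to the frontend.
        #
        #   def echo_target(comm, open_msg):
        #       @comm.on_msg
        #       def _recv(msg):
        #           comm.send({'echo': msg['content']['data']})
        #
        #   comm_manager.register_target('echo', echo_target)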
def unregister_target(self, target_name, f):
"""Unregister a callable registered with register_target"""
        return self.targets.pop(target_name)
def register_comm(self, comm):
"""Register a new comm"""
comm_id = comm.comm_id
comm.shell = self.shell
comm.kernel = self.kernel
comm.iopub_socket = self.iopub_socket
self.comms[comm_id] = comm
return comm_id
def unregister_comm(self, comm):
"""Unregister a comm, and close its counterpart"""
# unlike get_comm, this should raise a KeyError
comm = self.comms.pop(comm.comm_id)
def get_comm(self, comm_id):
"""Get a comm with a particular id
Returns the comm if found, otherwise None.
This will not raise an error,
it will log messages if the comm cannot be found.
"""
if comm_id not in self.comms:
self.log.warn("No such comm: %s", comm_id)
self.log.debug("Current comms: %s", lazy_keys(self.comms))
return
        # comms are stored directly (not as weakrefs); just look it up
comm = self.comms[comm_id]
return comm
# Message handlers
def comm_open(self, stream, ident, msg):
"""Handler for comm_open messages"""
content = msg['content']
comm_id = content['comm_id']
target_name = content['target_name']
f = self.targets.get(target_name, None)
comm = Comm(comm_id=comm_id,
shell=self.shell,
kernel=self.kernel,
iopub_socket=self.iopub_socket,
primary=False,
target_name=target_name,
)
self.register_comm(comm)
if f is None:
self.log.error("No such comm target registered: %s", target_name)
else:
try:
f(comm, msg)
return
except Exception:
self.log.error("Exception opening comm with target: %s", target_name, exc_info=True)
# Failure.
try:
comm.close()
except:
self.log.error("""Could not close comm during `comm_open` failure
clean-up. The comm may not have been opened yet.""", exc_info=True)
def comm_msg(self, stream, ident, msg):
"""Handler for comm_msg messages"""
content = msg['content']
comm_id = content['comm_id']
comm = self.get_comm(comm_id)
if comm is None:
# no such comm
return
try:
comm.handle_msg(msg)
except Exception:
self.log.error("Exception in comm_msg for %s", comm_id, exc_info=True)
def comm_close(self, stream, ident, msg):
"""Handler for comm_close messages"""
content = msg['content']
comm_id = content['comm_id']
comm = self.get_comm(comm_id)
if comm is None:
# no such comm
self.log.debug("No such comm to close: %s", comm_id)
return
del self.comms[comm_id]
try:
comm.handle_close(msg)
except Exception:
self.log.error("Exception handling comm_close for %s", comm_id, exc_info=True)
__all__ = ['CommManager']
|
the-stack_0_3054 | # Copyright (C) 2012-2016 Ben Kurtovic <[email protected]>
# Copyright (C) 2019-2020 Yuri Astrakhan <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from sys import maxsize
__all__ = []
def inheritdoc(method):
"""Set __doc__ of *method* to __doc__ of *method* in its parent class.
Since this is used on :class:`.SmartList`, the "parent class" used is
``list``. This function can be used as a decorator.
"""
method.__doc__ = getattr(list, method.__name__).__doc__
return method
class _SliceNormalizerMixIn:
"""MixIn that provides a private method to normalize slices."""
def _normalize_slice(self, key, clamp=False):
"""Return a slice equivalent to the input *key*, standardized."""
if key.start is None:
start = 0
else:
start = (len(self) + key.start) if key.start < 0 else key.start
if key.stop is None or key.stop == maxsize:
stop = len(self) if clamp else None
else:
stop = (len(self) + key.stop) if key.stop < 0 else key.stop
return slice(start, stop, key.step or 1)
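# Illustrative results of _normalize_slice for a sequence of length 10,
# derived from the code above:
#
#   slice(None, None, None) -> slice(0, 10, 1)    (clamp=True)
#   slice(None, None, None) -> slice(0, None, 1)  (clamp=False)
#   slice(-3, None)         -> slice(7, 10, 1)    (clamp=True)
#   slice(2, -1, 2)         -> slice(2, 9, 2)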
|
the-stack_0_3057 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo.core import metrics
from lingvo.core import test_utils
from six.moves import range
import tensorflow as tf
class MetricsTest(test_utils.TestCase):
def testAverageMetric(self):
m = metrics.AverageMetric()
m.Update(1.0)
m.Update(2.0, 10.0)
self.assertEqual(1.0 + 2.0*10.0, m.total_value)
expected_average = (1.0 + 2.0*10.0) / (1.0 + 10.0)
self.assertEqual(expected_average, m.value)
name = 'metric_name'
self.assertEqual(
tf.Summary(value=[tf.Summary.Value(tag=name,
simple_value=expected_average)]),
m.Summary(name))
# Calling m.Summary() does not reset statistics.
m.Update(1.0)
self.assertEqual(1.0 + 2.0*10.0 + 1.0, m.total_value)
def testF1Metric(self):
m = metrics.F1Metric()
m.UpdateTruePositive(count=2.0)
m.UpdateFalsePositive()
m.UpdateFalseNegative()
precision = 2.0 / 3.0
recall = 2.0 / 3.0
expected_f1 = 2 * precision * recall / (precision + recall)
self.assertAlmostEqual(expected_f1, m.value)
name = 'my_f1_metric'
self.assertEqual(
tf.Summary(value=[tf.Summary.Value(tag=name,
simple_value=expected_f1)]),
m.Summary(name))
def testCorpusBleuMetric(self):
m = metrics.CorpusBleuMetric()
m.Update('a b c d', 'a b c d')
m.Update('a b c', 'a b c')
self.assertEqual(1.0, m.value)
name = 'corpus_bleu'
self.assertEqual(
tf.Summary(value=[tf.Summary.Value(tag=name, simple_value=1.0)]),
m.Summary(name))
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_3059 | import logging
import sh
from cached_property import cached_property
from kubeyard.base_command import CommandException
from kubeyard.commands.devel import BaseDevelCommand
logger = logging.getLogger(__name__)
class ShellCommand(BaseDevelCommand):
"""
    Opens an interactive shell inside a pod's container (via kubectl exec).
"""
custom_script_name = 'shell'
context_vars = ['pod', 'shell']
def __init__(self, *, shell, pod, container, root, **kwargs):
super().__init__(**kwargs)
self.shell = shell
self.pod = pod
self.container = container
self.root = root
def run_default(self):
try:
sh.kubectl.exec(
"-it",
self.pod_name,
"-c", self.container_name,
'--',
self.shell,
"-c", self.before_command,
_fg=True,
)
except sh.ErrorReturnCode_130:
# Command exited using Ctrl+D or Ctrl+C
pass
finally:
if self.after_command:
sh.kubectl.exec(
self.pod_name,
"-c", self.container_name,
"--",
self.shell,
"-c", self.after_command,
)
@cached_property
def pod_name(self) -> str:
if self.pod:
all_pods = sh.kubectl.get.pods('-o', 'jsonpath={.items[*].metadata.name}').split()
# Exact match
if self.pod in all_pods:
return self.pod
# Starting-with match
pods = [pod for pod in all_pods if pod.startswith(self.pod)]
pods.sort(key=len)
if len(pods) == 0:
raise CommandException(f"Not found pod equal or starting with '{self.pod}'")
if len(pods) > 1:
logger.warning(f"Found more than one pod. Using '{pods[0]}'")
return pods[0]
else:
for pod in sh.kubectl.get.pods(_iter='out'):
if self.image_name in pod:
return pod.split()[0]
raise CommandException("Container not found, please specify container or fix project setup.")
@cached_property
def container_name(self) -> str:
if self.container:
return self.container
else:
return self.image_name
@cached_property
def username(self) -> str:
return str(sh.whoami()).strip()
@property
def before_command(self):
if self.root:
return self.shell
return (
'groupadd -f -g {gid} {username}; '
'adduser -q --gecos "" --disabled-password --uid {uid} --gid {gid} {username}; '
'su {username}; '
).format(
gid=self.gid,
uid=self.uid,
username=self.username,
)
@property
def after_command(self) -> str:
if self.root:
return ""
else:
return "userdel --remove {username}; ".format(username=self.username)
|
the-stack_0_3063 | '''
Gramex {__version__} Copyright (c) 2017 by Gramener
Start the Gramex server on port 9988 at the current directory.
If no gramex.yaml exists, show the guide (https://learn.gramener.com/guide/)
Options
--listen.port=9090 Starts Gramex at port 9090
--browser Open the browser after startup
--settings.debug Enable serving tracebacks and autoreload
--settings.xsrf_cookies=false Disable XSRF cookies (only for testing)
--settings.cookie_secret=... Change cookie encryption key
Helper applications
gramex init Add Gramex project scaffolding to current dir
gramex service Windows service setup
gramex mail Send email from command line
gramex license See Gramex license, accept or reject it
Installation commands. Run without arguments to see help
gramex install Install an app
gramex update Update an app
gramex setup Run make, npm install, bower install etc on app
gramex run Run an installed app
gramex uninstall Uninstall an app
'''
import os
import sys
import json
import yaml
import logging
import logging.config
import tornado.ioloop
from pathlib import Path
from orderedattrdict import AttrDict
from gramex.config import ChainConfig, PathConfig, app_log, variables, setup_variables
from gramex.config import ioloop_running, prune_keys, setup_secrets
paths = AttrDict() # Paths where configurations are stored
conf = AttrDict() # Final merged configurations
config_layers = ChainConfig() # Loads all configurations. init() updates it
paths['source'] = Path(__file__).absolute().parent # Where gramex source code is
paths['base'] = Path('.') # Where gramex is run from
callbacks = {} # Services callbacks
# Populate __version__ from release.json
with (paths['source'] / 'release.json').open() as _release_file:
release = json.load(_release_file, object_pairs_hook=AttrDict)
__version__ = release.info.version
_sys_path = list(sys.path) # Preserve original sys.path
# List of URLs to warn about in case of duplicates
PathConfig.duplicate_warn = [
'url.*',
'cache.*',
'schedule.*',
'watch.*',
'email.*',
'alert.*',
'sms.*',
'log.loggers.*', 'log.handlers.*', 'log.formatters.*',
]
def parse_command_line(commands):
'''
Parse command line arguments. For example:
gramex cmd1 cmd2 --a=1 2 -b x --c --p.q=4
returns:
{"_": ["cmd1", "cmd2"], "a": [1, 2], "b": "x", "c": True, "p": {"q": [4]}}
Values are parsed as YAML. Arguments with '.' are split into subgroups. For
example, ``gramex --listen.port 80`` returns ``{"listen": {"port": 80}}``.
'''
group = '_'
args = AttrDict({group: []})
for arg in commands:
if arg.startswith('-'):
group, value = arg.lstrip('-'), 'True'
if '=' in group:
group, value = group.split('=', 1)
else:
value = arg
value = yaml.safe_load(value)
base = args
keys = group.split('.')
for key in keys[:-1]:
base = base.setdefault(key, AttrDict())
# Add the key to the base.
# If it's already there, make it a list.
# If it's already a list, append to it.
if keys[-1] not in base or base[keys[-1]] is True:
base[keys[-1]] = value
elif not isinstance(base[keys[-1]], list):
base[keys[-1]] = [base[keys[-1]], value]
else:
base[keys[-1]].append(value)
return args
def callback_commandline(commands):
'''
Find what method should be run based on the command line programs. This
refactoring allows us to test gramex.commandline() to see if it processes
the command line correctly, without actually running the commands.
Returns a callback method and kwargs for the callback method.
'''
# Set logging config at startup. (Services may override this.)
log_config = (+PathConfig(paths['source'] / 'gramex.yaml')).get('log', AttrDict())
log_config.root.level = logging.INFO
from . import services
services.log(log_config)
# args has all optional command line args as a dict of values / lists.
# cmd has all positional arguments as a list.
args = parse_command_line(commands)
cmd = args.pop('_')
# If --help or -V --version is specified, print a message and end
if args.get('V') is True or args.get('version') is True:
return console, {'msg': 'Gramex %s' % __version__}
if args.get('help') is True:
return console, {'msg': __doc__.strip().format(**globals())}
# Any positional argument is treated as a gramex command
if len(cmd) > 0:
kwargs = {'cmd': cmd, 'args': args}
base_command = cmd.pop(0).lower()
method = 'install' if base_command == 'update' else base_command
if method in {
'install', 'uninstall', 'setup', 'run', 'service', 'init',
'mail', 'license',
}:
import gramex.install
return getattr(gramex.install, method), kwargs
raise NotImplementedError('Unknown gramex command: %s' % base_command)
# Use current dir as base (where gramex is run from) if there's a gramex.yaml.
if not os.path.isfile('gramex.yaml'):
return console, {'msg': 'No gramex.yaml. See https://learn.gramener.com/guide/'}
app_log.info('Gramex %s | %s | Python %s', __version__, os.getcwd(),
sys.version.replace('\n', ' '))
return init, {'cmd': AttrDict(app=args)}
def commandline(args=None):
'''
Run Gramex from the command line. Called via:
- setup.py console_scripts when running gramex
- __main__.py when running python -m gramex
'''
callback, kwargs = callback_commandline(sys.argv[1:] if args is None else args)
callback(**kwargs)
def gramex_update(url):
'''If a newer version of gramex is available, logs a warning'''
import time
import requests
import platform
from . import services
if not services.info.eventlog:
return app_log.error('eventlog: service is not running. So Gramex update is disabled')
query = services.info.eventlog.query
update = query('SELECT * FROM events WHERE event="update" ORDER BY time DESC LIMIT 1')
delay = 24 * 60 * 60 # Wait for one day before updates
if update and time.time() < update[0]['time'] + delay:
return app_log.debug('Gramex update ran recently. Deferring check.')
meta = {
'dir': variables.get('GRAMEXDATA'),
'uname': platform.uname(),
}
if update:
events = query('SELECT * FROM events WHERE time > ? ORDER BY time',
(update[0]['time'], ))
else:
events = query('SELECT * FROM events')
logs = [dict(log, **meta) for log in events]
r = requests.post(url, data=json.dumps(logs))
r.raise_for_status()
update = r.json()
version = update['version']
if version > __version__:
app_log.error('Gramex %s is available. See https://learn.gramener.com/guide/', version)
elif version < __version__:
app_log.warning('Gramex update: your version %s is ahead of the stable %s',
__version__, version)
else:
app_log.debug('Gramex version %s is up to date', __version__)
services.info.eventlog.add('update', update)
return {'logs': logs, 'response': update}
def console(msg):
'''Write message to console'''
print(msg) # noqa
def init(force_reload=False, **kwargs):
'''
Update Gramex configurations and start / restart the instance.
``gramex.init()`` can be called any time to refresh configuration files.
``gramex.init(key=val)`` adds ``val`` as a configuration layer named
``key``. If ``val`` is a Path, it is converted into a PathConfig. (If it is
Path directory, use ``gramex.yaml``.)
Services are re-initialised if their configurations have changed. Service
callbacks are always re-run (even if the configuration hasn't changed.)
'''
try:
setup_secrets(paths['base'])
except Exception as e:
app_log.exception(e)
# Reset variables
variables.clear()
variables.update(setup_variables())
# Initialise configuration layers with provided configurations
# AttrDicts are updated as-is. Paths are converted to PathConfig
paths.update(kwargs)
for key, val in paths.items():
if isinstance(val, Path):
if val.is_dir():
val = val / 'gramex.yaml'
val = PathConfig(val)
config_layers[key] = val
# Locate all config files
config_files = set()
for path_config in config_layers.values():
if hasattr(path_config, '__info__'):
for pathinfo in path_config.__info__.imports:
config_files.add(pathinfo.path)
config_files = list(config_files)
# Add config file folders to sys.path
sys.path[:] = _sys_path + [str(path.absolute().parent) for path in config_files]
from . import services
globals()['service'] = services.info # gramex.service = gramex.services.info
# Override final configurations
final_config = +config_layers
# --settings.debug => log.root.level = True
if final_config.app.get('settings', {}).get('debug', False):
final_config.log.root.level = logging.DEBUG
# Set up a watch on config files (including imported files)
if final_config.app.get('watch', True):
        from .services import watcher
watcher.watch('gramex-reconfig', paths=config_files, on_modified=lambda event: init())
# Run all valid services. (The "+" before config_chain merges the chain)
# Services may return callbacks to be run at the end
for key, val in final_config.items():
if key not in conf or conf[key] != val or force_reload:
if hasattr(services, key):
app_log.debug('Loading service: %s', key)
conf[key] = prune_keys(val, {'comment'})
callback = getattr(services, key)(conf[key])
if callable(callback):
callbacks[key] = callback
else:
app_log.error('No service named %s', key)
# Run the callbacks. Specifically, the app service starts the Tornado ioloop
for key in (+config_layers).keys():
if key in callbacks:
app_log.debug('Running callback: %s', key)
callbacks[key]()
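# Typical embedding pattern (a sketch; the project path and the layer name
# "project" are assumptions, not required by this module):
#
#   import gramex
#   from pathlib import Path
#   gramex.init(project=Path('/path/to/project'))   # loads /path/to/project/gramex.yaml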
def shutdown():
'''Shut down this instance'''
ioloop = tornado.ioloop.IOLoop.current()
if ioloop_running(ioloop):
app_log.info('Shutting down Gramex...')
ioloop.stop()
def log(*args, **kwargs):
'''
Logs structured information for future reference. Typical usage::
gramex.log(level='INFO', x=1, msg='abc')
This logs ``{level: INFO, x: 1, msg: abc}`` into a logging queue. If a `gramexlog` service like
ElasticSearch has been configured, it will periodically flush the logs into the server.
'''
from . import services
# gramexlog() positional arguments may have a handler and app (in any order)
# The app defaults to the first gramexlog:
handler, app = None, services.info.gramexlog.get('defaultapp', None)
for arg in args:
# Pretend that anything that has a .args is a handler
if hasattr(getattr(arg, 'args', None), 'items'):
handler = arg
# ... and anything that's a string is an index name. The last string overrides all
elif isinstance(arg, str):
app = arg
# If the user logs into an unknown app, stop immediately
try:
conf = services.info.gramexlog.apps[app]
except KeyError:
raise ValueError(f'gramexlog: no config for {app}')
# Add all URL query parameters. In case of multiple values, capture the last
if handler:
kwargs.update({key: val[-1] for key, val in handler.args.items()})
# Add additional keys specified in gramex.yaml via keys:
kwargs.update(conf.extra_keys(handler))
conf.queue.append(kwargs)
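# Typical gramexlog usage from a handler (a sketch; the app name 'analytics'
# and the extra keys are assumptions):
#
#   def view(handler):
#       gramex.log(handler, 'analytics', event='page_view', path=handler.request.path)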
|
the-stack_0_3064 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: 2018-02-02 12:31:13
import logging
import numpy as np
from pybpodapi.protocol import Bpod, StateMachine
import task_settings
import user_settings
from iblrig.bpod_helper import BpodMessageCreator
from session_params import SessionParamHandler
from trial_params import TrialParamHandler
log = logging.getLogger("iblrig")
log.setLevel(logging.INFO)
global sph
sph = SessionParamHandler(task_settings, user_settings)
def softcode_handler(data):
"""
    Soft codes should work with reasonable latency considering our limiting
    factor is the refresh rate of the screen, which should be 16.667ms @ a frame
    rate of 60Hz
    0 : stop_sound
    1 : play_tone
    3 : start_camera_recording
"""
global sph
if data == 0:
sph.stop_sound()
elif data == 1:
sph.play_tone()
elif data == 3:
sph.start_camera_recording()
# =============================================================================
# CONNECT TO BPOD
# =============================================================================
bpod = Bpod()
# Soft code handler function can run arbitrary code from within state machine
bpod.softcode_handler_function = softcode_handler
# TODO: Put inside SPH remove @property or organize sequence of var definition
# Bpod message creator
msg = BpodMessageCreator(bpod)
bonsai_hide_stim = msg.bonsai_hide_stim()
bonsai_show_stim = msg.bonsai_show_stim()
sc_play_tone = msg.sound_card_play_idx(sph.GO_TONE_IDX)
sph.GO_TONE_SM_TRIGGER = sc_play_tone
bpod = msg.return_bpod()
# =============================================================================
# TRIAL PARAMETERS AND STATE MACHINE
# =============================================================================
global tph
tph = TrialParamHandler(sph)
bad_stim_count = 0
bad_tone_count = 0
for i in range(sph.NTRIALS): # Main loop
tph.next_trial()
log.info(f"Starting trial: {i + 1}")
# =============================================================================
# Start state machine definition
# =============================================================================
sma = StateMachine(bpod)
if i == 0:
sma.add_state(
state_name="stim_on",
state_timer=10,
state_change_conditions={
"Tup": "bad_stim",
"BNC1High": "stim_off",
"BNC1Low": "stim_off",
},
output_actions=[("Serial1", bonsai_show_stim)],
)
else:
sma.add_state(
state_name="stim_on",
state_timer=1,
state_change_conditions={
"Tup": "bad_stim",
"BNC1High": "stim_off",
"BNC1Low": "stim_off",
},
output_actions=[("Serial1", bonsai_show_stim)],
)
sma.add_state(
state_name="stim_off",
state_timer=1, # Stim off for 1 sec
state_change_conditions={
"Tup": "bad_stim",
"BNC1High": "play_tone",
"BNC1Low": "play_tone",
},
output_actions=[("Serial1", bonsai_hide_stim)],
)
sma.add_state(
state_name="bad_stim",
state_timer=0,
state_change_conditions={"Tup": "play_tone"},
output_actions=[],
)
sma.add_state(
state_name="play_tone",
state_timer=1,
state_change_conditions={
"Tup": "bad_tone",
"BNC2High": "exit",
"BNC2Low": "exit",
},
output_actions=[tph.out_tone],
)
sma.add_state(
state_name="bad_tone",
state_timer=0,
state_change_conditions={"Tup": "exit"},
output_actions=[],
)
# Send state machine description to Bpod device
bpod.send_state_machine(sma)
# Run state machine
if not bpod.run_state_machine(sma): # Locks until state machine 'exit' is reached
break
trial_data = tph.trial_completed(bpod.session.current_trial.export())
bad_tone_state = trial_data["behavior_data"]["States timestamps"]["bad_tone"]
bad_stim_state = trial_data["behavior_data"]["States timestamps"]["bad_stim"]
if not np.all(np.isnan(bad_stim_state)):
bad_stim_count += 1
log.warning(f"Missing stims: {bad_stim_count}")
if not np.all(np.isnan(bad_tone_state)):
bad_tone_count += 1
log.warning(f"Missing tones: {bad_tone_count}")
sph.check_data()
bpod.close()
if __name__ == "__main__":
print("main")
|
the-stack_0_3066 | # qubit number=4
# total number=35
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=14
prog.h(input_qubit[3]) # number=22
prog.cx(input_qubit[0],input_qubit[3]) # number=32
prog.x(input_qubit[3]) # number=33
prog.cx(input_qubit[0],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=19
prog.cz(input_qubit[0],input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=21
prog.z(input_qubit[3]) # number=10
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=26
prog.cz(input_qubit[1],input_qubit[0]) # number=27
prog.h(input_qubit[0]) # number=28
prog.z(input_qubit[1]) # number=24
prog.h(input_qubit[0]) # number=29
prog.cz(input_qubit[1],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=31
prog.h(input_qubit[1]) # number=18
prog.rx(2.8902652413026093,input_qubit[2]) # number=13
prog.y(input_qubit[1]) # number=11
prog.y(input_qubit[1]) # number=12
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
    sample_shot = 8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)
    writefile = open("../data/startQiskit2133.csv", "w")
    print(info, file=writefile)
    print("results end", file=writefile)
    print(len(circuit1), file=writefile)
    print(circuit1, file=writefile)
writefile.close()
|
the-stack_0_3068 | import os
import sys
import mxnet as mx
import logging
def patch_path(path):
return os.path.join(os.path.dirname(__file__), path)
def main():
sys.path.append(patch_path('..'))
logging.basicConfig(level=logging.DEBUG)
data_dir_path = patch_path('data/flowers')
output_dir_path = patch_path('models')
batch_size = 8
epochs = 100
ctx = mx.cpu() # gpu too expensive for my graphics card due to the (224, 224) size, has to switch to cpu
from mxnet_text_to_image.library.dcgan1 import DCGan
from mxnet_text_to_image.data.flowers import get_data_iter
from mxnet_text_to_image.data.flowers_images import get_image_features
train_data = get_data_iter(data_dir_path=data_dir_path,
batch_size=batch_size,
limit=1000,
text_mode='add')
image_feats_dict = get_image_features(data_dir_path=os.path.join(data_dir_path, 'jpg'), model_ctx=ctx,
image_width=224, image_height=224)
gan = DCGan(model_ctx=ctx)
gan.random_input_size = 20 # random input is 20, text input is 300
gan.fit(train_data=train_data, image_feats_dict=image_feats_dict, model_dir_path=output_dir_path,
epochs=epochs, batch_size=batch_size)
if __name__ == '__main__':
main()
|
the-stack_0_3071 | import sys
import tkinter as tk
import myNotebook as nb
from config import config
import utils
import overlay
this = sys.modules[__name__]
def plugin_start3(plugin_dir):
return plugin_start()
def plugin_start():
"""
Load EliteHIS plugin
"""
#Initialize target coordinates and planet radius
this.targetLat = tk.DoubleVar(value=0)
this.targetLong = tk.DoubleVar(value=0)
this.planetRadius = tk.IntVar(value=0)
print("EliteHIS: loaded")
return "EliteHIS"
def plugin_stop():
"""
Stop EliteHIS plugin
"""
print("EliteHIS: stopped")
def plugin_prefs(parent, cmdr, is_beta):
"""
Create settings dialog
"""
frame = nb.Frame(parent)
nb.Label(frame, text="Latitude").grid(row=0)
nb.Entry(frame, textvariable=this.targetLat).grid(row=0, column=1)
nb.Label(frame, text="Longitude").grid(row=1)
nb.Entry(frame, textvariable=this.targetLong).grid(row=1, column=1)
nb.Label(frame, text="Planet radius (in metres)").grid(row=2)
nb.Entry(frame, textvariable=this.planetRadius).grid(row=2, column=1)
return frame
def dashboard_entry(cmdr, is_beta, entry):
"""
Called when something on the cockpit display changes
"""
#Lat/Long not always in status.json
try:
currentLat = entry["Latitude"]
currentLong = entry["Longitude"]
altitude = entry["Altitude"]
recalculate_info(currentLat, currentLong, altitude)
except KeyError:
pass
def recalculate_info(currentLat, currentLong, altitude):
"""
Recalculate target heading
"""
heading = utils.calc_heading(currentLat, currentLong, this.targetLat.get(), this.targetLong.get())
overlay.show_heading(heading)
if this.planetRadius.get() > 0:
distance = utils.calc_distance(currentLat, currentLong, this.targetLat.get(), this.targetLong.get(), this.planetRadius.get(), altitude)
overlay.show_distance(distance)
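# For reference, utils.calc_heading presumably computes the great-circle
# initial bearing; a minimal sketch of that formula (an assumption -- the real
# helper may differ) would be:
#
#   import math
#   def _bearing(lat1, lon1, lat2, lon2):
#       lat1, lon1, lat2, lon2 = map(math.radians, (lat1, lon1, lat2, lon2))
#       d_lon = lon2 - lon1
#       x = math.sin(d_lon) * math.cos(lat2)
#       y = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(d_lon)
#       return (math.degrees(math.atan2(x, y)) + 360) % 360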
|
the-stack_0_3072 | from unittest.mock import Mock, patch
from django.test import TestCase
from data_refinery_common import utils
from data_refinery_common.models import Pipeline
class UtilsTestCase(TestCase):
@patch('data_refinery_common.utils.requests.get')
def test_get_instance_id_cloud(self, mock_get):
"""Test that a request is made and the global value is stored"""
# Ensure utils.INSTANCE_ID hasn't been set yet in case the
# order the tests are run in ever changes
utils.INSTANCE_ID = None
mock_get.return_value = Mock(ok=True)
mock_get.return_value.text = "instance_id"
with self.settings(RUNNING_IN_CLOUD=True):
self.assertEqual(utils.get_instance_id(), "instance_id")
# Ensure that the second call uses the now-set global value.
# (By resetting the mocks, calling it again, and checking that
# the values didn't need to be set again).
mock_get.reset_mock()
utils.get_instance_id()
mock_get.assert_not_called()
def test_get_instance_id_local(self):
"""Test that local is used for instance id."""
# Ensure utils.INSTANCE_ID hasn't been set yet in case the
# order the tests are run in ever changes
utils.INSTANCE_ID = None
with self.settings(RUNNING_IN_CLOUD=False):
self.assertEqual(utils.get_instance_id(), "local")
# Ensure that the second call uses the now-set global value
# by changing what settings would tell it.
with self.settings(RUNNING_IN_CLOUD=True):
self.assertEqual(utils.get_instance_id(), "local")
def test_supported_microarray_platforms(self):
"""Test that supported microarray platforms setting is set correctly."""
supported_microarray_platforms = utils.get_supported_microarray_platforms()
has_equgene11st = False
has_A_AFFY_59 = False
has_GPL23026 = False
has_AGEOD23026 = False
for platform in supported_microarray_platforms:
if platform["platform_accession"] == "equgene11st" and platform["is_brainarray"]:
has_equgene11st = True
if platform["external_accession"] == "A-AFFY-59" and not platform["is_brainarray"]:
has_A_AFFY_59 = True
if platform["external_accession"] == "GPL23026" and not platform["is_brainarray"]:
has_GPL23026 = True
if platform["external_accession"] == "A-GEOD-23026" and not platform["is_brainarray"]:
has_AGEOD23026 = True
self.assertTrue(has_equgene11st)
self.assertTrue(has_A_AFFY_59)
self.assertTrue(has_GPL23026)
self.assertTrue(has_AGEOD23026)
def test_get_internal_microarray_accession(self):
"""Test that supported microarray platforms setting is set correctly."""
self.assertEqual(utils.get_internal_microarray_accession("hgu133a"), "hgu133a")
self.assertEqual(utils.get_internal_microarray_accession("A-AFFY-59"), "soybean")
self.assertEqual(utils.get_internal_microarray_accession("GPL23026"), "Illumina_HumanHT-12_V4.0")
def test_supported_rnaseq_platforms(self):
"""Test that supported RNASeq platforms setting is set correctly."""
self.assertTrue("Illumina HiSeq 1000" in utils.get_supported_rnaseq_platforms())
def test_readable_affymetrix_names(self):
"""Test that the setting for Affymetrix accessions to
human readable names is set correctly."""
readable_platform_names = utils.get_readable_affymetrix_names()
expected_readable_name = "[ChiGene-1_0-st] Affymetrix Chicken Gene 1.0 ST Array"
self.assertTrue(readable_platform_names["chigene10st"] == expected_readable_name)
expected_readable_name = "[Xenopus_laevis] Affymetrix Xenopus laevis Genome Array"
self.assertTrue(readable_platform_names["xenopuslaevis"] == expected_readable_name)
def test_get_normalized_platform(self):
""" Test a particular normaization we need to perform """
self.assertEqual(utils.get_normalized_platform("hugene10stv1"), "hugene10st")
self.assertEqual(utils.get_normalized_platform("hugene10stv2"), "hugene10st")
self.assertEqual(utils.get_normalized_platform("stv1hugene10"), "stv1hugene10")
def test_volume_index(self):
"""Test that supported RNASeq platforms setting is set correctly."""
self.assertEqual(utils.get_volume_index(), "0")
with open('/tmp/VOLUME_INDEX', 'wb') as f:
f.write("123".encode())
self.assertEqual(utils.get_volume_index(path='/tmp/VOLUME_INDEX'), "123")
def test_load_blacklist(self):
blacklist = utils.load_blacklist()
self.assertEqual(len(blacklist), 239449)
def test_queryset_iterator(self):
"""Test that the queryset iterator by using it to actually iterate over a queryset.
Uses Pipeline just because it's easy to init."""
# Page size defaults to 2000, so use something bigger than
# that so there's more than one page.
for i in range(3000):
Pipeline(name=str(i)).save()
pipelines = Pipeline.objects.all()
# Build a list of the names just to do something with the data
# so we know the query actually resolved.
names = []
for pipeline in utils.queryset_iterator(pipelines):
names.append(pipeline.name)
self.assertEqual(len(names), 3000)
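# Illustrative sketch only: data_refinery_common.utils.queryset_iterator itself is
# not shown in this file. A minimal paged iterator consistent with what the test
# above exercises (a default page size of 2000, ordered by primary key) could look
# like this hypothetical helper; the real implementation may differ.
def _sketch_queryset_iterator(queryset, page_size=2000):
    """Yield objects from a queryset one primary-key-ordered page at a time."""
    last_pk = 0
    while True:
        page = list(queryset.order_by("pk").filter(pk__gt=last_pk)[:page_size])
        if not page:
            return
        for obj in page:
            yield obj
        last_pk = page[-1].pk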
|
the-stack_0_3074 | # -*- coding: utf-8 -*-
"""Global configuration objects.
This module contains boilerplate configuration objects for storing and loading
configuration state.
"""
from __future__ import division
import os
import numpy as np
import pytoml as toml
import six
class BaseConfig(object):
"""Base class for configuration objects.
String representation yields TOML that should parse back to a dictionary
that will initialize the same configuration object.
"""
def __str__(self):
sanitized = {}
for k, v in six.iteritems(self.__dict__):
if isinstance(v, np.ndarray):
sanitized[k] = v.tolist()
else:
sanitized[k] = v
return toml.dumps(sanitized)
__repr__ = __str__
class VolumeConfig(BaseConfig):
"""Configuration for the use of volumes.
Attributes
----------
resolution : sequence or ndarray of float
Resolution to which volumes will be downsampled before processing.
label_downsampling : str
Method for downsampling label masks. One of 'majority' or 'conjunction'.
"""
def __init__(self, settings):
self.resolution = np.array(settings.get('resolution', [1, 1, 1]))
self.label_downsampling = str(settings.get('label_downsampling', 'majority'))
class ModelConfig(BaseConfig):
"""Configuration for non-network aspects of the flood filling model.
Attributes
----------
input_fov_shape : sequence or ndarray of int
Input field of view shape in voxels for each flood filling move.
output_fov_shape : sequence or ndarray of int
Output field of view shape in voxels for each flood filling move. Can
not be larger than ``input_fov_shape``.
output_fov_move_fraction : int
Move size as a fraction of the output field of view shape.
v_true, v_false : float
Soft target values for in-object and out-of-object mask voxels,
respectively.
t_move : float
Threshold mask probability in the move check plane to queue a move
to that position.
t_final : float, optional
Threshold mask probability to produce the final segmentation. Defaults
to ``t_move``.
move_check_thickness : int
Thickness of move check plane in voxels. Setting this greater than 1
is useful to make moves more robust even if the move grid aligns with
missing sections or image artifacts.
move_priority : str
How to prioritize the move queue. Either 'descending' to order by
descending mask probability in the move check plane (default),
'proximity' to prioritize moves minimizing L1 path distance from the
seed, or 'random'.
move_recheck : bool
If true, when moves are retrieved from the queue a cube in the
probability mask will be checked around the move location. If no voxels
in this cube are greater than the move threshold, the move will be
skipped. The cube size is one move step in each direction.
training_subv_shape : sequence or ndarray of int, optional
Shape of the subvolumes used during moving training.
validation_subv_shape : sequence or ndarray of int, optional
Shape of the subvolumes used during training validation.
"""
def __init__(self, settings):
self.input_fov_shape = np.array(settings.get('input_fov_shape', [17, 33, 33]))
self.output_fov_shape = np.array(settings.get('output_fov_shape', [17, 33, 33]))
self.output_fov_move_fraction = int(settings.get('output_fov_move_fraction', 4))
self.v_true = float(settings.get('v_true', 0.95))
self.v_false = float(settings.get('v_false', 0.05))
self.t_move = float(settings.get('t_move', 0.9))
self.t_final = float(settings.get('t_final', self.t_move))
self.move_check_thickness = int(settings.get('move_check_thickness', 1))
self.move_priority = str(settings.get('move_priority', 'descending'))
self.move_recheck = bool(settings.get('move_recheck', True))
self.training_subv_shape = np.array(settings.get('training_subv_shape',
self.input_fov_shape + self.move_step * 2))
self.validation_subv_shape = np.array(settings.get('validation_subv_shape',
self.input_fov_shape + self.move_step * 4))
@property
def move_step(self):
return (self.output_fov_shape - 1) // self.output_fov_move_fraction
def subv_moves(self, shape):
return np.prod((shape - self.input_fov_shape) // self.move_step + 1)
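    # Worked example with the defaults above: output_fov_shape = [17, 33, 33] and
    # output_fov_move_fraction = 4 give move_step = (shape - 1) // 4 = [4, 8, 8],
    # and the default training_subv_shape of input_fov_shape + 2 * move_step =
    # [25, 49, 49] yields ((25, 49, 49) - (17, 33, 33)) // (4, 8, 8) + 1 = (3, 3, 3),
    # i.e. 27 moves per training subvolume.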
@property
def training_subv_moves(self):
return self.subv_moves(self.training_subv_shape)
@property
def validation_subv_moves(self):
return self.subv_moves(self.validation_subv_shape)
class NetworkConfig(BaseConfig):
"""Configuration for the flood filling network architecture.
Attributes
----------
factory : str
Module and function name for a factory method for creating the flood
filling network. This allows a custom architecture to be provided
without needing to modify diluvian.
transpose : bool
If true, any loaded networks will reverse the order of axes for both
inputs and outputs. Data is assumed to be ZYX row-major, but old
versions of diluvian used XYZ, so this is necessary to load old
networks.
rescale_image : bool
If true, rescale the input image intensity from [0, 1) to [-1, 1).
num_modules : int
Number of convolution modules to use, each module consisting of a skip
link in parallel with ``num_layers_per_module`` convolution layers.
num_layers_per_module : int
Number of layers to use in each organizational module, e.g., the
number of convolution layers in each convolution module or the number
of convolution layers before and after each down- and up-sampling
respectively in a U-Net level.
convolution_dim : sequence or ndarray of int
Shape of the convolution for each layer.
convolution_filters : int
Number of convolution filters for each layer.
convolution_activation : str
Name of the Keras activation function to apply after convolution layers.
convolution_padding : str
Name of the padding mode for convolutions, either 'same' (default) or
'valid'.
initialization : str
Name of the Keras initialization function to use for weight
initialization of all layers.
output_activation : str
Name of the Keras activation function to use for the final network
output.
dropout_probability : float
Probability for dropout layers. If zero, no dropout layers will be
included.
batch_normalization : bool
Whether to apply batch normalization. Note that in included networks
normalization is applied after activation, rather than before as in the
original paper, because this is now more common practice.
unet_depth : int
For U-Net models, the total number of downsampled levels in the network.
unet_downsample_rate : sequence or ndarray of int
The frequency in levels to downsample each axis. For example, a standard
U-Net downsamples all axes at each level, so this value would be all
ones. If data is anisotropic and Z should only be downsampled every
other level, this value could be [2, 1, 1]. Axes set to 0 are never
downsampled.
"""
def __init__(self, settings):
self.factory = str(settings.get('factory'))
self.transpose = bool(settings.get('transpose', False))
self.rescale_image = bool(settings.get('rescale_image', False))
self.num_modules = int(settings.get('num_modules', 8))
self.num_layers_per_module = int(settings.get('num_layers_per_module', 2))
self.convolution_dim = np.array(settings.get('convolution_dim', [3, 3, 3]))
self.convolution_filters = int(settings.get('convolution_filters', 32))
self.convolution_activation = str(settings.get('convolution_activation', 'relu'))
self.convolution_padding = str(settings.get('convolution_padding', 'same'))
self.initialization = str(settings.get('initialization', 'glorot_uniform'))
self.output_activation = str(settings.get('output_activation', 'sigmoid'))
self.dropout_probability = float(settings.get('dropout_probability', 0.0))
self.batch_normalization = bool(settings.get('batch_normalization', False))
self.unet_depth = int(settings.get('unet_depth', 4))
self.unet_downsample_rate = np.array(settings.get('unet_downsample_rate', [1, 1, 1]))
class OptimizerConfig(BaseConfig):
"""Configuration for the network optimizer.
Any settings dict entries passed to this initializer will be added as
configuration attributes and passed to the optimizer initializer as keyword
arguments.
Attributes
----------
klass : str
Class name of the Keras optimizer to use.
loss : str
Name of the Keras loss function to use.
"""
def __init__(self, settings):
for k, v in six.iteritems(settings):
if k != 'klass' and k != 'loss':
setattr(self, k, v)
self.klass = str(settings.get('klass', 'SGD'))
self.loss = str(settings.get('loss', 'binary_crossentropy'))
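# Example (hypothetical settings dict): {'klass': 'Adam', 'loss': 'binary_crossentropy',
# 'lr': 0.001} produces an OptimizerConfig with klass='Adam', loss='binary_crossentropy'
# and an extra `lr` attribute of 0.001, which is forwarded to the Keras optimizer
# initializer as a keyword argument.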
class TrainingConfig(BaseConfig):
"""Configuration for model training.
Attributes
----------
num_gpus : int
Number of GPUs to use for data-parallelism.
num_workers : int
Number of worker queues to use for generating training data.
gpu_batch_size : int
Per-GPU batch size. The effective batch size will be this times
``num_gpus``.
training_size : int
Number of samples to use for training **from each volume**.
validation_size : int
Number of samples to use for validation **from each volume**.
total_epochs : int
Maximum number of training epochs.
reset_generators : bool
Reset training generators after each epoch, so that the training
examples at each epoch are identical.
fill_factor_bins : sequence of float
Bin boundaries for filling fractions. If provided, sample loss will be
weighted to increase loss contribution from less-frequent bins.
Otherwise all samples are weighted equally.
partitions : dict
Dictionary mapping volume name regexes to a sequence of int indicating
number of volume partitions along each axis. Only one axis should be
greater than 1. Each volume should match at most one regex.
training_partition, validation_partition : dict
Dictionaries mapping volume name regexes to a sequence of int indicating
index of the partitions to use for training and validation,
respectively. Each volume should match at most one regex.
validation_metric : dict
        Dictionary with: 'metric', the module and function name for a metric
        function taking a true and a predicted region mask; 'threshold', a
        boolean for whether to threshold the mask for the metric (True) or use
        the mask and target probabilities (False); and 'mode', the string 'min'
        or 'max' for how to choose the best validation metric value.
patience : int
Number of epochs after the last minimal validation loss to terminate
training.
early_abort_epoch : int
If provided, training will check at the end of this epoch
whether validation loss is less than ``early_abort_loss``. If not,
training will be aborted, and may be restarted with a new seed
depending on CLI options. By default this is disabled.
early_abort_loss : float
See ``early_abort_epoch``.
label_erosion : sequence or ndarray of int
Amount to erode label mask for each training subvolume in each
dimension, in pixels. For example, a value of [0, 1, 1] will result
in erosion with a structuring element of size [1, 3, 3].
relabel_seed_component : bool
Relabel training subvolumes to only include the seeded connected
component.
augment_validation : bool
Whether validation data should also be augmented.
augment_use_both : bool
Whether to sequentially use both the augmented and unaugmented version
of each subvolume.
augment_mirrors : sequence of int
Axes along which to mirror for data augmentation.
augment_permute_axes : sequence of sequence of int
Axis permutations to use for data augmentation.
augment_missing_data : list of dict
List of dictionaries with ``axis`` and ``prob`` keys, indicating
an axis to perform data blanking along, and the probability to blank
each plane in the axis, respectively.
augment_noise : list of dict
List of dictionaries with ``axis``, ``mul`` and `add`` keys, indicating
an axis to perform independent Gaussian noise augmentation on, and the
standard deviations of 1-mean multiplicative and 0-mean additive noise,
respectively.
augment_contrast : list of dict
List of dictionaries with ``axis``, ``prob``, ``scaling_mean``,
``scaling_std``, ``center_mean`` and ``center_std`` keys. These
specify the probability to alter the contrast of a section, the mean
and standard deviation to draw from a normal distribution to scale
contrast, and the mean and standard deviation to draw from a normal
distribution to move the intensity center multiplicatively.
    augment_artifacts : list of dict
List of dictionaries with ``axis``, ``prob`` and ``volume_file``
keys, indicating an axis to perform data artifacting along, the
probability to add artifacts to each plane in the axis, and the
volume configuration file from which to draw artifacts, respectively.
"""
def __init__(self, settings):
self.num_gpus = int(settings.get('num_gpus', 1))
self.num_workers = int(settings.get('num_workers', 4))
self.gpu_batch_size = int(settings.get('gpu_batch_size', 8))
self.batch_size = self.num_gpus * self.gpu_batch_size
self.training_size = int(settings.get('training_size', 256))
self.validation_size = int(settings.get('validation_size', 256))
self.total_epochs = int(settings.get('total_epochs', 100))
self.reset_generators = bool(settings.get('reset_generators', False))
self.fill_factor_bins = settings.get('fill_factor_bins', None)
if self.fill_factor_bins is not None:
self.fill_factor_bins = np.array(self.fill_factor_bins)
self.partitions = settings.get('partitions', {'.*': [2, 1, 1]})
self.training_partition = settings.get('training_partition', {'.*': [0, 0, 0]})
self.validation_partition = settings.get('validation_partition', {'.*': [1, 0, 0]})
self.validation_metric = settings.get(
'validation_metric',
{'metric': 'diluvian.util.binary_f_score', 'threshold': True, 'mode': 'max', 'args': {'beta': 0.5}})
self.patience = int(np.array(settings.get('patience', 10)))
self.early_abort_epoch = settings.get('early_abort_epoch', None)
self.early_abort_loss = settings.get('early_abort_loss', None)
self.label_erosion = np.array(settings.get('label_erosion', [0, 1, 1]), dtype=np.int64)
self.relabel_seed_component = bool(settings.get('relabel_seed_component', False))
self.augment_validation = bool(settings.get('augment_validation', True))
self.augment_use_both = bool(settings.get('augment_use_both', True))
self.augment_mirrors = [int(x) for x in settings.get('augment_mirrors', [0, 1, 2])]
self.augment_permute_axes = settings.get('augment_permute_axes', [[0, 2, 1]])
self.augment_missing_data = settings.get('augment_missing_data', [{'axis': 0, 'prob': 0.01}])
self.augment_noise = settings.get('augment_noise', [{'axis': 0, 'mul': 0.1, 'add': 0.1}])
self.augment_contrast = settings.get(
'augment_contrast',
[{'axis': 0, 'prob': 0.05, 'scaling_mean': 0.5, 'scaling_std': 0.1,
'center_mean': 1.2, 'center_std': 0.2}])
self.augment_artifacts = settings.get('augment_artifacts', [])
class PostprocessingConfig(BaseConfig):
"""Configuration for segmentation processing after flood filling.
Attributes
----------
closing_shape : sequence or ndarray of int
Shape of the structuring element for morphological closing, in voxels.
"""
def __init__(self, settings):
self.closing_shape = settings.get('closing_shape', None)
class Config(object):
"""A complete collection of configuration objects.
Attributes
----------
random_seed : int
Seed for initializing the Python and NumPy random generators.
"""
def __init__(self, settings_collection=None):
if settings_collection is not None:
settings = settings_collection[0].copy()
for s in settings_collection:
for c in s:
if c in settings and isinstance(settings[c], dict):
settings[c].update(s[c])
else:
settings[c] = s[c]
else:
settings = {}
self.volume = VolumeConfig(settings.get('volume', {}))
self.model = ModelConfig(settings.get('model', {}))
self.network = NetworkConfig(settings.get('network', {}))
self.optimizer = OptimizerConfig(settings.get('optimizer', {}))
self.training = TrainingConfig(settings.get('training', {}))
self.postprocessing = PostprocessingConfig(settings.get('postprocessing', {}))
self.random_seed = int(settings.get('random_seed', 0))
def __str__(self):
sanitized = {}
for n, c in six.iteritems(self.__dict__):
if not isinstance(c, BaseConfig):
sanitized[n] = c
continue
sanitized[n] = {}
for k, v in six.iteritems(c.__dict__):
if isinstance(v, np.ndarray):
sanitized[n][k] = v.tolist()
else:
sanitized[n][k] = v
return toml.dumps(sanitized)
def from_toml(self, *filenames):
"""Reinitializes this Config from a list of TOML configuration files.
Existing settings are discarded. When multiple files are provided,
configuration is overridden by later files in the list.
Parameters
----------
        filenames : iterable of str
Filenames of TOML configuration files to load.
"""
settings = []
for filename in filenames:
with open(filename, 'rb') as fin:
settings.append(toml.load(fin))
return self.__init__(settings)
def to_toml(self, filename):
with open(filename, 'w') as tomlfile:
tomlfile.write(str(self))
CONFIG = Config()
CONFIG.from_toml(os.path.join(os.path.dirname(__file__), 'conf', 'default.toml'))
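# Usage sketch (hypothetical file names, assuming this module is importable as
# diluvian.config): later TOML files override earlier ones, so an
# experiment-specific override on top of the bundled defaults could look like:
#
#   from diluvian.config import CONFIG
#   CONFIG.from_toml('conf/default.toml', 'experiments/my_run.toml')
#   print(CONFIG.training.total_epochs)  # value from my_run.toml if set there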
|
the-stack_0_3077 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import numpy
import os
import torch
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, AutoModel
from benchmark_helper import create_onnxruntime_session, Precision
from gpt2_helper import GPT2ModelNoPastState, PRETRAINED_GPT2_MODELS
from quantize_helper import QuantizeHelper
from huggingface_models import MODEL_CLASSES
logger = logging.getLogger(__name__)
# Workaround: replace torch.triu with a self-defined op,
# since torch.triu cannot be exported to ONNX. See https://github.com/pytorch/pytorch/issues/32968
torch_func = {"triu": torch.triu}
def triu_onnx(x, diagonal=0, out=None):
assert out is None
assert len(x.shape) == 2 and x.size(0) == x.size(1)
torch_triu = torch_func["triu"]
template = torch_triu(torch.ones((1024, 1024), dtype=torch.uint8), diagonal)
mask = template[:x.size(0), :x.size(1)]
return torch.where(mask.bool(), x, torch.zeros_like(x))
def replace_torch_functions():
torch.triu = triu_onnx
def restore_torch_functions():
torch.triu = torch_func["triu"]
def create_onnxruntime_input(vocab_size, batch_size, sequence_length, input_names, data_type=numpy.int64):
input_ids = numpy.random.randint(low=0, high=vocab_size - 1, size=(batch_size, sequence_length), dtype=data_type)
inputs = {'input_ids': input_ids}
if "attention_mask" in input_names:
attention_mask = numpy.ones([batch_size, sequence_length], dtype=data_type)
inputs['attention_mask'] = attention_mask
if "token_type_ids" in input_names:
segment_ids = numpy.zeros([batch_size, sequence_length], dtype=data_type)
inputs['token_type_ids'] = segment_ids
return inputs
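# Usage sketch (hypothetical values): build random int64 inputs for a BERT-like
# model and feed them to an ONNX Runtime session created with the helper imported
# above; the vocabulary size and input names are illustrative only.
#
#   sess = create_onnxruntime_session(onnx_model_path, use_gpu=False)
#   ort_inputs = create_onnxruntime_input(vocab_size=30522, batch_size=1, sequence_length=128,
#                                         input_names=["input_ids", "attention_mask", "token_type_ids"])
#   outputs = sess.run(None, ort_inputs)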
def filter_inputs(inputs, input_names):
remaining_model_inputs = {}
for input_name in input_names:
remaining_model_inputs[input_name] = inputs[input_name]
return remaining_model_inputs
def flatten(inputs):
return [[flatten(i) for i in inputs] if isinstance(inputs, (list, tuple)) else inputs]
def update_flatten_list(inputs, res_list):
for i in inputs:
res_list.append(i) if not isinstance(i, (list, tuple)) else update_flatten_list(i, res_list)
return res_list
def build_dynamic_axes(example_inputs, outputs_flatten):
sequence_length = example_inputs["input_ids"].shape[-1]
dynamic_axes = {key: {0: 'batch_size', 1: 'seq_len'} for key in example_inputs.keys()}
output_names = ['output_' + str(i + 1) for i in range(len(outputs_flatten))]
for i, output_name in enumerate(output_names):
dynamic_axes[output_name] = {0: 'batch_size'}
dims = outputs_flatten[i].shape
for j, dim in enumerate(dims):
if dim == sequence_length:
dynamic_axes[output_name].update({j: 'seq_len'})
return dynamic_axes, output_names
def validate_onnx_model(onnx_model_path, example_inputs, example_outputs_flatten, use_gpu, fp16):
test_session = create_onnxruntime_session(onnx_model_path, use_gpu, enable_all_optimization=False)
if test_session is None:
logger.error(f"{onnx_model_path} is an invalid ONNX model")
return False
logger.info(f"{onnx_model_path} is a valid ONNX model")
# Compare the inference result with PyTorch or Tensorflow
example_ort_inputs = {k: t.cpu().numpy() for k, t in example_inputs.items()}
example_ort_outputs = test_session.run(None, example_ort_inputs)
if len(example_outputs_flatten) != len(example_ort_outputs):
logger.error(
f"Number of output tensors expected {len(example_outputs_flatten)}, got {len(example_ort_outputs)}")
return False
for i in range(len(example_outputs_flatten)):
abs_diff = numpy.amax(numpy.abs(example_ort_outputs[i] - example_outputs_flatten[i].cpu().numpy()))
if abs_diff > 1e-4:
logger.info(f"Max absolute diff={abs_diff} for output tensor {i}")
rtol = 5e-02 if fp16 else 1e-4
atol = 1e-01 if fp16 else 1e-4
if not numpy.allclose(example_ort_outputs[i], example_outputs_flatten[i].cpu(), rtol=rtol, atol=atol):
logger.error(f"Output tensor {i} is not close: rtol={rtol}, atol={atol}")
return False
logger.info(f"inference result of onnxruntime is validated on {onnx_model_path}")
return True
def get_onnx_file_path(onnx_dir: str, model_name: str, input_count: int, optimized_by_script: bool, use_gpu: bool,
precision: Precision, optimized_by_onnxruntime: bool, use_external_data: bool):
from re import sub
normalized_model_name = sub(r'[^a-zA-Z0-9_]', '_', model_name)
if not optimized_by_script:
filename = f"{normalized_model_name}_{input_count}"
else:
device = "gpu" if use_gpu else "cpu"
filename = f"{normalized_model_name}_{input_count}_{precision}_{device}"
if optimized_by_onnxruntime:
filename += f"_ort"
directory = onnx_dir
# ONNXRuntime will not write external data so the raw and optimized models shall be in same directory.
if use_external_data and not optimized_by_onnxruntime:
directory = os.path.join(onnx_dir, filename)
if not os.path.exists(directory):
os.makedirs(directory)
return os.path.join(directory, f"{filename}.onnx")
def add_filename_suffix(file_path: str, suffix: str) -> str:
"""
    Append a suffix to the filename (before the extension).
    Args:
        file_path: Path of the file whose filename the suffix is appended to.
        suffix: The suffix to add.
    Returns: path with the suffix appended at the end of the filename, before the extension.
"""
path = Path(file_path)
return str(path.parent.joinpath(path.stem + suffix).with_suffix(path.suffix))
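# Example: add_filename_suffix("/tmp/bert_base_3_fp32_cpu.onnx", "_ort") returns
# "/tmp/bert_base_3_fp32_cpu_ort.onnx".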
def optimize_onnx_model_by_ort(onnx_model_path, ort_model_path, use_gpu, overwrite, model_fusion_statistics):
if overwrite or not os.path.exists(ort_model_path):
Path(ort_model_path).parent.mkdir(parents=True, exist_ok=True)
from optimizer import optimize_by_onnxruntime, get_fusion_statistics
# Use onnxruntime to optimize model, which will be saved to *_ort.onnx
opt_model = optimize_by_onnxruntime(onnx_model_path,
use_gpu=use_gpu,
optimized_model_path=ort_model_path,
opt_level=99)
model_fusion_statistics[ort_model_path] = get_fusion_statistics(ort_model_path)
else:
logger.info(f"Skip optimization since model existed: {ort_model_path}")
def optimize_onnx_model(onnx_model_path, optimized_model_path, model_type, num_attention_heads, hidden_size, use_gpu,
precision, use_raw_attention_mask, overwrite, model_fusion_statistics,
use_external_data_format):
if overwrite or not os.path.exists(optimized_model_path):
Path(optimized_model_path).parent.mkdir(parents=True, exist_ok=True)
from optimizer import optimize_model
from onnx_model_bert import BertOptimizationOptions
optimization_options = BertOptimizationOptions(model_type)
optimization_options.use_raw_attention_mask(use_raw_attention_mask)
if Precision.FLOAT16 == precision:
optimization_options.enable_gelu_approximation = True
if Precision.INT8 == precision:
optimization_options.enable_embed_layer_norm = False
# Use script to optimize model.
# Use opt_level <= 1 for models to be converted to fp16, because some fused op (like FusedGemm) has only fp32 and no fp16.
# It is better to be conservative so we use opt_level=0 here, in case MemcpyFromHost is added to the graph by OnnxRuntime.
opt_model = optimize_model(onnx_model_path,
model_type,
num_heads=num_attention_heads,
hidden_size=hidden_size,
opt_level=0,
optimization_options=optimization_options,
use_gpu=use_gpu,
only_onnxruntime=False)
if model_type == 'bert_keras':
opt_model.use_dynamic_axes()
model_fusion_statistics[optimized_model_path] = opt_model.get_fused_operator_statistics()
if Precision.FLOAT16 == precision:
opt_model.convert_model_float32_to_float16()
opt_model.save_model_to_file(optimized_model_path, use_external_data_format)
else:
logger.info(f"Skip optimization since model existed: {optimized_model_path}")
def modelclass_dispatcher(model_name, custom_model_class):
if (custom_model_class != None):
if (custom_model_class in MODEL_CLASSES):
return custom_model_class
else:
raise Exception("Valid model class: " + ' '.join(MODEL_CLASSES))
if model_name in PRETRAINED_GPT2_MODELS:
return "GPT2ModelNoPastState"
import re
if (re.search('-squad$', model_name) != None):
return "AutoModelForQuestionAnswering"
elif (re.search('-mprc$', model_name) != None):
return "AutoModelForSequenceClassification"
elif (re.search('gpt2', model_name) != None):
return "AutoModelWithLMHead"
return "AutoModel"
def load_pretrained_model(model_name, config, cache_dir, custom_model_class, is_tf_model=False):
model_class_name = modelclass_dispatcher(model_name, custom_model_class)
if model_class_name == "GPT2ModelNoPastState":
if is_tf_model:
raise NotImplementedError("TFGPT2ModelNoPastState is currently not supported.")
else:
return GPT2ModelNoPastState.from_pretrained(model_name, config=config, cache_dir=cache_dir)
if is_tf_model:
model_class_name = 'TF' + model_class_name
transformers_module = __import__("transformers", fromlist=[model_class_name])
model_class = getattr(transformers_module, model_class_name)
use_cdn = False if model_name == 't5-11b' else True
return model_class.from_pretrained(model_name, config=config, cache_dir=cache_dir, use_cdn=use_cdn)
def load_pt_model(model_name, model_class, cache_dir):
config = AutoConfig.from_pretrained(model_name, cache_dir=cache_dir)
if hasattr(config, 'return_dict'):
config.return_dict = False
model = load_pretrained_model(model_name, config=config, cache_dir=cache_dir, custom_model_class=model_class)
return config, model
def load_tf_model(model_name, model_class, cache_dir):
config = AutoConfig.from_pretrained(model_name, cache_dir=cache_dir)
model = load_pretrained_model(model_name,
config=config,
cache_dir=cache_dir,
custom_model_class=model_class,
is_tf_model=True)
return config, model
# For test only
def load_pt_model_from_tf(model_name):
    # Note that we could get a PyTorch model from TF, but the model source and its structure in this case differ from
    # those produced directly by load_pt_model() and load_tf_model(), even with the same name. Therefore it should not be used for comparing with them.
from convert_tf_models_to_pytorch import tf2pt_pipeline
config, model = tf2pt_pipeline(model_name)
return config, model
def validate_and_optimize_onnx(model_name, use_external_data_format, model_type, onnx_dir, input_names, use_gpu,
precision, optimize_onnx, validate_onnx, use_raw_attention_mask, overwrite, config,
model_fusion_statistics, onnx_model_path, example_inputs, example_outputs_flatten):
is_valid_onnx_model = True
if validate_onnx:
is_valid_onnx_model = validate_onnx_model(onnx_model_path, example_inputs, example_outputs_flatten, use_gpu,
False)
if optimize_onnx or precision == Precision.FLOAT16 or precision == Precision.INT8: # Use script (optimizer.py) to optimize
optimized_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), True, use_gpu, precision,
False, use_external_data_format)
optimize_onnx_model(onnx_model_path, optimized_model_path, model_type, config.num_attention_heads,
config.hidden_size, use_gpu, precision, use_raw_attention_mask, overwrite,
model_fusion_statistics, use_external_data_format)
onnx_model_path = optimized_model_path
if validate_onnx:
is_valid_onnx_model = validate_onnx_model(onnx_model_path, example_inputs, example_outputs_flatten, use_gpu,
precision == Precision.FLOAT16)
if precision == Precision.INT8:
logger.info(f"Quantizing model: {onnx_model_path}")
QuantizeHelper.quantize_onnx_model(onnx_model_path, onnx_model_path, use_external_data_format)
logger.info(f"Finished quantizing model: {onnx_model_path}")
else: # Use OnnxRuntime to optimize
if is_valid_onnx_model:
ort_model_path = add_filename_suffix(onnx_model_path, '_ort')
optimize_onnx_model_by_ort(onnx_model_path, ort_model_path, use_gpu, overwrite, model_fusion_statistics)
return onnx_model_path, is_valid_onnx_model, config.vocab_size
def export_onnx_model_from_pt(model_name, opset_version, use_external_data_format, model_type, model_class, cache_dir,
onnx_dir, input_names, use_gpu, precision, optimize_onnx, validate_onnx,
use_raw_attention_mask, overwrite, model_fusion_statistics):
config, model = load_pt_model(model_name, model_class, cache_dir)
# config, model = load_pt_model_from_tf(model_name)
model.cpu()
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
max_input_size = tokenizer.max_model_input_sizes[
model_name] if model_name in tokenizer.max_model_input_sizes else 1024
example_inputs = tokenizer.encode_plus("This is a sample input", return_tensors="pt")
example_inputs = filter_inputs(example_inputs, input_names)
example_outputs = model(**example_inputs)
assert isinstance(example_outputs, (list, tuple)), f"type of output is not list or tuple: {type(example_outputs)}"
# Flatten is needed for gpt2 and distilgpt2.
example_outputs_flatten = flatten(example_outputs)
example_outputs_flatten = update_flatten_list(example_outputs_flatten, [])
onnx_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), False, use_gpu, precision, False,
use_external_data_format)
if overwrite or not os.path.exists(onnx_model_path):
logger.info("Exporting ONNX model to {}".format(onnx_model_path))
Path(onnx_model_path).parent.mkdir(parents=True, exist_ok=True)
dynamic_axes, output_names = build_dynamic_axes(example_inputs, example_outputs_flatten)
replace_torch_functions()
torch.onnx.export(model=model,
args=tuple(example_inputs.values()),
f=onnx_model_path,
input_names=list(example_inputs.keys()),
output_names=output_names,
example_outputs=example_outputs,
dynamic_axes=dynamic_axes,
do_constant_folding=True,
opset_version=opset_version,
use_external_data_format=use_external_data_format)
restore_torch_functions()
else:
logger.info(f"Skip export since model existed: {onnx_model_path}")
onnx_model_file, is_valid_onnx_model, vocab_size = validate_and_optimize_onnx(
model_name, use_external_data_format, model_type, onnx_dir, input_names, use_gpu, precision, optimize_onnx,
validate_onnx, use_raw_attention_mask, overwrite, config, model_fusion_statistics, onnx_model_path,
example_inputs, example_outputs_flatten)
return onnx_model_file, is_valid_onnx_model, vocab_size, max_input_size
def export_onnx_model_from_tf(model_name, opset_version, use_external_data_format, model_type, model_class, cache_dir,
onnx_dir, input_names, use_gpu, precision, optimize_onnx, validate_onnx,
use_raw_attention_mask, overwrite, model_fusion_statistics):
# Use CPU to export
import tensorflow as tf
tf.config.set_visible_devices([], 'GPU')
config, model = load_tf_model(model_name, model_class, cache_dir)
model._saved_model_inputs_spec = None
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
max_input_size = tokenizer.max_model_input_sizes[
model_name] if model_name in tokenizer.max_model_input_sizes else 1024
example_inputs = tokenizer.encode_plus("This is a sample input",
return_tensors="tf",
max_length=max_input_size,
pad_to_max_length=True,
truncation=True)
example_inputs = filter_inputs(example_inputs, input_names)
example_outputs = model(example_inputs, training=False)
# Flatten is needed for gpt2 and distilgpt2.
example_outputs_flatten = flatten(example_outputs)
example_outputs_flatten = update_flatten_list(example_outputs_flatten, [])
onnx_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), False, use_gpu, precision, False,
use_external_data_format)
if overwrite or not os.path.exists(onnx_model_path):
logger.info("Exporting ONNX model to {}".format(onnx_model_path))
Path(onnx_model_path).parent.mkdir(parents=True, exist_ok=True)
import keras2onnx
onnx_model = keras2onnx.convert_keras(model, model.name, target_opset=opset_version)
keras2onnx.save_model(onnx_model, onnx_model_path)
else:
logger.info(f"Skip export since model existed: {onnx_model_path}")
model_type = model_type + '_keras'
onnx_model_file, is_valid_onnx_model, vocab_size = validate_and_optimize_onnx(
model_name, use_external_data_format, model_type, onnx_dir, input_names, use_gpu, precision, optimize_onnx,
validate_onnx, use_raw_attention_mask, overwrite, config, model_fusion_statistics, onnx_model_path,
example_inputs, example_outputs_flatten)
return onnx_model_file, is_valid_onnx_model, vocab_size, max_input_size
|
the-stack_0_3080 | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
`stackdriver_service_monitoring.py`
Stackdriver Service Monitoring exporter class.
"""
import difflib
import json
import logging
import os
import google.api_core.exceptions
from google.cloud.monitoring_v3 import ServiceMonitoringServiceClient
from google.protobuf.json_format import MessageToJson
from slo_generator.backends.stackdriver import StackdriverBackend
from slo_generator.constants import NO_DATA
from slo_generator.utils import dict_snake_to_caml
LOGGER = logging.getLogger(__name__)
SID_GAE = 'gae:{project_id}_{module_id}'
SID_CLOUD_ENDPOINT = 'ist:{project_id}-{service}'
SID_CLUSTER_ISTIO = (
'ist:{project_id}-zone-{location}-{cluster_name}-{service_namespace}-'
'{service_name}')
SID_MESH_ISTIO = ('ist:{mesh_uid}-{service_namespace}-{service_name}')
class StackdriverServiceMonitoringBackend:
"""Stackdriver Service Monitoring backend class.
Args:
project_id (str): Stackdriver host project id.
client (google.cloud.monitoring_v3.ServiceMonitoringServiceClient):
Existing Service Monitoring API client. Initialize a new client if
omitted.
"""
def __init__(self, project_id, client=None):
self.project_id = project_id
self.client = client
if client is None:
self.client = ServiceMonitoringServiceClient()
self.parent = self.client.project_path(project_id)
self.workspace_path = f'workspaces/{project_id}'
self.project_path = f'projects/{project_id}'
def good_bad_ratio(self, timestamp, window, slo_config):
"""Good bad ratio method.
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
dict: SLO config.
"""
return self.retrieve_slo(timestamp, window, slo_config)
def distribution_cut(self, timestamp, window, slo_config):
"""Distribution cut method.
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
dict: SLO config.
"""
return self.retrieve_slo(timestamp, window, slo_config)
def basic(self, timestamp, window, slo_config):
"""Basic method (automatic SLOs for GAE / GKE (Istio) and Cloud
Endpoints).
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
dict: SLO config.
"""
return self.retrieve_slo(timestamp, window, slo_config)
def window(self, timestamp, window, slo_config):
"""Window-based SLI method.
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
dict: SLO config.
"""
return self.retrieve_slo(timestamp, window, slo_config)
# pylint: disable=unused-argument
def delete(self, timestamp, window, slo_config):
"""Delete method.
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
dict: SLO config.
"""
return self.delete_slo(window, slo_config)
def retrieve_slo(self, timestamp, window, slo_config):
"""Get SLI value from Stackdriver Monitoring API.
Args:
timestamp (int): UNIX timestamp.
window (int): Window in seconds.
slo_config (dict): SLO configuration.
Returns:
dict: SLO config.
"""
# Get or create service
service = self.get_service(slo_config)
if service is None:
service = self.create_service(slo_config)
LOGGER.debug(service)
# Get or create SLO
slo = self.get_slo(window, slo_config)
if not slo:
slo = self.create_slo(window, slo_config)
        LOGGER.debug(slo)
# Now that we have our SLO, retrieve the TimeSeries from Stackdriver
# Monitoring API for that particular SLO id.
metric_filter = SSM.build_slo_id(window, slo_config, full=True)
filter = f"select_slo_counts(\"{metric_filter}\")"
# Query SLO timeseries
stackdriver = StackdriverBackend(self.project_id)
timeseries = stackdriver.query(timestamp,
window,
filter,
aligner='ALIGN_SUM',
reducer='REDUCE_SUM',
group_by=['metric.labels.event_type'])
timeseries = list(timeseries)
good_event_count, bad_event_count = SSM.count(timeseries)
return (good_event_count, bad_event_count)
@staticmethod
def count(timeseries):
"""Extract good_count, bad_count tuple from Stackdriver Monitoring API
response.
Args:
timeseries (list): List of timeseries objects.
Returns:
tuple: A tuple (good_event_count, bad_event_count).
"""
good_event_count, bad_event_count = NO_DATA, NO_DATA
for timeserie in timeseries:
event_type = timeserie.metric.labels['event_type']
value = timeserie.points[0].value.double_value
if event_type == 'bad':
bad_event_count = value
elif event_type == 'good':
good_event_count = value
return good_event_count, bad_event_count
def create_service(self, slo_config):
"""Create Service object in Stackdriver Service Monitoring API.
Args:
slo_config (dict): SLO configuration.
Returns:
dict: Stackdriver Service Monitoring API response.
"""
LOGGER.debug("Creating service ...")
service_json = SSM.build_service(slo_config)
service_id = SSM.build_service_id(slo_config)
service = self.client.create_service(self.project_path,
service_json,
service_id=service_id)
LOGGER.info(
f'Service "{service_id}" created successfully in Stackdriver '
f'Service Monitoring API.')
return SSM.to_json(service)
def get_service(self, slo_config):
"""Get Service object from Stackdriver Service Monitoring API.
Args:
slo_config (dict): SLO configuration.
Returns:
dict: Service config.
"""
# Look for API services in workspace matching our config.
service_id = SSM.build_service_id(slo_config)
services = list(self.client.list_services(self.workspace_path))
matches = [
service for service in services
if service.name.split("/")[-1] == service_id
]
# If no match is found for our service name in the API, raise an
# exception if the service should have been auto-added (method 'basic'),
# else output a warning message.
if not matches:
msg = (f'Service "{service_id}" does not exist in '
f'workspace "{self.project_id}"')
method = slo_config['backend']['method']
if method == 'basic':
sids = [service.name.split("/")[-1] for service in services]
LOGGER.debug(
f'List of services in workspace {self.project_id}: {sids}')
LOGGER.error(msg)
raise Exception(msg)
LOGGER.error(msg)
return None
# Match found in API, return it.
service = matches[0]
LOGGER.debug(f'Found matching service "{service.name}"')
return SSM.to_json(service)
@staticmethod
def build_service(slo_config):
"""Build service JSON in Stackdriver Monitoring API from SLO
configuration.
Args:
slo_config (dict): SLO configuration.
Returns:
dict: Service JSON in Stackdriver Monitoring API.
"""
service_id = SSM.build_service_id(slo_config)
display_name = slo_config.get('service_display_name', service_id)
service = {'display_name': display_name, 'custom': {}}
return service
@staticmethod
def build_service_id(slo_config, dest_project_id=None, full=False):
"""Build service id from SLO configuration.
Args:
slo_config (dict): SLO configuration.
dest_project_id (str, optional): Project id for service if different
than the workspace project id.
full (bool): If True, return full service resource id including
project path.
Returns:
str: Service id.
"""
service_name = slo_config['service_name']
feature_name = slo_config['feature_name']
backend = slo_config['backend']
project_id = backend['project_id']
measurement = backend['measurement']
app_engine = measurement.get('app_engine')
cluster_istio = measurement.get('cluster_istio')
mesh_istio = measurement.get('mesh_istio')
cloud_endpoints = measurement.get('cloud_endpoints')
# Use auto-generated ids for 'custom' SLOs, use system-generated ids
# for all other types of SLOs.
if app_engine:
service_id = SID_GAE.format_map(app_engine)
dest_project_id = app_engine['project_id']
elif cluster_istio:
service_id = SID_CLUSTER_ISTIO.format_map(cluster_istio)
dest_project_id = cluster_istio['project_id']
elif mesh_istio:
service_id = SID_MESH_ISTIO.format_map(mesh_istio)
elif cloud_endpoints:
service_id = SID_CLOUD_ENDPOINT.format_map(cloud_endpoints)
            dest_project_id = cloud_endpoints['project_id']
else:
service_id = f'{service_name}-{feature_name}'
if full:
if dest_project_id:
return f'projects/{dest_project_id}/services/{service_id}'
return f'projects/{project_id}/services/{service_id}'
return service_id
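    # Example (hypothetical values): a cluster_istio measurement with
    # project_id='my-proj', location='us-central1-a', cluster_name='prod',
    # service_namespace='default' and service_name='cart' yields
    # 'ist:my-proj-zone-us-central1-a-prod-default-cart'; with full=True the id is
    # prefixed with 'projects/my-proj/services/'.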
def create_slo(self, window, slo_config):
"""Create SLO object in Stackdriver Service Monitoring API.
Args:
window (int): Window (in seconds).
slo_config (dict): SLO config.
Returns:
dict: Service Management API response.
"""
slo_json = SSM.build_slo(window, slo_config)
slo_id = SSM.build_slo_id(window, slo_config)
parent = SSM.build_service_id(slo_config, full=True)
slo = self.client.create_service_level_objective(
parent, slo_json, service_level_objective_id=slo_id)
return SSM.to_json(slo)
@staticmethod
def build_slo(window, slo_config): # pylint: disable=R0912,R0915
"""Get SLO JSON representation in Service Monitoring API from SLO
configuration.
Args:
window (int): Window (in seconds).
slo_config (dict): SLO Configuration.
Returns:
dict: SLO JSON configuration.
"""
measurement = slo_config['backend'].get('measurement', {})
method = slo_config['backend']['method']
description = slo_config['slo_description']
target = slo_config['slo_target']
minutes, _ = divmod(window, 60)
hours, _ = divmod(minutes, 60)
display_name = f'{description} ({hours}h)'
slo = {
'display_name': display_name,
'goal': target,
'rolling_period': {
'seconds': window
}
}
filter_valid = measurement.get('filter_valid', "")
if method == 'basic':
methods = measurement.get('method', [])
locations = measurement.get('location', [])
versions = measurement.get('version', [])
threshold = measurement.get('latency', {}).get('threshold')
slo['service_level_indicator'] = {'basic_sli': {}}
basic_sli = slo['service_level_indicator']['basic_sli']
if methods:
basic_sli['method'] = methods
if locations:
basic_sli['location'] = locations
if versions:
basic_sli['version'] = versions
if threshold:
basic_sli['latency'] = {
'threshold': {
'seconds': 0,
'nanos': int(threshold) * 10**6
}
}
else:
basic_sli['availability'] = {}
elif method == 'good_bad_ratio':
filter_good = measurement.get('filter_good', "")
filter_bad = measurement.get('filter_bad', "")
slo['service_level_indicator'] = {
'request_based': {
'good_total_ratio': {}
}
}
sli = slo['service_level_indicator']
ratio = sli['request_based']['good_total_ratio']
if filter_good:
ratio['good_service_filter'] = filter_good
if filter_bad:
ratio['bad_service_filter'] = filter_bad
if filter_valid:
ratio['total_service_filter'] = filter_valid
elif method == 'distribution_cut':
range_min = measurement.get('range_min', 0)
range_max = measurement['range_max']
slo['service_level_indicator'] = {
'request_based': {
'distribution_cut': {
'distribution_filter': filter_valid,
'range': {
'max': float(range_max)
}
}
}
}
sli = slo['service_level_indicator']['request_based']
if range_min != 0:
sli['distribution_cut']['range']['min'] = float(range_min)
elif method == 'windows':
filter = measurement.get('filter')
# threshold = conf.get('threshold')
# mean_in_range = conf.get('filter')
# sum_in_range = conf.get('filter')
slo['service_level_indicator'] = {
'windows_based': {
'window_period': window,
'good_bad_metric_filter': filter,
# 'good_total_ratio_threshold': {
# object (PerformanceThreshold)
# },
# 'metricMeanInRange': {
# object (MetricRange)
# },
# 'metricSumInRange': {
# object (MetricRange)
# }
}
}
else:
raise Exception(f'Method "{method}" is not supported.')
return slo
def get_slo(self, window, slo_config):
"""Get SLO object from Stackriver Service Monitoring API.
Args:
service_id (str): Service identifier.
window (int): Window in seconds.
slo_config (dict): SLO config.
Returns:
dict: API response.
"""
service_path = SSM.build_service_id(slo_config, full=True)
        LOGGER.debug(f'Getting SLO for "{service_path}" ...')
slos = self.list_slos(service_path)
slo_local_id = SSM.build_slo_id(window, slo_config)
slo_json = SSM.build_slo(window, slo_config)
slo_json = SSM.convert_slo_to_ssm_format(slo_json)
# Loop through API response to find an existing SLO that corresponds to
# our configuration.
for slo in slos:
slo_remote_id = slo['name'].split("/")[-1]
equal = slo_remote_id == slo_local_id
if equal:
LOGGER.debug(f'Found existing SLO "{slo_remote_id}".')
LOGGER.debug(f'SLO object: {slo}')
strict_equal = SSM.compare_slo(slo_json, slo)
if strict_equal:
return slo
return self.update_slo(window, slo_config)
LOGGER.warning('No SLO found matching configuration.')
LOGGER.debug(f'SLOs from Stackdriver Monitoring API: {slos}')
LOGGER.debug(f'SLO config converted: {slo_json}')
return None
def update_slo(self, window, slo_config):
"""Update an existing SLO.
Args:
window (int): Window (in seconds)
slo_config (dict): SLO configuration.
Returns:
dict: API response.
"""
slo_json = SSM.build_slo(window, slo_config)
slo_id = SSM.build_slo_id(window, slo_config, full=True)
LOGGER.warning(f"Updating SLO {slo_id} ...")
slo_json['name'] = slo_id
return SSM.to_json(
self.client.update_service_level_objective(slo_json))
def list_slos(self, service_path):
"""List all SLOs from Stackdriver Service Monitoring API.
Args:
service_path (str): Service path in the form
'projects/{project_id}/services/{service_id}'.
slo_config (dict): SLO configuration.
Returns:
dict: API response.
"""
slos = self.client.list_service_level_objectives(service_path)
slos = list(slos)
LOGGER.debug(f"{len(slos)} SLOs found in Service Monitoring API.")
# LOGGER.debug(slos)
return [SSM.to_json(slo) for slo in slos]
def delete_slo(self, window, slo_config):
"""Delete SLO from Stackdriver Monitoring API.
Args:
window (int): Window (in seconds).
slo_config: SLO configuration.
Returns:
dict: API response.
"""
slo_path = SSM.build_slo_id(window, slo_config, full=True)
LOGGER.info(f'Deleting SLO "{slo_path}"')
try:
return self.client.delete_service_level_objective(slo_path)
except google.api_core.exceptions.NotFound:
LOGGER.warning(
f'SLO "{slo_path}" does not exist in Service Monitoring API. '
f'Skipping.')
return None
@staticmethod
def build_slo_id(window, slo_config, full=False):
"""Build SLO id from SLO configuration.
Args:
slo_config (dict): SLO configuration.
full (bool): If True, return full resource id including project.
Returns:
str: SLO id.
"""
if 'slo_id' in slo_config:
slo_id_part = slo_config['slo_id']
slo_id = f'{slo_id_part}-{window}'
else:
slo_name = slo_config['slo_name']
slo_id = f'{slo_name}-{window}'
if full:
service_path = SSM.build_service_id(slo_config, full=True)
return f'{service_path}/serviceLevelObjectives/{slo_id}'
return slo_id
@staticmethod
def compare_slo(slo1, slo2):
"""Compares 2 SLO configurations to see if they correspond to the same
SLO.
An SLO is deemed the same if the whole configuration is similar, except
for the `goal` field that should be adjustable.
Args:
slo1 (dict): Service Monitoring API SLO configuration to compare.
slo2 (dict): Service Monitoring API SLO configuration to compare.
Returns:
bool: True if the SLOs match, False otherwise.
"""
exclude_keys = ["name"]
slo1_copy = {k: v for k, v in slo1.items() if k not in exclude_keys}
slo2_copy = {k: v for k, v in slo2.items() if k not in exclude_keys}
local_json = json.dumps(slo1_copy, sort_keys=True)
remote_json = json.dumps(slo2_copy, sort_keys=True)
if os.environ.get('DEBUG') == '2':
LOGGER.info("----------")
LOGGER.info(local_json)
LOGGER.info("----------")
LOGGER.info(remote_json)
LOGGER.info("----------")
LOGGER.info(SSM.string_diff(local_json, remote_json))
return local_json == remote_json
@staticmethod
def string_diff(string1, string2):
"""Diff 2 strings. Used to print comparison of JSONs for debugging.
Args:
string1 (str): String 1.
string2 (str): String 2.
Returns:
list: List of messages pointing out differences.
"""
lines = []
for idx, string in enumerate(difflib.ndiff(string1, string2)):
if string[0] == ' ':
continue
if string[0] == '-':
info = u'Delete "{}" from position {}'.format(string[-1], idx)
lines.append(info)
elif string[0] == '+':
info = u'Add "{}" to position {}'.format(string[-1], idx)
lines.append(info)
return lines
@staticmethod
def convert_slo_to_ssm_format(slo):
"""Convert SLO JSON to Service Monitoring API format.
Address edge cases, like `duration` object computation.
Args:
slo (dict): SLO JSON object to be converted to Stackdriver Service
Monitoring API format.
Returns:
dict: SLO configuration in Service Monitoring API format.
"""
# Our local JSON is in snake case, convert it to Caml case.
data = dict_snake_to_caml(slo)
# The `rollingPeriod` field is in Duration format, convert it.
try:
period = data['rollingPeriod']
data['rollingPeriod'] = SSM.convert_duration_to_string(period)
except KeyError:
pass
# The `latency` field is in Duration format, convert it.
try:
latency = data['serviceLevelIndicator']['basicSli']['latency']
threshold = latency['threshold']
latency['threshold'] = SSM.convert_duration_to_string(threshold)
except KeyError:
pass
return data
@staticmethod
def convert_duration_to_string(duration):
"""Convert a duration object to a duration string (in seconds).
Args:
duration (dict): Duration dictionary.
Returns:
str: Duration string.
"""
duration_seconds = 0.000
if 'seconds' in duration:
duration_seconds += duration['seconds']
if 'nanos' in duration:
duration_seconds += duration['nanos'] * 10**(-9)
if duration_seconds.is_integer():
duration_str = int(duration_seconds)
else:
duration_str = "{:0.3f}".format(duration_seconds)
return str(duration_str) + 's'
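    # Examples: {'seconds': 30} -> '30s'; {'seconds': 0, 'nanos': 500000000} -> '0.500s'.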
@staticmethod
def to_json(response):
"""Convert a Stackdriver Service Monitoring API response to JSON
format.
Args:
response (obj): Response object.
Returns:
dict: Response object serialized as JSON.
"""
return json.loads(MessageToJson(response))
SSM = StackdriverServiceMonitoringBackend
|
the-stack_0_3083 | from bs4 import BeautifulSoup
import requests, TTS, time, sys
from TTS import *
# test game url hawks v bucks https://www.espn.com/nba/playbyplay/_/gameId/401337344
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}
def get_play_table(url):
game = requests.get(url, headers=headers)
soup = BeautifulSoup(game.text, 'html.parser')
soup = soup.findAll("div", class_="accordion-content collapse in")[1]
return soup.findAll('tr')[1:]
def get_team_names(url):
game = requests.get(url, headers=headers)
soup = BeautifulSoup(game.text, 'html.parser')
soup = soup.find("title")
title_array = soup.text.split(" ")
team1_name = ""
team1_abbr = ""
team2_name = ""
team2_abbr = ""
with open("Teams.txt") as file:
for line in file:
if title_array[0] in line:
team1_name = title_array[0]
team1_abbr = line.split(" ")[0].lower()
elif title_array[2] in line:
team2_name = title_array[2]
team2_abbr = line.split(" ")[0].lower()
return (team1_name, team1_abbr, team2_name, team2_abbr)
def get_team_name(name_list, team_logo_string):
# print(team_logo_string)
if name_list[1] in team_logo_string:
return name_list[0]
elif name_list[3] in team_logo_string:
return name_list[2]
else:
print("Team name error")
def compare_times(last_time, maybe_new_time):
    # Return -1 if after last time, 0 if same as last time, 1 if before (less time on clock), -2 if new quarter
last_split = last_time.split(":")
new_split = maybe_new_time.split(":")
print(last_split, new_split)
last_seconds_total = 0
new_seconds_total = 0
if len(last_split) == 1:
last_seconds_total = float(last_split[0])
elif len(last_split) == 2:
last_seconds_total = float(last_split[0])*60 + float(last_split[1])
else:
print("Last split error: ", last_split)
if len(new_split) == 1:
new_seconds_total = float(new_split[0])
elif len(new_split) == 2:
new_seconds_total = float(new_split[0])*60 + float(new_split[1])
else:
print("New split error: ", new_split)
if len(last_split) < len(new_split):
# this is a new quarter
return -2
else: #same quarter
if last_seconds_total > new_seconds_total:
return 1
elif last_seconds_total < new_seconds_total:
return -1
else:
return 0
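# Examples: compare_times("10:30", "9:45") returns 1 (the clock ran down within the
# same quarter), compare_times("9:45", "10:30") returns -1, and
# compare_times("45.0", "11:59") returns -2 because the timestamp regained a minutes
# field, i.e. a new quarter started.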
def get_zero_time(score_table): #this breaks if there is no zero time
play = score_table[0]
time = play.find('td', class_='time-stamp').text
return str(time)
def get_min_plays(score_table):
length = len(score_table)
if length < 15:
return length
return 15
def start_and_read(last_time, score_table, name_list):
print("A play was made at a new time.")
for i in range(get_min_plays(score_table)-1, -1, -1):
play = score_table[i]
time = play.find('td', class_='time-stamp').text
desc = play.find('td', class_='game-details').text
score = play.find('td', class_='combined-score').text
team_logo = play.find('img')['src']
print(" Zero Time: {} Read Time: {} ".format(last_time, time))
comparison = compare_times(last_time, time)
if comparison == 1:
# print(time, desc, score)
read("{} at {}.".format(get_team_name(name_list, team_logo), time))
# read(time)
if "three" in desc and "makes" in desc:
playBang()
elif "dunk" in desc and "makes" in desc:
playDunk()
elif "makes free throw" in desc:
playFreethrow()
read(desc)
if ("makes" in desc):
read(score)
elif comparison == -2:
return -1
return 0
def new_quarter(url, name_list):
last_time = "13:00"
last_count = 0
while True:
score_table = get_play_table(url)
zero_time = get_zero_time(score_table)
print("Last time: ", last_time, " Last count: ", last_count, " Zero time: ", zero_time)
if last_time != zero_time:
if start_and_read(last_time, score_table, name_list) == -1:
break
last_time = zero_time # update lasttime
# elif count_recent_score(score_table, last_time) > last_count:
# last_count = add_and_read(last_time, last_count, score_table)
else:
time.sleep(10)
def main():
if len(sys.argv) != 3:
print("Usage: run.py <espn play by play url> <quarter #>")
else:
init_tts()
url = sys.argv[1]
quarter_num = int(sys.argv[2])
name_list = get_team_names(url)
for i in range(quarter_num, 5):
new_quarter(url, name_list)
stop_tts()
if __name__ == '__main__':
main()
# get_score_table() |
the-stack_0_3089 | """Tests for the Wemo light entity via the bridge."""
import pytest
import pywemo
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.components.wemo.light import MIN_TIME_BETWEEN_SCANS
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from . import entity_test_helpers
from tests.async_mock import create_autospec, patch
@pytest.fixture
def pywemo_model():
"""Pywemo Bridge models use the light platform (WemoLight class)."""
return "Bridge"
# Note: The ordering of where the pywemo_bridge_light comes in test arguments matters.
# In test methods, the pywemo_bridge_light fixture argument must come before the
# wemo_entity fixture argument.
@pytest.fixture(name="pywemo_bridge_light")
def pywemo_bridge_light_fixture(pywemo_device):
"""Fixture for Bridge.Light WeMoDevice instances."""
light = create_autospec(pywemo.ouimeaux_device.bridge.Light, instance=True)
light.uniqueID = pywemo_device.serialnumber
light.name = pywemo_device.name
light.bridge = pywemo_device
light.state = {"onoff": 0}
pywemo_device.Lights = {pywemo_device.serialnumber: light}
return light
def _bypass_throttling():
"""Bypass the util.Throttle on the update_lights method."""
utcnow = dt_util.utcnow()
def increment_and_return_time():
nonlocal utcnow
utcnow += MIN_TIME_BETWEEN_SCANS
return utcnow
return patch("homeassistant.util.utcnow", side_effect=increment_and_return_time)
async def test_async_update_locked_multiple_updates(
hass, pywemo_registry, pywemo_bridge_light, wemo_entity, pywemo_device
):
"""Test that two state updates do not proceed at the same time."""
pywemo_device.bridge_update.reset_mock()
with _bypass_throttling():
await entity_test_helpers.test_async_update_locked_multiple_updates(
hass,
pywemo_registry,
wemo_entity,
pywemo_device,
update_polling_method=pywemo_device.bridge_update,
)
async def test_async_update_with_timeout_and_recovery(
hass, pywemo_bridge_light, wemo_entity, pywemo_device
):
"""Test that the entity becomes unavailable after a timeout, and that it recovers."""
await entity_test_helpers.test_async_update_with_timeout_and_recovery(
hass, wemo_entity, pywemo_device
)
async def test_async_locked_update_with_exception(
hass, pywemo_bridge_light, wemo_entity, pywemo_device
):
"""Test that the entity becomes unavailable when communication is lost."""
with _bypass_throttling():
await entity_test_helpers.test_async_locked_update_with_exception(
hass,
wemo_entity,
pywemo_device,
update_polling_method=pywemo_device.bridge_update,
)
async def test_light_update_entity(
hass, pywemo_registry, pywemo_bridge_light, wemo_entity
):
"""Verify that the light performs state updates."""
await async_setup_component(hass, HA_DOMAIN, {})
# On state.
pywemo_bridge_light.state = {"onoff": 1}
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_ON
# Off state.
pywemo_bridge_light.state = {"onoff": 0}
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: [wemo_entity.entity_id]},
blocking=True,
)
assert hass.states.get(wemo_entity.entity_id).state == STATE_OFF
|
the-stack_0_3093 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
class TrtConvertConv2dFusionTest(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
inputs = program_config.inputs
weights = program_config.weights
attrs = [
program_config.ops[i].attrs for i in range(len(program_config.ops))
]
if inputs['input_data'].shape[
1] != weights['conv2d_weight'].shape[1] * attrs[0]['groups']:
return False
if attrs[0]['groups'] <= 1:
return False
ver = paddle_infer.get_trt_compile_version()
        if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 7000:
if attrs[0]['padding_algorithm'] == 'SAME' and (
attrs[0]['strides'][0] > 1 or attrs[0]['strides'][1] > 1):
return False
return True
def sample_program_configs(self):
self.trt_param.workspace_size = 1073741824
def generate_input1(batch, attrs: List[Dict[str, Any]]):
return np.ones([batch, attrs[0]['groups'] * 3, 64,
64]).astype(np.float32)
def generate_weight1(attrs: List[Dict[str, Any]]):
return np.random.random([24, 3, 3, 3]).astype(np.float32)
def generate_weight2(attrs: List[Dict[str, Any]]):
return np.random.random([24, 1, 1]).astype(np.float32)
for batch in [1, 4]:
for strides in [[1, 1], [2, 2], [1, 2]]:
for paddings in [[0, 3], [1, 2, 3, 4]]:
for groups in [2, 3]:
for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']:
for dilations in [[1, 1], [2, 2], [1, 2]]:
for data_format in ['NCHW']:
dics = [{
"data_fromat": data_format,
"dilations": dilations,
"padding_algorithm": padding_algorithm,
"groups": groups,
"paddings": paddings,
"strides": strides,
"data_format": data_format
}, {
"axis": 1
}]
ops_config = [{
"op_type": "conv2d",
"op_inputs": {
"Input": ["input_data"],
"Filter": ["conv2d_weight"]
},
"op_outputs": {
"Output": ["conv_output_data"]
},
"op_attrs": dics[0]
}, {
"op_type": "elementwise_add",
"op_inputs": {
"X": ["conv_output_data"],
"Y": ["elementwise_weight"]
},
"op_outputs": {
"Out": ["output_data"]
},
"op_attrs": dics[1]
}]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={
"conv2d_weight":
TensorConfig(data_gen=partial(
generate_weight1, dics)),
"elementwise_weight":
TensorConfig(data_gen=partial(
generate_weight2, dics))
},
inputs={
"input_data":
TensorConfig(data_gen=partial(
generate_input1, batch, dics))
},
outputs=["output_data"])
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
input_groups = attrs[0]['groups'] * 3
self.dynamic_shape.min_input_shape = {
"input_data": [1, input_groups, 32, 32],
"output_data": [1, 24, 32, 32]
}
self.dynamic_shape.max_input_shape = {
"input_data": [4, input_groups, 64, 64],
"output_data": [4, 24, 64, 64]
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, input_groups, 64, 64],
"output_data": [1, 24, 64, 64]
}
def clear_dynamic_shape():
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
return 1, 2
attrs = [
program_config.ops[i].attrs for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Int8
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), (1e-5, 1e-5)
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Int8
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), (1e-5, 1e-5)
def test(self):
self.run_test()
def test_quant(self):
self.run_test(quant=True)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_3096 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import copy
# External imports
from six.moves import xrange
# Bokeh imports
from bokeh.core.properties import List, String, Instance, Dict, Any, Int
from bokeh.model import Model
from bokeh.core.property.wrappers import PropertyValueList, PropertyValueDict
from bokeh.util.future import with_metaclass
# Module under test
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def large_plot(n):
from bokeh.models import (
Plot, LinearAxis, Grid, GlyphRenderer,
ColumnDataSource, DataRange1d, PanTool, ZoomInTool, ZoomOutTool, WheelZoomTool, BoxZoomTool,
BoxSelectTool, SaveTool, ResetTool
)
from bokeh.models.layouts import Column
from bokeh.models.glyphs import Line
col = Column()
objects = set([col])
for i in xrange(n):
source = ColumnDataSource(data=dict(x=[0, i + 1], y=[0, i + 1]))
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(x_range=xdr, y_range=ydr)
xaxis = LinearAxis(plot=plot)
yaxis = LinearAxis(plot=plot)
xgrid = Grid(plot=plot, dimension=0)
ygrid = Grid(plot=plot, dimension=1)
tickers = [xaxis.ticker, xaxis.formatter, yaxis.ticker, yaxis.formatter]
glyph = Line(x='x', y='y')
renderer = GlyphRenderer(data_source=source, glyph=glyph)
plot.renderers.append(renderer)
pan = PanTool()
zoom_in = ZoomInTool()
zoom_out = ZoomOutTool()
wheel_zoom = WheelZoomTool()
box_zoom = BoxZoomTool()
box_select = BoxSelectTool()
save = SaveTool()
reset = ResetTool()
tools = [pan, zoom_in, zoom_out, wheel_zoom, box_zoom, box_select, save, reset]
plot.add_tools(*tools)
col.children.append(plot)
objects |= set([
xdr, ydr,
xaxis, yaxis,
xgrid, ygrid,
renderer, renderer.view, glyph,
source, source.selected, source.selection_policy,
plot, plot.x_scale, plot.y_scale, plot.toolbar, plot.title,
box_zoom.overlay, box_select.overlay,
] + tickers + tools)
return col, objects
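# Illustrative: large_plot(3) builds a Column holding three Plot objects and
# returns (col, objects), where `objects` is the complete set of models that
# col.references() is expected to report in the reference tests below.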
class TestMetaModel(object):
def setup_method(self):
from bokeh.model import MetaModel
self.metamodel = MetaModel
self.old_map = copy.copy(self.metamodel.model_class_reverse_map)
def teardown_method(self):
self.metamodel.model_class_reverse_map = self.old_map
def mkclass(self):
class Test_Class(with_metaclass(self.metamodel)):
foo = 1
return Test_Class
def test_metaclassing(self):
tclass = self.mkclass()
assert hasattr(tclass, '__view_model__')
with pytest.raises(Warning):
self.mkclass()
def test_get_class(self):
from bokeh.model import get_class
self.mkclass()
tclass = get_class('Test_Class')
assert hasattr(tclass, 'foo')
with pytest.raises(KeyError):
get_class('Imaginary_Class')
class DeepModel(Model):
child = Instance(Model)
class TestCollectModels(object):
def test_references_large(self):
root, objects = large_plot(10)
assert set(root.references()) == objects
def test_references_deep(self):
root = DeepModel()
objects = set([root])
parent = root
# in a previous implementation, about 400 would blow max
# recursion depth, so we double that and a little bit,
# here.
for i in xrange(900):
model = DeepModel()
objects.add(model)
parent.child = model
parent = model
assert set(root.references()) == objects
class SomeModelToJson(Model):
child = Instance(Model)
foo = Int()
bar = String()
class TestModel(object):
def setup_method(self):
from bokeh.model import Model
self.pObjectClass = Model
self.maxDiff = None
def test_init(self):
testObject = self.pObjectClass(id='test_id')
assert testObject.id == 'test_id'
testObject2 = self.pObjectClass()
assert testObject2.id is not None
assert set(["name", "tags", "js_property_callbacks", "subscribed_events", "js_event_callbacks"]) == testObject.properties()
assert dict(
name=None, tags=[], js_property_callbacks={}, js_event_callbacks={}, subscribed_events=[]
) == testObject.properties_with_values(include_defaults=True)
assert dict() == testObject.properties_with_values(include_defaults=False)
def test_ref(self):
testObject = self.pObjectClass(id='test_id')
assert {'type': 'Model', 'id': 'test_id'} == testObject.ref
def test_references_by_ref_by_value(self):
from bokeh.core.has_props import HasProps
from bokeh.core.properties import Instance, Int
class T(self.pObjectClass):
t = Int(0)
class Y(self.pObjectClass):
t1 = Instance(T)
class Z1(HasProps):
t2 = Instance(T)
class Z2(self.pObjectClass):
t2 = Instance(T)
class X1(self.pObjectClass):
y = Instance(Y)
z1 = Instance(Z1)
class X2(self.pObjectClass):
y = Instance(Y)
z2 = Instance(Z2)
t1, t2 = T(t=1), T(t=2)
y = Y(t1=t1)
z1, z2 = Z1(t2=t2), Z2(t2=t2)
x1 = X1(y=y, z1=z1)
x2 = X2(y=y, z2=z2)
assert x1.references() == {t1, y, t2, x1}
assert x2.references() == {t1, y, t2, z2, x2}
def test_references_in_containers(self):
from bokeh.core.properties import Int, String, Instance, List, Tuple, Dict
# XXX: can't use Y, because of:
#
# Warning: Duplicate __view_model__ declaration of 'Y' for class Y.
# Previous definition: <class 'bokeh.tests.test_objects.Y'>
class U(self.pObjectClass):
a = Int
class V(self.pObjectClass):
u1 = Instance(U)
u2 = List(Instance(U))
u3 = Tuple(Int, Instance(U))
u4 = Dict(String, Instance(U))
u5 = Dict(String, List(Instance(U)))
u1, u2, u3, u4, u5 = U(a=1), U(a=2), U(a=3), U(a=4), U(a=5)
v = V(u1=u1, u2=[u2], u3=(3, u3), u4={"4": u4}, u5={"5": [u5]})
assert v.references() == set([v, u1, u2, u3, u4, u5])
def test_to_json(self):
child_obj = SomeModelToJson(foo=57, bar="hello")
obj = SomeModelToJson(child=child_obj,
foo=42, bar="world")
json = obj.to_json(include_defaults=True)
json_string = obj.to_json_string(include_defaults=True)
assert { "child" : { "id" : child_obj.id, "type" : "SomeModelToJson" },
"id" : obj.id,
"name" : None,
"tags" : [],
'js_property_callbacks': {},
"js_event_callbacks" : {},
"subscribed_events" : [],
"foo" : 42,
"bar" : "world" } == json
assert ('{"bar":"world",' +
'"child":{"id":"%s","type":"SomeModelToJson"},' +
'"foo":42,"id":"%s","js_event_callbacks":{},"js_property_callbacks":{},' +
'"name":null,"subscribed_events":[],"tags":[]}') % (child_obj.id, obj.id) == json_string
def test_no_units_in_json(self):
from bokeh.models import AnnularWedge
obj = AnnularWedge()
json = obj.to_json(include_defaults=True)
assert 'start_angle' in json
assert 'start_angle_units' not in json
assert 'outer_radius' in json
assert 'outer_radius_units' not in json
def test_dataspec_field_in_json(self):
from bokeh.models import AnnularWedge
obj = AnnularWedge()
obj.start_angle = "fieldname"
json = obj.to_json(include_defaults=True)
assert 'start_angle' in json
assert 'start_angle_units' not in json
assert dict(units='rad', field='fieldname') == json['start_angle']
def test_dataspec_value_in_json(self):
from bokeh.models import AnnularWedge
obj = AnnularWedge()
obj.start_angle = 60
json = obj.to_json(include_defaults=True)
assert 'start_angle' in json
assert 'start_angle_units' not in json
assert dict(units='rad', value=60) == json['start_angle']
def test_list_default(self):
class HasListDefault(Model):
value = List(String, default=["hello"])
obj = HasListDefault()
assert obj.value == obj.value
# 'value' should not be included because we haven't modified it
assert 'value' not in obj.properties_with_values(include_defaults=False)
# (but should be in include_defaults=True)
assert 'value' in obj.properties_with_values(include_defaults=True)
obj.value.append("world")
# 'value' should now be included
assert 'value' in obj.properties_with_values(include_defaults=False)
def test_dict_default(self):
class HasDictDefault(Model):
value = Dict(String, Int, default=dict(hello=42))
obj = HasDictDefault()
assert obj.value == obj.value
assert dict(hello=42) == obj.value
# 'value' should not be included because we haven't modified it
assert 'value' not in obj.properties_with_values(include_defaults=False)
# (but should be in include_defaults=True)
assert 'value' in obj.properties_with_values(include_defaults=True)
obj.value['world'] = 57
# 'value' should now be included
assert 'value' in obj.properties_with_values(include_defaults=False)
assert dict(hello=42, world=57) == obj.value
def test_func_default_with_counter(self):
counter = dict(value=0)
def next_value():
counter['value'] += 1
return counter['value']
class HasFuncDefaultInt(Model):
value = Int(default=next_value)
obj1 = HasFuncDefaultInt()
obj2 = HasFuncDefaultInt()
assert obj1.value+1 == obj2.value
# 'value' is a default, but it gets included as a
# non-default because it's unstable.
assert 'value' in obj1.properties_with_values(include_defaults=False)
def test_func_default_with_model(self):
class HasFuncDefaultModel(Model):
child = Instance(Model, lambda: Model())
obj1 = HasFuncDefaultModel()
obj2 = HasFuncDefaultModel()
assert obj1.child.id != obj2.child.id
# 'child' is a default, but it gets included as a
# non-default because it's unstable.
assert 'child' in obj1.properties_with_values(include_defaults=False)
class TestContainerMutation(object):
def _check_mutation(self, obj, attr, mutator, expected_event_old, expected_event_new):
result = dict(calls=[])
def record_trigger(attr, old, new_):
result['calls'].append((attr, old, new_))
obj.on_change(attr, record_trigger)
try:
actual_old = getattr(obj, attr)
assert expected_event_old == actual_old
mutator(actual_old)
assert expected_event_new == getattr(obj, attr)
finally:
obj.remove_on_change(attr, record_trigger)
assert 1 == len(result['calls'])
call = result['calls'][0]
assert attr == call[0]
assert expected_event_old == call[1]
assert expected_event_new == call[2]
class HasListProp(Model):
foo = List(String)
def __init__(self, **kwargs):
super(HasListProp, self).__init__(**kwargs)
class TestListMutation(TestContainerMutation):
def test_whether_included_in_props_with_values(self):
obj = HasListProp()
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# simply reading the property creates a new wrapper, so be
# sure that doesn't count as replacing the default
foo = obj.foo
assert foo == foo # this is to calm down flake's unused var warning
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# but changing the list should count as replacing the default
obj.foo.append("hello")
assert 'foo' in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
def test_assignment_maintains_owners(self):
obj = HasListProp()
old_list = obj.foo
assert isinstance(old_list, PropertyValueList)
assert 1 == len(old_list._owners)
obj.foo = ["a"]
new_list = obj.foo
assert isinstance(new_list, PropertyValueList)
assert old_list is not new_list
assert 0 == len(old_list._owners)
assert 1 == len(new_list._owners)
def test_list_delitem(self):
obj = HasListProp(foo=["a", "b", "c"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
del x[1]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c"],
["a", "c"])
def test_list_delslice(self):
obj = HasListProp(foo=["a", "b", "c", "d"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
del x[1:3]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c", "d"],
["a", "d"])
def test_list_iadd(self):
obj = HasListProp(foo=["a"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x += ["b"]
self._check_mutation(obj, 'foo', mutate,
["a"],
["a", "b"])
def test_list_imul(self):
obj = HasListProp(foo=["a"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x *= 3
self._check_mutation(obj, 'foo', mutate,
["a"],
["a", "a", "a"])
def test_list_setitem(self):
obj = HasListProp(foo=["a"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x[0] = "b"
self._check_mutation(obj, 'foo', mutate,
["a"],
["b"])
def test_list_setslice(self):
obj = HasListProp(foo=["a", "b", "c", "d"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x[1:3] = ["x"]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c", "d"],
["a", "x", "d"])
def test_list_append(self):
obj = HasListProp()
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.append("bar"), [], ["bar"])
def test_list_extend(self):
obj = HasListProp()
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.extend(["x", "y"]), [], ["x", "y"])
def test_list_insert(self):
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.insert(1, "x"),
["a", "b"],
["a", "x", "b"])
def test_list_pop(self):
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.pop(),
["a", "b"],
["a"])
def test_list_remove(self):
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.remove("b"),
["a", "b"],
["a"])
def test_list_reverse(self):
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.reverse(),
["a", "b"],
["b", "a"])
def test_list_sort(self):
obj = HasListProp(foo=["b", "a"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.sort(),
["b", "a"],
["a", "b"])
class HasStringDictProp(Model):
foo = Dict(String, Any)
def __init__(self, **kwargs):
super(HasStringDictProp, self).__init__(**kwargs)
class HasIntDictProp(Model):
foo = Dict(Int, Any)
def __init__(self, **kwargs):
super(HasIntDictProp, self).__init__(**kwargs)
class TestDictMutation(TestContainerMutation):
def test_whether_included_in_props_with_values(self):
obj = HasStringDictProp()
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# simply reading the property creates a new wrapper, so be
# sure that doesn't count as replacing the default
foo = obj.foo
assert foo == foo # this is to calm down flake's unused var warning
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# but changing the dict should count as replacing the default
obj.foo['bar'] = 42
assert 'foo' in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
def test_assignment_maintains_owners(self):
obj = HasStringDictProp()
old_dict = obj.foo
assert isinstance(old_dict, PropertyValueDict)
assert 1 == len(old_dict._owners)
obj.foo = dict(a=1)
new_dict = obj.foo
assert isinstance(new_dict, PropertyValueDict)
assert old_dict is not new_dict
assert 0 == len(old_dict._owners)
assert 1 == len(new_dict._owners)
def test_dict_delitem_string(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
del x['b']
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, c=3))
def test_dict_delitem_int(self):
obj = HasIntDictProp(foo={ 1 : "a", 2 : "b", 3 : "c" })
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
del x[1]
self._check_mutation(obj, 'foo', mutate,
{ 1 : "a", 2 : "b", 3 : "c" },
{ 2 : "b", 3 : "c" })
def test_dict_setitem_string(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x['b'] = 42
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=42, c=3))
def test_dict_setitem_int(self):
obj = HasIntDictProp(foo={ 1 : "a", 2 : "b", 3 : "c" })
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x[2] = "bar"
self._check_mutation(obj, 'foo', mutate,
{ 1 : "a", 2 : "b", 3 : "c" },
{ 1 : "a", 2 : "bar", 3 : "c" })
def test_dict_clear(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x.clear()
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict())
def test_dict_pop(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x.pop('b')
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, c=3))
def test_dict_pop_default_works(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
assert 42 == obj.foo.pop('z', 42)
def test_dict_popitem_works(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
i = obj.foo.popitem()
assert i == ('a', 1) or i == ('b', 2) or i == ('c', 3)
# we don't _check_mutation since the end value is nondeterministic
def test_dict_setdefault(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
b = x.setdefault('b', 43)
assert 2 == b
z = x.setdefault('z', 44)
assert 44 == z
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=2, c=3, z=44))
def test_dict_update(self):
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x.update(dict(b=7, c=8))
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=7, c=8))
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
the-stack_0_3097 | # MetaPrint.py
# Copyright (c) 2008-2017 Chris Gonnerman
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the author nor the names of any contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
MetaPrint.py defines a class and utility functions for use by programs
which also use MSWinPrint.py for output. MetaPrint exposes a document
class which replicates the functionality of MSWinPrint, but rather than
actually printing, a document object collects the output generated so
that it can be replayed, either via MSWinPrint or ImagePrint. This is
useful mainly to preview a print job (by running the MetaPrint document
through ImagePrint) and subsequently actually print it (via MSWinPrint).
document is a class for creating and running print jobs. Presently, the
source is the only documentation for this class.
"""
class document:
def __init__(self, desc = None, printer = None,
papersize = "letter", orientation = "portrait", duplex = "normal"):
self.font = None
self.printer = printer
self.papersize = papersize
self.orientation = orientation
self.duplex = duplex
self.page = 0
self.pagelist = []
self.pagedata = []
if desc is not None:
self.desc = desc
else:
self.desc = "MetaPrint.py print job"
def begin_document(self, desc = None):
if desc:
self.desc = desc
def end_document(self):
if self.pagedata:
self.end_page()
def end_page(self):
if self.pagedata:
self.pagelist.append(self.pagedata)
self.pagedata = []
if self.font is not None:
self.pagedata.append(self.font)
def line(self, from_, to):
self.pagedata.append(("line", (from_, to)))
def rectangle(self, box):
self.pagedata.append(("rectangle", box))
def text(self, position, text):
self.pagedata.append(("text", (position, text)))
def setfont(self, name, size, bold = None, italic = 0):
self.font = ("font", (name, size, bold, italic))
self.pagedata.append(self.font)
def image(self, position, image, size):
self.pagedata.append(("image", (position, image, size)))
def setink(self, ink):
self.pagedata.append(("setink", (ink,)))
def setfill(self, onoff):
self.pagedata.append(("setfill", (onoff,)))
def runpage(self, doc, page):
for op, args in page:
if op == "line":
doc.line(*args)
elif op == "rectangle":
doc.rectangle(args)
elif op == "text":
doc.text(*args)
elif op == "font":
doc.setfont(*args)
elif op == "image":
doc.image(*args)
elif op == "setink":
doc.setink(*args)
elif op == "setfill":
doc.setfill(*args)
doc.end_page()
def run(self, doc, pageno = None):
if pageno is None:
for page in self.pagelist:
self.runpage(doc, page)
else:
self.runpage(doc, self.pagelist[pageno])
# end of file.
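# Illustrative usage sketch (not part of the original module). MSWinPrint and
# ImagePrint are assumed to provide their own document classes, as described in
# the module docstring; their constructor arguments are omitted here.
#
#     doc = document(desc="Invoice preview")
#     doc.setfont("Arial", 12)
#     doc.text((72, 72), "Hello, world")
#     doc.end_document()
#     doc.run(ImagePrint.document(...))   # replay the captured job as a preview
#     doc.run(MSWinPrint.document(...))   # replay it again to actually print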
|
the-stack_0_3100 | import datetime
from base64 import b64decode, b64encode
from collections import deque
from decimal import Decimal
from enum import Enum
from functools import singledispatch, wraps
from inspect import isfunction
from json import JSONDecoder, JSONEncoder
from modulefinder import Module
from types import FunctionType, MethodType
from typing import Optional
from uuid import UUID
import dateutil.parser
from eventsourcing.exceptions import EncoderTypeError
from eventsourcing.utils.topic import get_topic, resolve_topic
try:
import orjson
except ImportError:
orjson: Optional[Module] = None # type: ignore
JSON_SEPARATORS = (",", ":")
def encoderpolicy(arg=None):
"""
Decorator for encoder policy.
Allows default behaviour to be built up from methods
    registered for different types of things, rather than a
    chain of isinstance() calls in a long if-else block.
"""
def _mutator(func):
wrapped = singledispatch(func)
@wraps(wrapped)
def wrapper(*args, **kwargs):
obj = kwargs.get("obj") or args[-1]
return wrapped.dispatch(type(obj))(*args, **kwargs)
wrapper.register = wrapped.register
return wrapper
assert isfunction(arg), arg
return _mutator(arg)
def decoderpolicy(arg=None):
"""
Decorator for decoder policy.
Allows default behaviour to be built up from methods
    registered for different named keys, rather than a
    chain of "in dict" queries in a long if-else block.
"""
def _mutator(func):
wrapped = func
decoder_map = {}
@wraps(wrapped)
def wrapper(*args, **kwargs):
d = kwargs.get("d") or args[-1]
keys = list(d.keys())
if len(keys) == 1:
try:
decoder_func = decoder_map[keys[0]]
except KeyError:
return d
else:
return decoder_func(d)
else:
return d
def register(key):
def decorator(decoder_func):
decoder_map[key] = decoder_func
return decoder_func
return decorator
wrapper.register = register
return wrapper
assert isfunction(arg), arg
return _mutator(arg)
class ObjectJSONEncoder(JSONEncoder):
def __init__(self, sort_keys=False):
super().__init__(sort_keys=sort_keys, separators=JSON_SEPARATORS)
def encode(self, o) -> bytes:
o = self.encode_object(o)
if self.sort_keys is True or orjson is None:
return super(ObjectJSONEncoder, self).encode(o).encode("utf8")
else:
return orjson.dumps(o)
def encode_object(self, o):
return self.encode_container(encoder(o))
@encoderpolicy
def encode_container(self, o):
return o
@encode_container.register(dict)
def encode_dict(self, o):
if type(o) == dict:
return self.encode_dict_state(o)
else:
return {
"__dict__": {
"topic": get_topic(o.__class__),
"state": self.encode_dict_state(o),
}
}
def encode_dict_state(self, o):
return {k: self.encode_object(v) for (k, v) in o.items()}
@encode_container.register(tuple)
def encode_tuple(self, o):
if type(o) == tuple:
return {"__tuple__": self.encode_object(list(o))}
else:
return {
"__tuple__": {
"state": self.encode_object(list(o)),
"topic": get_topic(o.__class__),
}
}
@encode_container.register(list)
def encode_list(self, o):
if type(o) == list:
return [self.encode_object(i) for i in o]
else:
return {
"__list__": {
"state": [self.encode_object(i) for i in o],
"topic": get_topic(o.__class__),
}
}
@encode_container.register(set)
def encode_set(self, o):
if type(o) == set:
return {"__set__": self.encode_iterable(o)}
else:
return {
"__set__": {
"state": self.encode_iterable(o),
"topic": get_topic(o.__class__),
}
}
def encode_iterable(self, o):
return self.encode_object(self.sort_keys and sorted(o) or list(o))
@encode_container.register(frozenset)
def encode_frozenset(self, o):
if type(o) == frozenset:
return {"__frozenset__": self.encode_iterable(o)}
else:
return {
"__frozenset__": {
"state": self.encode_iterable(o),
"topic": get_topic(o.__class__),
}
}
@encode_container.register(deque)
def encode_deque(self, o):
if type(o) == deque:
return {"__deque__": self.encode_object(list(o))}
else:
return {
"__deque__": {
"state": self.encode_object(list(o)),
"topic": get_topic(o.__class__),
}
}
@encode_container.register(object)
def encode_instance(self, o):
if hasattr(o, "__slots__") and o.__slots__ != ():
topic = get_topic(o.__class__)
state = {k: self.encode_object(getattr(o, k)) for k in o.__slots__}
return {"__class__": {"state": state, "topic": topic}}
elif hasattr(o, "__dict__"):
topic = get_topic(o.__class__)
state = {k: self.encode_object(v) for k, v in o.__dict__.items()}
return {"__class__": {"state": state, "topic": topic}}
else:
return o
@encoderpolicy
def encoder(o):
return o
class ObjectJSONDecoder(JSONDecoder):
def __init__(self, object_hook=None, **kwargs):
super(ObjectJSONDecoder, self).__init__(
object_hook=object_hook or decoder, **kwargs
)
@decoderpolicy
def decoder(d):
return d
@encoder.register(type)
def encode_type(o):
return {"__type__": get_topic(o)}
@encoder.register(MethodType)
def encode_method(o):
raise EncoderTypeError(o)
@encoder.register(FunctionType)
def encode_function(o):
raise EncoderTypeError(o)
@decoder.register("__type__")
def decode_type(d):
return resolve_topic(d["__type__"])
@decoder.register("__class__")
def decode_object(d):
topic = d["__class__"]["topic"]
state = d["__class__"]["state"]
obj_class = resolve_topic(topic)
obj = object.__new__(obj_class)
if hasattr(obj, "__dict__"):
obj.__dict__.update(state)
else:
for k, v in state.items():
object.__setattr__(obj, k, v)
return obj
@encoder.register(UUID)
def encode_uuid(obj):
return {"UUID": obj.hex}
@decoder.register("UUID")
def decode_uuid(d):
return UUID(d["UUID"])
@encoder.register(datetime.datetime)
def encode_datetime(obj):
return {"ISO8601_datetime": obj.strftime("%Y-%m-%dT%H:%M:%S.%f%z")}
@decoder.register("ISO8601_datetime")
def decode_datetime(d):
return dateutil.parser.parse(d["ISO8601_datetime"])
@encoder.register(datetime.date)
def encode_date(obj):
return {"ISO8601_date": obj.isoformat()}
@decoder.register("ISO8601_date")
def decode_date(d):
return datetime.datetime.strptime(d["ISO8601_date"], "%Y-%m-%d").date()
@encoder.register(datetime.time)
def encode_time(obj):
return {"ISO8601_time": obj.strftime("%H:%M:%S.%f")}
@decoder.register("ISO8601_time")
def decode_time(d):
hour, minute, seconds = d["ISO8601_time"].split(":")
second, microsecond = seconds.split(".")
return datetime.time(int(hour), int(minute), int(second), int(microsecond))
@encoder.register(Decimal)
def encode_decimal(obj):
return {"__decimal__": str(obj)}
@decoder.register("__decimal__")
def decode_decimal(d):
return Decimal(d["__decimal__"])
@encoder.register(Enum)
def encode_enum(obj):
return {"__enum__": {"topic": get_topic(type(obj)), "name": obj.name}}
@decoder.register("__enum__")
def decode_enum(d):
topic = d["__enum__"]["topic"]
name = d["__enum__"]["name"]
enum = resolve_topic(topic)
return getattr(enum, name)
@decoder.register("__deque__")
def decode_deque(d):
deque_data = d["__deque__"]
if type(deque_data) == dict:
topic = deque_data["topic"]
try:
state = deque_data["state"]
except KeyError:
state = deque_data["values"]
deque_type = resolve_topic(topic)
return deque_type(state)
else:
return deque(deque_data)
@decoder.register("__tuple__")
def decode_tuple(d):
tuple_data = d["__tuple__"]
if type(tuple_data) == dict:
# For NamedTuple objects.
topic = tuple_data["topic"]
state = tuple_data["state"]
tuple_type = resolve_topic(topic)
obj = tuple_type(*state)
else:
# For standard tuple objects.
obj = tuple(tuple_data)
return obj
@decoder.register("__dict__")
def decode_dict(d):
topic = d["__dict__"]["topic"]
state = d["__dict__"]["state"]
dict_type = resolve_topic(topic)
return dict_type(state)
@decoder.register("__set__")
def decode_set(d):
set_data = d["__set__"]
if isinstance(set_data, dict):
topic = set_data["topic"]
state = set_data["state"]
set_type = resolve_topic(topic)
return set_type(state)
else:
return set(set_data)
@decoder.register("__frozenset__")
def decode_frozenset(d):
set_data = d["__frozenset__"]
if isinstance(set_data, dict):
topic = set_data["topic"]
state = set_data["state"]
set_type = resolve_topic(topic)
return set_type(state)
else:
return frozenset(set_data)
@encoder.register(bytes)
def encode_bytes(o):
return {"__bytes__": b64str_from_bytes(o)}
@decoder.register("__bytes__")
def decode_bytes(d):
return bytes_from_b64str(d["__bytes__"])
def b64str_from_bytes(value: bytes) -> str:
return b64encode(value).decode("utf8")
def bytes_from_b64str(value):
return b64decode(value.encode("utf8"))
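# Minimal round-trip sketch (illustrative; the payload values are assumed):
#
#     payload = {"id": UUID(int=1), "occurred": datetime.datetime.utcnow()}
#     data = ObjectJSONEncoder().encode(payload)               # JSON as bytes
#     restored = ObjectJSONDecoder().decode(data.decode("utf8"))
#     assert restored == payload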
|
the-stack_0_3101 | import CLARK_Automator
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--force", action="store_true",
help="Don't ask to update redmine api key")
args = parser.parse_args()
automator = CLARK_Automator.Automate(args.force)
# try to run the automation tasks, if an error occurs print it
try:
automator.timed_retrieve()
except Exception as e:
import traceback
automator.timelog.time_print("[Error] Dumping...\n%s" % traceback.format_exc())
raise
|
the-stack_0_3102 | import chances
from ..utils.exceptions import TalosDataError
def sample_reducer(self, length, max_value):
'''Sample Reducer (Helper)
NOTE: The Scan() object is in self.main_self because
    the object being passed here is a ParamGrid() object, to which
the Scan() object is attached as self.main_self.
Utilize 'grid_downsample', 'shuffle', and 'random_method'
to reduce the param_grid before starting the experiment.
This is the simplest method in Talos for dealing with curse
of dimensionality.
Options are uniform random, stratified random, latin hypercube
sampling, and latin hypercube with sudoku style constraint.
Returns the reduced param_grid as numpy array.
'''
random_method = self.main_self.random_method
# calculate the size of the downsample
n = int(max_value * self.main_self.grid_downsample)
    # throw an error if the downsample leaves no permutations
    if n < 1:
        raise TalosDataError("No permutations in grid. Increase grid_downsample")
# Initialize Randomizer()
r = chances.Randomizer(max_value, length)
# use the user selected method
if random_method == 'sobol':
out = r.sobol()
elif random_method == 'quantum':
out = r.quantum()
elif random_method == 'halton':
out = r.halton()
elif random_method == 'korobov_matrix':
out = r.korobov_matrix()
elif random_method == 'latin_sudoku':
out = r.latin_sudoku()
elif random_method == 'latin_matrix':
out = r.latin_matrix()
elif random_method == 'latin_improved':
out = r.latin_improved()
elif random_method == 'uniform_mersenne':
out = r.uniform_mersenne()
elif random_method == 'uniform_crypto':
out = r.uniform_crypto()
elif random_method == 'ambience':
out = r.ambience()
return out
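# Illustrative call (argument values assumed): downsample the parameter grid
# before the experiment starts, using whichever random_method was configured on
# the Scan() object; the returned numpy array is the reduced selection
# described in the docstring above.
#
#     out = sample_reducer(param_grid, length, max_value)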
|
the-stack_0_3105 | def get_prs_chan_count(family_name=""):
"""
Returns the number of available PRS channels for the given family
:param family_name: string representation of the family name
:return: integer representing the number of available PRS channels
"""
return 12
def get_prs_chan_with_gpio_count(family_name=""):
"""
    Returns the number of PRS channels with GPIO output available for the given family
    :param family_name: string representation of the family name
    :return: integer representing the number of PRS channels that can be routed to GPIO
"""
return 12
def get_available_modules_for_family():
available_modules_for_family = [
"CMU",
"PRS",
"TIMER0",
"TIMER1",
"WTIMER0",
"USART0",
"USART1",
"USART2",
"LEUART0",
"LETIMER0",
"PCNT0",
"I2C0",
"I2C1",
"ACMP0",
"ACMP1",
"LESENSE",
"GPIO",
"PTI",
"MODEM",
"ADC0",
"VDAC0",
"CSEN",
"LFXO",
"IDAC0",
]
return available_modules_for_family
def em4_pin_to_loc(pin_name=None):
pin_loc_map = {
"PF2": {
"number": 0,
"define": "(1 << 0) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
"PF7": {
"number": 1,
"define": "(1 << 1) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
"PD14": {
"number": 4,
"define": "(1 << 4) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
"PA3": {
"number": 8,
"define": "(1 << 8) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
"PB13": {
"number": 9,
"define": "(1 << 9) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
"PC10": {
"number": 12,
"define": "(1 << 12) << _GPIO_EM4WUEN_EM4WUEN_SHIFT",
},
}
if pin_name is not None:
return pin_loc_map[pin_name]
else:
return pin_loc_map
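# Example: em4_pin_to_loc("PF2")["number"] == 0 and em4_pin_to_loc("PC10")["define"]
# expands to the corresponding GPIO_EM4WUEN shift expression above; calling
# em4_pin_to_loc() with no argument returns the whole pin-to-location map.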
class stacked_flash(object):
@staticmethod
def items():
props = {
}
return props.items()
def allowed_route_conflicts(route):
allowed_conflicts = {
"BSP_BTL_BUTTON": ['BSP_LED', 'BSP_BUTTON'],
"BSP_BUTTON_COUNT": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON0": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON1": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON2": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON3": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON4": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON5": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON6": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_BUTTON7": ['BSP_LED', 'BSP_BTL_BUTTON'],
"BSP_CSEN_SCAN_MASK0": ['BSP_CSEN_BONDED_INPUT', 'BSP_CSEN_SCAN_INPUT'],
"BSP_CSEN_SCAN_MASK1": ['BSP_CSEN_BONDED_INPUT', 'BSP_CSEN_SCAN_INPUT'],
"BSP_CSEN_BONDED_MASK0": ['BSP_CSEN_BONDED_INPUT', 'BSP_CSEN_SCAN_INPUT'],
"BSP_CSEN_BONDED_MASK1": ['BSP_CSEN_BONDED_INPUT', 'BSP_CSEN_SCAN_INPUT'],
"BSP_LED_COUNT": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED0": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED1": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED2": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED3": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED4": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED5": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED6": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_LED7": ['BSP_BUTTON', 'BSP_BTL_BUTTON'],
"BSP_SPIDISPLAY_EXTCOMIN": ['PRS_CH'],
}
return allowed_conflicts.get(route, []) |
the-stack_0_3106 | # Zulip's main markdown implementation. See docs/subsystems/markdown.md for
# detailed documentation on our markdown syntax.
import functools
import html
import logging
import os
import re
import time
import urllib
import urllib.parse
from collections import defaultdict, deque
from dataclasses import dataclass
from datetime import datetime
from io import StringIO
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from typing.re import Match, Pattern
from xml.etree import ElementTree as etree
from xml.etree.ElementTree import Element, SubElement
import ahocorasick
import dateutil.parser
import dateutil.tz
import markdown
import requests
from django.conf import settings
from django.db.models import Q
from hyperlink import parse
from markdown.extensions import codehilite, nl2br, sane_lists, tables
from typing_extensions import TypedDict
from zerver.lib import mention as mention
from zerver.lib.bugdown import fenced_code
from zerver.lib.bugdown.fenced_code import FENCE_RE
from zerver.lib.cache import NotFoundInCache, cache_with_key
from zerver.lib.camo import get_camo_url
from zerver.lib.emoji import (
codepoint_to_name,
emoticon_regex,
name_to_codepoint,
translate_emoticons,
)
from zerver.lib.exceptions import BugdownRenderingException
from zerver.lib.mention import extract_user_group, possible_mentions, possible_user_group_mentions
from zerver.lib.tex import render_tex
from zerver.lib.thumbnail import user_uploads_or_external
from zerver.lib.timeout import TimeoutExpired, timeout
from zerver.lib.timezone import get_common_timezones
from zerver.lib.url_encoding import encode_stream, hash_util_encode
from zerver.lib.url_preview import preview as link_preview
from zerver.models import (
MAX_MESSAGE_LENGTH,
Message,
Realm,
UserGroup,
UserGroupMembership,
UserProfile,
all_realm_filters,
get_active_streams,
realm_filters_for_realm,
)
ReturnT = TypeVar('ReturnT')
def one_time(method: Callable[[], ReturnT]) -> Callable[[], ReturnT]:
'''
Use this decorator with extreme caution.
The function you wrap should have no dependency
on any arguments (no args, no kwargs) nor should
it depend on any global state.
'''
val = None
def cache_wrapper() -> ReturnT:
nonlocal val
if val is None:
val = method()
return val
return cache_wrapper
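# Minimal usage sketch of the decorator above (function names are hypothetical):
#
#     @one_time
#     def expensive_setup() -> int:
#         return do_costly_work()   # runs only on the first call
#
#     expensive_setup()   # computes and caches the value
#     expensive_setup()   # returns the cached value without re-running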
class FullNameInfo(TypedDict):
id: int
email: str
full_name: str
DbData = Dict[str, Any]
# Format version of the bugdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1
_T = TypeVar('_T')
ElementStringNone = Union[Element, Optional[str]]
AVATAR_REGEX = r'!avatar\((?P<email>[^)]*)\)'
GRAVATAR_REGEX = r'!gravatar\((?P<email>[^)]*)\)'
EMOJI_REGEX = r'(?P<syntax>:[\w\-\+]+:)'
def verbose_compile(pattern: str) -> Any:
return re.compile(
f"^(.*?){pattern}(.*?)$",
re.DOTALL | re.UNICODE | re.VERBOSE,
)
def normal_compile(pattern: str) -> Any:
return re.compile(
fr"^(.*?){pattern}(.*)$",
re.DOTALL | re.UNICODE,
)
STREAM_LINK_REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
\#\*\* # and after hash sign followed by double asterisks
(?P<stream_name>[^\*]+) # stream name can contain anything
\*\* # ends by double asterisks
"""
@one_time
def get_compiled_stream_link_regex() -> Pattern:
return verbose_compile(STREAM_LINK_REGEX)
STREAM_TOPIC_LINK_REGEX = r"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
\#\*\* # and after hash sign followed by double asterisks
(?P<stream_name>[^\*>]+) # stream name can contain anything except >
> # > acts as separator
(?P<topic_name>[^\*]+) # topic name can contain anything
\*\* # ends by double asterisks
"""
@one_time
def get_compiled_stream_topic_link_regex() -> Pattern:
return verbose_compile(STREAM_TOPIC_LINK_REGEX)
LINK_REGEX: Optional[Pattern] = None
def get_web_link_regex() -> str:
# We create this one time, but not at startup. So the
# first message rendered in any process will have some
# extra costs. It's roughly 75ms to run this code, so
# caching the value in LINK_REGEX is super important here.
global LINK_REGEX
if LINK_REGEX is not None:
return LINK_REGEX
tlds = '|'.join(list_of_tlds())
# A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
#
# We detect a url either by the `https?://` or by building around the TLD.
# In lieu of having a recursive regex (which python doesn't support) to match
# arbitrary numbers of nested matching parenthesis, we manually build a regexp that
    # can match up to six levels of nested parentheses.
    # The inner_paren_contents chunk matches the innermost non-parenthesis-holding text,
# and the paren_group matches text with, optionally, a matching set of parens
inner_paren_contents = r"[^\s()\"]*"
paren_group = r"""
[^\s()\"]*? # Containing characters that won't end the URL
(?: \( %s \) # and more characters in matched parens
[^\s()\"]*? # followed by more characters
)* # zero-or-more sets of paired parens
"""
nested_paren_chunk = paren_group
for i in range(6):
nested_paren_chunk = nested_paren_chunk % (paren_group,)
nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)
file_links = r"| (?:file://(/[^/ ]*)+/?)" if settings.ENABLE_FILE_LINKS else r""
REGEX = fr"""
(?<![^\s'"\(,:<]) # Start after whitespace or specified chars
# (Double-negative lookbehind to allow start-of-string)
(?P<url> # Main group
(?:(?: # Domain part
https?://[\w.:@-]+? # If it has a protocol, anything goes.
|(?: # Or, if not, be more strict to avoid false-positives
(?:[\w-]+\.)+ # One or more domain components, separated by dots
(?:{tlds}) # TLDs (filled in via format from tlds-alpha-by-domain.txt)
)
)
(?:/ # A path, beginning with /
{nested_paren_chunk} # zero-to-6 sets of paired parens
)?) # Path is optional
| (?:[\w.-]+\@[\w.-]+\.[\w]+) # Email is separate, since it can't have a path
{file_links} # File path start with file:///, enable by setting ENABLE_FILE_LINKS=True
| (?:bitcoin:[13][a-km-zA-HJ-NP-Z1-9]{{25,34}}) # Bitcoin address pattern, see https://mokagio.github.io/tech-journal/2014/11/21/regex-bitcoin.html
)
(?= # URL must be followed by (not included in group)
[!:;\?\),\.\'\"\>]* # Optional punctuation characters
(?:\Z|\s) # followed by whitespace or end of string
)
"""
LINK_REGEX = verbose_compile(REGEX)
return LINK_REGEX
def clear_state_for_testing() -> None:
# The link regex never changes in production, but our tests
# try out both sides of ENABLE_FILE_LINKS, so we need
# a way to clear it.
global LINK_REGEX
LINK_REGEX = None
bugdown_logger = logging.getLogger()
def rewrite_local_links_to_relative(db_data: Optional[DbData], link: str) -> str:
"""If the link points to a local destination (e.g. #narrow/...),
generate a relative link that will open it in the current window.
"""
if db_data:
realm_uri_prefix = db_data['realm_uri'] + "/"
if (
link.startswith(realm_uri_prefix)
and urllib.parse.urljoin(realm_uri_prefix, link[len(realm_uri_prefix):])
== link
):
return link[len(realm_uri_prefix):]
return link
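# Illustrative behaviour (realm URI assumed): with db_data['realm_uri'] set to
# "https://chat.example.com", a link such as
#   "https://chat.example.com/#narrow/stream/42-general"
# is rewritten to the relative "#narrow/stream/42-general", while external
# links, and all links when db_data is None, are returned unchanged.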
def url_embed_preview_enabled(message: Optional[Message]=None,
realm: Optional[Realm]=None,
no_previews: bool=False) -> bool:
if not settings.INLINE_URL_EMBED_PREVIEW:
return False
if no_previews:
return False
if realm is None:
if message is not None:
realm = message.get_realm()
if realm is None:
# realm can be None for odd use cases
# like generating documentation or running
# test code
return True
return realm.inline_url_embed_preview
def image_preview_enabled(message: Optional[Message]=None,
realm: Optional[Realm]=None,
no_previews: bool=False) -> bool:
if not settings.INLINE_IMAGE_PREVIEW:
return False
if no_previews:
return False
if realm is None:
if message is not None:
realm = message.get_realm()
if realm is None:
# realm can be None for odd use cases
# like generating documentation or running
# test code
return True
return realm.inline_image_preview
def list_of_tlds() -> List[str]:
# HACK we manually blacklist a few domains
blacklist = ['PY\n', "MD\n"]
# tlds-alpha-by-domain.txt comes from https://data.iana.org/TLD/tlds-alpha-by-domain.txt
tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
tlds = [tld.lower().strip() for tld in open(tlds_file)
if tld not in blacklist and not tld[0].startswith('#')]
tlds.sort(key=len, reverse=True)
return tlds
def walk_tree(root: Element,
processor: Callable[[Element], Optional[_T]],
stop_after_first: bool=False) -> List[_T]:
results = []
queue = deque([root])
while queue:
currElement = queue.popleft()
for child in currElement:
if child:
queue.append(child)
result = processor(child)
if result is not None:
results.append(result)
if stop_after_first:
return results
return results
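# Illustrative use of walk_tree (mirroring the call made in
# InlineHttpsProcessor.run further below): collect every <img> element in a
# rendered fragment.
#
#     found_imgs = walk_tree(root, lambda e: e if e.tag == "img" else None)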
@dataclass
class ElementFamily:
grandparent: Optional[Element]
parent: Element
child: Element
in_blockquote: bool
T = TypeVar("T")
class ResultWithFamily(Generic[T]):
family: ElementFamily
result: T
def __init__(self, family: ElementFamily, result: T):
self.family = family
self.result = result
class ElementPair:
parent: Optional["ElementPair"]
value: Element
def __init__(self, parent: Optional["ElementPair"], value: Element):
self.parent = parent
self.value = value
def walk_tree_with_family(root: Element,
processor: Callable[[Element], Optional[_T]],
) -> List[ResultWithFamily[_T]]:
results = []
queue = deque([ElementPair(parent=None, value=root)])
while queue:
currElementPair = queue.popleft()
for child in currElementPair.value:
if child:
queue.append(ElementPair(parent=currElementPair, value=child))
result = processor(child)
if result is not None:
if currElementPair.parent is not None:
grandparent_element = currElementPair.parent
grandparent = grandparent_element.value
else:
grandparent = None
family = ElementFamily(
grandparent=grandparent,
parent=currElementPair.value,
child=child,
in_blockquote=has_blockquote_ancestor(currElementPair),
)
results.append(ResultWithFamily(
family=family,
result=result,
))
return results
def has_blockquote_ancestor(element_pair: Optional[ElementPair]) -> bool:
if element_pair is None:
return False
elif element_pair.value.tag == 'blockquote':
return True
else:
return has_blockquote_ancestor(element_pair.parent)
@cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data")
def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:
if settings.TEST_SUITE:
from . import testing_mocks
res = testing_mocks.twitter(tweet_id)
else:
creds = {
'consumer_key': settings.TWITTER_CONSUMER_KEY,
'consumer_secret': settings.TWITTER_CONSUMER_SECRET,
'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,
'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,
}
if not all(creds.values()):
return None
# We lazily import twitter here because its import process is
# surprisingly slow, and doing so has a significant impact on
# the startup performance of `manage.py` commands.
import twitter
try:
api = twitter.Api(tweet_mode='extended', **creds)
# Sometimes Twitter hangs on responses. Timing out here
# will cause the Tweet to go through as-is with no inline
# preview, rather than having the message be rejected
# entirely. This timeout needs to be less than our overall
# formatting timeout.
tweet = timeout(3, api.GetStatus, tweet_id)
res = tweet.AsDict()
except AttributeError:
bugdown_logger.error('Unable to load twitter api, you may have the wrong '
'library installed, see https://github.com/zulip/zulip/issues/86')
return None
except TimeoutExpired:
# We'd like to try again later and not cache the bad result,
# so we need to re-raise the exception (just as though
# we were being rate-limited)
raise
except twitter.TwitterError as e:
t = e.args[0]
if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
# Code 34 means that the message doesn't exist; return
# None so that we will cache the error
return None
elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or
t[0]['code'] == 130):
# Code 88 means that we were rate-limited and 130
# means Twitter is having capacity issues; either way
# just raise the error so we don't cache None and will
# try again later.
raise
else:
# It's not clear what to do in cases of other errors,
# but for now it seems reasonable to log at error
# level (so that we get notified), but then cache the
# failure to proceed with our usual work
bugdown_logger.exception("Unknown error fetching tweet data")
return None
return res
HEAD_START_RE = re.compile('^head[ >]')
HEAD_END_RE = re.compile('^/head[ >]')
META_START_RE = re.compile('^meta[ >]')
META_END_RE = re.compile('^/meta[ >]')
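# fetch_open_graph_image downloads the page at `url`, hand-extracts the
# <head>/<meta> tags using the regexes above, and returns a dict of the form
# {'image': ..., 'title': ..., 'desc': ...}, or None if there is no og:image
# or the page cannot be fetched/parsed.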
def fetch_open_graph_image(url: str) -> Optional[Dict[str, Any]]:
in_head = False
    # HTML will auto-close meta tags; when we start the next tag, add
    # a closing tag if it has not been closed yet.
last_closed = True
head = []
# TODO: What if response content is huge? Should we get headers first?
try:
content = requests.get(url, timeout=1).text
except Exception:
return None
    # Extract the head and meta tags.
    # All meta tags are self-closing, have no children, or are closed
    # automatically.
for part in content.split('<'):
if not in_head and HEAD_START_RE.match(part):
            # Started the head node; output it to have a document root
in_head = True
head.append('<head>')
elif in_head and HEAD_END_RE.match(part):
            # Found the end of the head; close any remaining tag, then stop
            # processing
in_head = False
if not last_closed:
last_closed = True
head.append('</meta>')
head.append('</head>')
break
elif in_head and META_START_RE.match(part):
            # Found a meta node; copy it
if not last_closed:
head.append('</meta>')
last_closed = True
head.append('<')
head.append(part)
if '/>' not in part:
last_closed = False
elif in_head and META_END_RE.match(part):
            # End of a meta node; just copy it to close the tag
head.append('<')
head.append(part)
last_closed = True
try:
doc = etree.fromstring(''.join(head))
except etree.ParseError:
return None
og_image = doc.find('meta[@property="og:image"]')
og_title = doc.find('meta[@property="og:title"]')
og_desc = doc.find('meta[@property="og:description"]')
title = None
desc = None
if og_image is not None:
image = og_image.get('content')
else:
return None
if og_title is not None:
title = og_title.get('content')
if og_desc is not None:
desc = og_desc.get('content')
return {'image': image, 'title': title, 'desc': desc}
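# get_tweet_id extracts the numeric status id from twitter.com URLs; for
# example (illustrative), https://twitter.com/wdaher/status/1231241234 and
# the old-style https://twitter.com/#!/wdaher/status/1231241234 both yield
# '1231241234'.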
def get_tweet_id(url: str) -> Optional[str]:
parsed_url = urllib.parse.urlparse(url)
if not (parsed_url.netloc == 'twitter.com' or parsed_url.netloc.endswith('.twitter.com')):
return None
to_match = parsed_url.path
# In old-style twitter.com/#!/wdaher/status/1231241234-style URLs,
# we need to look at the fragment instead
if parsed_url.path == '/' and len(parsed_url.fragment) > 5:
to_match = parsed_url.fragment
tweet_id_match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\d{10,30})(/photo/[0-9])?/?$', to_match)
if not tweet_id_match:
return None
return tweet_id_match.group("tweetid")
class InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor):
def run(self, root: Element) -> None:
        # Get all image elements from the tree
found_imgs = walk_tree(root, lambda e: e if e.tag == "img" else None)
for img in found_imgs:
url = img.get("src")
if urllib.parse.urlsplit(url).scheme != "http":
# Don't rewrite images on our own site (e.g. emoji).
continue
img.set("src", get_camo_url(url))
class BacktickPattern(markdown.inlinepatterns.Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__(self, pattern: str) -> None:
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.ESCAPED_BSLASH = '{}{}{}'.format(markdown.util.STX, ord('\\'), markdown.util.ETX)
self.tag = 'code'
def handleMatch(self, m: Match[str]) -> Union[str, Element]:
if m.group(4):
el = Element(self.tag)
# Modified to not strip whitespace
el.text = markdown.util.AtomicString(m.group(4))
return el
else:
return m.group(2).replace('\\\\', self.ESCAPED_BSLASH)
class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
TWITTER_MAX_IMAGE_HEIGHT = 400
TWITTER_MAX_TO_PREVIEW = 3
INLINE_PREVIEW_LIMIT_PER_MESSAGE = 5
def __init__(self, md: markdown.Markdown) -> None:
markdown.treeprocessors.Treeprocessor.__init__(self, md)
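    # add_a appends an inline image preview to `root`.  The generated markup
    # is roughly (a sketch, attributes simplified):
    #     <div class="message_inline_image">
    #       <a href="{link}" title="{title}"><img src="{url}"/></a>
    #     </div>
    # with the img src rewritten through /thumbnail when thumbnailing applies.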
def add_a(
self,
root: Element,
url: str,
link: str,
title: Optional[str]=None,
desc: Optional[str]=None,
class_attr: str="message_inline_image",
data_id: Optional[str]=None,
insertion_index: Optional[int]=None,
already_thumbnailed: bool=False,
) -> None:
desc = desc if desc is not None else ""
# Update message.has_image attribute.
if 'message_inline_image' in class_attr and self.md.zulip_message:
self.md.zulip_message.has_image = True
if insertion_index is not None:
div = Element("div")
root.insert(insertion_index, div)
else:
div = SubElement(root, "div")
div.set("class", class_attr)
a = SubElement(div, "a")
a.set("href", link)
if title is not None:
a.set("title", title)
if data_id is not None:
a.set("data-id", data_id)
img = SubElement(a, "img")
if settings.THUMBNAIL_IMAGES and (not already_thumbnailed) and user_uploads_or_external(url):
# See docs/thumbnailing.md for some high-level documentation.
#
# We strip leading '/' from relative URLs here to ensure
# consistency in what gets passed to /thumbnail
url = url.lstrip('/')
img.set("src", "/thumbnail?url={}&size=thumbnail".format(
urllib.parse.quote(url, safe=''),
))
img.set('data-src-fullsize', "/thumbnail?url={}&size=full".format(
urllib.parse.quote(url, safe=''),
))
else:
img.set("src", url)
if class_attr == "message_inline_ref":
summary_div = SubElement(div, "div")
title_div = SubElement(summary_div, "div")
title_div.set("class", "message_inline_image_title")
title_div.text = title
desc_div = SubElement(summary_div, "desc")
desc_div.set("class", "message_inline_image_desc")
def add_oembed_data(self, root: Element, link: str, extracted_data: Dict[str, Any]) -> bool:
oembed_resource_type = extracted_data.get('type', '')
title = extracted_data.get('title')
if oembed_resource_type == 'photo':
image = extracted_data.get('image')
if image:
self.add_a(root, image, link, title=title)
return True
elif oembed_resource_type == 'video':
html = extracted_data['html']
image = extracted_data['image']
title = extracted_data.get('title')
description = extracted_data.get('description')
self.add_a(root, image, link, title, description,
"embed-video message_inline_image",
html, already_thumbnailed=True)
return True
return False
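    # add_embed renders a URL-preview card: a div.message_embed containing an
    # a.message_embed_image (with background-image set to the extracted image)
    # and a data-container div with the title and description, when available.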
def add_embed(self, root: Element, link: str, extracted_data: Dict[str, Any]) -> None:
oembed = extracted_data.get('oembed', False)
if oembed and self.add_oembed_data(root, link, extracted_data):
return
img_link = extracted_data.get('image')
if not img_link:
# Don't add an embed if an image is not found
return
container = SubElement(root, "div")
container.set("class", "message_embed")
parsed_img_link = urllib.parse.urlparse(img_link)
# Append domain where relative img_link url is given
if not parsed_img_link.netloc:
parsed_url = urllib.parse.urlparse(link)
domain = '{url.scheme}://{url.netloc}/'.format(url=parsed_url)
img_link = urllib.parse.urljoin(domain, img_link)
img = SubElement(container, "a")
img.set("style", "background-image: url(" + img_link + ")")
img.set("href", link)
img.set("class", "message_embed_image")
data_container = SubElement(container, "div")
data_container.set("class", "data-container")
title = extracted_data.get('title')
if title:
title_elm = SubElement(data_container, "div")
title_elm.set("class", "message_embed_title")
a = SubElement(title_elm, "a")
a.set("href", link)
a.set("title", title)
a.text = title
description = extracted_data.get('description')
if description:
description_elm = SubElement(data_container, "div")
description_elm.set("class", "message_embed_description")
description_elm.text = description
def get_actual_image_url(self, url: str) -> str:
# Add specific per-site cases to convert image-preview urls to image urls.
# See https://github.com/zulip/zulip/issues/4658 for more information
parsed_url = urllib.parse.urlparse(url)
if (parsed_url.netloc == 'github.com' or parsed_url.netloc.endswith('.github.com')):
# https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png ->
# https://raw.githubusercontent.com/zulip/zulip/master/static/images/logo/zulip-icon-128x128.png
split_path = parsed_url.path.split('/')
if len(split_path) > 3 and split_path[3] == "blob":
return urllib.parse.urljoin('https://raw.githubusercontent.com',
'/'.join(split_path[0:3] + split_path[4:]))
return url
def is_image(self, url: str) -> bool:
if not self.md.image_preview_enabled:
return False
parsed_url = urllib.parse.urlparse(url)
        # Skip HTML pages whose URLs end with image extensions but cannot be rendered inline (e.g. pasteboard.co)
if parsed_url.netloc == 'pasteboard.co':
return False
# List from https://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093
        for ext in [".bmp", ".gif", ".jpe", ".jpeg", ".jpg", ".png", ".webp"]:
if parsed_url.path.lower().endswith(ext):
return True
return False
    def corrected_image_source(self, url: str) -> Optional[str]:
# This function adjusts any urls from linx.li and
# wikipedia.org to point to the actual image url. It's
# structurally very similar to dropbox_image, and possibly
# should be rewritten to use open graph, but has some value.
parsed_url = urllib.parse.urlparse(url)
if parsed_url.netloc.lower().endswith('.wikipedia.org'):
# Redirecting from "/wiki/File:" to "/wiki/Special:FilePath/File:"
# A possible alternative, that avoids the redirect after hitting "Special:"
# is using the first characters of md5($filename) to generate the url
domain = parsed_url.scheme + "://" + parsed_url.netloc
correct_url = domain + parsed_url.path[:6] + 'Special:FilePath' + parsed_url.path[5:]
return correct_url
if parsed_url.netloc == 'linx.li':
return 'https://linx.li/s' + parsed_url.path
return None
def dropbox_image(self, url: str) -> Optional[Dict[str, Any]]:
# TODO: The returned Dict could possibly be a TypedDict in future.
parsed_url = urllib.parse.urlparse(url)
if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')):
is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/')
# Only allow preview Dropbox shared links
if not (parsed_url.path.startswith('/s/') or
parsed_url.path.startswith('/sh/') or
is_album):
return None
# Try to retrieve open graph protocol info for a preview
# This might be redundant right now for shared links for images.
# However, we might want to make use of title and description
# in the future. If the actual image is too big, we might also
# want to use the open graph image.
image_info = fetch_open_graph_image(url)
is_image = is_album or self.is_image(url)
# If it is from an album or not an actual image file,
# just use open graph image.
if is_album or not is_image:
# Failed to follow link to find an image preview so
# use placeholder image and guess filename
if image_info is None:
return None
image_info["is_image"] = is_image
return image_info
# Otherwise, try to retrieve the actual image.
# This is because open graph image from Dropbox may have padding
# and gifs do not work.
# TODO: What if image is huge? Should we get headers first?
if image_info is None:
image_info = dict()
image_info['is_image'] = True
parsed_url_list = list(parsed_url)
parsed_url_list[4] = "dl=1" # Replaces query
image_info["image"] = urllib.parse.urlunparse(parsed_url_list)
return image_info
return None
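    # youtube_id extracts the video id from common YouTube URL shapes; for
    # example (illustrative id), both https://youtu.be/abc123XYZ_- and
    # https://www.youtube.com/watch?v=abc123XYZ_- would yield 'abc123XYZ_-'.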
def youtube_id(self, url: str) -> Optional[str]:
if not self.md.image_preview_enabled:
return None
# Youtube video id extraction regular expression from https://pastebin.com/KyKAFv1s
# Slightly modified to support URLs of the forms
# - youtu.be/<id>
# - youtube.com/playlist?v=<id>&list=<list-id>
# - youtube.com/watch_videos?video_ids=<id1>,<id2>,<id3>
# If it matches, match.group(2) is the video id.
schema_re = r'(?:https?://)'
host_re = r'(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)'
param_re = r'(?:(?:(?:v|embed)/)|' + \
r'(?:(?:(?:watch|playlist)(?:_popup|_videos)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v(?:ideo_ids)?=))'
id_re = r'([0-9A-Za-z_-]+)'
youtube_re = r'^({schema_re}?{host_re}{param_re}?)?{id_re}(?(1).+)?$'
youtube_re = youtube_re.format(schema_re=schema_re, host_re=host_re, id_re=id_re, param_re=param_re)
match = re.match(youtube_re, url)
# URLs of the form youtube.com/playlist?list=<list-id> are incorrectly matched
if match is None or match.group(2) == 'playlist':
return None
return match.group(2)
def youtube_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:
title = extracted_data.get("title")
if title is not None:
return f"YouTube - {title}"
return None
def youtube_image(self, url: str) -> Optional[str]:
yt_id = self.youtube_id(url)
if yt_id is not None:
return f"https://i.ytimg.com/vi/{yt_id}/default.jpg"
return None
def vimeo_id(self, url: str) -> Optional[str]:
if not self.md.image_preview_enabled:
return None
#(http|https)?:\/\/(www\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/([^\/]*)\/videos\/|)(\d+)(?:|\/\?)
# If it matches, match.group('id') is the video id.
vimeo_re = r'^((http|https)?:\/\/(www\.)?vimeo.com\/' + \
r'(?:channels\/(?:\w+\/)?|groups\/' + \
r'([^\/]*)\/videos\/|)(\d+)(?:|\/\?))$'
match = re.match(vimeo_re, url)
if match is None:
return None
return match.group(5)
def vimeo_title(self, extracted_data: Dict[str, Any]) -> Optional[str]:
title = extracted_data.get("title")
if title is not None:
return f"Vimeo - {title}"
return None
def twitter_text(self, text: str,
urls: List[Dict[str, str]],
user_mentions: List[Dict[str, Any]],
media: List[Dict[str, Any]]) -> Element:
"""
Use data from the twitter API to turn links, mentions and media into A
tags. Also convert unicode emojis to images.
This works by using the urls, user_mentions and media data from
the twitter API and searching for unicode emojis in the text using
`unicode_emoji_regex`.
        The first step is finding the locations of the URLs, mentions, media and
        emoji in the text. For each match we build a dictionary with type, the start
        location, end location, the URL to link to, and the text (codepoint and title
        in case of emojis) to be used in the link (image in case of emojis).
        Next we sort the matches by start location, and for each one we add the
        text from the end of the last link to the start of the current link to
        the output. The text needs to be added to the text attribute of the first
        node (the P tag) or to the tail of the last link created.
        Finally we add any remaining text to the last node.
"""
to_process: List[Dict[str, Any]] = []
# Build dicts for URLs
for url_data in urls:
short_url = url_data["url"]
full_url = url_data["expanded_url"]
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_process.append({
'type': 'url',
'start': match.start(),
'end': match.end(),
'url': short_url,
'text': full_url,
})
# Build dicts for mentions
for user_mention in user_mentions:
screen_name = user_mention['screen_name']
mention_string = '@' + screen_name
for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
to_process.append({
'type': 'mention',
'start': match.start(),
'end': match.end(),
'url': 'https://twitter.com/' + urllib.parse.quote(screen_name),
'text': mention_string,
})
# Build dicts for media
for media_item in media:
short_url = media_item['url']
expanded_url = media_item['expanded_url']
for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
to_process.append({
'type': 'media',
'start': match.start(),
'end': match.end(),
'url': short_url,
'text': expanded_url,
})
# Build dicts for emojis
for match in re.finditer(unicode_emoji_regex, text, re.IGNORECASE):
orig_syntax = match.group('syntax')
codepoint = unicode_emoji_to_codepoint(orig_syntax)
if codepoint in codepoint_to_name:
display_string = ':' + codepoint_to_name[codepoint] + ':'
to_process.append({
'type': 'emoji',
'start': match.start(),
'end': match.end(),
'codepoint': codepoint,
'title': display_string,
})
to_process.sort(key=lambda x: x['start'])
p = current_node = Element('p')
def set_text(text: str) -> None:
"""
Helper to set the text or the tail of the current_node
"""
if current_node == p:
current_node.text = text
else:
current_node.tail = text
db_data = self.md.zulip_db_data
current_index = 0
for item in to_process:
            # The text we want to link starts inside already-linked text; skip it
if item['start'] < current_index:
continue
# Add text from the end of last link to the start of the current
# link
set_text(text[current_index:item['start']])
current_index = item['end']
if item['type'] != 'emoji':
elem = url_to_a(db_data, item['url'], item['text'])
assert isinstance(elem, Element)
else:
elem = make_emoji(item['codepoint'], item['title'])
current_node = elem
p.append(elem)
# Add any unused text
set_text(text[current_index:])
return p
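    # twitter_link fetches the tweet via fetch_tweet_data and builds a
    # div.twitter-tweet containing the author's avatar, the linkified tweet
    # text from twitter_text, an attribution span, and any photo previews
    # (sized to stay under TWITTER_MAX_IMAGE_HEIGHT).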
def twitter_link(self, url: str) -> Optional[Element]:
tweet_id = get_tweet_id(url)
if tweet_id is None:
return None
try:
res = fetch_tweet_data(tweet_id)
if res is None:
return None
user: Dict[str, Any] = res['user']
tweet = Element("div")
tweet.set("class", "twitter-tweet")
img_a = SubElement(tweet, 'a')
img_a.set("href", url)
profile_img = SubElement(img_a, 'img')
profile_img.set('class', 'twitter-avatar')
# For some reason, for, e.g. tweet 285072525413724161,
# python-twitter does not give us a
# profile_image_url_https, but instead puts that URL in
# profile_image_url. So use _https if available, but fall
# back gracefully.
image_url = user.get('profile_image_url_https', user['profile_image_url'])
profile_img.set('src', image_url)
text = html.unescape(res['full_text'])
urls = res.get('urls', [])
user_mentions = res.get('user_mentions', [])
media: List[Dict[str, Any]] = res.get('media', [])
p = self.twitter_text(text, urls, user_mentions, media)
tweet.append(p)
span = SubElement(tweet, 'span')
span.text = "- {} (@{})".format(user['name'], user['screen_name'])
# Add image previews
for media_item in media:
# Only photos have a preview image
if media_item['type'] != 'photo':
continue
                # Find the largest image size that is smaller than
                # TWITTER_MAX_IMAGE_HEIGHT px tall, or the smallest size if
                # none qualify.
size_name_tuples = list(media_item['sizes'].items())
size_name_tuples.sort(reverse=True,
key=lambda x: x[1]['h'])
for size_name, size in size_name_tuples:
if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT:
break
media_url = '{}:{}'.format(media_item['media_url_https'], size_name)
img_div = SubElement(tweet, 'div')
img_div.set('class', 'twitter-image')
img_a = SubElement(img_div, 'a')
img_a.set('href', media_item['url'])
img = SubElement(img_a, 'img')
img.set('src', media_url)
return tweet
except Exception:
# We put this in its own try-except because it requires external
# connectivity. If Twitter flakes out, we don't want to not-render
# the entire message; we just want to not show the Twitter preview.
bugdown_logger.warning("Error building Twitter link", exc_info=True)
return None
def get_url_data(self, e: Element) -> Optional[Tuple[str, Optional[str]]]:
if e.tag == "a":
return (e.get("href"), e.text)
return None
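    # handle_image_inlining decides where the generated preview div should go:
    # next to the <a> for list items, after the enclosing paragraph (removing
    # the paragraph if the link was its only content), or appended to the root
    # as a fallback.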
def handle_image_inlining(
self,
root: Element,
found_url: ResultWithFamily[Tuple[str, Optional[str]]],
) -> None:
grandparent = found_url.family.grandparent
parent = found_url.family.parent
ahref_element = found_url.family.child
(url, text) = found_url.result
actual_url = self.get_actual_image_url(url)
# url != text usually implies a named link, which we opt not to remove
url_eq_text = text is None or url == text
title = None if url_eq_text else text
if parent.tag == 'li':
self.add_a(parent, self.get_actual_image_url(url), url, title=title)
if not parent.text and not ahref_element.tail and url_eq_text:
parent.remove(ahref_element)
elif parent.tag == 'p':
parent_index = None
for index, uncle in enumerate(grandparent):
if uncle is parent:
parent_index = index
break
if parent_index is not None:
ins_index = self.find_proper_insertion_index(grandparent, parent, parent_index)
self.add_a(grandparent, actual_url, url, title=title, insertion_index=ins_index)
else:
# We're not inserting after parent, since parent not found.
# Append to end of list of grandparent's children as normal
self.add_a(grandparent, actual_url, url, title=title)
# If link is alone in a paragraph, delete paragraph containing it
if (len(parent) == 1 and
(not parent.text or parent.text == "\n") and
not ahref_element.tail and
url_eq_text):
grandparent.remove(parent)
else:
# If none of the above criteria match, fall back to old behavior
self.add_a(root, actual_url, url, title=title)
def find_proper_insertion_index(self, grandparent: Element, parent: Element,
parent_index_in_grandparent: int) -> int:
# If there are several inline images from same paragraph, ensure that
# they are in correct (and not opposite) order by inserting after last
# inline image from paragraph 'parent'
parent_links = [ele.attrib['href'] for ele in parent.iter(tag="a")]
insertion_index = parent_index_in_grandparent
while True:
insertion_index += 1
if insertion_index >= len(grandparent):
return insertion_index
uncle = grandparent[insertion_index]
inline_image_classes = ['message_inline_image', 'message_inline_ref']
if (
uncle.tag != 'div' or
'class' not in uncle.keys() or
uncle.attrib['class'] not in inline_image_classes
):
return insertion_index
uncle_link = list(uncle.iter(tag="a"))[0].attrib['href']
if uncle_link not in parent_links:
return insertion_index
def is_absolute_url(self, url: str) -> bool:
return bool(urllib.parse.urlparse(url).netloc)
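    # run() walks the rendered tree once, collects every link, records
    # potential attachment path ids, and then tries the preview handlers in
    # order: relative-image inlining, Dropbox, plain images, tweets, YouTube,
    # and finally generic URL embeds (subject to
    # INLINE_PREVIEW_LIMIT_PER_MESSAGE).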
def run(self, root: Element) -> None:
# Get all URLs from the blob
found_urls = walk_tree_with_family(root, self.get_url_data)
unique_urls = {found_url.result[0] for found_url in found_urls}
# Collect unique URLs which are not quoted as we don't do
# inline previews for links inside blockquotes.
unique_previewable_urls = {found_url.result[0] for found_url in found_urls
if not found_url.family.in_blockquote}
# Set has_link and similar flags whenever a message is processed by bugdown
if self.md.zulip_message:
self.md.zulip_message.has_link = len(found_urls) > 0
self.md.zulip_message.has_image = False # This is updated in self.add_a
self.md.zulip_message.potential_attachment_path_ids = []
for url in unique_urls:
# Due to rewrite_local_links_to_relative, we need to
# handle both relative URLs beginning with
# `/user_uploads` and beginning with `user_uploads`.
# This urllib construction converts the latter into
# the former.
parsed_url = urllib.parse.urlsplit(urllib.parse.urljoin("/", url))
host = parsed_url.netloc
if host != '' and host != self.md.zulip_realm.host:
continue
if not parsed_url.path.startswith("/user_uploads/"):
continue
path_id = parsed_url.path[len("/user_uploads/"):]
self.md.zulip_message.potential_attachment_path_ids.append(path_id)
if len(found_urls) == 0:
return
if len(unique_previewable_urls) > self.INLINE_PREVIEW_LIMIT_PER_MESSAGE:
return
processed_urls: Set[str] = set()
rendered_tweet_count = 0
for found_url in found_urls:
(url, text) = found_url.result
if url in unique_previewable_urls and url not in processed_urls:
processed_urls.add(url)
else:
continue
if not self.is_absolute_url(url):
if self.is_image(url):
self.handle_image_inlining(root, found_url)
# We don't have a strong use case for doing url preview for relative links.
continue
dropbox_image = self.dropbox_image(url)
if dropbox_image is not None:
class_attr = "message_inline_ref"
is_image = dropbox_image["is_image"]
if is_image:
class_attr = "message_inline_image"
# Not making use of title and description of images
self.add_a(root, dropbox_image['image'], url,
title=dropbox_image.get('title'),
desc=dropbox_image.get('desc', ""),
class_attr=class_attr,
already_thumbnailed=True)
continue
if self.is_image(url):
image_source = self.corrected_image_source(url)
if image_source is not None:
found_url = ResultWithFamily(
family=found_url.family,
result=(image_source, image_source),
)
self.handle_image_inlining(root, found_url)
continue
if get_tweet_id(url) is not None:
if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW:
                    # Only render at most TWITTER_MAX_TO_PREVIEW tweets per message
continue
twitter_data = self.twitter_link(url)
if twitter_data is None:
# This link is not actually a tweet known to twitter
continue
rendered_tweet_count += 1
div = SubElement(root, "div")
div.set("class", "inline-preview-twitter")
div.insert(0, twitter_data)
continue
youtube = self.youtube_image(url)
if youtube is not None:
yt_id = self.youtube_id(url)
self.add_a(root, youtube, url, None, None,
"youtube-video message_inline_image",
yt_id, already_thumbnailed=True)
# NOTE: We don't `continue` here, to allow replacing the URL with
# the title, if INLINE_URL_EMBED_PREVIEW feature is enabled.
# The entire preview would ideally be shown only if the feature
# is enabled, but URL previews are a beta feature and YouTube
# previews are pretty stable.
db_data = self.md.zulip_db_data
if db_data and db_data['sent_by_bot']:
continue
if not self.md.url_embed_preview_enabled:
continue
try:
extracted_data = link_preview.link_embed_data_from_cache(url)
except NotFoundInCache:
self.md.zulip_message.links_for_preview.add(url)
continue
if extracted_data:
if youtube is not None:
title = self.youtube_title(extracted_data)
if title is not None:
found_url.family.child.text = title
continue
self.add_embed(root, url, extracted_data)
if self.vimeo_id(url):
title = self.vimeo_title(extracted_data)
if title:
found_url.family.child.text = title
class Avatar(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
img = Element('img')
email_address = match.group('email')
email = email_address.strip().lower()
profile_id = None
db_data = self.md.zulip_db_data
if db_data is not None:
user_dict = db_data['email_info'].get(email)
if user_dict is not None:
profile_id = user_dict['id']
img.set('class', 'message_body_gravatar')
img.set('src', f'/avatar/{profile_id or email}?s=30')
img.set('title', email)
img.set('alt', email)
return img
def possible_avatar_emails(content: str) -> Set[str]:
emails = set()
for REGEX in [AVATAR_REGEX, GRAVATAR_REGEX]:
matches = re.findall(REGEX, content)
for email in matches:
if email:
emails.add(email)
return emails
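# Timestamp handles !time(...) syntax: the argument is parsed either as a
# human-readable date (via dateutil) or as a unix timestamp, and the original
# text is kept so clients can re-render it locally.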
class Timestamp(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
span = Element('span')
span.set('class', 'timestamp')
timestamp = None
try:
timestamp = dateutil.parser.parse(match.group('time'), tzinfos=get_common_timezones())
except ValueError:
try:
timestamp = datetime.fromtimestamp(float(match.group('time')))
except ValueError:
pass
if timestamp:
if timestamp.tzinfo:
timestamp = timestamp - timestamp.utcoffset()
span.set('data-timestamp', timestamp.strftime("%s"))
# Set text to initial input, so even if parsing fails, the data remains intact.
span.text = markdown.util.AtomicString(match.group('time'))
return span
# All of our emojis (non-ZWJ sequences) belong to one of these unicode blocks:
# \U0001f100-\U0001f1ff - Enclosed Alphanumeric Supplement
# \U0001f200-\U0001f2ff - Enclosed Ideographic Supplement
# \U0001f300-\U0001f5ff - Miscellaneous Symbols and Pictographs
# \U0001f600-\U0001f64f - Emoticons (Emoji)
# \U0001f680-\U0001f6ff - Transport and Map Symbols
# \U0001f900-\U0001f9ff - Supplemental Symbols and Pictographs
# \u2000-\u206f - General Punctuation
# \u2300-\u23ff - Miscellaneous Technical
# \u2400-\u243f - Control Pictures
# \u2440-\u245f - Optical Character Recognition
# \u2460-\u24ff - Enclosed Alphanumerics
# \u2500-\u257f - Box Drawing
# \u2580-\u259f - Block Elements
# \u25a0-\u25ff - Geometric Shapes
# \u2600-\u26ff - Miscellaneous Symbols
# \u2700-\u27bf - Dingbats
# \u2900-\u297f - Supplemental Arrows-B
# \u2b00-\u2bff - Miscellaneous Symbols and Arrows
# \u3000-\u303f - CJK Symbols and Punctuation
# \u3200-\u32ff - Enclosed CJK Letters and Months
unicode_emoji_regex = '(?P<syntax>['\
'\U0001F100-\U0001F64F' \
'\U0001F680-\U0001F6FF' \
'\U0001F900-\U0001F9FF' \
'\u2000-\u206F' \
'\u2300-\u27BF' \
'\u2900-\u297F' \
'\u2B00-\u2BFF' \
'\u3000-\u303F' \
'\u3200-\u32FF' \
'])'
# The equivalent JS regex is \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f]|\ud83d[\ude80-\udeff]|
# \ud83e[\udd00-\uddff]|[\u2000-\u206f]|[\u2300-\u27bf]|[\u2b00-\u2bff]|[\u3000-\u303f]|
# [\u3200-\u32ff]. See below comments for explanation. The JS regex is used by marked.js for
# frontend unicode emoji processing.
# The JS regex \ud83c[\udd00-\udfff]|\ud83d[\udc00-\ude4f] represents \U0001f100-\U0001f64f
# The JS regex \ud83d[\ude80-\udeff] represents \U0001f680-\U0001f6ff
# The JS regex \ud83e[\udd00-\uddff] represents \U0001f900-\U0001f9ff
# The JS regex [\u2000-\u206f] represents \u2000-\u206f
# The JS regex [\u2300-\u27bf] represents \u2300-\u27bf
# Similarly other JS regexes can be mapped to the respective unicode blocks.
# For more information, please refer to the following article:
# http://crocodillon.com/blog/parsing-emoji-unicode-in-javascript
def make_emoji(codepoint: str, display_string: str) -> Element:
# Replace underscore in emoji's title with space
title = display_string[1:-1].replace("_", " ")
span = Element('span')
span.set('class', f'emoji emoji-{codepoint}')
span.set('title', title)
span.set('role', 'img')
span.set('aria-label', title)
span.text = markdown.util.AtomicString(display_string)
return span
def make_realm_emoji(src: str, display_string: str) -> Element:
elt = Element('img')
elt.set('src', src)
elt.set('class', 'emoji')
elt.set("alt", display_string)
elt.set("title", display_string[1:-1].replace("_", " "))
return elt
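# unicode_emoji_to_codepoint maps a single emoji character to its lowercase,
# zero-padded hex codepoint; for example '\U0001f600' -> '1f600' and
# '\u2764' -> '2764'.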
def unicode_emoji_to_codepoint(unicode_emoji: str) -> str:
codepoint = hex(ord(unicode_emoji))[2:]
    # Unicode codepoints are a minimum of length 4, padded
    # with zeroes if the length is less than four.
while len(codepoint) < 4:
codepoint = '0' + codepoint
return codepoint
class EmoticonTranslation(markdown.inlinepatterns.Pattern):
""" Translates emoticons like `:)` into emoji like `:smile:`. """
def handleMatch(self, match: Match[str]) -> Optional[Element]:
db_data = self.md.zulip_db_data
if db_data is None or not db_data['translate_emoticons']:
return None
emoticon = match.group('emoticon')
translated = translate_emoticons(emoticon)
name = translated[1:-1]
return make_emoji(name_to_codepoint[name], translated)
class UnicodeEmoji(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
orig_syntax = match.group('syntax')
codepoint = unicode_emoji_to_codepoint(orig_syntax)
if codepoint in codepoint_to_name:
display_string = ':' + codepoint_to_name[codepoint] + ':'
return make_emoji(codepoint, display_string)
else:
return None
class Emoji(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Optional[Element]:
orig_syntax = match.group("syntax")
name = orig_syntax[1:-1]
active_realm_emoji: Dict[str, Dict[str, str]] = {}
db_data = self.md.zulip_db_data
if db_data is not None:
active_realm_emoji = db_data['active_realm_emoji']
if self.md.zulip_message and name in active_realm_emoji:
return make_realm_emoji(active_realm_emoji[name]['source_url'], orig_syntax)
elif name == 'zulip':
return make_realm_emoji('/static/generated/emoji/images/emoji/unicode/zulip.png', orig_syntax)
elif name in name_to_codepoint:
return make_emoji(name_to_codepoint[name], orig_syntax)
else:
return orig_syntax
def content_has_emoji_syntax(content: str) -> bool:
return re.search(EMOJI_REGEX, content) is not None
class Tex(markdown.inlinepatterns.Pattern):
def handleMatch(self, match: Match[str]) -> Element:
rendered = render_tex(match.group('body'), is_inline=True)
if rendered is not None:
# We need to give Python-Markdown an ElementTree object, but if we
# give it one with correctly stored XML namespaces, it will mangle
# everything when serializing it. So we play this stupid game to
# store xmlns as a normal attribute. :-[
assert ' zulip-xmlns="' not in rendered
rendered = rendered.replace(' xmlns="', ' zulip-xmlns="')
parsed = etree.iterparse(StringIO(rendered))
for event, elem in parsed:
if 'zulip-xmlns' in elem.attrib:
elem.attrib['xmlns'] = elem.attrib.pop('zulip-xmlns')
root = elem
return root
else: # Something went wrong while rendering
span = Element('span')
span.set('class', 'tex-error')
span.text = '$$' + match.group('body') + '$$'
return span
def sanitize_url(url: str) -> Optional[str]:
"""
Sanitize a url against xss attacks.
See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.
"""
try:
parts = urllib.parse.urlparse(url.replace(' ', '%20'))
scheme, netloc, path, params, query, fragment = parts
except ValueError:
# Bad url - so bad it couldn't be parsed.
return ''
# If there is no scheme or netloc and there is a '@' in the path,
# treat it as a mailto: and set the appropriate scheme
if scheme == '' and netloc == '' and '@' in path:
scheme = 'mailto'
elif scheme == '' and netloc == '' and len(path) > 0 and path[0] == '/':
# Allow domain-relative links
return urllib.parse.urlunparse(('', '', path, params, query, fragment))
elif (scheme, netloc, path, params, query) == ('', '', '', '', '') and len(fragment) > 0:
# Allow fragment links
return urllib.parse.urlunparse(('', '', '', '', '', fragment))
# Zulip modification: If scheme is not specified, assume http://
# We re-enter sanitize_url because netloc etc. need to be re-parsed.
if not scheme:
return sanitize_url('http://' + url)
locless_schemes = ['mailto', 'news', 'file', 'bitcoin']
if netloc == '' and scheme not in locless_schemes:
# This fails regardless of anything else.
# Return immediately to save additional processing
return None
# Upstream code will accept a URL like javascript://foo because it
# appears to have a netloc. Additionally there are plenty of other
# schemes that do weird things like launch external programs. To be
# on the safe side, we whitelist the scheme.
if scheme not in ('http', 'https', 'ftp', 'mailto', 'file', 'bitcoin'):
return None
# Upstream code scans path, parameters, and query for colon characters
# because
#
# some aliases [for javascript:] will appear to urllib.parse to have
# no scheme. On top of that relative links (i.e.: "foo/bar.html")
# have no scheme.
#
# We already converted an empty scheme to http:// above, so we skip
# the colon check, which would also forbid a lot of legitimate URLs.
# Url passes all tests. Return url as-is.
return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
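# url_to_a builds an <a> element for a bare URL (or URL plus link text),
# falling back to returning the raw text when sanitize_url rejects the URL.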
def url_to_a(db_data: Optional[DbData], url: str, text: Optional[str]=None) -> Union[Element, str]:
a = Element('a')
href = sanitize_url(url)
if href is None:
# Rejected by sanitize_url; render it as plain text.
return url
if text is None:
text = markdown.util.AtomicString(url)
href = rewrite_local_links_to_relative(db_data, href)
a.set('href', href)
a.text = text
return a
class CompiledPattern(markdown.inlinepatterns.Pattern):
def __init__(self, compiled_re: Pattern, md: markdown.Markdown) -> None:
# This is similar to the superclass's small __init__ function,
# but we skip the compilation step and let the caller give us
# a compiled regex.
self.compiled_re = compiled_re
self.md = md
class AutoLink(CompiledPattern):
def handleMatch(self, match: Match[str]) -> ElementStringNone:
url = match.group('url')
db_data = self.md.zulip_db_data
return url_to_a(db_data, url)
class OListProcessor(sane_lists.SaneOListProcessor):
def __init__(self, parser: Any) -> None:
parser.md.tab_length = 2
super().__init__(parser)
parser.md.tab_length = 4
class UListProcessor(sane_lists.SaneUListProcessor):
""" Unordered lists, but with 2-space indent """
def __init__(self, parser: Any) -> None:
parser.md.tab_length = 2
super().__init__(parser)
parser.md.tab_length = 4
class ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
""" Process unordered list blocks.
Based on markdown.blockprocessors.ListIndentProcessor, but with 2-space indent
"""
def __init__(self, parser: Any) -> None:
# HACK: Set the tab length to 2 just for the initialization of
# this class, so that bulleted lists (and only bulleted lists)
# work off 2-space indentation.
parser.md.tab_length = 2
super().__init__(parser)
parser.md.tab_length = 4
class HashHeaderProcessor(markdown.blockprocessors.HashHeaderProcessor):
""" Process Hash Headers.
Based on markdown.blockprocessors.HashHeaderProcessor, but requires space for heading.
"""
# Original regex for hashheader is
# RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')
RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})\s(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')
class BlockQuoteProcessor(markdown.blockprocessors.BlockQuoteProcessor):
""" Process BlockQuotes.
Based on markdown.blockprocessors.BlockQuoteProcessor, but with 2-space indent
"""
# Original regex for blockquote is RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
RE = re.compile(r'(^|\n)(?!(?:[ ]{0,3}>\s*(?:$|\n))*(?:$|\n))'
r'[ ]{0,3}>[ ]?(.*)')
mention_re = re.compile(mention.find_mentions)
def clean(self, line: str) -> str:
# Silence all the mentions inside blockquotes
line = re.sub(self.mention_re, lambda m: "@_{}".format(m.group('match')), line)
# And then run the upstream processor's code for removing the '>'
return super().clean(line)
@dataclass
class Fence:
fence_str: str
is_code: bool
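# Illustrative effect of the preprocessor below: the input lines
#     ['some text', '* first item']
# would come out as
#     ['some text', '', '* first item']
# so that python-markdown recognizes the list.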
class BugdownListPreprocessor(markdown.preprocessors.Preprocessor):
""" Allows list blocks that come directly after another block
to be rendered as a list.
Detects paragraphs that have a matching list item that comes
        directly after a line of text, and inserts a newline between
        them to satisfy Markdown."""
LI_RE = re.compile(r'^[ ]*([*+-]|\d\.)[ ]+(.*)', re.MULTILINE)
def run(self, lines: List[str]) -> List[str]:
""" Insert a newline between a paragraph and ulist if missing """
inserts = 0
in_code_fence: bool = False
open_fences: List[Fence] = []
copy = lines[:]
for i in range(len(lines) - 1):
# Ignore anything that is inside a fenced code block but not quoted.
# We ignore all lines where some parent is a non quote code block.
m = FENCE_RE.match(lines[i])
if m:
fence_str = m.group('fence')
                is_code = m.group('lang') not in ('quote', 'quoted')
has_open_fences = not len(open_fences) == 0
matches_last_fence = fence_str == open_fences[-1].fence_str if has_open_fences else False
closes_last_fence = not m.group('lang') and matches_last_fence
if closes_last_fence:
open_fences.pop()
else:
open_fences.append(Fence(fence_str, is_code))
in_code_fence = any([fence.is_code for fence in open_fences])
# If we're not in a fenced block and we detect an upcoming list
# hanging off any block (including a list of another type), add
# a newline.
li1 = self.LI_RE.match(lines[i])
li2 = self.LI_RE.match(lines[i+1])
if not in_code_fence and lines[i]:
if (li2 and not li1) or (li1 and li2 and
(len(li1.group(1)) == 1) != (len(li2.group(1)) == 1)):
copy.insert(i+inserts+1, '')
inserts += 1
return copy
# Name for the outer capture group we use to separate whitespace and
# other delimiters from the actual content. This value won't be an
# option in user-entered capture groups.
OUTER_CAPTURE_GROUP = "linkifier_actual_match"
def prepare_realm_pattern(source: str) -> str:
"""Augment a realm filter so it only matches after start-of-string,
whitespace, or opening delimiters, won't match if there are word
characters directly after, and saves what was matched as
OUTER_CAPTURE_GROUP."""
return fr"""(?<![^\s'"\(,:<])(?P<{OUTER_CAPTURE_GROUP}>{source})(?!\w)"""
# Given a regular expression pattern, linkifies groups that match it
# using the provided format string to construct the URL.
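# For example (illustrative filter), the pair ('#(?P<id>[0-9]+)',
# 'https://trac.example.com/ticket/%(id)s') would turn "#123" in a message
# into a link to https://trac.example.com/ticket/123.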
class RealmFilterPattern(markdown.inlinepatterns.Pattern):
""" Applied a given realm filter to the input """
def __init__(self, source_pattern: str,
format_string: str,
markdown_instance: Optional[markdown.Markdown]=None) -> None:
self.pattern = prepare_realm_pattern(source_pattern)
self.format_string = format_string
markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance)
def handleMatch(self, m: Match[str]) -> Union[Element, str]:
db_data = self.md.zulip_db_data
return url_to_a(db_data,
self.format_string % m.groupdict(),
m.group(OUTER_CAPTURE_GROUP))
class UserMentionPattern(markdown.inlinepatterns.Pattern):
def handleMatch(self, m: Match[str]) -> Optional[Element]:
match = m.group('match')
silent = m.group('silent') == '_'
db_data = self.md.zulip_db_data
if self.md.zulip_message and db_data is not None:
if match.startswith("**") and match.endswith("**"):
name = match[2:-2]
else:
return None
wildcard = mention.user_mention_matches_wildcard(name)
id_syntax_match = re.match(r'.+\|(?P<user_id>\d+)$', name)
if id_syntax_match:
id = id_syntax_match.group("user_id")
user = db_data['mention_data'].get_user_by_id(id)
else:
user = db_data['mention_data'].get_user_by_name(name)
if wildcard:
self.md.zulip_message.mentions_wildcard = True
user_id = "*"
elif user:
if not silent:
self.md.zulip_message.mentions_user_ids.add(user['id'])
name = user['full_name']
user_id = str(user['id'])
else:
# Don't highlight @mentions that don't refer to a valid user
return None
el = Element("span")
el.set('data-user-id', user_id)
text = f"{name}"
if silent:
el.set('class', 'user-mention silent')
else:
el.set('class', 'user-mention')
text = f"@{text}"
el.text = markdown.util.AtomicString(text)
return el
return None
class UserGroupMentionPattern(markdown.inlinepatterns.Pattern):
def handleMatch(self, m: Match[str]) -> Optional[Element]:
match = m.group(2)
db_data = self.md.zulip_db_data
if self.md.zulip_message and db_data is not None:
name = extract_user_group(match)
user_group = db_data['mention_data'].get_user_group(name)
if user_group:
self.md.zulip_message.mentions_user_group_ids.add(user_group.id)
name = user_group.name
user_group_id = str(user_group.id)
else:
# Don't highlight @-mentions that don't refer to a valid user
# group.
return None
el = Element("span")
el.set('class', 'user-group-mention')
el.set('data-user-group-id', user_group_id)
text = f"@{name}"
el.text = markdown.util.AtomicString(text)
return el
return None
class StreamPattern(CompiledPattern):
    def find_stream_by_name(self, name: str) -> Optional[Dict[str, Any]]:
db_data = self.md.zulip_db_data
if db_data is None:
return None
stream = db_data['stream_names'].get(name)
return stream
def handleMatch(self, m: Match[str]) -> Optional[Element]:
name = m.group('stream_name')
if self.md.zulip_message:
stream = self.find_stream_by_name(name)
if stream is None:
return None
el = Element('a')
el.set('class', 'stream')
el.set('data-stream-id', str(stream['id']))
# TODO: We should quite possibly not be specifying the
# href here and instead having the browser auto-add the
# href when it processes a message with one of these, to
# provide more clarity to API clients.
# Also do the same for StreamTopicPattern.
stream_url = encode_stream(stream['id'], name)
el.set('href', f'/#narrow/stream/{stream_url}')
text = f'#{name}'
el.text = markdown.util.AtomicString(text)
return el
return None
class StreamTopicPattern(CompiledPattern):
    def find_stream_by_name(self, name: str) -> Optional[Dict[str, Any]]:
db_data = self.md.zulip_db_data
if db_data is None:
return None
stream = db_data['stream_names'].get(name)
return stream
def handleMatch(self, m: Match[str]) -> Optional[Element]:
stream_name = m.group('stream_name')
topic_name = m.group('topic_name')
if self.md.zulip_message:
stream = self.find_stream_by_name(stream_name)
if stream is None or topic_name is None:
return None
el = Element('a')
el.set('class', 'stream-topic')
el.set('data-stream-id', str(stream['id']))
stream_url = encode_stream(stream['id'], stream_name)
topic_url = hash_util_encode(topic_name)
link = f'/#narrow/stream/{stream_url}/topic/{topic_url}'
el.set('href', link)
text = f'#{stream_name} > {topic_name}'
el.text = markdown.util.AtomicString(text)
return el
return None
def possible_linked_stream_names(content: str) -> Set[str]:
matches = re.findall(STREAM_LINK_REGEX, content, re.VERBOSE)
for match in re.finditer(STREAM_TOPIC_LINK_REGEX, content, re.VERBOSE):
matches.append(match.group('stream_name'))
return set(matches)
class AlertWordNotificationProcessor(markdown.preprocessors.Preprocessor):
allowed_before_punctuation = {' ', '\n', '(', '"', '.', ',', '\'', ';', '[', '*', '`', '>'}
allowed_after_punctuation = {' ', '\n', ')', '",', '?', ':', '.', ',', '\'', ';', ']', '!',
'*', '`'}
def check_valid_start_position(self, content: str, index: int) -> bool:
if index <= 0 or content[index] in self.allowed_before_punctuation:
return True
return False
def check_valid_end_position(self, content: str, index: int) -> bool:
if index >= len(content) or content[index] in self.allowed_after_punctuation:
return True
return False
def run(self, lines: Iterable[str]) -> Iterable[str]:
db_data = self.md.zulip_db_data
if self.md.zulip_message and db_data is not None:
# We check for alert words here, the set of which are
# dependent on which users may see this message.
#
            # Our caller passes in an automaton built from the realm's alert
            # words (via db_data). We don't do any special rendering; we just
            # record the ids of users whose alert words appear, in
            # self.md.zulip_message.user_ids_with_alert_words.
realm_alert_words_automaton = db_data['realm_alert_words_automaton']
if realm_alert_words_automaton is not None:
content = '\n'.join(lines).lower()
for end_index, (original_value, user_ids) in realm_alert_words_automaton.iter(content):
if self.check_valid_start_position(content, end_index - len(original_value)) and \
self.check_valid_end_position(content, end_index + 1):
self.md.zulip_message.user_ids_with_alert_words.update(user_ids)
return lines
class LinkInlineProcessor(markdown.inlinepatterns.LinkInlineProcessor):
def zulip_specific_link_changes(self, el: Element) -> Union[None, Element]:
href = el.get('href')
# Sanitize url or don't parse link. See linkify_tests in markdown_test_cases for banned syntax.
href = sanitize_url(self.unescape(href.strip()))
if href is None:
return None # no-op; the link is not processed.
# Rewrite local links to be relative
db_data = self.md.zulip_db_data
href = rewrite_local_links_to_relative(db_data, href)
# Make changes to <a> tag attributes
el.set("href", href)
# Show link href if title is empty
if not el.text.strip():
el.text = href
# Prevent realm_filters from running on the content of a Markdown link, breaking up the link.
# This is a monkey-patch, but it might be worth sending a version of this change upstream.
el.text = markdown.util.AtomicString(el.text)
return el
def handleMatch(self, m: Match[str], data: str) -> Tuple[Union[None, Element], int, int]:
el, match_start, index = super().handleMatch(m, data)
if el is not None:
el = self.zulip_specific_link_changes(el)
return el, match_start, index
def get_sub_registry(r: markdown.util.Registry, keys: List[str]) -> markdown.util.Registry:
# Registry is a new class added by py-markdown to replace Ordered List.
# Since Registry doesn't support .keys(), it is easier to make a new
# object instead of removing keys from the existing object.
new_r = markdown.util.Registry()
for k in keys:
new_r.register(r[k], k, r.get_index_for_name(k))
return new_r
# These are used as keys ("realm_filters_keys") to md_engines and the respective
# realm filter caches
DEFAULT_BUGDOWN_KEY = -1
ZEPHYR_MIRROR_BUGDOWN_KEY = -2
class Bugdown(markdown.Markdown):
def __init__(self, *args: Any, **kwargs: Union[bool, int, List[Any]]) -> None:
# define default configs
self.config = {
"realm_filters": [kwargs['realm_filters'],
"Realm-specific filters for realm_filters_key {}".format(kwargs['realm'])],
"realm": [kwargs['realm'], "Realm id"],
"code_block_processor_disabled": [kwargs['code_block_processor_disabled'],
"Disabled for email gateway"],
}
super().__init__(*args, **kwargs)
self.set_output_format('html')
def build_parser(self) -> markdown.Markdown:
# Build the parser using selected default features from py-markdown.
# The complete list of all available processors can be found in the
# super().build_parser() function.
#
# Note: for any py-markdown updates, manually check if we want any
# of the new features added upstream or not; they wouldn't get
# included by default.
self.preprocessors = self.build_preprocessors()
self.parser = self.build_block_parser()
self.inlinePatterns = self.build_inlinepatterns()
self.treeprocessors = self.build_treeprocessors()
self.postprocessors = self.build_postprocessors()
self.handle_zephyr_mirror()
return self
def build_preprocessors(self) -> markdown.util.Registry:
# We disable the following preprocessors from upstream:
#
# html_block - insecure
# reference - references don't make sense in a chat context.
preprocessors = markdown.util.Registry()
preprocessors.register(BugdownListPreprocessor(self), 'hanging_lists', 35)
preprocessors.register(markdown.preprocessors.NormalizeWhitespace(self), 'normalize_whitespace', 30)
preprocessors.register(fenced_code.FencedBlockPreprocessor(self), 'fenced_code_block', 25)
preprocessors.register(AlertWordNotificationProcessor(self), 'custom_text_notifications', 20)
return preprocessors
def build_block_parser(self) -> markdown.util.Registry:
# We disable the following blockparsers from upstream:
#
# indent - replaced by ours
# setextheader - disabled; we only support hashheaders for headings
# olist - replaced by ours
# ulist - replaced by ours
# quote - replaced by ours
parser = markdown.blockprocessors.BlockParser(self)
parser.blockprocessors.register(markdown.blockprocessors.EmptyBlockProcessor(parser), 'empty', 95)
parser.blockprocessors.register(ListIndentProcessor(parser), 'indent', 90)
if not self.getConfig('code_block_processor_disabled'):
parser.blockprocessors.register(markdown.blockprocessors.CodeBlockProcessor(parser), 'code', 85)
parser.blockprocessors.register(HashHeaderProcessor(parser), 'hashheader', 80)
# We get priority 75 from 'table' extension
parser.blockprocessors.register(markdown.blockprocessors.HRProcessor(parser), 'hr', 70)
parser.blockprocessors.register(OListProcessor(parser), 'olist', 65)
parser.blockprocessors.register(UListProcessor(parser), 'ulist', 60)
parser.blockprocessors.register(BlockQuoteProcessor(parser), 'quote', 55)
parser.blockprocessors.register(markdown.blockprocessors.ParagraphProcessor(parser), 'paragraph', 50)
return parser
def build_inlinepatterns(self) -> markdown.util.Registry:
# We disable the following upstream inline patterns:
#
# backtick - replaced by ours
# escape - probably will re-add at some point.
# link - replaced by ours
# image_link - replaced by ours
# autolink - replaced by ours
# automail - replaced by ours
# linebreak - we use nl2br and consider that good enough
# html - insecure
# reference - references not useful
# image_reference - references not useful
# short_reference - references not useful
# ---------------------------------------------------
# strong_em - for these three patterns,
# strong2 - we have our own versions where
# emphasis2 - we disable _ for bold and emphasis
# Declare regexes for clean single line calls to .register().
NOT_STRONG_RE = markdown.inlinepatterns.NOT_STRONG_RE
# Custom strikethrough syntax: ~~foo~~
DEL_RE = r'(?<!~)(\~\~)([^~\n]+?)(\~\~)(?!~)'
# Custom bold syntax: **foo** but not __foo__
# str inside ** must start and end with a word character
        # this is needed for things like "const char *x = (char *)y"
EMPHASIS_RE = r'(\*)(?!\s+)([^\*^\n]+)(?<!\s)\*'
ENTITY_RE = markdown.inlinepatterns.ENTITY_RE
STRONG_EM_RE = r'(\*\*\*)(?!\s+)([^\*^\n]+)(?<!\s)\*\*\*'
# Inline code block without whitespace stripping
BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\3(?!`))'
# Add Inline Patterns. We use a custom numbering of the
# rules, that preserves the order from upstream but leaves
# space for us to add our own.
reg = markdown.util.Registry()
reg.register(BacktickPattern(BACKTICK_RE), 'backtick', 105)
reg.register(markdown.inlinepatterns.DoubleTagPattern(STRONG_EM_RE, 'strong,em'), 'strong_em', 100)
reg.register(UserMentionPattern(mention.find_mentions, self), 'usermention', 95)
reg.register(Tex(r'\B(?<!\$)\$\$(?P<body>[^\n_$](\\\$|[^$\n])*)\$\$(?!\$)\B'), 'tex', 90)
reg.register(StreamTopicPattern(get_compiled_stream_topic_link_regex(), self), 'topic', 87)
reg.register(StreamPattern(get_compiled_stream_link_regex(), self), 'stream', 85)
reg.register(Avatar(AVATAR_REGEX, self), 'avatar', 80)
reg.register(Timestamp(r'!time\((?P<time>[^)]*)\)'), 'timestamp', 75)
# Note that !gravatar syntax should be deprecated long term.
reg.register(Avatar(GRAVATAR_REGEX, self), 'gravatar', 70)
reg.register(UserGroupMentionPattern(mention.user_group_mentions, self), 'usergroupmention', 65)
reg.register(LinkInlineProcessor(markdown.inlinepatterns.LINK_RE, self), 'link', 60)
reg.register(AutoLink(get_web_link_regex(), self), 'autolink', 55)
# Reserve priority 45-54 for Realm Filters
reg = self.register_realm_filters(reg)
reg.register(markdown.inlinepatterns.HtmlInlineProcessor(ENTITY_RE, self), 'entity', 40)
reg.register(markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'), 'strong', 35)
reg.register(markdown.inlinepatterns.SimpleTagPattern(EMPHASIS_RE, 'em'), 'emphasis', 30)
reg.register(markdown.inlinepatterns.SimpleTagPattern(DEL_RE, 'del'), 'del', 25)
reg.register(markdown.inlinepatterns.SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 20)
reg.register(Emoji(EMOJI_REGEX, self), 'emoji', 15)
reg.register(EmoticonTranslation(emoticon_regex, self), 'translate_emoticons', 10)
# We get priority 5 from 'nl2br' extension
reg.register(UnicodeEmoji(unicode_emoji_regex), 'unicodeemoji', 0)
return reg
def register_realm_filters(self, inlinePatterns: markdown.util.Registry) -> markdown.util.Registry:
for (pattern, format_string, id) in self.getConfig("realm_filters"):
inlinePatterns.register(RealmFilterPattern(pattern, format_string, self),
f'realm_filters/{pattern}', 45)
return inlinePatterns
def build_treeprocessors(self) -> markdown.util.Registry:
# Here we build all the processors from upstream, plus a few of our own.
treeprocessors = markdown.util.Registry()
# We get priority 30 from 'hilite' extension
treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
treeprocessors.register(markdown.treeprocessors.PrettifyTreeprocessor(self), 'prettify', 20)
treeprocessors.register(InlineInterestingLinkProcessor(self), 'inline_interesting_links', 15)
if settings.CAMO_URI:
treeprocessors.register(InlineHttpsProcessor(self), 'rewrite_to_https', 10)
return treeprocessors
def build_postprocessors(self) -> markdown.util.Registry:
# These are the default python-markdown processors, unmodified.
postprocessors = markdown.util.Registry()
postprocessors.register(markdown.postprocessors.RawHtmlPostprocessor(self), 'raw_html', 20)
postprocessors.register(markdown.postprocessors.AndSubstitutePostprocessor(), 'amp_substitute', 15)
postprocessors.register(markdown.postprocessors.UnescapePostprocessor(), 'unescape', 10)
return postprocessors
def getConfig(self, key: str, default: str='') -> Any:
""" Return a setting for the given key or an empty string. """
if key in self.config:
return self.config[key][0]
else:
return default
def handle_zephyr_mirror(self) -> None:
if self.getConfig("realm") == ZEPHYR_MIRROR_BUGDOWN_KEY:
# Disable almost all inline patterns for zephyr mirror
# users' traffic that is mirrored. Note that
# inline_interesting_links is a treeprocessor and thus is
# not removed
self.inlinePatterns = get_sub_registry(self.inlinePatterns, ['autolink'])
self.treeprocessors = get_sub_registry(self.treeprocessors, ['inline_interesting_links',
'rewrite_to_https'])
# insert new 'inline' processor because we have changed self.inlinePatterns
# but InlineProcessor copies md as self.md in __init__.
self.treeprocessors.register(markdown.treeprocessors.InlineProcessor(self), 'inline', 25)
self.preprocessors = get_sub_registry(self.preprocessors, ['custom_text_notifications'])
self.parser.blockprocessors = get_sub_registry(self.parser.blockprocessors, ['paragraph'])
md_engines: Dict[Tuple[int, bool], markdown.Markdown] = {}
realm_filter_data: Dict[int, List[Tuple[str, str, int]]] = {}
def make_md_engine(realm_filters_key: int, email_gateway: bool) -> None:
md_engine_key = (realm_filters_key, email_gateway)
if md_engine_key in md_engines:
del md_engines[md_engine_key]
realm_filters = realm_filter_data[realm_filters_key]
md_engines[md_engine_key] = build_engine(
realm_filters=realm_filters,
realm_filters_key=realm_filters_key,
email_gateway=email_gateway,
)
def build_engine(realm_filters: List[Tuple[str, str, int]],
realm_filters_key: int,
email_gateway: bool) -> markdown.Markdown:
engine = Bugdown(
realm_filters=realm_filters,
realm=realm_filters_key,
code_block_processor_disabled=email_gateway,
        extensions=[
nl2br.makeExtension(),
tables.makeExtension(),
codehilite.makeExtension(
linenums=False,
guess_lang=False,
),
])
return engine
# Split the topic name into multiple sections so that we can easily use
# our common single link matching regex on it.
basic_link_splitter = re.compile(r'[ !;\?\),\'\"]')
# Security note: We don't do any HTML escaping in this
# function on the URLs; they are expected to be HTML-escaped when
# rendered by clients (just as links rendered into message bodies
# are validated and escaped inside `url_to_a`).
def topic_links(realm_filters_key: int, topic_name: str) -> List[str]:
matches: List[str] = []
realm_filters = realm_filters_for_realm(realm_filters_key)
for realm_filter in realm_filters:
pattern = prepare_realm_pattern(realm_filter[0])
for m in re.finditer(pattern, topic_name):
matches += [realm_filter[1] % m.groupdict()]
# Also make raw urls navigable.
for sub_string in basic_link_splitter.split(topic_name):
link_match = re.match(get_web_link_regex(), sub_string)
if link_match:
url = link_match.group('url')
url_object = parse(url)
if not url_object.scheme:
url = url_object.replace(scheme='https').to_text()
matches.append(url)
return matches
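# Illustrative sketch (not from the original code): with no realm filters configured
# and the standard web link regex, a call like
#     topic_links(realm_filters_key, 'progress update https://zulip.com')
# would be expected to return ['https://zulip.com'].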
def maybe_update_markdown_engines(realm_filters_key: Optional[int], email_gateway: bool) -> None:
# If realm_filters_key is None, load all filters
global realm_filter_data
if realm_filters_key is None:
all_filters = all_realm_filters()
all_filters[DEFAULT_BUGDOWN_KEY] = []
for realm_filters_key, filters in all_filters.items():
realm_filter_data[realm_filters_key] = filters
make_md_engine(realm_filters_key, email_gateway)
# Hack to ensure that getConfig("realm") is right for mirrored Zephyrs
realm_filter_data[ZEPHYR_MIRROR_BUGDOWN_KEY] = []
make_md_engine(ZEPHYR_MIRROR_BUGDOWN_KEY, False)
else:
realm_filters = realm_filters_for_realm(realm_filters_key)
if realm_filters_key not in realm_filter_data or \
realm_filter_data[realm_filters_key] != realm_filters:
# Realm filters data has changed, update `realm_filter_data` and any
# of the existing markdown engines using this set of realm filters.
realm_filter_data[realm_filters_key] = realm_filters
for email_gateway_flag in [True, False]:
if (realm_filters_key, email_gateway_flag) in md_engines:
                    # Update only existing engines (if any); don't create a new one.
make_md_engine(realm_filters_key, email_gateway_flag)
if (realm_filters_key, email_gateway) not in md_engines:
            # The Markdown engine corresponding to this key doesn't exist, so create one.
make_md_engine(realm_filters_key, email_gateway)
# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons. The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
_privacy_re = re.compile('\\w', flags=re.UNICODE)
def privacy_clean_markdown(content: str) -> str:
return repr(_privacy_re.sub('x', content))
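# For example, privacy_clean_markdown('hello **world**') returns "'xxxxx **xxxxx**'":
# the words are masked but the Markdown structure is preserved for debugging.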
def log_bugdown_error(msg: str) -> None:
"""We use this unusual logging approach to log the bugdown error, in
order to prevent AdminNotifyHandler from sending the sanitized
original markdown formatting into another Zulip message, which
could cause an infinite exception loop."""
bugdown_logger.error(msg)
def get_email_info(realm_id: int, emails: Set[str]) -> Dict[str, FullNameInfo]:
if not emails:
return dict()
q_list = {
Q(email__iexact=email.strip().lower())
for email in emails
}
rows = UserProfile.objects.filter(
realm_id=realm_id,
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'email',
)
dct = {
row['email'].strip().lower(): row
for row in rows
}
return dct
def get_possible_mentions_info(realm_id: int, mention_texts: Set[str]) -> List[FullNameInfo]:
if not mention_texts:
return list()
# Remove the trailing part of the `name|id` mention syntax,
# thus storing only full names in full_names.
full_names = set()
name_re = r'(?P<full_name>.+)\|\d+$'
for mention_text in mention_texts:
name_syntax_match = re.match(name_re, mention_text)
if name_syntax_match:
full_names.add(name_syntax_match.group("full_name"))
else:
full_names.add(mention_text)
q_list = {
Q(full_name__iexact=full_name)
for full_name in full_names
}
rows = UserProfile.objects.filter(
realm_id=realm_id,
is_active=True,
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'full_name',
'email',
)
return list(rows)
class MentionData:
def __init__(self, realm_id: int, content: str) -> None:
mention_texts, has_wildcards = possible_mentions(content)
possible_mentions_info = get_possible_mentions_info(realm_id, mention_texts)
self.full_name_info = {
row['full_name'].lower(): row
for row in possible_mentions_info
}
self.user_id_info = {
row['id']: row
for row in possible_mentions_info
}
self.init_user_group_data(realm_id=realm_id, content=content)
self.has_wildcards = has_wildcards
def message_has_wildcards(self) -> bool:
return self.has_wildcards
def init_user_group_data(self,
realm_id: int,
content: str) -> None:
user_group_names = possible_user_group_mentions(content)
self.user_group_name_info = get_user_group_name_info(realm_id, user_group_names)
self.user_group_members: Dict[int, List[int]] = defaultdict(list)
group_ids = [group.id for group in self.user_group_name_info.values()]
if not group_ids:
# Early-return to avoid the cost of hitting the ORM,
# which shows up in profiles.
return
membership = UserGroupMembership.objects.filter(user_group_id__in=group_ids)
for info in membership.values('user_group_id', 'user_profile_id'):
group_id = info['user_group_id']
user_profile_id = info['user_profile_id']
self.user_group_members[group_id].append(user_profile_id)
def get_user_by_name(self, name: str) -> Optional[FullNameInfo]:
# warning: get_user_by_name is not dependable if two
# users of the same full name are mentioned. Use
# get_user_by_id where possible.
return self.full_name_info.get(name.lower(), None)
def get_user_by_id(self, id: str) -> Optional[FullNameInfo]:
return self.user_id_info.get(int(id), None)
def get_user_ids(self) -> Set[int]:
"""
Returns the user IDs that might have been mentioned by this
content. Note that because this data structure has not parsed
the message and does not know about escaping/code blocks, this
will overestimate the list of user ids.
"""
return set(self.user_id_info.keys())
def get_user_group(self, name: str) -> Optional[UserGroup]:
return self.user_group_name_info.get(name.lower(), None)
def get_group_members(self, user_group_id: int) -> List[int]:
return self.user_group_members.get(user_group_id, [])
def get_user_group_name_info(realm_id: int, user_group_names: Set[str]) -> Dict[str, UserGroup]:
if not user_group_names:
return dict()
rows = UserGroup.objects.filter(realm_id=realm_id,
name__in=user_group_names)
dct = {row.name.lower(): row for row in rows}
return dct
def get_stream_name_info(realm: Realm, stream_names: Set[str]) -> Dict[str, FullNameInfo]:
if not stream_names:
return dict()
q_list = {
Q(name=name)
for name in stream_names
}
rows = get_active_streams(
realm=realm,
).filter(
functools.reduce(lambda a, b: a | b, q_list),
).values(
'id',
'name',
)
dct = {
row['name']: row
for row in rows
}
return dct
def do_convert(content: str,
realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
message: Optional[Message]=None,
message_realm: Optional[Realm]=None,
sent_by_bot: bool=False,
translate_emoticons: bool=False,
mention_data: Optional[MentionData]=None,
email_gateway: bool=False,
no_previews: bool=False) -> str:
"""Convert Markdown to HTML, with Zulip-specific settings and hacks."""
# This logic is a bit convoluted, but the overall goal is to support a range of use cases:
# * Nothing is passed in other than content -> just run default options (e.g. for docs)
# * message is passed, but no realm is -> look up realm from message
# * message_realm is passed -> use that realm for bugdown purposes
if message is not None:
if message_realm is None:
message_realm = message.get_realm()
if message_realm is None:
realm_filters_key = DEFAULT_BUGDOWN_KEY
else:
realm_filters_key = message_realm.id
if message and hasattr(message, 'id') and message.id:
logging_message_id = 'id# ' + str(message.id)
else:
logging_message_id = 'unknown'
if message is not None and message_realm is not None:
if message_realm.is_zephyr_mirror_realm:
if message.sending_client.name == "zephyr_mirror":
# Use slightly customized Markdown processor for content
# delivered via zephyr_mirror
realm_filters_key = ZEPHYR_MIRROR_BUGDOWN_KEY
maybe_update_markdown_engines(realm_filters_key, email_gateway)
md_engine_key = (realm_filters_key, email_gateway)
if md_engine_key in md_engines:
_md_engine = md_engines[md_engine_key]
else:
if DEFAULT_BUGDOWN_KEY not in md_engines:
maybe_update_markdown_engines(realm_filters_key=None, email_gateway=False)
_md_engine = md_engines[(DEFAULT_BUGDOWN_KEY, email_gateway)]
# Reset the parser; otherwise it will get slower over time.
_md_engine.reset()
# Filters such as UserMentionPattern need a message.
_md_engine.zulip_message = message
_md_engine.zulip_realm = message_realm
_md_engine.zulip_db_data = None # for now
_md_engine.image_preview_enabled = image_preview_enabled(
message, message_realm, no_previews)
_md_engine.url_embed_preview_enabled = url_embed_preview_enabled(
message, message_realm, no_previews)
# Pre-fetch data from the DB that is used in the bugdown thread
if message is not None:
assert message_realm is not None # ensured above if message is not None
# Here we fetch the data structures needed to render
# mentions/avatars/stream mentions from the database, but only
# if there is syntax in the message that might use them, since
# the fetches are somewhat expensive and these types of syntax
# are uncommon enough that it's a useful optimization.
if mention_data is None:
mention_data = MentionData(message_realm.id, content)
emails = possible_avatar_emails(content)
email_info = get_email_info(message_realm.id, emails)
stream_names = possible_linked_stream_names(content)
stream_name_info = get_stream_name_info(message_realm, stream_names)
if content_has_emoji_syntax(content):
active_realm_emoji = message_realm.get_active_emoji()
else:
active_realm_emoji = dict()
_md_engine.zulip_db_data = {
'realm_alert_words_automaton': realm_alert_words_automaton,
'email_info': email_info,
'mention_data': mention_data,
'active_realm_emoji': active_realm_emoji,
'realm_uri': message_realm.uri,
'sent_by_bot': sent_by_bot,
'stream_names': stream_name_info,
'translate_emoticons': translate_emoticons,
}
try:
# Spend at most 5 seconds rendering; this protects the backend
# from being overloaded by bugs (e.g. markdown logic that is
# extremely inefficient in corner cases) as well as user
# errors (e.g. a realm filter that makes some syntax
# infinite-loop).
rendered_content = timeout(5, _md_engine.convert, content)
# Throw an exception if the content is huge; this protects the
# rest of the codebase from any bugs where we end up rendering
# something huge.
if len(rendered_content) > MAX_MESSAGE_LENGTH * 10:
raise BugdownRenderingException(
f'Rendered content exceeds {MAX_MESSAGE_LENGTH * 10} characters (message {logging_message_id})'
)
return rendered_content
except Exception:
cleaned = privacy_clean_markdown(content)
# NOTE: Don't change this message without also changing the
# logic in logging_handlers.py or we can create recursive
# exceptions.
bugdown_logger.exception(
'Exception in Markdown parser; input (sanitized) was: %s\n (message %s)',
cleaned,
logging_message_id,
)
raise BugdownRenderingException()
finally:
# These next three lines are slightly paranoid, since
# we always set these right before actually using the
        # engine, but better safe than sorry.
_md_engine.zulip_message = None
_md_engine.zulip_realm = None
_md_engine.zulip_db_data = None
bugdown_time_start = 0.0
bugdown_total_time = 0.0
bugdown_total_requests = 0
def get_bugdown_time() -> float:
return bugdown_total_time
def get_bugdown_requests() -> int:
return bugdown_total_requests
def bugdown_stats_start() -> None:
global bugdown_time_start
bugdown_time_start = time.time()
def bugdown_stats_finish() -> None:
global bugdown_total_time
global bugdown_total_requests
global bugdown_time_start
bugdown_total_requests += 1
bugdown_total_time += (time.time() - bugdown_time_start)
def convert(content: str,
realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
message: Optional[Message]=None,
message_realm: Optional[Realm]=None,
sent_by_bot: bool=False,
translate_emoticons: bool=False,
mention_data: Optional[MentionData]=None,
email_gateway: bool=False,
no_previews: bool=False) -> str:
bugdown_stats_start()
ret = do_convert(content, realm_alert_words_automaton,
message, message_realm, sent_by_bot,
translate_emoticons, mention_data, email_gateway,
no_previews=no_previews)
bugdown_stats_finish()
return ret
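# Minimal usage sketch (illustrative only): per the docstring of do_convert, calling
# convert() with nothing but content runs the default engine, e.g.
#     rendered_html = convert('**hello** world')
# In production, message/message_realm/mention_data are supplied by the caller.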
|
the-stack_0_3107 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
import networkx as nx
from collections import defaultdict
from typing import Any, Dict
def _modularity_component(
intra_community_degree: float,
total_community_degree: float,
network_degree_sum: float,
resolution: float,
) -> float:
community_degree_ratio = math.pow(total_community_degree, 2.0) / (
2.0 * network_degree_sum
)
return (intra_community_degree - resolution * community_degree_ratio) / (
2.0 * network_degree_sum
)
def _assertions(
graph: nx.Graph,
partitions: Dict[Any, int],
weight_attribute: str,
resolution: float,
):
if not isinstance(graph, nx.Graph):
raise TypeError("graph must be a networkx undirected graph")
if graph.is_directed():
raise ValueError("The graph must be an undirected graph")
if graph.is_multigraph():
raise ValueError(
"Multigraphs must be provided in the form of a non multigraph."
)
if not nx.is_weighted(graph, weight=weight_attribute):
raise ValueError(
f"weight_attribute {weight_attribute} not found on every edge in the provided graph"
)
if not isinstance(partitions, dict):
raise TypeError("partitions must be a dictionary")
if not isinstance(resolution, float):
raise TypeError("resolution must be a float")
def modularity(
graph: nx.Graph,
partitions: Dict[Any, int],
weight_attribute: str = "weight",
resolution: float = 1.0,
) -> float:
"""
Given an undirected graph and a dictionary of vertices to community ids, calculate
the modularity.
Parameters
----------
graph : nx.Graph
An undirected graph
partitions : Dict[Any, int]
A dictionary representing a community partitioning scheme with the keys being
the vertex and the value being a community id.
weight_attribute : str
The edge data attribute on the graph that contains a float weight for the edge.
resolution : float
The resolution to use when calculating the modularity.
Returns
-------
    float
        The modularity of the graph for the given community partitioning scheme.
Raises
------
TypeError
If ``graph`` is not a networkx Graph or
If ``partitions`` is not a dictionary or
If ``resolution`` is not a float
ValueError
If ``graph`` is unweighted
If ``graph`` is directed
If ``graph`` is a multigraph
References
----------
.. [1] https://en.wikipedia.org/wiki/Modularity_(networks)
"""
_assertions(graph, partitions, weight_attribute, resolution)
components = modularity_components(graph, partitions, weight_attribute, resolution)
return sum(components.values())
def modularity_components(
graph: nx.Graph,
partitions: Dict[Any, int],
weight_attribute: str = "weight",
resolution: float = 1.0,
) -> Dict[int, float]:
"""
Given an undirected, weighted graph and a community partition dictionary,
calculates a modularity quantum for each community ID. The sum of these quanta
is the modularity of the graph and partitions provided.
Parameters
----------
graph : nx.Graph
An undirected graph
partitions : Dict[Any, int]
A dictionary representing a community partitioning scheme with the keys being
the vertex and the value being a community id.
weight_attribute : str
The edge data attribute on the graph that contains a float weight for the edge.
resolution : float
The resolution to use when calculating the modularity.
Returns
-------
Dict[int, float]
A dictionary of the community id to the modularity component of that community
Raises
------
TypeError
If ``graph`` is not a networkx Graph or
If ``partitions`` is not a dictionary or
If ``resolution`` is not a float
ValueError
If ``graph`` is unweighted
If ``graph`` is directed
If ``graph`` is a multigraph
"""
_assertions(graph, partitions, weight_attribute, resolution)
total_edge_weight = 0.0
communities = set(partitions.values())
degree_sums_within_community: Dict[int, float] = defaultdict(lambda: 0.0)
degree_sums_for_community: Dict[int, float] = defaultdict(lambda: 0.0)
for vertex, neighbor_vertex, weight in graph.edges(data=weight_attribute):
vertex_community = partitions[vertex]
neighbor_community = partitions[neighbor_vertex]
if vertex_community == neighbor_community:
if vertex == neighbor_vertex:
degree_sums_within_community[vertex_community] += weight
else:
degree_sums_within_community[vertex_community] += weight * 2.0
degree_sums_for_community[vertex_community] += weight
degree_sums_for_community[neighbor_community] += weight
total_edge_weight += weight
return {
comm: _modularity_component(
degree_sums_within_community[comm],
degree_sums_for_community[comm],
total_edge_weight,
resolution,
)
for comm in communities
}
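# Minimal usage sketch (toy graph and partition chosen purely for illustration; not
# part of the original module):
if __name__ == "__main__":
    toy_graph = nx.Graph()
    toy_graph.add_edge("a", "b", weight=1.0)
    toy_graph.add_edge("b", "c", weight=1.0)
    toy_graph.add_edge("c", "a", weight=1.0)
    toy_graph.add_edge("c", "d", weight=0.5)
    toy_partition = {"a": 0, "b": 0, "c": 0, "d": 1}
    # Per-community contributions, and their sum (the modularity of the graph).
    print(modularity_components(toy_graph, toy_partition))
    print(modularity(toy_graph, toy_partition))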
|
the-stack_0_3108 | import os
import logging
from datetime import datetime
from discord import (
Intents,
Activity,
ActivityType
)
from discord.ext import commands
from discord.ext.commands import (
ExtensionNotFound,
ExtensionAlreadyLoaded,
NoEntryPointError,
ExtensionFailed,
ExtensionNotLoaded
)
from motor.motor_asyncio import AsyncIOMotorClient
from bot.utils import config
__all__ = (
'Astro',
)
log = logging.getLogger(__name__)
TOKEN = config()['token']
DESCRIPTION = "A bot for Dimension MC's Discord server."
STICKY_EXTENSIONS = (
'admin',
'handler'
)
def _get_prefix(bot, msg):
return config()['prefixes']
def _get_status():
conf = config()
type_ = conf['status_type']
name = conf['status_name']
url = conf['status_url']
return Activity(type=getattr(ActivityType, type_, ActivityType.playing), name=name, url=url)
class Astro(commands.Bot):
def __init__(self):
self.db = AsyncIOMotorClient('mongodb://localhost:27017/astro')['astro']
self._socket_receive = {}
super().__init__(
command_prefix=_get_prefix,
help_command=None,
description=DESCRIPTION,
case_insensitive=True,
activity=_get_status(),
intents=Intents.all())
def _load_extension(self, name, *, package=None):
try:
super().load_extension(f'bot.extensions.{name}', package=package)
except ExtensionNotFound:
return f'Extension `{name}` not found'
except ExtensionAlreadyLoaded:
return f'Extension `{name}` is already loaded'
except NoEntryPointError:
return f'Extension `{name}` has no setup function'
except ModuleNotFoundError:
return f'Extension `{name}` not found'
except ExtensionFailed as e:
return f'Extension `{name}` couldn\'t be loaded:\n```{e}```'
def _reload_extension(self, name, *, package=None):
try:
super().reload_extension(f'bot.extensions.{name}', package=package)
except ExtensionNotFound:
return f'Extension `{name}` not found'
except ExtensionNotLoaded:
return f'Extension `{name}` not loaded'
except NoEntryPointError:
return f'Extension `{name}` has no setup function'
except ModuleNotFoundError:
return f'Extension `{name}` not found'
except ExtensionFailed as e:
return f'`{name}`:\n```{e.original}```'
def _unload_extension(self, name, *, package=None):
        if any(ext in name for ext in STICKY_EXTENSIONS):  # sticky extensions cannot be unloaded
return f'Extension {name} is protected from unloads.'
try:
super().unload_extension(f'bot.extensions.{name}', package=package)
except:
return f'Extension `{name}` not found/loaded'
def _load_all_extensions(self):
ret = ''
for ext in os.listdir('bot/extensions'):
if ext.endswith('.py'):
_ = self._load_extension(ext[:-3])
if _:
ret = f'{ret}\n{_}'
return ret
def _reload_all_extensions(self):
ret = ''
for ext in os.listdir('bot/extensions'):
if ext.endswith('.py'):
_ = self._reload_extension(ext[:-3])
if _:
ret = f'{ret}\n{_}'
return ret
def _unload_all_extensions(self):
ret = ''
for ext in os.listdir('bot/extensions'):
if ext.endswith('.py'):
_ = self._unload_extension(ext[:-3])
if _:
ret = f'{ret}\n{_}'
return ret
async def on_message(self, message):
if not message.guild:
return
return await super().on_message(message)
async def on_connect(self):
print(f'Connected to Discord. Latency: {(self.latency * 1000):.2f}ms')
async def on_disconnect(self):
print('Disconnected. Attempting to reconnect...')
async def on_resume(self):
print('Connection restored.')
async def on_ready(self):
print('Internal cache ready.')
self.uptime = datetime.utcnow()
async def on_error(self, event_method, *args, **kwargs):
log.exception(f'Ignoring exception in event {event_method}')
super().on_error(event_method, *args, **kwargs)
def run(self):
print('Starting...')
_ = self._load_all_extensions()
if _:
print(_)
loop = self.loop
loop.run_until_complete(super().start(TOKEN)) |
the-stack_0_3109 | """
Example use of the pyjsgf parse_grammar_string function.
The parse_grammar_file, parse_rule_string and parse_expansion_string functions
are also available and work in a similar way.
"""
from jsgf import parse_grammar_string
# Parse a grammar string with parse_grammar_string and get a Grammar object back.
grammar = parse_grammar_string(
"#JSGF V1.0 UTF-8 en;"
"grammar example;"
"public <greet> = hello world {tag};"
)
# Print it.
print(grammar)
# Get the rule that matches "hello world".
rule = grammar.find_matching_rules("hello world")[0]
print("Matching rule: %s" % rule)
# Tags are also parsed and will work as expected.
print("Matched tags: %s" % rule.matched_tags)
|
the-stack_0_3110 | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from petstore_api.configuration import Configuration
class Animal(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'class_name': 'str',
'color': 'str'
}
attribute_map = {
'class_name': 'className',
'color': 'color'
}
discriminator_value_class_map = {
'Dog': 'Dog',
'Cat': 'Cat'
}
def __init__(self, class_name=None, color='red', local_vars_configuration=None): # noqa: E501
"""Animal - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._class_name = None
self._color = None
self.discriminator = 'class_name'
self.class_name = class_name
if color is not None:
self.color = color
@property
def class_name(self):
"""Gets the class_name of this Animal. # noqa: E501
:return: The class_name of this Animal. # noqa: E501
:rtype: str
"""
return self._class_name
@class_name.setter
def class_name(self, class_name):
"""Sets the class_name of this Animal.
:param class_name: The class_name of this Animal. # noqa: E501
:type class_name: str
"""
if self.local_vars_configuration.client_side_validation and class_name is None: # noqa: E501
raise ValueError("Invalid value for `class_name`, must not be `None`") # noqa: E501
self._class_name = class_name
@property
def color(self):
"""Gets the color of this Animal. # noqa: E501
:return: The color of this Animal. # noqa: E501
:rtype: str
"""
return self._color
@color.setter
def color(self, color):
"""Sets the color of this Animal.
:param color: The color of this Animal. # noqa: E501
:type color: str
"""
self._color = color
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_key = self.attribute_map[self.discriminator]
discriminator_value = data[discriminator_key]
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Animal):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Animal):
return True
return self.to_dict() != other.to_dict()
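# Usage sketch (hypothetical payload): the discriminator declared above maps the
# incoming 'className' field to the name of the concrete model class.
# animal = Animal(class_name='Dog', color='black')
# animal.get_real_child_model({'className': 'Dog'})  # -> 'Dog'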
|
the-stack_0_3112 | import log
from machine import I2C
'''
I2C usage example
'''
# Set the log output level
log.basicConfig(level=log.INFO)
i2c_log = log.getLogger("I2C")
I2C_SLAVE_ADDR = 0x1B  # I2C device address
WHO_AM_I = bytearray({0x02, 0})  # I2C register address, passed in as a buffer; take the first value, one byte long
data = bytearray({0x12, 0})  # The command to send
i2c_obj = I2C(I2C.I2C0, I2C.STANDARD_MODE)  # Returns an I2C object
i2c_obj.write(I2C_SLAVE_ADDR, WHO_AM_I, 1, data, 2)  # Write data
r_data = bytearray(2)  # Create a byte array of length 2 to receive the read result
i2c_obj.read(I2C_SLAVE_ADDR, WHO_AM_I, 1, r_data, 2, 0) # read
i2c_log.info(r_data[0])
i2c_log.info(r_data[1])
|
the-stack_0_3114 | ## Cash Machine ##
# Objective: the cash machine will give you the value desired in dollar banknotes
# Input: value you want to withdraw from the cash machine
# Output: banknotes from the cash machine
# 1) Input
# 1.1) Definition of the currency and banknotes
bank_notes = [100, 50, 20, 10, 5, 2, 1]
# 1.2) Definition of the value to be drawn from the cash machine
value = input('Type the value you would like to withdraw: $ ').strip()
# 1.3) Checking if value is an integer
while True:
try:
value = int(value)
except:
        value = input('Error found, please type a valid value: $ ').strip()
continue
else:
value = int(value)
break
# 2) Output
# 2.1) Determination of the number of banknotes
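# Worked example (assuming the user types 186): the loop below prints one banknote
# each of $ 100, $ 50, $ 20, $ 10, $ 5 and $ 1.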
while True:
for note in bank_notes:
if value >= note:
if value % note == 0:
print(f'{int(value / note)} banknotes of $ {note}')
elif value % note != 0:
print(f'{value // note} banknotes of $ {note}')
value = value - int(value / note) * note
if value - note == 0:
break
break |
the-stack_0_3115 | #
# Copyright (C) Foundry 2020
#
import base64
import binascii
import hashlib
import hmac
import json
import time
import requests
from collections import OrderedDict
from datetime import datetime, timedelta
from typing import Callable, Dict, List, Tuple, Optional
class flix:
"""Flix will handle the login and expose functions to get,
create shows etc.
"""
def __init__(self):
self.reset()
def authenticate(self, hostname: str, login: str, password: str) -> Dict:
"""authenticate will authenticate a user
Arguments:
hostname {str} -- Hostname of the server
login {str} -- Login of the user
password {str} -- Password of the user
Returns:
Dict -- Authenticate
"""
authdata = base64.b64encode((login + ':' + password).encode('UTF-8'))
response = None
header = {
'Content-Type': 'application/json',
'Authorization': 'Basic ' + authdata.decode('UTF-8'),
}
try:
r = requests.post(hostname + '/authenticate', headers=header,
verify=False)
r.raise_for_status()
response = json.loads(r.content)
self.hostname = hostname
self.login = login
self.password = password
except requests.exceptions.RequestException as err:
            print('Authentication failed', err)
return None
self.key = response['id']
self.secret = response['secret_access_key']
self.expiry = datetime.strptime(
response['expiry_date'].split('.')[0], '%Y-%m-%dT%H:%M:%S')
return response
def __get_token(self) -> Tuple[str, str]:
"""__get_token will request a token and will reset it
if it is too close to the expiry date
Returns:
Tuple[str, str] -- Key and Secret
"""
if (self.key is None or self.secret is None or self.expiry is None or
datetime.now() + timedelta(hours=2) > self.expiry):
authentificationToken = self.authenticate(
self.hostname, self.login, self.password)
auth_id = authentificationToken['id']
auth_secret_token = authentificationToken['secret_access_key']
auth_expiry_date = authentificationToken['expiry_date']
auth_expiry_date = auth_expiry_date.split('.')[0]
self.key = auth_id
self.secret = auth_secret_token
self.expiry = datetime.strptime(auth_expiry_date,
'%Y-%m-%dT%H:%M:%S')
return self.key, self.secret
def __fn_sign(self,
access_key_id: str,
secret_access_key: str,
url: str,
content: object,
http_method: str,
content_type: str,
dt: str) -> str:
"""After being logged in, you will have a token.
Arguments:
access_key_id {str} -- Access key ID from your token
secret_access_key {str} -- Secret access key from your token
url {str} -- Url of the request
content {object} -- Content of your request
http_method {str} -- Http Method of your request
content_type {str} -- Content Type of your request
dt {str} -- Datetime
Raises:
ValueError: 'You must specify a secret_access_key'
Returns:
str -- Signed header
"""
raw_string = http_method.upper() + '\n'
content_md5 = ''
if content:
if isinstance(content, str):
content_md5 = hashlib.md5(content).hexdigest()
elif isinstance(content, bytes):
hx = binascii.hexlify(content)
content_md5 = hashlib.md5(hx).hexdigest()
elif isinstance(content, dict):
jsoned = json.dumps(content)
content_md5 = hashlib.md5(jsoned.encode('utf-8')).hexdigest()
if content_md5 != '':
raw_string += content_md5 + '\n'
raw_string += content_type + '\n'
else:
raw_string += '\n\n'
raw_string += dt.isoformat().split('.')[0] + 'Z' + '\n'
url_bits = url.split('?')
url_without_query_params = url_bits[0]
raw_string += url_without_query_params
if len(secret_access_key) == 0:
raise ValueError('You must specify a secret_access_key')
digest_created = base64.b64encode(
hmac.new(secret_access_key.encode('utf-8'),
raw_string.encode('utf-8'),
digestmod=hashlib.sha256).digest()
)
return 'FNAUTH ' + access_key_id + ':' + digest_created.decode('utf-8')
def __get_headers(
self, content: object, url: str, method: str = 'POST') -> object:
"""__get_headers will generate the header to make any request
containing the authorization with signature
Arguments:
content {object} -- Content of the request
url {str} -- Url to make the request
method {str} -- Request method (default: {'POST'})
Returns:
object -- Headers
"""
dt = datetime.utcnow()
key, secret = self.__get_token()
return {
'Authorization': self.__fn_sign(
key,
secret,
url,
content,
method,
'application/json',
dt),
'Content-Type': 'application/json',
'Date': dt.strftime('%a, %d %b %Y %H:%M:%S GMT'),
}
def reset(self):
"""reset will reset the user info
"""
self.hostname = None
self.secret = None
self.expiry = None
self.login = None
self.password = None
self.key = None
# Returns sequence revision by given ID
def get_sequence_revision_by_id(self,
show_id: int,
episode_id: int,
sequence_id: int,
revision_id: int
) -> Dict:
"""get_sequence_revision_by_id retrieves sequence revision by given ID
Arguments:
show_id {int} -- Show ID
episode_id {int} -- Episode ID
sequence_id {int} -- Sequence ID
revision_id {int} -- Revision ID
Returns:
Dict -- Sequence revision
"""
url = '/show/{0}/sequence/{1}/revision/{2}'.format(
show_id, sequence_id, revision_id)
if episode_id is not None:
url = '/show/{0}/episode/{1}/sequence/{2}/revision/{3}'.format(
show_id, episode_id, sequence_id, revision_id)
headers = self.__get_headers(None, url, 'GET')
response = None
try:
r = requests.get(self.hostname + url, headers=headers,
verify=False)
response = json.loads(r.content)
if r.status_code == 404:
print('Could not retrieve sequence revision',
response.get('message'))
return None
except requests.exceptions.RequestException as err:
if r is not None and r.status_code == 401:
print('Your token has been revoked')
else:
print('Could not retrieve sequence revision', err)
return None
return response
# Returns the list of panels in the sequence revision
def get_sequence_revision_panels(self,
show_id: int,
episode_id: int,
sequence_id: int,
revision_id: int
) -> Dict:
"""get_sequence_revision_panels retrieves the list of panels in given sequence revision
Arguments:
show_id {int} -- Show ID
episode_id {int} -- Episode ID
sequence_id {int} -- Sequence ID
revision_id {int} -- Revision ID
Returns:
Dict -- Panels
"""
url = '/show/{0}/sequence/{1}/revision/{2}/panels'.format(
show_id, sequence_id, revision_id)
if episode_id is not None:
url = '/show/{0}/episode/{1}/sequence/{2}/revision/{3}/panels'.format(
show_id, episode_id, sequence_id, revision_id)
headers = self.__get_headers(None, url, 'GET')
response = None
try:
r = requests.get(self.hostname + url, headers=headers,
verify=False)
response = json.loads(r.content)
response = response.get('panels')
if r.status_code == 404:
print('Could not retrieve sequence revision panels',
response.get('message'))
return None
except requests.exceptions.RequestException as err:
if r is not None and r.status_code == 401:
print('Your token has been revoked')
else:
print('Could not retrieve sequence revision panels', err)
return None
return response
# Returns list of dialogues in the panel
def get_panel_dialogues(self,
show_id: int,
episode_id: int,
sequence_id: int,
panel_id: int
) -> Dict:
"""get_panel_dialogues retrieves the list of dialogues in given panel ID
Arguments:
show_id {int} -- Show ID
episode_id {int} -- Episode ID
sequence_id {int} -- Sequence ID
revision_id {int} -- Revision ID
Returns:
Dict -- Dialogues
"""
url = '/show/{0}/sequence/{1}/panel/{2}/dialogues'.format(
show_id, sequence_id, panel_id)
if episode_id is not None:
url = '/show/{0}/episode/{1}/sequence/{2}/panel/{3}/dialogues'.format(
show_id, episode_id, sequence_id, panel_id)
headers = self.__get_headers(None, url, 'GET')
response = None
try:
r = requests.get(self.hostname + url, headers=headers,
verify=False)
response = json.loads(r.content)
response = response.get('dialogues')
if r.status_code == 404:
print('Could not retrieve panel dialogues',
response.get('message'))
return None
except requests.exceptions.RequestException as err:
if r is not None and r.status_code == 401:
print('Your token has been revoked')
else:
print('Could not retrieve panel dialogues', err)
return None
return response
# Returns formatted panel object as revisioned panels for POST request
def format_panel_for_revision(self, panel: Dict, dialogue: Dict) -> Dict:
"""format_panel_for_revision will format the panels as revisioned panels
Arguments:
panels {List} -- List of panels
Returns:
List -- Formatted list of panels
"""
revisioned_panel = {
'dialogue': dialogue,
'duration': panel.get('duration'),
'id': panel.get('panel_id'),
'revision_number': panel.get('revision_number')
}
return revisioned_panel
# Makes POST request to create a new sequence revision
def create_new_sequence_revision(
self, show_id: int, episode_id: int, sequence_id: int, revisioned_panels: List[Dict], revision: Dict,
comment: Optional[str]) -> Dict:
"""new_sequence_revision will create a new sequence revision
Arguments:
show_id {int} -- Show ID
episode_id {int} -- Episode ID
sequence_id {int} -- Sequence ID
revisioned_panels {List} -- List of revisionned panels
revision {Object} -- Sequence Revision
            comment {str} -- Comment (default: {'Auto Dialogue Relink'})
Returns:
Dict -- Sequence Revision
"""
if not comment:
comment = 'Auto Dialogue Relink'
url = '/show/{0}/sequence/{1}/revision'.format(show_id, sequence_id)
if episode_id is not None:
url = '/show/{0}/episode/{1}/sequence/{2}/revision'.format(
show_id, episode_id, sequence_id)
meta = revision.get('meta_data', {})
content = {
'comment': comment,
'imported': False,
'meta_data': {
'movie_asset_id': meta.get('movie_asset_id', None),
'audio_asset_id': meta.get('audio_asset_id', None),
'annotations': meta.get('annotations', []),
'audio_timings': meta.get('audio_timings', None),
'highlights': meta.get('highlights', None),
'markers': meta.get('markers', None)
},
'revisioned_panels': revisioned_panels
}
headers = self.__get_headers(content, url, 'POST')
response = None
try:
r = requests.post(self.hostname + url, headers=headers,
data=json.dumps(content), verify=False)
response = json.loads(r.content)
except BaseException:
print('Could not create sequence revision')
return None
return response
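# Usage sketch (hypothetical server URL, credentials and IDs):
# flix_api = flix()
# if flix_api.authenticate('https://flix.example.com:8080', 'user', 'password'):
#     panels = flix_api.get_sequence_revision_panels(
#         show_id=1, episode_id=None, sequence_id=2, revision_id=3)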
|
the-stack_0_3116 | from mindsdb.api.mongo.classes import Responder
import mindsdb.api.mongo.functions as helpers
class Responce(Responder):
when = {'ismaster': helpers.is_true}
result = {
"ismaster": True,
"minWireVersion": 0,
"maxWireVersion": 9,
"ok": 1
}
responder = Responce()
|
the-stack_0_3117 | # coding: utf-8
from __future__ import unicode_literals
import base64
import hashlib
import json
import random
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
orderedSet,
str_or_none,
)
class GloboIE(InfoExtractor):
_VALID_URL = r"(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})"
_NETRC_MACHINE = "globo"
_TESTS = [
{
"url": "http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/",
"md5": "b3ccc801f75cd04a914d51dadb83a78d",
"info_dict": {
"id": "3607726",
"ext": "mp4",
"title": "Mercedes-Benz GLA passa por teste de colisão na Europa",
"duration": 103.204,
"uploader": "Globo.com",
"uploader_id": "265",
},
},
{
"url": "http://globoplay.globo.com/v/4581987/",
"md5": "f36a1ecd6a50da1577eee6dd17f67eff",
"info_dict": {
"id": "4581987",
"ext": "mp4",
"title": "Acidentes de trânsito estão entre as maiores causas de queda de energia em SP",
"duration": 137.973,
"uploader": "Rede Globo",
"uploader_id": "196",
},
},
{
"url": "http://canalbrasil.globo.com/programas/sangue-latino/videos/3928201.html",
"only_matching": True,
},
{
"url": "http://globosatplay.globo.com/globonews/v/4472924/",
"only_matching": True,
},
{
"url": "http://globotv.globo.com/t/programa/v/clipe-sexo-e-as-negas-adeus/3836166/",
"only_matching": True,
},
{
"url": "http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/",
"only_matching": True,
},
{
"url": "http://canaloff.globo.com/programas/desejar-profundo/videos/4518560.html",
"only_matching": True,
},
{
"url": "globo:3607726",
"only_matching": True,
},
]
def _real_initialize(self):
email, password = self._get_login_info()
if email is None:
return
try:
glb_id = (
self._download_json(
"https://login.globo.com/api/authentication",
None,
data=json.dumps(
{
"payload": {
"email": email,
"password": password,
"serviceId": 4654,
},
}
).encode(),
headers={
"Content-Type": "application/json; charset=utf-8",
},
)
or {}
).get("glbId")
if glb_id:
self._set_cookie(".globo.com", "GLBID", glb_id)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
resp = self._parse_json(e.cause.read(), None)
raise ExtractorError(
resp.get("userMessage") or resp["id"], expected=True
)
raise
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
"http://api.globovideos.com/videos/%s/playlist" % video_id, video_id
)["videos"][0]
if video.get("encrypted") is True:
raise ExtractorError("This video is DRM protected.", expected=True)
title = video["title"]
formats = []
subtitles = {}
for resource in video["resources"]:
resource_id = resource.get("_id")
resource_url = resource.get("url")
resource_type = resource.get("type")
if (
not resource_url
or (resource_type == "media" and not resource_id)
or resource_type not in ("subtitle", "media")
):
continue
if resource_type == "subtitle":
subtitles.setdefault(resource.get("language") or "por", []).append(
{
"url": resource_url,
}
)
continue
security = self._download_json(
"http://security.video.globo.com/videos/%s/hash" % video_id,
video_id,
"Downloading security hash for %s" % resource_id,
query={
"player": "desktop",
"version": "5.19.1",
"resource_id": resource_id,
},
)
security_hash = security.get("hash")
if not security_hash:
message = security.get("message")
if message:
raise ExtractorError(
"%s returned error: %s" % (self.IE_NAME, message), expected=True
)
continue
hash_code = security_hash[:2]
padding = "%010d" % random.randint(1, 10000000000)
if hash_code in ("04", "14"):
received_time = security_hash[3:13]
received_md5 = security_hash[24:]
hash_prefix = security_hash[:23]
elif hash_code in ("02", "12", "03", "13"):
received_time = security_hash[2:12]
received_md5 = security_hash[22:]
padding += "1"
hash_prefix = "05" + security_hash[:22]
padded_sign_time = compat_str(int(received_time) + 86400) + padding
md5_data = (received_md5 + padded_sign_time + "0xAC10FD").encode()
signed_md5 = (
base64.urlsafe_b64encode(hashlib.md5(md5_data).digest())
.decode()
.strip("=")
)
signed_hash = hash_prefix + padded_sign_time + signed_md5
signed_url = "%s?h=%s&k=html5&a=%s&u=%s" % (
resource_url,
signed_hash,
"F" if video.get("subscriber_only") else "A",
security.get("user") or "",
)
if resource_id.endswith("m3u8") or resource_url.endswith(".m3u8"):
formats.extend(
self._extract_m3u8_formats(
signed_url,
resource_id,
"mp4",
entry_protocol="m3u8_native",
m3u8_id="hls",
fatal=False,
)
)
elif resource_id.endswith("mpd") or resource_url.endswith(".mpd"):
formats.extend(
self._extract_mpd_formats(
signed_url, resource_id, mpd_id="dash", fatal=False
)
)
elif resource_id.endswith("manifest") or resource_url.endswith("/manifest"):
formats.extend(
self._extract_ism_formats(
signed_url, resource_id, ism_id="mss", fatal=False
)
)
else:
formats.append(
{
"url": signed_url,
"format_id": "http-%s" % resource_id,
"height": int_or_none(resource.get("height")),
}
)
self._sort_formats(formats)
duration = float_or_none(video.get("duration"), 1000)
uploader = video.get("channel")
uploader_id = str_or_none(video.get("channel_id"))
return {
"id": video_id,
"title": title,
"duration": duration,
"uploader": uploader,
"uploader_id": uploader_id,
"formats": formats,
"subtitles": subtitles,
}
class GloboArticleIE(InfoExtractor):
_VALID_URL = r"https?://.+?\.globo\.com/(?:[^/]+/)*(?P<id>[^/.]+)(?:\.html)?"
_VIDEOID_REGEXES = [
r'\bdata-video-id=["\'](\d{7,})',
r'\bdata-player-videosids=["\'](\d{7,})',
r'\bvideosIDs\s*:\s*["\']?(\d{7,})',
r'\bdata-id=["\'](\d{7,})',
r'<div[^>]+\bid=["\'](\d{7,})',
]
_TESTS = [
{
"url": "http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html",
"info_dict": {
"id": "novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes",
"title": "Novidade na fiscalização de bagagem pela Receita provoca discussões",
"description": "md5:c3c4b4d4c30c32fce460040b1ac46b12",
},
"playlist_count": 1,
},
{
"url": "http://g1.globo.com/pr/parana/noticia/2016/09/mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato.html",
"info_dict": {
"id": "mpf-denuncia-lula-marisa-e-mais-seis-na-operacao-lava-jato",
"title": "Lula era o 'comandante máximo' do esquema da Lava Jato, diz MPF",
"description": "md5:8aa7cc8beda4dc71cc8553e00b77c54c",
},
"playlist_count": 6,
},
{
"url": "http://gq.globo.com/Prazeres/Poder/noticia/2015/10/all-o-desafio-assista-ao-segundo-capitulo-da-serie.html",
"only_matching": True,
},
{
"url": "http://gshow.globo.com/programas/tv-xuxa/O-Programa/noticia/2014/01/xuxa-e-junno-namoram-muuuito-em-luau-de-zeze-di-camargo-e-luciano.html",
"only_matching": True,
},
{
"url": "http://oglobo.globo.com/rio/a-amizade-entre-um-entregador-de-farmacia-um-piano-19946271",
"only_matching": True,
},
]
@classmethod
def suitable(cls, url):
return (
False if GloboIE.suitable(url) else super(GloboArticleIE, cls).suitable(url)
)
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_ids = []
for video_regex in self._VIDEOID_REGEXES:
video_ids.extend(re.findall(video_regex, webpage))
entries = [
self.url_result("globo:%s" % video_id, GloboIE.ie_key())
for video_id in orderedSet(video_ids)
]
title = self._og_search_title(webpage, fatal=False)
description = self._html_search_meta("description", webpage)
return self.playlist_result(entries, display_id, title, description)
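# Usage sketch (assumes this module is registered in youtube-dl's extractor list,
# as in the upstream project):
# from youtube_dl import YoutubeDL
# with YoutubeDL() as ydl:
#     info = ydl.extract_info('globo:3607726', download=False)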
|
the-stack_0_3118 | # %%
import numpy as np
import pandas as pd
import tensorflow.keras as keras
from matplotlib import pyplot as plt, units
pd.options.display.max_rows = 10
pd.options.display.float_format = "{:.1f}".format
# %% Load datasets from Internet
train_df: pd.DataFrame = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv"
)
test_df: pd.DataFrame = pd.read_csv(
"https://download.mlcc.google.com/mledu-datasets/california_housing_test.csv"
)
# %%
scale_label = 1000.
train_df['median_house_value'] /= scale_label
test_df['median_house_value'] /= scale_label
# %%
def build_model(learning_rate: float):
model = keras.models.Sequential()
model.add(keras.layers.Input(shape=(1, )))
model.add(keras.layers.Dense(units=1))
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=learning_rate),
loss=keras.losses.MeanSquaredError(),
metrics=[keras.metrics.RootMeanSquaredError()])
return model
def train_model(model: keras.Model,
df: pd.DataFrame,
feature,
label,
epochs: int,
batch_size=None,
validation_split=0.1):
history = model.fit(
x=df[feature],
y=df[label],
batch_size=batch_size,
epochs=epochs,
validation_split=validation_split,
)
params = model.get_weights()
weight = params[0]
bias = params[1]
hist = pd.DataFrame(history.history)
rmse = hist["root_mean_squared_error"]
return history.epoch, rmse, history.history
# %%
def plot_loss_curve(epochs, mae_training, mae_validation):
plt.figure()
plt.xlabel("Epoch")
plt.ylabel("RMSE")
plt.plot(epochs[1:], mae_training[1:], label="training loss")
plt.plot(epochs[1:], mae_validation[1:], label="validation loss")
plt.legend()
# We're not going to plot the first epoch, since the loss on the first epoch
# is often substantially greater than the loss for other epochs.
merged_mae_lists = mae_training[1:] + mae_validation[1:]
highest_loss = max(merged_mae_lists)
lowest_loss = min(merged_mae_lists)
delta = highest_loss - lowest_loss
print(delta)
top_of_y_axis = highest_loss + (delta * 0.05)
bottom_of_y_axis = lowest_loss - (delta * 0.05)
plt.ylim([bottom_of_y_axis, top_of_y_axis])
plt.show()
# %%
learning_rate = 0.05
epoch = 30
batch_size = 100
validation_split = 0.2
my_feature = "median_income"
my_label = "median_house_value"
my_model = None
my_model = build_model(learning_rate)
epochs, rmse, history = train_model(my_model, train_df, my_feature, my_label,
epoch, batch_size, validation_split)
plot_loss_curve(epochs, history["root_mean_squared_error"],
history["val_root_mean_squared_error"])
# %%
# No matter how you split the training set and the validation set, the loss curves differ
# significantly. Evidently, the data in the training set isn't similar enough to the data in
# the validation set. Counterintuitive? Yes, but this problem is actually pretty common in
# machine learning.
# Your task is to determine why the loss curves aren't highly similar. As with most issues in
# machine learning, the problem is rooted in the data itself. To solve this mystery of why the
# training set and validation set aren't almost identical, write a line or two of pandas code
# in the following code cell. Here are a couple of hints:
# - The previous code cell split the original training set into:
#   - a reduced training set (the original training set - the validation set)
#   - the validation set
# - By default, the pandas head method outputs the first 5 rows of the DataFrame. To see more
#   of the training set, specify the n argument to head and assign a large positive integer to n.
train_df.head(1000)
# %%
# shuffle data before splitting
shuffled_train_df = train_df.reindex(np.random.permutation(train_df.index))
epochs, rmse, history = train_model(my_model, shuffled_train_df, my_feature,
my_label, epoch, batch_size,
validation_split)
plot_loss_curve(epochs, history["root_mean_squared_error"],
history["val_root_mean_squared_error"])
# %%
|
the-stack_0_3119 | import glob
import os
from torch.utils.data import Dataset, DataLoader
from natsort import natsorted
import numpy as np
import cv2
import torch
import re
from src.utils.io import path_leaf, load_image
fileExtensions = ["jpg", "jpeg", "png", "tiff"]
class ImagesDataset(Dataset):
"""
    This dataset returns one image at a time. It would probably be more relevant to return, for each item, an image
    together with its associated phase, but that requires thinking about how the phase is handled. One way to do it
    would be to store this information in a file that pandas can read, one entry per image.
"""
    def __init__(self, groundtruth_list, path_weights, path_img, shape=(512, 512), RNN_len=100, recursive=True):  # pass the table or the path
"""
        Could be extended to also take as input the path to the phases (groundtruth) file
:param path_img:
:param shape:
:param recursive:
"""
super(ImagesDataset, self).__init__()
if isinstance(shape, int):
shape = (shape, shape)
        self.path_img = path_img  # path to the image FOLDER
self.shape = shape
self.path_weights = path_weights
self.da_core = None # Data augmentation instance. Only initialized if required
self.groundtruth_list = groundtruth_list
self.RNN_len = RNN_len
self.img_filepath = []
for file in os.listdir(self.path_img):
self.img_filepath.extend(glob.glob(self.path_img + file + '/' + '*.pt', recursive=recursive)) #was .pt
        img_filenames = [path_leaf(path).split('.')[0] for path in self.img_filepath]  # list of all image names ['frame0', 'frame1', ...]
self.img_filepath = np.asarray(self.img_filepath)
img_argsort = np.argsort(img_filenames)
        self.img_filepath = self.img_filepath[img_argsort]  # array of all paths (\data01\frameX.jpg), not yet in order
self.img_filepath = np.array(natsorted(self.img_filepath))
def set_data_augmentation_core(self, da_core):
# self.da_core = da_core
pass
def subset(self, indices):
self.img_filepath = natsorted(self.img_filepath[indices])
self.img_filepath = np.array(self.img_filepath)
def __len__(self):
return len(self.img_filepath)
    # divide and multiply in getitem
    # - RNN_len
    # padding
def __getitem__(self, item):
"""
        item is an index (integer); the dataset returns the image and the corresponding groundtruth
:param item:
:return:
"""
video = self.video_number(self.img_filepath[item])
sequence_img = torch.FloatTensor()
seq_len = 0
if len(self.img_filepath[item:]) > self.RNN_len:
for tensor in self.img_filepath[item : item + self.RNN_len]:
if self.video_number(tensor) == video:
seq_len += 1
                    img = torch.load(tensor, map_location = torch.device('cpu'))  # the tensor path contains both the folder number and the frame number
img = img.reshape(1,img.shape[-1])
sequence_img = torch.cat((sequence_img, img), 0)
else:
break
sequence_phase = self.read_phase(self.img_filepath[item : item+seq_len])
else:
for tensor in self.img_filepath[item:]:
if self.video_number(tensor) == video:
img = torch.load(tensor, map_location = torch.device('cpu'))
img = img.reshape(1,img.shape[-1])
sequence_img = torch.cat((sequence_img, img), 0) #img.logits
else:
break
sequence_phase = self.read_phase(self.img_filepath[item:])
seq_len = len(sequence_img)
return self.pad_seq(sequence_img), self.pad_seq(sequence_phase), seq_len
def pad_seq(self, array):
shape = array.shape
dtype = array.dtype
pad = self.RNN_len - shape[0]
padding = [(0, pad)] + [(0, 0) for _ in shape[1:]]
padded_array = np.pad(array.detach(), padding, mode='constant', constant_values=-1)
if dtype==int:
return padded_array.astype(dtype)
else:
return padded_array.astype(np.float32)
    # padding in getitem
    # padding in the trainer
    # ignore the padded values in CrossEntropyLoss
def get_classes_weight(self):
""" Fonction à implémenter potentiellement: elle charge ou calcul une pondération par classe permettant de les
équilibrer.
J'ai mis du pseudo-code à compléter selon le besoin, cela dépend de l'utilisation
"""
        classes_weight_path = os.path.join(self.path_weights, 'classes_weight.npy')  # path where the class weights are saved
if os.path.exists(classes_weight_path):
print("Loading existing weights for class balance from", classes_weight_path)
class_weights = np.load(classes_weight_path)
else:
print("Building weights for class balance")
classes_counts = np.zeros(128,
dtype=int) # Arbitrary number because the number of classes is unknown at this point
for i in range(len(self.img_filepath)):
phase = self.read_phase(self.img_filepath[i])
u, counts = np.unique(phase, return_counts=True)
classes_counts[u] += counts
classes_counts = classes_counts[
:np.max(np.nonzero(classes_counts)) + 1] # Keep only the classes that have a count
n_classes = len(classes_counts)
n_samples = classes_counts.sum()
class_weights = (n_samples / (n_classes * classes_counts + 1e-8)).astype(np.float32)
np.save(classes_weight_path, class_weights)
print('Weights stored in ', classes_weight_path)
return class_weights
def read_phase(self, filepaths):
Phases = []
for filepath in filepaths:
#find the number X of the video and the number Y of the image, saved in a file dataX with the name frameY
temp = re.findall(r'\d+', filepath)
res = list(map(int, temp))
            X = res[-2] - 1  # groundtruth list indices start at 0 while the dataX folders start at 1
Y = res[-1]
groundtruth = self.groundtruth_list[X]
            B = (groundtruth.at[Y,"Frame,Steps"])  # groundtruth is a pandas DataFrame gathering all the Frame,Steps information
temp = re.findall(r'\d+', B)
res = list(map(int, temp)) #getting numbers from the string B = "frame_number,step_number"
#if there was no Steps value specified, then there is no surgical phase on the image
if len(res) == 2:
Phase = res[1]
else:
Phase = 0
Phases.append(Phase)
return torch.LongTensor(Phases)
def video_number(self,filepath):
temp = re.findall(r'\d+', filepath)
res = list(map(int, temp))
return res[-2]
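# Usage sketch (hypothetical paths; assumes groundtruth_list holds one pandas
# DataFrame with a "Frame,Steps" column per dataX folder of .pt feature tensors):
# dataset = ImagesDataset(groundtruth_list, path_weights='./weights/', path_img='./features/')
# loader = DataLoader(dataset, batch_size=4, shuffle=False)
# for padded_imgs, padded_phases, seq_len in loader:
#     ...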
|
the-stack_0_3120 | """Model-Agnostic Meta-Learning (MAML) algorithm implementation for RL."""
# yapf: disable
import collections
import copy
from dowel import tabular
import numpy as np
import torch
from garage import (_Default, EpisodeBatch, log_multitask_performance,
make_optimizer)
from garage.np import discount_cumsum
from garage.torch import update_module_params
from garage.torch.optimizers import (ConjugateGradientOptimizer,
DifferentiableSGD)
# yapf: enable
class MAML:
"""Model-Agnostic Meta-Learning (MAML).
Args:
inner_algo (garage.torch.algos.VPG): The inner algorithm used for
computing loss.
env (Environment): An environment.
policy (garage.torch.policies.Policy): Policy.
sampler (garage.sampler.Sampler): Sampler.
task_sampler (garage.experiment.TaskSampler): Task sampler.
meta_optimizer (Union[torch.optim.Optimizer, tuple]):
Type of optimizer.
This can be an optimizer type such as `torch.optim.Adam` or a tuple
of type and dictionary, where dictionary contains arguments to
initialize the optimizer e.g. `(torch.optim.Adam, {'lr' : 1e-3})`.
meta_batch_size (int): Number of tasks sampled per batch.
inner_lr (float): Adaptation learning rate.
outer_lr (float): Meta policy learning rate.
num_grad_updates (int): Number of adaptation gradient steps.
meta_evaluator (MetaEvaluator): A meta evaluator for meta-testing. If
None, don't do meta-testing.
evaluate_every_n_epochs (int): Do meta-testing every this epochs.
"""
def __init__(self,
inner_algo,
env,
policy,
sampler,
task_sampler,
meta_optimizer,
meta_batch_size=40,
inner_lr=0.1,
outer_lr=1e-3,
num_grad_updates=1,
meta_evaluator=None,
evaluate_every_n_epochs=1):
self._sampler = sampler
self.max_episode_length = inner_algo.max_episode_length
self._meta_evaluator = meta_evaluator
self._policy = policy
self._env = env
self._task_sampler = task_sampler
self._value_function = copy.deepcopy(inner_algo._value_function)
self._initial_vf_state = self._value_function.state_dict()
self._num_grad_updates = num_grad_updates
self._meta_batch_size = meta_batch_size
self._inner_algo = inner_algo
self._inner_optimizer = DifferentiableSGD(self._policy, lr=inner_lr)
self._meta_optimizer = make_optimizer(meta_optimizer,
module=policy,
lr=_Default(outer_lr),
eps=_Default(1e-5))
self._evaluate_every_n_epochs = evaluate_every_n_epochs
def train(self, trainer):
"""Obtain samples and start training for each epoch.
Args:
trainer (Trainer): Gives the algorithm access to
:method:`~Trainer.step_epochs()`, which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in trainer.step_epochs():
all_samples, all_params = self._obtain_samples(trainer)
last_return = self._train_once(trainer, all_samples, all_params)
trainer.step_itr += 1
return last_return
def _train_once(self, trainer, all_samples, all_params):
"""Train the algorithm once.
Args:
trainer (Trainer): The experiment runner.
all_samples (list[list[_MAMLEpisodeBatch]]): A two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
Returns:
float: Average return.
"""
itr = trainer.step_itr
old_theta = dict(self._policy.named_parameters())
kl_before = self._compute_kl_constraint(all_samples,
all_params,
set_grad=False)
meta_objective = self._compute_meta_loss(all_samples, all_params)
self._meta_optimizer.zero_grad()
meta_objective.backward()
self._meta_optimize(all_samples, all_params)
# Log
loss_after = self._compute_meta_loss(all_samples,
all_params,
set_grad=False)
kl_after = self._compute_kl_constraint(all_samples,
all_params,
set_grad=False)
with torch.no_grad():
policy_entropy = self._compute_policy_entropy(
[task_samples[0] for task_samples in all_samples])
average_return = self._log_performance(
itr, all_samples, meta_objective.item(), loss_after.item(),
kl_before.item(), kl_after.item(),
policy_entropy.mean().item())
if self._meta_evaluator and itr % self._evaluate_every_n_epochs == 0:
self._meta_evaluator.evaluate(self)
update_module_params(self._old_policy, old_theta)
return average_return
def _train_value_function(self, paths):
"""Train the value function.
Args:
paths (list[dict]): A list of collected paths.
Returns:
torch.Tensor: Calculated mean scalar value of value function loss
(float).
"""
# MAML resets a value function to its initial state before training.
self._value_function.load_state_dict(self._initial_vf_state)
obs = np.concatenate([path['observations'] for path in paths], axis=0)
returns = np.concatenate([path['returns'] for path in paths])
obs = torch.Tensor(obs)
returns = torch.Tensor(returns)
vf_loss = self._value_function.compute_loss(obs, returns)
# pylint: disable=protected-access
self._inner_algo._vf_optimizer.zero_grad()
vf_loss.backward()
# pylint: disable=protected-access
self._inner_algo._vf_optimizer.step()
return vf_loss
def _obtain_samples(self, trainer):
"""Obtain samples for each task before and after the fast-adaptation.
Args:
trainer (Trainer): A trainer instance to obtain samples.
Returns:
tuple: Tuple of (all_samples, all_params).
all_samples (list[_MAMLEpisodeBatch]): A list of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter
dictionaries.
"""
tasks = self._task_sampler.sample(self._meta_batch_size)
all_samples = [[] for _ in range(len(tasks))]
all_params = []
theta = dict(self._policy.named_parameters())
for i, env_up in enumerate(tasks):
for j in range(self._num_grad_updates + 1):
episodes = trainer.obtain_episodes(trainer.step_itr,
env_update=env_up)
batch_samples = self._process_samples(episodes)
all_samples[i].append(batch_samples)
# The last iteration does only sampling but no adapting
if j < self._num_grad_updates:
                    # A grad needs to be kept for the next grad update
# Except for the last grad update
require_grad = j < self._num_grad_updates - 1
self._adapt(batch_samples, set_grad=require_grad)
all_params.append(dict(self._policy.named_parameters()))
# Restore to pre-updated policy
update_module_params(self._policy, theta)
return all_samples, all_params
def _adapt(self, batch_samples, set_grad=True):
"""Performs one MAML inner step to update the policy.
Args:
batch_samples (_MAMLEpisodeBatch): Samples data for one
task and one gradient step.
set_grad (bool): if False, update policy parameters in-place.
Else, allow taking gradient of functions of updated parameters
with respect to pre-updated parameters.
"""
# pylint: disable=protected-access
loss = self._inner_algo._compute_loss(*batch_samples[1:])
# Update policy parameters with one SGD step
self._inner_optimizer.zero_grad()
loss.backward(create_graph=set_grad)
with torch.set_grad_enabled(set_grad):
self._inner_optimizer.step()
def _meta_optimize(self, all_samples, all_params):
if isinstance(self._meta_optimizer, ConjugateGradientOptimizer):
self._meta_optimizer.step(
f_loss=lambda: self._compute_meta_loss(
all_samples, all_params, set_grad=False),
f_constraint=lambda: self._compute_kl_constraint(
all_samples, all_params))
else:
self._meta_optimizer.step(lambda: self._compute_meta_loss(
all_samples, all_params, set_grad=False))
def _compute_meta_loss(self, all_samples, all_params, set_grad=True):
"""Compute loss to meta-optimize.
Args:
all_samples (list[list[_MAMLEpisodeBatch]]): A two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
set_grad (bool): Whether to enable gradient calculation or not.
Returns:
torch.Tensor: Calculated mean value of loss.
"""
theta = dict(self._policy.named_parameters())
old_theta = dict(self._old_policy.named_parameters())
losses = []
for task_samples, task_params in zip(all_samples, all_params):
for i in range(self._num_grad_updates):
require_grad = i < self._num_grad_updates - 1 or set_grad
self._adapt(task_samples[i], set_grad=require_grad)
update_module_params(self._old_policy, task_params)
with torch.set_grad_enabled(set_grad):
# pylint: disable=protected-access
last_update = task_samples[-1]
loss = self._inner_algo._compute_loss(*last_update[1:])
losses.append(loss)
update_module_params(self._policy, theta)
update_module_params(self._old_policy, old_theta)
return torch.stack(losses).mean()
def _compute_kl_constraint(self, all_samples, all_params, set_grad=True):
"""Compute KL divergence.
For each task, compute the KL divergence between the old policy
distribution and current policy distribution.
Args:
all_samples (list[list[_MAMLEpisodeBatch]]): Two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
set_grad (bool): Whether to enable gradient calculation or not.
Returns:
torch.Tensor: Calculated mean value of KL divergence.
"""
theta = dict(self._policy.named_parameters())
old_theta = dict(self._old_policy.named_parameters())
kls = []
for task_samples, task_params in zip(all_samples, all_params):
for i in range(self._num_grad_updates):
require_grad = i < self._num_grad_updates - 1 or set_grad
self._adapt(task_samples[i], set_grad=require_grad)
update_module_params(self._old_policy, task_params)
with torch.set_grad_enabled(set_grad):
# pylint: disable=protected-access
kl = self._inner_algo._compute_kl_constraint(
task_samples[-1].observations)
kls.append(kl)
update_module_params(self._policy, theta)
update_module_params(self._old_policy, old_theta)
return torch.stack(kls).mean()
def _compute_policy_entropy(self, task_samples):
"""Compute policy entropy.
Args:
task_samples (list[_MAMLEpisodeBatch]): Samples data for
one task.
Returns:
torch.Tensor: Computed entropy value.
"""
obs = torch.cat([samples.observations for samples in task_samples])
# pylint: disable=protected-access
entropies = self._inner_algo._compute_policy_entropy(obs)
return entropies.mean()
@property
def policy(self):
"""Current policy of the inner algorithm.
Returns:
garage.torch.policies.Policy: Current policy of the inner
algorithm.
"""
return self._policy
@property
def _old_policy(self):
"""Old policy of the inner algorithm.
Returns:
garage.torch.policies.Policy: Old policy of the inner algorithm.
"""
# pylint: disable=protected-access
return self._inner_algo._old_policy
def _process_samples(self, episodes):
"""Process sample data based on the collected paths.
Args:
episodes (EpisodeBatch): Collected batch of episodes.
Returns:
_MAMLEpisodeBatch: Processed samples data.
"""
paths = episodes.to_list()
for path in paths:
path['returns'] = discount_cumsum(
path['rewards'], self._inner_algo.discount).copy()
self._train_value_function(paths)
obs = torch.Tensor(episodes.padded_observations)
actions = torch.Tensor(episodes.padded_actions)
rewards = torch.Tensor(episodes.padded_rewards)
valids = torch.Tensor(episodes.lengths).int()
with torch.no_grad():
# pylint: disable=protected-access
baselines = self._inner_algo._value_function(obs)
return _MAMLEpisodeBatch(paths, obs, actions, rewards, valids,
baselines)
def _log_performance(self, itr, all_samples, loss_before, loss_after,
kl_before, kl, policy_entropy):
"""Evaluate performance of this batch.
Args:
itr (int): Iteration number.
all_samples (list[list[_MAMLEpisodeBatch]]): Two
dimensional list of _MAMLEpisodeBatch of size
[meta_batch_size * (num_grad_updates + 1)]
loss_before (float): Loss before optimization step.
loss_after (float): Loss after optimization step.
kl_before (float): KL divergence before optimization step.
kl (float): KL divergence after optimization step.
policy_entropy (float): Policy entropy.
Returns:
float: The average return in last epoch cycle.
"""
tabular.record('Iteration', itr)
name_map = None
if hasattr(self._env, 'all_task_names'):
names = self._env.all_task_names
name_map = dict(zip(names, names))
rtns = log_multitask_performance(
itr,
EpisodeBatch.from_list(
env_spec=self._env.spec,
paths=[
path for task_paths in all_samples
for path in task_paths[self._num_grad_updates].paths
]),
discount=self._inner_algo.discount,
name_map=name_map)
with tabular.prefix(self._policy.name + '/'):
tabular.record('LossBefore', loss_before)
tabular.record('LossAfter', loss_after)
tabular.record('dLoss', loss_before - loss_after)
tabular.record('KLBefore', kl_before)
tabular.record('KLAfter', kl)
tabular.record('Entropy', policy_entropy)
return np.mean(rtns)
def get_exploration_policy(self):
"""Return a policy used before adaptation to a specific task.
Each time it is retrieved, this policy should only be evaluated in one
task.
Returns:
Policy: The policy used to obtain samples that are later used for
meta-RL adaptation.
"""
return copy.deepcopy(self._policy)
def adapt_policy(self, exploration_policy, exploration_episodes):
"""Adapt the policy by one gradient steps for a task.
Args:
exploration_policy (Policy): A policy which was returned from
get_exploration_policy(), and which generated
exploration_episodes by interacting with an environment.
The caller may not use this object after passing it into this
method.
exploration_episodes (EpisodeBatch): Episodes with which to adapt,
generated by exploration_policy exploring the environment.
Returns:
Policy: A policy adapted to the task represented by the
exploration_episodes.
"""
old_policy, self._policy = self._policy, exploration_policy
self._inner_algo.policy = exploration_policy
self._inner_optimizer.module = exploration_policy
batch_samples = self._process_samples(exploration_episodes)
self._adapt(batch_samples, set_grad=False)
self._policy = old_policy
self._inner_algo.policy = self._inner_optimizer.module = old_policy
return exploration_policy
class _MAMLEpisodeBatch(
collections.namedtuple('_MAMLEpisodeBatch', [
'paths', 'observations', 'actions', 'rewards', 'valids',
'baselines'
])):
r"""A tuple representing a batch of whole episodes in MAML.
A :class:`_MAMLEpisodeBatch` represents a batch of whole episodes
produced from one environment.
+-----------------------+-------------------------------------------------+
| Symbol | Description |
+=======================+=================================================+
| :math:`N` | Episode batch dimension |
+-----------------------+-------------------------------------------------+
| :math:`T` | Maximum length of an episode |
+-----------------------+-------------------------------------------------+
| :math:`S^*` | Single-step shape of a time-series tensor |
+-----------------------+-------------------------------------------------+
Attributes:
paths (list[dict[str, np.ndarray or dict[str, np.ndarray]]]):
            Non-flattened original paths from the sampler.
observations (torch.Tensor): A torch tensor of shape
:math:`(N \bullet T, O^*)` containing the (possibly
multi-dimensional) observations for all time steps in this batch.
These must conform to :obj:`env_spec.observation_space`.
actions (torch.Tensor): A torch tensor of shape
:math:`(N \bullet T, A^*)` containing the (possibly
multi-dimensional) actions for all time steps in this batch. These
must conform to :obj:`env_spec.action_space`.
rewards (torch.Tensor): A torch tensor of shape
:math:`(N \bullet T)` containing the rewards for all time
steps in this batch.
valids (numpy.ndarray): An integer numpy array of shape :math:`(N, )`
containing the length of each episode in this batch. This may be
used to reconstruct the individual episodes.
        baselines (numpy.ndarray): A numpy array of shape
:math:`(N \bullet T, )` containing the value function estimation
at all time steps in this batch.
Raises:
ValueError: If any of the above attributes do not conform to their
prescribed types and shapes.
"""
|
the-stack_0_3122 | import sendgrid
import json
import os
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
##################################################
# Retrieve email statistics by client type. #
# GET /clients/stats #
params = {'aggregated_by': 'day',
'start_date': '2016-01-01',
'end_date': '2016-04-01'}
response = sg.client.clients.stats.get(query_params=params)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieve stats by a specific client type. #
# GET /clients/{client_type}/stats #
params = {'aggregated_by': 'day',
'start_date': '2016-01-01',
'end_date': '2016-04-01'}
client_type = "test_url_param"
response = sg.client.clients._(client_type).stats.get(query_params=params)
print(response.status_code)
print(response.body)
print(response.headers)
|
the-stack_0_3124 | import copy
import pytest
from multidict._compat import USE_CYTHON
from multidict._multidict_py import CIMultiDict as PyCIMultiDict
from multidict._multidict_py import CIMultiDictProxy as PyCIMultiDictProxy
from multidict._multidict_py import MultiDict as PyMultiDict # noqa: E402
from multidict._multidict_py import MultiDictProxy as PyMultiDictProxy
if USE_CYTHON:
from multidict._multidict import ( # type: ignore
CIMultiDict,
CIMultiDictProxy,
MultiDict,
MultiDictProxy,
)
@pytest.fixture(
params=([MultiDict, CIMultiDict] if USE_CYTHON else [])
+ [PyMultiDict, PyCIMultiDict],
ids=(["MultiDict", "CIMultiDict"] if USE_CYTHON else [])
+ ["PyMultiDict", "PyCIMultiDict"],
)
def cls(request):
return request.param
@pytest.fixture(
params=(
[(MultiDictProxy, MultiDict), (CIMultiDictProxy, CIMultiDict)]
if USE_CYTHON
else []
)
+ [(PyMultiDictProxy, PyMultiDict), (PyCIMultiDictProxy, PyCIMultiDict)],
ids=(["MultiDictProxy", "CIMultiDictProxy"] if USE_CYTHON else [])
+ ["PyMultiDictProxy", "PyCIMultiDictProxy"],
)
def proxy_classes(request):
return request.param
def test_copy(cls):
d = cls()
d["foo"] = 6
d2 = d.copy()
d2["foo"] = 7
assert d["foo"] == 6
assert d2["foo"] == 7
def test_copy_proxy(proxy_classes):
proxy_cls, dict_cls = proxy_classes
d = dict_cls()
d["foo"] = 6
p = proxy_cls(d)
d2 = p.copy()
d2["foo"] = 7
assert d["foo"] == 6
assert p["foo"] == 6
assert d2["foo"] == 7
def test_copy_std_copy(cls):
d = cls()
d["foo"] = 6
d2 = copy.copy(d)
d2["foo"] = 7
assert d["foo"] == 6
assert d2["foo"] == 7
def test_ci_multidict_clone(cls):
d = cls(foo=6)
d2 = cls(d)
d2["foo"] = 7
assert d["foo"] == 6
assert d2["foo"] == 7
|
the-stack_0_3125 | #!/usr/bin/env python3
################################################################################
# lev2 sample which renders an instanced model, optionally in VR mode
# Copyright 1996-2020, Michael T. Mayers.
# Distributed under the Boost Software License - Version 1.0 - August 17, 2003
# see http://www.boost.org/LICENSE_1_0.txt
################################################################################
import math, random, argparse
from orkengine.core import *
from orkengine.lev2 import *
################################################################################
parser = argparse.ArgumentParser(description='scenegraph example')
parser.add_argument('--numinstances', metavar="numinstances", help='number of mesh instances' )
parser.add_argument('--vrmode', action="store_true", help='run in vr' )
################################################################################
args = vars(parser.parse_args())
vrmode = (args["vrmode"]==True)
if args["numinstances"]==None:
numinstances = 10000
else:
numinstances = int(args["numinstances"])
################################################################################
class AnimationState(object):
def __init__(self):
super().__init__()
self.curpos = vec3(0,0,-15)
self.dstpos = vec3()
self.currot = quat()
self.cursca = 0.0
self.dstsca = 1.0
self.incrot = quat()
def update(self,deltatime):
self.lerpindex += deltatime*0.33
if self.lerpindex > 1:
self.lerpindex = 1
pos = vec3()
pos.lerp(self.curpos,self.dstpos,self.lerpindex)
sca = self.dstsca*self.lerpindex + self.cursca*(1-self.lerpindex)
mtx = mtx4()
mtx.compose(pos,self.currot,sca)
self.currot = self.currot * self.incrot
done = self.lerpindex>=1
if done:
self.curpos = pos
self.cursca = sca
return mtx,done
################################################################################
class instance_set(object):
########################################################
def __init__(self,model,num_instances,layer):
super().__init__()
self.num_instances = num_instances
self.model = model
self.sgnode = model.createInstancedNode(num_instances,"node1",layer)
self.animated = dict()
self.animstates = dict()
for i in range(num_instances):
self.animstates[i] = AnimationState()
########################################################
def animateInstance(self,deltatime,instance_id):
animstate =self.animstates[instance_id]
self.animated[instance_id] = animstate
########################################
incraxis = vec3(random.uniform(-1,1),
random.uniform(-1,1),
random.uniform(-1,1)).normal()
incrmagn = random.uniform(-0.05,0.05)
########################################
Z = random.uniform(-2.5,-50)
animstate.dstpos = vec3(random.uniform(-2.5,2.5)*Z,
random.uniform(-2.5,2.5)*Z,
Z)
animstate.incrot = quat(incraxis,incrmagn)
animstate.dstsca = random.uniform(0.1,0.65)
animstate.lerpindex = 0.0
########################################################
def update(self,deltatime):
for i in range(5):
instance_id = random.randint(0,numinstances-1)
self.animateInstance(deltatime,instance_id)
keys2del = list()
for id in self.animated.keys():
animstate = self.animstates[id]
matrix, done = animstate.update(deltatime)
self.sgnode.setInstanceMatrix(id,matrix)
if done:
keys2del += [id]
for id in keys2del:
del self.animated[id]
################################################################################
class SceneGraphApp(object):
################################################
def __init__(self):
super().__init__()
self.sceneparams = VarMap()
self.sceneparams.preset = "PBRVR" if vrmode else "PBR"
self.qtapp = OrkEzQtApp.create(self)
self.qtapp.setRefreshPolicy(RefreshFastest, 0)
self.instancesets=[]
##############################################
def onGpuInit(self,ctx):
layer = self.scene.createLayer("layer1")
models = []
#models += [Model("data://tests/pbr1/pbr1")]
#models += [Model("data://tests/pbr_calib.gltf")]
#models += [Model("src://environ/objects/misc/headwalker.obj")]
models += [Model("src://environ/objects/misc/ref/uvsph.glb")]
###################################
for model in models:
self.instancesets += [instance_set(model,numinstances,layer)]
###################################
self.camera = CameraData()
self.cameralut = CameraDataLut()
self.cameralut.addCamera("spawncam",self.camera)
###################################
self.camera.perspective(0.1, 150.0, 45.0)
self.camera.lookAt(vec3(0,0,5), # eye
vec3(0, 0, 0), # tgt
vec3(0, 1, 0)) # up
################################################
def onUpdate(self,updinfo):
###################################
for minst in self.instancesets:
minst.update(updinfo.deltatime)
###################################
self.scene.updateScene(self.cameralut) # update and enqueue all scenenodes
################################################
app = SceneGraphApp()
app.qtapp.exec()
|
the-stack_0_3126 | import unittest
import warnings
import tempfile
from tests.core import TestCore
from tests.core import ASSET_DIR
from pyrep.objects.object import Object
from pyrep.objects.shape import Shape
from pyrep.objects.dummy import Dummy
from pyrep.objects.joint import Joint
from pyrep.objects.proximity_sensor import ProximitySensor
from pyrep.objects.force_sensor import ForceSensor
from pyrep.objects.cartesian_path import CartesianPath
from pyrep.errors import WrongObjectTypeError
import os
from os import path
import numpy as np
class TestPyrep(TestCore):
def test_get_object_wrong_type(self):
with self.assertRaises(WrongObjectTypeError):
ProximitySensor('dynamic_cube')
def test_get_shape(self):
cube = Shape('dynamic_cube')
self.assertIsInstance(cube, Shape)
def test_get_joint(self):
cube = Joint('prismatic_joint')
self.assertIsInstance(cube, Joint)
def test_get_proximity_sensor(self):
cube = ProximitySensor('proximity_sensor')
self.assertIsInstance(cube, ProximitySensor)
def test_get_force_sensor(self):
cube = ForceSensor('force_sensor')
self.assertIsInstance(cube, ForceSensor)
def test_get_cartesian_path(self):
cube = CartesianPath('cartesian_path')
self.assertIsInstance(cube, CartesianPath)
def test_step(self):
cube = Shape('dynamic_cube')
start_pos = cube.get_position()
[self.pyrep.step() for _ in range(2)]
end_pos = cube.get_position()
self.assertFalse(np.allclose(start_pos, end_pos))
def test_load_model(self):
m = self.pyrep.import_model(path.join(ASSET_DIR, 'loadable_model.ttm'))
self.assertIsInstance(m, Shape)
def test_export_scene(self):
scene_file = tempfile.mktemp('.ttt')
self.pyrep.export_scene(scene_file)
os.remove(scene_file)
def test_group_objects(self):
top = Dummy('cubes_under_dummy')
self.assertEqual(
len(top.get_objects_in_tree(exclude_base=True)), 3)
cubes = [Shape('cube%d' % i) for i in range(3)]
ob = self.pyrep.group_objects(cubes)
self.assertIsInstance(ob, Object)
self.assertEqual(
len(top.get_objects_in_tree(exclude_base=True)), 1)
def test_merge_objects(self):
top = Dummy('cubes_under_dummy')
self.assertEqual(
len(top.get_objects_in_tree(exclude_base=True)), 3)
cubes = [Shape('cube%d' % i) for i in range(3)]
ob = self.pyrep.merge_objects(cubes)
self.assertIsInstance(ob, Object)
self.assertEqual(
len(top.get_objects_in_tree(exclude_base=True)), 1)
def test_set_configuration_tree(self):
dynamic_cube = Shape('dynamic_cube')
pos = dynamic_cube.get_position()
config = dynamic_cube.get_configuration_tree()
self.assertIsNotNone(config)
[self.pyrep.step() for _ in range(10)]
self.pyrep.set_configuration_tree(config)
self.assertTrue(np.allclose(pos, dynamic_cube.get_position()))
def test_create_texture_and_get_texture(self):
plane, texture = self.pyrep.create_texture(
path.join(ASSET_DIR, 'wood_texture.jpg'))
self.assertGreaterEqual(texture.get_texture_id(), 0)
self.assertEqual(texture.get_texture_id(),
plane.get_texture().get_texture_id())
def test_get_objects_in_tree(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
objects = self.pyrep.get_objects_in_tree()
self.assertNotEqual(len(w), 0)
for obj in objects:
self.assertIsInstance(obj, Object)
dummys = [Dummy('nested_dummy%d' % i) for i in range(3)]
for root_obj in [dummys[0], dummys[0].get_handle()]:
objects = self.pyrep.get_objects_in_tree(
root_obj, exclude_base=False, first_generation_only=False)
self.assertListEqual(objects, dummys)
for obj in objects:
self.assertIs(type(obj), Dummy)
self.assertListEqual(
self.pyrep.get_objects_in_tree(
root_obj, exclude_base=True, first_generation_only=False),
dummys[1:])
self.assertListEqual(
self.pyrep.get_objects_in_tree(
root_obj, exclude_base=False,first_generation_only=True),
dummys[:-1])
def test_get_collection_by_name(self):
self.assertIsInstance(self.pyrep.get_collection_handle_by_name('Panda_arm'), int)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_3128 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
from tests.unit import unittest
import boto
from boto.regioninfo import RegionInfo, load_endpoint_json, merge_endpoints
from boto.regioninfo import load_regions, get_regions
class TestRegionInfo(object):
def __init__(self, connection=None, name=None, endpoint=None,
connection_cls=None, provider=None):
self.connection = connection
self.name = name
self.endpoint = endpoint
self.connection_cls = connection_cls
self.provider = provider
class FakeConn(object):
pass
class TestEndpointLoading(unittest.TestCase):
def setUp(self):
super(TestEndpointLoading, self).setUp()
def test_load_endpoint_json(self):
endpoints = load_endpoint_json(boto.ENDPOINTS_PATH)
self.assertTrue('ec2' in endpoints)
self.assertEqual(
endpoints['ec2']['us-east-1'],
'ec2.us-east-1.amazonaws.com'
)
def test_merge_endpoints(self):
defaults = {
'ec2': {
'us-east-1': 'ec2.us-east-1.amazonaws.com',
'us-west-1': 'ec2.us-west-1.amazonaws.com',
}
}
additions = {
# Top-level addition.
's3': {
'us-east-1': 's3.amazonaws.com'
},
'ec2': {
# Overwrite. This doesn't exist, just test data.
'us-east-1': 'ec2.auto-resolve.amazonaws.com',
# Deep addition.
'us-west-2': 'ec2.us-west-2.amazonaws.com',
}
}
endpoints = merge_endpoints(defaults, additions)
self.assertEqual(endpoints, {
'ec2': {
'us-east-1': 'ec2.auto-resolve.amazonaws.com',
'us-west-1': 'ec2.us-west-1.amazonaws.com',
'us-west-2': 'ec2.us-west-2.amazonaws.com',
},
's3': {
'us-east-1': 's3.amazonaws.com'
}
})
def test_load_regions(self):
# Just the defaults.
endpoints = load_regions()
self.assertTrue('us-east-1' in endpoints['ec2'])
self.assertFalse('test-1' in endpoints['ec2'])
# With ENV overrides.
os.environ['BOTO_ENDPOINTS'] = os.path.join(
os.path.dirname(__file__),
'test_endpoints.json'
)
self.addCleanup(os.environ.pop, 'BOTO_ENDPOINTS')
endpoints = load_regions()
self.assertTrue('us-east-1' in endpoints['ec2'])
self.assertTrue('test-1' in endpoints['ec2'])
self.assertEqual(endpoints['ec2']['test-1'], 'ec2.test-1.amazonaws.com')
def test_get_regions(self):
# With defaults.
ec2_regions = get_regions('ec2')
self.assertTrue(len(ec2_regions) >= 10)
west_2 = None
for region_info in ec2_regions:
if region_info.name == 'us-west-2':
west_2 = region_info
break
self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!")
self.assertTrue(isinstance(west_2, RegionInfo))
self.assertEqual(west_2.name, 'us-west-2')
self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com')
self.assertEqual(west_2.connection_cls, None)
def test_get_regions_overrides(self):
ec2_regions = get_regions(
'ec2',
region_cls=TestRegionInfo,
connection_cls=FakeConn
)
self.assertTrue(len(ec2_regions) >= 10)
west_2 = None
for region_info in ec2_regions:
if region_info.name == 'us-west-2':
west_2 = region_info
break
self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!")
self.assertFalse(isinstance(west_2, RegionInfo))
self.assertTrue(isinstance(west_2, TestRegionInfo))
self.assertEqual(west_2.name, 'us-west-2')
self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com')
self.assertEqual(west_2.connection_cls, FakeConn)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_3129 | # See also the methods already implemented in cm for ssh management
# I think you reimplemented things that already exist.
# see and inspect cloudmesh.common
import os
from os.path import expanduser
# see content of path_expand it does expanduser as far as I know
from cloudmesh.common.util import path_expand
from cloudmesh.management.configuration.SSHkey import SSHkey
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from cloudmesh.common.debug import VERBOSE
from pprint import pprint
from cloudmesh.configuration.Config import Config
# noinspection PyPep8Naming
class Key(object):
@classmethod
def get_from_dir(cls, directory=None, store=True):
directory = directory or path_expand("~/.ssh")
        # find an approach that also works on Windows; this code must work on
        # both Windows and Linux (add a platform check if needed)
        os.system("chmod 700 $HOME/.ssh")
files = [file for file in os.listdir(expanduser(path_expand(directory)))
if file.lower().endswith(".pub")]
d = []
for file in files:
print(file)
path = directory + "/" + file
            # find an approach that also works on Windows; this code must work
            # on both Windows and Linux (add a platform check if needed)
            os.system("chmod 700 $HOME/.ssh")
with open(path) as fd:
for pubkey in map(str.strip, fd):
# skip empty lines
if not pubkey:
continue
print(pubkey)
d.append(pubkey)
return d
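    # A hedged sketch of a portable alternative to the shell `chmod` call
    # above, responding to the review notes (assumes `import platform`; on
    # Windows, POSIX permission bits mostly do not apply, so the call is
    # skipped there):
    #
    #     if platform.system() != "Windows":
    #         os.chmod(os.path.expanduser("~/.ssh"), 0o700)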
@DatabaseUpdate()
def add(self, name, source):
"""
key add [NAME] [--source=FILENAME]
key add [NAME] [--source=git]
key add [NAME] [--source=ssh]
"""
keys = None
if source == "git":
config = Config()
username = config["cloudmesh.profile.github"]
keys = SSHkey().get_from_git(username)
elif source == "ssh":
key = SSHkey(name=name)
keys = [key]
else:
raise NotImplementedError
# source is filename
return keys
if __name__ == "__main__":
Key.get_from_dir(None, True)
|
the-stack_0_3130 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.option.options import Options
class ThriftLintError(Exception):
"""Raised on a lint failure."""
class ThriftLinter(NailgunTask, JvmToolTaskMixin):
"""Print linter warnings for thrift files.
"""
_CONFIG_SECTION = 'thrift-linter'
@staticmethod
def _is_thrift(target):
return target.is_thrift
@classmethod
def register_options(cls, register):
super(ThriftLinter, cls).register_options(register)
register('--skip', action='store_true', help='Skip thrift linting.')
register('--strict', default=None, action='store_true',
help='Fail the goal if thrift linter errors are found. Overrides the '
'`strict-default` option.')
register('--strict-default', default=False, advanced=True, action='store_true',
help='Sets the default strictness for targets. The `strict` option overrides '
'this value if it is set.')
register('--linter-args', default=[], advanced=True, type=Options.list,
help='Additional options passed to the linter.')
cls.register_jvm_tool(register, 'scrooge-linter')
@classmethod
def product_types(cls):
# Declare the product of this goal. Gen depends on thrift-linter.
return ['thrift-linter']
@classmethod
def prepare(cls, options, round_manager):
super(ThriftLinter, cls).prepare(options, round_manager)
# Linter depends on ivy running before it.
round_manager.require_data('ivy_imports')
@property
def config_section(self):
return self._CONFIG_SECTION
@staticmethod
def _to_bool(value):
# Converts boolean and string values to boolean.
return str(value) == 'True'
def _is_strict(self, target):
# The strict value is read from the following, in order:
# 1. options, --[no-]strict
# 2. java_thrift_library target in BUILD file, thrift_linter_strict = False,
# 3. options, --[no-]strict-default
cmdline_strict = self.get_options().strict
if cmdline_strict is not None:
return self._to_bool(cmdline_strict)
if target.thrift_linter_strict is not None:
return self._to_bool(target.thrift_linter_strict)
return self._to_bool(self.get_options().strict_default)
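  # A hedged worked example of the precedence above: passing `--strict` on the
  # command line overrides a target's `thrift_linter_strict=False`; if neither
  # is set, `--strict-default` (False unless changed) decides.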
def _lint(self, target):
self.context.log.debug('Linting {0}'.format(target.address.spec))
classpath = self.tool_classpath('scrooge-linter')
config_args = []
config_args.extend(self.get_options().linter_args)
if not self._is_strict(target):
config_args.append('--ignore-errors')
paths = target.sources_relative_to_buildroot()
args = config_args + paths
# If runjava returns non-zero, this marks the workunit as a
# FAILURE, and there is no way to wrap this here.
returncode = self.runjava(classpath=classpath,
main='com.twitter.scrooge.linter.Main',
args=args,
workunit_labels=[WorkUnit.COMPILER]) # to let stdout/err through.
if returncode != 0:
raise ThriftLintError(
'Lint errors in target {0} for {1}.'.format(target.address.spec, paths))
def execute(self):
if self.get_options().skip:
return
thrift_targets = self.context.targets(self._is_thrift)
with self.invalidated(thrift_targets) as invalidation_check:
errors = []
for vt in invalidation_check.invalid_vts:
try:
self._lint(vt.target)
except ThriftLintError as e:
errors.append(str(e))
else:
vt.update()
if errors:
raise TaskError('\n'.join(errors))
|
the-stack_0_3131 | # Author: Steven J. Bethard <[email protected]>.
"""Command-line parsing library. Implements argparse for Python 2.6 and below.
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
    - ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter structures is
still considered an implementation detail.)
"""
__version__ = '1.3.0'  # we use our own version number independent of the
# one in stdlib and we release this on pypi.
__external_lib__ = True # to make sure the tests really test THIS lib,
# not the builtin one in Python stdlib
__all__ = [
'ArgumentParser',
'ArgumentError',
'ArgumentTypeError',
'FileType',
'HelpFormatter',
'ArgumentDefaultsHelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'Namespace',
'Action',
'ONE_OR_MORE',
'OPTIONAL',
'PARSER',
'REMAINDER',
'SUPPRESS',
'ZERO_OR_MORE',
]
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
try:
set
except NameError:
# for python < 2.4 compatibility (sets module is there since 2.3):
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
sorted
except NameError:
# for python < 2.4 compatibility:
def sorted(iterable, reverse=False):
result = list(iterable)
result.sort()
if reverse:
result.reverse()
return result
def _callable(obj):
return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = 'A...'
REMAINDER = '...'
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
for func, args in self.items:
func(*args)
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([part
for part in part_strings
if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
if start in inserts:
inserts[start] += ' ['
else:
inserts[start] = '['
inserts[end] = ']'
else:
if start in inserts:
inserts[start] += ' ('
else:
inserts[start] = '('
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
part = self._format_args(action, action.dest)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
        # no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
result = '...'
elif action.nargs == PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['%s' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
def _get_help_string(self, action):
return action.help
class RawDescriptionHelpFormatter(HelpFormatter):
"""Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
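# A short usage note (an illustrative addition, not part of the original
# module): any formatter class above can be passed to ArgumentParser through
# the ``formatter_class`` keyword. The following is a hedged sketch that uses
# only documented argparse API:
#
#     parser = ArgumentParser(
#         description='example',
#         formatter_class=ArgumentDefaultsHelpFormatter)
#     parser.add_argument('--retries', type=int, default=3,
#                         help='number of retries')
#     # the --help output now appends "(default: 3)" to the help text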
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
class ArgumentTypeError(Exception):
"""An error from trying to convert a command line string to a type."""
pass
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python structures.
Action structures are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include::
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- The type which the command-line arguments should be converted
to, should be one of 'string', 'int', 'float', 'complex' or a
callable object that accepts a single string argument. If None,
'string' is assumed.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar',
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
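# A minimal usage sketch (illustrative only, not part of the original module;
# the class and option names below are invented for the example). It shows how
# option_strings/dest flow into Action.__init__ and how __call__ receives the
# parser, the namespace, the converted values, and the option string used.
import argparse

class UppercaseAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        # store the converted value, upper-cased, on the namespace
        setattr(namespace, self.dest, values.upper())

_action_demo = argparse.ArgumentParser(prog='action-demo')
_action_demo.add_argument('--name', action=UppercaseAction)
# _action_demo.parse_args(['--name', 'swift']) -> Namespace(name='SWIFT')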
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for store actions must be > 0; if you '
'have nothing to store, actions such as store '
'true or store const may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
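# A minimal usage sketch (illustrative only, not part of the original module):
# store_true/store_false are just store_const with const=True/False and the
# opposite default, exactly as the subclasses above show.
import argparse

_flag_demo = argparse.ArgumentParser(prog='flag-demo')
_flag_demo.add_argument('--verbose', action='store_true')
_flag_demo.add_argument('--no-color', dest='color', action='store_false')
_flag_demo.add_argument('--mode', action='store_const', const='fast', default='safe')
# _flag_demo.parse_args(['--verbose', '--mode'])
#   -> Namespace(verbose=True, color=True, mode='fast')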
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(values)
setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(self.const)
setattr(namespace, self.dest, items)
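# A minimal usage sketch (illustrative only, not part of the original module):
# each occurrence of an append/append_const option extends a list on the
# namespace, mirroring the copy-append-setattr pattern in the __call__
# implementations above.
import argparse

_list_demo = argparse.ArgumentParser(prog='list-demo')
_list_demo.add_argument('-I', dest='include', action='append')
_list_demo.add_argument('--strict', dest='checks', action='append_const', const='strict')
# _list_demo.parse_args(['-I', 'a', '-I', 'b', '--strict'])
#   -> Namespace(include=['a', 'b'], checks=['strict'])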
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
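# A minimal usage sketch (illustrative only, not part of the original module):
# the classic verbosity counter built on the count action above.
import argparse

_count_demo = argparse.ArgumentParser(prog='count-demo')
_count_demo.add_argument('-v', '--verbose', action='count', default=0)
# _count_demo.parse_args(['-vvv']) -> Namespace(verbose=3)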
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
version=None,
dest=SUPPRESS,
default=SUPPRESS,
help="show program's version number and exit"):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
version = self.version
if version is None:
version = parser.version
formatter = parser._get_formatter()
formatter.add_text(version)
parser.exit(message=formatter.format_help())
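# A minimal usage sketch (illustrative only, not part of the original module):
# the non-deprecated way to expose a version flag is action='version' rather
# than the ArgumentParser(version=...) constructor argument.
import argparse

_ver_demo = argparse.ArgumentParser(prog='ver-demo')
_ver_demo.add_argument('--version', action='version', version='%(prog)s 1.0')
# "ver-demo --version" prints "ver-demo 1.0" and exits via parser.exit()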
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, aliases, help):
metavar = dest = name
if aliases:
metavar += ' (%s)' % ', '.join(aliases)
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=dest, help=help,
metavar=metavar)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = {}
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
aliases = kwargs.pop('aliases', ())
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, aliases, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
# make parser available under aliases also
for alias in aliases:
self._name_parser_map[alias] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)' % tup)
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
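# A minimal usage sketch (illustrative only, not part of the original module):
# subcommands via add_subparsers(); the remaining argv is handed off to the
# selected subparser by _SubParsersAction.__call__ above.
import argparse

_vcs_demo = argparse.ArgumentParser(prog='vcs-demo')
_vcs_sub = _vcs_demo.add_subparsers(dest='command')
_vcs_commit = _vcs_sub.add_parser('commit')
_vcs_commit.add_argument('-m', '--message')
# _vcs_demo.parse_args(['commit', '-m', 'initial'])
#   -> Namespace(command='commit', message='initial')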
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
"""
def __init__(self, mode='r', bufsize=None):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r' % self._mode)
raise ValueError(msg)
# all other arguments are used as file names
if self._bufsize:
return open(string, self._mode, self._bufsize)
else:
return open(string, self._mode)
def __repr__(self):
args = [self._mode, self._bufsize]
args_str = ', '.join([repr(arg) for arg in args if arg is not None])
return '%s(%s)' % (type(self).__name__, args_str)
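# A minimal usage sketch (illustrative only, not part of the original module):
# FileType instances are passed as type= factories; '-' maps to stdin/stdout
# as implemented in __call__ above.
import argparse

_io_demo = argparse.ArgumentParser(prog='io-demo')
_io_demo.add_argument('infile', type=argparse.FileType('r'))
_io_demo.add_argument('outfile', type=argparse.FileType('w'))
# _io_demo.parse_args(['-', '-']) -> Namespace(infile=sys.stdin, outfile=sys.stdout)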
# ===============================
# Optional and Positional Parsing
# ===============================
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
__hash__ = None
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
def __contains__(self, key):
return key in self.__dict__
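# A minimal usage sketch (illustrative only, not part of the original module):
# Namespace is an attribute bag with equality and `in` support, and
# parse_args() can populate a caller-supplied instance.
import argparse

_ns = argparse.Namespace(debug=True)
assert 'debug' in _ns and _ns.debug is True
_ns_demo = argparse.ArgumentParser(prog='ns-demo')
_ns_demo.add_argument('--level', default='info')
# _ns_demo.parse_args([], namespace=_ns) -> Namespace(debug=True, level='info')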
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default accessor methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
def get_default(self, dest):
for action in self._actions:
if action.dest == dest and action.default is not None:
return action.default
return self._defaults.get(dest, None)
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
if args and 'dest' in kwargs:
raise ValueError('dest supplied twice for positional argument')
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
if not _callable(action_class):
raise ValueError('unknown action "%s"' % action_class)
action = action_class(**kwargs)
# raise an error if the action type is not callable
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
raise ValueError('%r is not callable' % type_func)
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add container's mutually exclusive groups
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
mutex_group = self.add_mutually_exclusive_group(
required=group.required)
# map the actions to their new mutex group
for action in group._group_actions:
group_map[action] = mutex_group
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
msg = _('invalid option string %r: '
'must start with a character %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if len(option_string) > 1:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
if not dest:
msg = _('dest= is required for options like %r')
raise ValueError(msg % option_string)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = _('conflicting option string(s): %s')
conflict_string = ', '.join([option_string
for option_string, action
in conflicting_actions])
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
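# A minimal usage sketch (illustrative only, not part of the original module):
# set_defaults()/get_default() are _ActionsContainer methods inherited by
# ArgumentParser; parser-level defaults override per-argument defaults.
import argparse

_def_demo = argparse.ArgumentParser(prog='defaults-demo')
_def_demo.add_argument('--retries', type=int, default=3)
_def_demo.set_defaults(retries=5, dry_run=False)
assert _def_demo.get_default('retries') == 5
assert _def_demo.get_default('dry_run') is False
# _def_demo.parse_args([]) -> Namespace(retries=5, dry_run=False)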
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = \
container._has_negative_number_optionals
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
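# A minimal usage sketch (illustrative only, not part of the original module):
# argument groups only change help layout, while a mutually exclusive group
# adds a parse-time constraint enforced in ArgumentParser._parse_known_args.
import argparse

_grp_demo = argparse.ArgumentParser(prog='group-demo')
_net = _grp_demo.add_argument_group('network options')
_net.add_argument('--host', default='localhost')
_mx = _grp_demo.add_mutually_exclusive_group()
_mx.add_argument('--quiet', action='store_true')
_mx.add_argument('--verbose', action='store_true')
# "group-demo --quiet --verbose" fails with:
#   argument --verbose: not allowed with argument --quiet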
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python structures.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
- add_help -- Add a -h/--help option
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True):
if version is not None:
import warnings
warnings.warn(
"""The "version" argument to ArgumentParser is deprecated. """
"""Please use """
""""add_argument(..., action='version', version="N", ...)" """
"""instead""", DeprecationWarning)
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.version = version
self.formatter_class = formatter_class
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
self._subparsers = None
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
if '-' in prefix_chars:
default_prefix = '-'
else:
default_prefix = prefix_chars[0]
if self.add_help:
self.add_argument(
default_prefix+'h', default_prefix*2+'help',
action='help', default=SUPPRESS,
help=_('show this help message and exit'))
if self.version:
self.add_argument(
default_prefix+'v', default_prefix*2+'version',
action='version', default=SUPPRESS,
version=self.version,
help=_("show program's version number and exit"))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def parse_known_args(self, args=None, namespace=None):
# args default to the system args
if args is None:
args = _sys.argv[1:]
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
default = action.default
if isinstance(action.default, basestring):
default = self._get_value(action, default)
setattr(namespace, action.dest, default)
# add any parser defaults that aren't present
for dest in self._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
try:
namespace, args = self._parse_known_args(args, namespace)
if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
return namespace, args
except ArgumentError:
err = _sys.exc_info()[1]
self.error(str(err))
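    # Usage sketch (illustrative): unlike parse_args(), parse_known_args()
    # returns the leftover argv instead of erroring, e.g.
    #   parser = argparse.ArgumentParser()
    #   parser.add_argument('--log-level', default='warn')
    #   parser.parse_known_args(['--log-level', 'debug', '--extra', '1'])
    #   -> (Namespace(log_level='debug'), ['--extra', '1'])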
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
# converts arg strings to the appropriate type and then takes the action
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# if we didn't use all the Positional structures, there were too few
# arg strings supplied.
if positionals:
self.error(_('too few arguments'))
# make sure all required actions were present
for action in self._actions:
if action.required:
if action not in seen_actions:
name = _get_action_name(action)
self.error(_('argument %s is required') % name)
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def convert_arg_line_to_args(self, arg_line):
return [arg_line]
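    # Usage sketch (illustrative): with ArgumentParser(fromfile_prefix_chars='@'),
    # "@args.txt" on the command line is expanded by _read_args_from_files above.
    # Overriding this hook changes how each line of that file is split, e.g.
    #   class WordSplittingParser(argparse.ArgumentParser):
    #       def convert_arg_line_to_args(self, arg_line):
    #           return arg_line.split()
    # so a line "--opt value" in args.txt yields ['--opt', 'value'] instead of
    # the single argument '--opt value'.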
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
default = _('expected %s argument(s)') % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it's an empty string, it was meant to be a positional
if not arg_string:
return None
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# if it's just a single character, it was meant to be positional
if len(arg_string) == 1:
return None
# if the option string before the "=" is present, return the action
if '=' in arg_string:
option_string, explicit_arg = arg_string.split('=', 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
return action, option_string, explicit_arg
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join([option_string
for action, option_string, explicit_arg in option_tuples])
tup = arg_string, options
self.error(_('ambiguous option: %s could match %s') % tup)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# if it contains a space, it was meant to be a positional
if ' ' in arg_string:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow any number of options or arguments
elif nargs == REMAINDER:
nargs_pattern = '([-AO]*)'
# allow one argument followed by any number of options or arguments
elif nargs == PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER args, strip out '--'
if action.nargs not in [PARSER, REMAINDER]:
arg_strings = [s for s in arg_strings if s != '--']
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, basestring):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# REMAINDER arguments convert all values, checking none
elif action.nargs == REMAINDER:
value = [self._get_value(action, v) for v in arg_strings]
# PARSER arguments convert all values, but check only the first
elif action.nargs == PARSER:
value = [self._get_value(action, v) for v in arg_strings]
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = [self._get_value(action, v) for v in arg_strings]
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
name = getattr(action.type, '__name__', repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
import warnings
warnings.warn(
'The format_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_help(), file)
def print_version(self, file=None):
import warnings
warnings.warn(
'The print_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
self._print_message(message, _sys.stderr)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, message))
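# A minimal usage sketch (illustrative only, not part of the original module;
# the subclass name is invented for the example): error() must not return, so
# a parser embedded in a library can raise instead of calling sys.exit(2).
import argparse

class RaisingParser(argparse.ArgumentParser):
    def error(self, message):
        # surface parse failures to the caller instead of exiting
        raise ValueError('%s: error: %s' % (self.prog, message))

_raising = RaisingParser(prog='lib-embedded')
_raising.add_argument('--count', type=int)
# _raising.parse_args(['--count', 'x']) raises ValueError instead of exiting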
|
the-stack_0_3132 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Object Server for Swift """
import six.moves.cPickle as pickle
import json
import os
import multiprocessing
import time
import traceback
import socket
import math
from swift import gettext_ as _
from hashlib import md5
from eventlet import sleep, wsgi, Timeout, tpool
from eventlet.greenthread import spawn
from swift.common.utils import public, get_logger, \
config_true_value, timing_stats, replication, \
normalize_delete_at_timestamp, get_log_line, Timestamp, \
get_expirer_container, parse_mime_headers, \
iter_multipart_mime_documents, extract_swift_bytes, safe_json_loads, \
config_auto_int_value
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_object_creation, \
valid_timestamp, check_utf8
from swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \
DiskFileNotExist, DiskFileCollision, DiskFileNoSpace, DiskFileDeleted, \
DiskFileDeviceUnavailable, DiskFileExpired, ChunkReadTimeout, \
ChunkReadError, DiskFileXattrNotSupported
from swift.obj import ssync_receiver
from swift.common.http import is_success
from swift.common.base_storage_server import BaseStorageServer
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.request_helpers import get_name_and_placement, \
is_user_meta, is_sys_or_user_meta, is_object_transient_sysmeta, \
resolve_etag_is_at_header, is_sys_meta
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPForbidden, HTTPException, HTTPConflict, \
HTTPServerError
from swift.obj.diskfile import DATAFILE_SYSTEM_META, DiskFileRouter
def iter_mime_headers_and_bodies(wsgi_input, mime_boundary, read_chunk_size):
mime_documents_iter = iter_multipart_mime_documents(
wsgi_input, mime_boundary, read_chunk_size)
for file_like in mime_documents_iter:
hdrs = parse_mime_headers(file_like)
yield (hdrs, file_like)
def drain(file_like, read_size, timeout):
"""
Read and discard any bytes from file_like.
:param file_like: file-like object to read from
:param read_size: how big a chunk to read at a time
:param timeout: how long to wait for a read (use None for no timeout)
:raises ChunkReadTimeout: if no chunk was read in time
"""
while True:
with ChunkReadTimeout(timeout):
chunk = file_like.read(read_size)
if not chunk:
break
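# A minimal usage sketch (illustrative only, not part of the original module):
# drain() reads and throws away a body in fixed-size chunks; with timeout=None
# the ChunkReadTimeout context manager never fires, so it also works on a
# plain in-memory file object.
def _drain_example():
    from io import BytesIO
    body = BytesIO(b'x' * (3 * 65536))
    drain(body, read_size=65536, timeout=None)  # reads to EOF, discards bytes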
def _make_backend_fragments_header(fragments):
if fragments:
result = {}
for ts, frag_list in fragments.items():
result[ts.internal] = frag_list
return json.dumps(result)
return None
class EventletPlungerString(str):
"""
Eventlet won't send headers until it's accumulated at least
eventlet.wsgi.MINIMUM_CHUNK_SIZE bytes or the app iter is exhausted. If we
want to send the response body behind Eventlet's back, perhaps with some
zero-copy wizardry, then we have to unclog the plumbing in eventlet.wsgi
to force the headers out, so we use an EventletPlungerString to empty out
all of Eventlet's buffers.
"""
def __len__(self):
return wsgi.MINIMUM_CHUNK_SIZE + 1
class ObjectController(BaseStorageServer):
"""Implements the WSGI application for the Swift Object Server."""
server_type = 'object-server'
def __init__(self, conf, logger=None):
"""
Creates a new WSGI application for the Swift Object Server. An
example configuration is given at
<source-dir>/etc/object-server.conf-sample or
/etc/swift/object-server.conf-sample.
"""
super(ObjectController, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route='object-server')
self.node_timeout = float(conf.get('node_timeout', 3))
self.container_update_timeout = float(
conf.get('container_update_timeout', 1))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.max_upload_time = int(conf.get('max_upload_time', 86400))
self.slow = int(conf.get('slow', 0))
self.keep_cache_private = \
config_true_value(conf.get('keep_cache_private', 'false'))
default_allowed_headers = '''
content-disposition,
content-encoding,
x-delete-at,
x-object-manifest,
x-static-large-object,
'''
extra_allowed_headers = [
header.strip().lower() for header in conf.get(
'allowed_headers', default_allowed_headers).split(',')
if header.strip()
]
self.allowed_headers = set()
for header in extra_allowed_headers:
if header not in DATAFILE_SYSTEM_META:
self.allowed_headers.add(header)
self.auto_create_account_prefix = \
conf.get('auto_create_account_prefix') or '.'
self.expiring_objects_account = self.auto_create_account_prefix + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
# Initialization was successful, so now apply the network chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
# NOTE WELL: This is a class setting, so until we get set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because the primary motivation for this is to optimize how data
# is written back to the proxy server, we could use the value from the
# disk_chunk_size parameter. However, it affects all created sockets
# using this class so we have chosen to tie it to the
# network_chunk_size parameter value instead.
socket._fileobject.default_bufsize = self.network_chunk_size
# Provide further setup specific to an object server implementation.
self.setup(conf)
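    # Configuration sketch (illustrative): the options read above normally come
    # from the [app:object-server] section of object-server.conf, e.g.
    #   [app:object-server]
    #   node_timeout = 3
    #   conn_timeout = 0.5
    #   client_timeout = 60
    #   disk_chunk_size = 65536
    #   network_chunk_size = 65536
    #   max_upload_time = 86400
    #   allowed_headers = content-disposition, x-object-manifest, x-delete-at
    # The numeric values shown are the defaults applied when a key is omitted;
    # setting allowed_headers replaces the default list used above.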
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_router = DiskFileRouter(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if 'replication_semaphore' in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf['replication_semaphore'][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(
conf.get('replication_failure_threshold') or 100)
self.replication_failure_ratio = float(
conf.get('replication_failure_ratio') or 1.0)
servers_per_port = int(conf.get('servers_per_port', '0') or 0)
if servers_per_port:
# The typical servers-per-port deployment also uses one port per
# disk, so you really get N servers per disk. In that case,
# having a pool of 20 threads per server per disk is far too
# much. For example, given a 60-disk chassis and 4 servers per
# disk, the default configuration will give us 21 threads per
# server (the main thread plus the twenty tpool threads), for a
# total of around 60 * 21 * 4 = 5040 threads. This is clearly
# too high.
#
# Instead, we use a tpool size of 1, giving us 2 threads per
# process. In the example above, that's 60 * 2 * 4 = 480
# threads, which is reasonable since there are 240 processes.
default_tpool_size = 1
else:
# If we're not using servers-per-port, then leave the tpool size
# alone. The default (20) is typically good enough for one
# object server handling requests for many disks.
default_tpool_size = None
tpool_size = config_auto_int_value(
conf.get('eventlet_tpool_num_threads'),
default_tpool_size)
if tpool_size:
tpool.set_num_threads(tpool_size)
def get_diskfile(self, device, partition, account, container, obj,
policy, **kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
return self._diskfile_router[policy].get_diskfile(
device, partition, account, container, obj, policy, **kwargs)
def async_update(self, op, account, container, obj, host, partition,
contdevice, headers_out, objdevice, policy,
logger_thread_locals=None):
"""
Sends or saves an async update.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param host: host that the container is on
:param partition: partition that the container is on
:param contdevice: device name that the container is on
:param headers_out: dictionary of headers to send in the container
request
:param objdevice: device name that the object is in
:param policy: the associated BaseStoragePolicy instance
:param logger_thread_locals: The thread local values to be set on the
self.logger to retain transaction
logging information.
"""
if logger_thread_locals:
self.logger.thread_locals = logger_thread_locals
headers_out['user-agent'] = 'object-server %s' % os.getpid()
full_path = '/%s/%s/%s' % (account, container, obj)
if all([host, partition, contdevice]):
try:
with ConnectionTimeout(self.conn_timeout):
ip, port = host.rsplit(':', 1)
conn = http_connect(ip, port, contdevice, partition, op,
full_path, headers_out)
with Timeout(self.node_timeout):
response = conn.getresponse()
response.read()
if is_success(response.status):
return
else:
self.logger.error(_(
'ERROR Container update failed '
'(saving for async update later): %(status)d '
'response from %(ip)s:%(port)s/%(dev)s'),
{'status': response.status, 'ip': ip, 'port': port,
'dev': contdevice})
except (Exception, Timeout):
self.logger.exception(_(
'ERROR container update failed with '
'%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
{'ip': ip, 'port': port, 'dev': contdevice})
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
timestamp = headers_out.get('x-meta-timestamp',
headers_out.get('x-timestamp'))
self._diskfile_router[policy].pickle_async_update(
objdevice, account, container, obj, data, timestamp, policy)
def container_update(self, op, account, container, obj, request,
headers_out, objdevice, policy):
"""
Update the container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param request: the original request object driving the update
:param headers_out: dictionary of headers to send in the container
request(s)
:param objdevice: device name that the object is in
:param policy: the BaseStoragePolicy instance
"""
headers_in = request.headers
conthosts = [h.strip() for h in
headers_in.get('X-Container-Host', '').split(',')]
contdevices = [d.strip() for d in
headers_in.get('X-Container-Device', '').split(',')]
contpartition = headers_in.get('X-Container-Partition', '')
if len(conthosts) != len(contdevices):
# This shouldn't happen unless there's a bug in the proxy,
# but if there is, we want to know about it.
self.logger.error(_(
'ERROR Container update failed: different '
'numbers of hosts and devices in request: '
'"%(hosts)s" vs "%(devices)s"') % {
'hosts': headers_in.get('X-Container-Host', ''),
'devices': headers_in.get('X-Container-Device', '')})
return
if contpartition:
updates = zip(conthosts, contdevices)
else:
updates = []
headers_out['x-trans-id'] = headers_in.get('x-trans-id', '-')
headers_out['referer'] = request.as_referer()
headers_out['X-Backend-Storage-Policy-Index'] = int(policy)
update_greenthreads = []
for conthost, contdevice in updates:
gt = spawn(self.async_update, op, account, container, obj,
conthost, contpartition, contdevice, headers_out,
objdevice, policy,
logger_thread_locals=self.logger.thread_locals)
update_greenthreads.append(gt)
# Wait a little bit to see if the container updates are successful.
# If we immediately return after firing off the greenthread above, then
# we're more likely to confuse the end-user who does a listing right
# after getting a successful response to the object create. The
# `container_update_timeout` bounds the length of time we wait so that
# one slow container server doesn't make the entire request lag.
try:
with Timeout(self.container_update_timeout):
for gt in update_greenthreads:
gt.wait()
except Timeout:
# updates didn't go through, log it and return
self.logger.debug(
'Container update timeout (%.4fs) waiting for %s',
self.container_update_timeout, updates)
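        # Illustrative only (values are made up): the proxy drives this fan-out
        # with per-replica headers such as
        #   X-Container-Host:      10.0.0.1:6201,10.0.0.2:6201
        #   X-Container-Device:    sdb1,sdc1
        #   X-Container-Partition: 312
        # Each (host, device) pair becomes one async_update greenthread above.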
def delete_at_update(self, op, delete_at, account, container, obj,
request, objdevice, policy):
"""
Update the expiring objects container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param delete_at: scheduled delete in UNIX seconds, int
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param request: the original request driving the update
:param objdevice: device name that the object is in
:param policy: the BaseStoragePolicy instance (used for tmp dir)
"""
if config_true_value(
request.headers.get('x-backend-replication', 'f')):
return
delete_at = normalize_delete_at_timestamp(delete_at)
updates = [(None, None)]
partition = None
hosts = contdevices = [None]
headers_in = request.headers
headers_out = HeaderKeyDict({
# system accounts are always Policy-0
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': request.timestamp.internal,
'x-trans-id': headers_in.get('x-trans-id', '-'),
'referer': request.as_referer()})
if op != 'DELETE':
delete_at_container = headers_in.get('X-Delete-At-Container', None)
if not delete_at_container:
self.logger.warning(
'X-Delete-At-Container header must be specified for '
'expiring objects background %s to work properly. Making '
'best guess as to the container name for now.' % op)
# TODO(gholt): In a future release, change the above warning to
# a raised exception and remove the guess code below.
delete_at_container = get_expirer_container(
delete_at, self.expiring_objects_container_divisor,
account, container, obj)
partition = headers_in.get('X-Delete-At-Partition', None)
hosts = headers_in.get('X-Delete-At-Host', '')
contdevices = headers_in.get('X-Delete-At-Device', '')
updates = [upd for upd in
zip((h.strip() for h in hosts.split(',')),
(c.strip() for c in contdevices.split(',')))
if all(upd) and partition]
if not updates:
updates = [(None, None)]
headers_out['x-size'] = '0'
headers_out['x-content-type'] = 'text/plain'
headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
else:
# DELETEs of old expiration data have no way of knowing what the
# old X-Delete-At-Container was at the time of the initial setting
# of the data, so a best guess is made here.
# Worst case is a DELETE is issued now for something that doesn't
# exist there and the original data is left where it is, where
# it will be ignored when the expirer eventually tries to issue the
# object DELETE later since the X-Delete-At value won't match up.
delete_at_container = get_expirer_container(
delete_at, self.expiring_objects_container_divisor,
account, container, obj)
delete_at_container = normalize_delete_at_timestamp(
delete_at_container)
for host, contdevice in updates:
self.async_update(
op, self.expiring_objects_account, delete_at_container,
'%s-%s/%s/%s' % (delete_at, account, container, obj),
host, partition, contdevice, headers_out, objdevice,
policy)
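        # Rough example (names and values are illustrative): for
        # delete_at=1525354800 on object AUTH_test/photos/cat.jpg, the update
        # goes to the system expiring-objects account
        # (self.expiring_objects_account), to a container whose name is derived
        # from delete_at and the configured divisor, with object name
        # '1525354800-AUTH_test/photos/cat.jpg'.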
def _make_timeout_reader(self, file_like):
def timeout_reader():
with ChunkReadTimeout(self.client_timeout):
try:
return file_like.read(self.network_chunk_size)
except (IOError, ValueError):
raise ChunkReadError
return timeout_reader
def _read_put_commit_message(self, mime_documents_iter):
rcvd_commit = False
try:
with ChunkReadTimeout(self.client_timeout):
commit_hdrs, commit_iter = next(mime_documents_iter)
if commit_hdrs.get('X-Document', None) == "put commit":
rcvd_commit = True
drain(commit_iter, self.network_chunk_size, self.client_timeout)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
raise HTTPBadRequest(body="couldn't find PUT commit MIME doc")
return rcvd_commit
def _read_metadata_footer(self, mime_documents_iter):
try:
with ChunkReadTimeout(self.client_timeout):
footer_hdrs, footer_iter = next(mime_documents_iter)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
raise HTTPBadRequest(body="couldn't find footer MIME doc")
timeout_reader = self._make_timeout_reader(footer_iter)
try:
footer_body = ''.join(iter(timeout_reader, ''))
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
footer_md5 = footer_hdrs.get('Content-MD5')
if not footer_md5:
raise HTTPBadRequest(body="no Content-MD5 in footer")
if footer_md5 != md5(footer_body).hexdigest():
raise HTTPUnprocessableEntity(body="footer MD5 mismatch")
try:
return HeaderKeyDict(json.loads(footer_body))
except ValueError:
raise HTTPBadRequest("invalid JSON for footer doc")
def _check_container_override(self, update_headers, metadata,
footers=None):
"""
Applies any overrides to the container update headers.
Overrides may be in the x-object-sysmeta-container-update- namespace or
the x-backend-container-update-override- namespace. The former is
preferred and is used by proxy middlewares. The latter is historical
but is still used with EC policy PUT requests; for backwards
compatibility the header names used with EC policy requests have not
been changed to the sysmeta namespace - that way the EC PUT path of a
newer proxy will remain compatible with an object server that pre-dates
the introduction of the x-object-sysmeta-container-update- namespace
and vice-versa.
:param update_headers: a dict of headers used in the container update
        :param metadata: a dict that may contain override items
        :param footers: another dict that may contain override items, at a
                        higher priority than metadata
"""
footers = footers or {}
# the order of this list is significant:
# x-object-sysmeta-container-update-override-* headers take precedence
# over x-backend-container-update-override-* headers
override_prefixes = ['x-backend-container-update-override-',
'x-object-sysmeta-container-update-override-']
for override_prefix in override_prefixes:
for key, val in metadata.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
# apply x-backend-container-update-override* from footers *before*
# x-object-sysmeta-container-update-override-* from headers
for key, val in footers.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
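        # Worked example (the value is made up, the prefixes are the ones
        # handled above): a metadata item
        #   'X-Object-Sysmeta-Container-Update-Override-Etag': 'abc123'
        # ends up in the container update as
        #   'X-Etag': 'abc123'
        # because the override prefix is replaced with 'x-'.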
def _preserve_slo_manifest(self, update_metadata, orig_metadata):
if 'X-Static-Large-Object' in orig_metadata:
update_metadata['X-Static-Large-Object'] = \
orig_metadata['X-Static-Large-Object']
@public
@timing_stats()
def POST(self, request):
"""Handle HTTP POST requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
req_timestamp = valid_timestamp(request)
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
if new_delete_at and new_delete_at < time.time():
return HTTPBadRequest(body='X-Delete-At in past', request=request,
content_type='text/plain')
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, open_expired=config_true_value(
request.headers.get('x-backend-replication', 'false')),
next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata()
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined):
return HTTPNotFound(request=request)
orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
orig_ctype_timestamp = disk_file.content_type_timestamp
req_ctype_time = '0'
req_ctype = request.headers.get('Content-Type')
if req_ctype:
req_ctype_time = request.headers.get('Content-Type-Timestamp',
req_timestamp.internal)
req_ctype_timestamp = Timestamp(req_ctype_time)
if orig_timestamp >= req_timestamp \
and orig_ctype_timestamp >= req_ctype_timestamp:
return HTTPConflict(
request=request,
headers={'X-Backend-Timestamp': orig_timestamp.internal})
if req_timestamp > orig_timestamp:
metadata = {'X-Timestamp': req_timestamp.internal}
self._preserve_slo_manifest(metadata, orig_metadata)
metadata.update(val for val in request.headers.items()
if (is_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
headers_to_copy = (
request.headers.get(
'X-Backend-Replication-Headers', '').split() +
list(self.allowed_headers))
for header_key in headers_to_copy:
if header_key in request.headers:
header_caps = header_key.title()
metadata[header_caps] = request.headers[header_key]
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
if orig_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update(
'PUT', new_delete_at, account, container, obj, request,
device, policy)
if orig_delete_at:
self.delete_at_update('DELETE', orig_delete_at, account,
container, obj, request, device,
policy)
else:
# preserve existing metadata, only content-type may be updated
metadata = dict(disk_file.get_metafile_metadata())
if req_ctype_timestamp > orig_ctype_timestamp:
# we have a new content-type, add to metadata and container update
content_type_headers = {
'Content-Type': request.headers['Content-Type'],
'Content-Type-Timestamp': req_ctype_timestamp.internal
}
metadata.update(content_type_headers)
else:
# send existing content-type with container update
content_type_headers = {
'Content-Type': disk_file.content_type,
'Content-Type-Timestamp': orig_ctype_timestamp.internal
}
if orig_ctype_timestamp != disk_file.data_timestamp:
# only add to metadata if it's not the datafile content-type
metadata.update(content_type_headers)
try:
disk_file.write_metadata(metadata)
except (DiskFileXattrNotSupported, DiskFileNoSpace):
return HTTPInsufficientStorage(drive=device, request=request)
if (content_type_headers['Content-Type-Timestamp']
!= disk_file.data_timestamp):
# Current content-type is not from the datafile, but the datafile
# content-type may have a swift_bytes param that was appended by
# SLO and we must continue to send that with the container update.
# Do this (rather than use a separate header) for backwards
# compatibility because there may be 'legacy' container updates in
# async pending that have content-types with swift_bytes params, so
# we have to be able to handle those in container server anyway.
_, swift_bytes = extract_swift_bytes(
disk_file.get_datafile_metadata()['Content-Type'])
if swift_bytes:
content_type_headers['Content-Type'] += (';swift_bytes=%s'
% swift_bytes)
update_headers = HeaderKeyDict({
'x-size': orig_metadata['Content-Length'],
'x-content-type': content_type_headers['Content-Type'],
'x-timestamp': disk_file.data_timestamp.internal,
'x-content-type-timestamp':
content_type_headers['Content-Type-Timestamp'],
'x-meta-timestamp': metadata['X-Timestamp'],
'x-etag': orig_metadata['ETag']})
# Special cases for backwards compatibility.
# For EC policy, send X-Object-Sysmeta-Ec-Etag which is same as the
# X-Backend-Container-Update-Override-Etag value sent with the original
# PUT. Similarly send X-Object-Sysmeta-Ec-Content-Length which is the
# same as the X-Backend-Container-Update-Override-Size value. We have
# to send Etag and size with a POST container update because the
# original PUT container update may have failed or be in async_pending.
if 'X-Object-Sysmeta-Ec-Etag' in orig_metadata:
update_headers['X-Etag'] = orig_metadata[
'X-Object-Sysmeta-Ec-Etag']
if 'X-Object-Sysmeta-Ec-Content-Length' in orig_metadata:
update_headers['X-Size'] = orig_metadata[
'X-Object-Sysmeta-Ec-Content-Length']
self._check_container_override(update_headers, orig_metadata)
# object POST updates are PUT to the container server
self.container_update(
'PUT', account, container, obj, request, update_headers,
device, policy)
# Add sysmeta to response
resp_headers = {}
for key, value in orig_metadata.items():
if is_sys_meta('object', key):
resp_headers[key] = value
return HTTPAccepted(request=request, headers=resp_headers)
@public
@timing_stats()
def PUT(self, request):
"""Handle HTTP PUT requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
req_timestamp = valid_timestamp(request)
error_response = check_object_creation(request, obj)
if error_response:
return error_response
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
try:
fsize = request.message_length()
except ValueError as e:
return HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
# In case of multipart-MIME put, the proxy sends a chunked request,
# but may let us know the real content length so we can verify that
# we have enough disk space to hold the object.
if fsize is None:
fsize = request.headers.get('X-Backend-Obj-Content-Length')
if fsize is not None:
try:
fsize = int(fsize)
except ValueError as e:
return HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
# SSYNC will include Frag-Index header for subrequests to primary
# nodes; handoff nodes should 409 subrequests to over-write an
        # existing data fragment until they have offloaded the existing fragment
frag_index = request.headers.get('X-Backend-Ssync-Frag-Index')
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_index=frag_index,
next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata()
orig_timestamp = disk_file.data_timestamp
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except DiskFileDeleted as e:
orig_metadata = {}
orig_timestamp = e.timestamp
except (DiskFileNotExist, DiskFileQuarantined):
orig_metadata = {}
orig_timestamp = Timestamp(0)
# Checks for If-None-Match
if request.if_none_match is not None and orig_metadata:
if '*' in request.if_none_match:
# File exists already so return 412
return HTTPPreconditionFailed(request=request)
if orig_metadata.get('ETag') in request.if_none_match:
# The current ETag matches, so return 412
return HTTPPreconditionFailed(request=request)
if orig_timestamp >= req_timestamp:
return HTTPConflict(
request=request,
headers={'X-Backend-Timestamp': orig_timestamp.internal})
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
upload_expiration = time.time() + self.max_upload_time
etag = md5()
elapsed_time = 0
try:
with disk_file.create(size=fsize) as writer:
upload_size = 0
# If the proxy wants to send us object metadata after the
# object body, it sets some headers. We have to tell the
# proxy, in the 100 Continue response, that we're able to
# parse a multipart MIME document and extract the object and
# metadata from it. If we don't, then the proxy won't
# actually send the footer metadata.
have_metadata_footer = False
use_multiphase_commit = False
mime_documents_iter = iter([])
obj_input = request.environ['wsgi.input']
hundred_continue_headers = []
if config_true_value(
request.headers.get(
'X-Backend-Obj-Multiphase-Commit')):
use_multiphase_commit = True
hundred_continue_headers.append(
('X-Obj-Multiphase-Commit', 'yes'))
if config_true_value(
request.headers.get('X-Backend-Obj-Metadata-Footer')):
have_metadata_footer = True
hundred_continue_headers.append(
('X-Obj-Metadata-Footer', 'yes'))
if have_metadata_footer or use_multiphase_commit:
obj_input.set_hundred_continue_response_headers(
hundred_continue_headers)
mime_boundary = request.headers.get(
'X-Backend-Obj-Multipart-Mime-Boundary')
if not mime_boundary:
return HTTPBadRequest("no MIME boundary")
try:
with ChunkReadTimeout(self.client_timeout):
mime_documents_iter = iter_mime_headers_and_bodies(
request.environ['wsgi.input'],
mime_boundary, self.network_chunk_size)
_junk_hdrs, obj_input = next(mime_documents_iter)
except ChunkReadError:
return HTTPClientDisconnect(request=request)
except ChunkReadTimeout:
return HTTPRequestTimeout(request=request)
timeout_reader = self._make_timeout_reader(obj_input)
try:
for chunk in iter(timeout_reader, ''):
start_time = time.time()
if start_time > upload_expiration:
self.logger.increment('PUT.timeouts')
return HTTPRequestTimeout(request=request)
etag.update(chunk)
upload_size = writer.write(chunk)
elapsed_time += time.time() - start_time
except ChunkReadError:
return HTTPClientDisconnect(request=request)
except ChunkReadTimeout:
return HTTPRequestTimeout(request=request)
if upload_size:
self.logger.transfer_rate(
'PUT.' + device + '.timing', elapsed_time,
upload_size)
if fsize is not None and fsize != upload_size:
return HTTPClientDisconnect(request=request)
footer_meta = {}
if have_metadata_footer:
footer_meta = self._read_metadata_footer(
mime_documents_iter)
request_etag = (footer_meta.get('etag') or
request.headers.get('etag', '')).lower()
etag = etag.hexdigest()
if request_etag and request_etag != etag:
return HTTPUnprocessableEntity(request=request)
metadata = {
'X-Timestamp': request.timestamp.internal,
'Content-Type': request.headers['content-type'],
'ETag': etag,
'Content-Length': str(upload_size),
}
metadata.update(val for val in request.headers.items()
if (is_sys_or_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
metadata.update(val for val in footer_meta.items()
if (is_sys_or_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
headers_to_copy = (
request.headers.get(
'X-Backend-Replication-Headers', '').split() +
list(self.allowed_headers))
for header_key in headers_to_copy:
if header_key in request.headers:
header_caps = header_key.title()
metadata[header_caps] = request.headers[header_key]
writer.put(metadata)
# if the PUT requires a two-phase commit (a data and a commit
# phase) send the proxy server another 100-continue response
# to indicate that we are finished writing object data
if use_multiphase_commit:
request.environ['wsgi.input'].\
send_hundred_continue_response()
if not self._read_put_commit_message(mime_documents_iter):
return HTTPServerError(request=request)
# got 2nd phase confirmation (when required), call commit to
# indicate a successful PUT
writer.commit(request.timestamp)
# Drain any remaining MIME docs from the socket. There
# shouldn't be any, but we must read the whole request body.
try:
while True:
with ChunkReadTimeout(self.client_timeout):
_junk_hdrs, _junk_body = next(mime_documents_iter)
drain(_junk_body, self.network_chunk_size,
self.client_timeout)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
pass
except (DiskFileXattrNotSupported, DiskFileNoSpace):
return HTTPInsufficientStorage(drive=device, request=request)
if orig_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update(
'PUT', new_delete_at, account, container, obj, request,
device, policy)
if orig_delete_at:
self.delete_at_update(
'DELETE', orig_delete_at, account, container, obj,
request, device, policy)
update_headers = HeaderKeyDict({
'x-size': metadata['Content-Length'],
'x-content-type': metadata['Content-Type'],
'x-timestamp': metadata['X-Timestamp'],
'x-etag': metadata['ETag']})
# apply any container update header overrides sent with request
self._check_container_override(update_headers, request.headers,
footer_meta)
self.container_update(
'PUT', account, container, obj, request,
update_headers,
device, policy)
return HTTPCreated(request=request, etag=etag)
@public
@timing_stats()
def GET(self, request):
"""Handle HTTP GET requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
frag_prefs = safe_json_loads(
request.headers.get('X-Backend-Fragment-Preferences'))
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_prefs=frag_prefs)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
with disk_file.open():
metadata = disk_file.get_metadata()
obj_size = int(metadata['Content-Length'])
file_x_ts = Timestamp(metadata['X-Timestamp'])
keep_cache = (self.keep_cache_private or
('X-Auth-Token' not in request.headers and
'X-Storage-Token' not in request.headers))
conditional_etag = resolve_etag_is_at_header(request, metadata)
response = Response(
app_iter=disk_file.reader(keep_cache=keep_cache),
request=request, conditional_response=True,
conditional_etag=conditional_etag)
response.headers['Content-Type'] = metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in metadata.items():
if (is_sys_or_user_meta('object', key) or
is_object_transient_sysmeta(key) or
key.lower() in self.allowed_headers):
response.headers[key] = value
response.etag = metadata['ETag']
response.last_modified = math.ceil(float(file_x_ts))
response.content_length = obj_size
try:
response.content_encoding = metadata[
'Content-Encoding']
except KeyError:
pass
response.headers['X-Timestamp'] = file_x_ts.normal
response.headers['X-Backend-Timestamp'] = file_x_ts.internal
response.headers['X-Backend-Data-Timestamp'] = \
disk_file.data_timestamp.internal
if disk_file.durable_timestamp:
response.headers['X-Backend-Durable-Timestamp'] = \
disk_file.durable_timestamp.internal
response.headers['X-Backend-Fragments'] = \
_make_backend_fragments_header(disk_file.fragments)
resp = request.get_response(response)
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined) as e:
headers = {}
if hasattr(e, 'timestamp'):
headers['X-Backend-Timestamp'] = e.timestamp.internal
resp = HTTPNotFound(request=request, headers=headers,
conditional_response=True)
return resp
@public
@timing_stats(sample_rate=0.8)
def HEAD(self, request):
"""Handle HTTP HEAD requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
frag_prefs = safe_json_loads(
request.headers.get('X-Backend-Fragment-Preferences'))
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_prefs=frag_prefs)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
metadata = disk_file.read_metadata()
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined) as e:
headers = {}
if hasattr(e, 'timestamp'):
headers['X-Backend-Timestamp'] = e.timestamp.internal
return HTTPNotFound(request=request, headers=headers,
conditional_response=True)
conditional_etag = resolve_etag_is_at_header(request, metadata)
response = Response(request=request, conditional_response=True,
conditional_etag=conditional_etag)
response.headers['Content-Type'] = metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in metadata.items():
if (is_sys_or_user_meta('object', key) or
is_object_transient_sysmeta(key) or
key.lower() in self.allowed_headers):
response.headers[key] = value
response.etag = metadata['ETag']
ts = Timestamp(metadata['X-Timestamp'])
response.last_modified = math.ceil(float(ts))
# Needed for container sync feature
response.headers['X-Timestamp'] = ts.normal
response.headers['X-Backend-Timestamp'] = ts.internal
response.headers['X-Backend-Data-Timestamp'] = \
disk_file.data_timestamp.internal
if disk_file.durable_timestamp:
response.headers['X-Backend-Durable-Timestamp'] = \
disk_file.durable_timestamp.internal
response.headers['X-Backend-Fragments'] = \
_make_backend_fragments_header(disk_file.fragments)
response.content_length = int(metadata['Content-Length'])
try:
response.content_encoding = metadata['Content-Encoding']
except KeyError:
pass
return response
@public
@timing_stats()
def DELETE(self, request):
"""Handle HTTP DELETE requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
req_timestamp = valid_timestamp(request)
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata()
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except DiskFileExpired as e:
orig_timestamp = e.timestamp
orig_metadata = e.metadata
response_class = HTTPNotFound
except DiskFileDeleted as e:
orig_timestamp = e.timestamp
orig_metadata = {}
response_class = HTTPNotFound
except (DiskFileNotExist, DiskFileQuarantined):
orig_timestamp = 0
orig_metadata = {}
response_class = HTTPNotFound
else:
orig_timestamp = disk_file.data_timestamp
if orig_timestamp < req_timestamp:
response_class = HTTPNoContent
else:
response_class = HTTPConflict
response_timestamp = max(orig_timestamp, req_timestamp)
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
try:
req_if_delete_at_val = request.headers['x-if-delete-at']
req_if_delete_at = int(req_if_delete_at_val)
except KeyError:
pass
except ValueError:
return HTTPBadRequest(
request=request,
body='Bad X-If-Delete-At header value')
else:
# request includes x-if-delete-at; we must not place a tombstone
            # if we cannot verify the x-if-delete-at time
if not orig_timestamp:
# no object found at all
return HTTPNotFound()
if orig_delete_at != req_if_delete_at:
return HTTPPreconditionFailed(
request=request,
body='X-If-Delete-At and X-Delete-At do not match')
else:
# differentiate success from no object at all
response_class = HTTPNoContent
if orig_delete_at:
self.delete_at_update('DELETE', orig_delete_at, account,
container, obj, request, device,
policy)
if orig_timestamp < req_timestamp:
try:
disk_file.delete(req_timestamp)
except DiskFileNoSpace:
return HTTPInsufficientStorage(drive=device, request=request)
self.container_update(
'DELETE', account, container, obj, request,
HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
device, policy)
return response_class(
request=request,
headers={'X-Backend-Timestamp': response_timestamp.internal})
@public
@replication
@timing_stats(sample_rate=0.1)
def REPLICATE(self, request):
"""
Handle REPLICATE requests for the Swift Object Server. This is used
by the object replicator to get hashes for directories.
Note that the name REPLICATE is preserved for historical reasons as
this verb really just returns the hashes information for the specified
parameters and is used, for example, by both replication and EC.
"""
device, partition, suffix_parts, policy = \
get_name_and_placement(request, 2, 3, True)
suffixes = suffix_parts.split('-') if suffix_parts else []
try:
hashes = self._diskfile_router[policy].get_hashes(
device, partition, suffixes, policy)
except DiskFileDeviceUnavailable:
resp = HTTPInsufficientStorage(drive=device, request=request)
else:
resp = Response(body=pickle.dumps(hashes))
return resp
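        # Example (path elements are illustrative): REPLICATE /sda1/312/a83-f1e
        # parses to device='sda1', partition='312', suffixes=['a83', 'f1e'] and
        # returns the pickled hashes dict for those suffix dirs.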
@public
@replication
@timing_stats(sample_rate=0.1)
def SSYNC(self, request):
return Response(app_iter=ssync_receiver.Receiver(self, request)())
def __call__(self, env, start_response):
"""WSGI Application entry point for the Swift Object Server."""
start_time = time.time()
req = Request(env)
self.logger.txn_id = req.headers.get('x-trans-id', None)
if not check_utf8(req.path_info):
res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
else:
try:
# disallow methods which have not been marked 'public'
if req.method not in self.allowed_methods:
res = HTTPMethodNotAllowed()
else:
res = getattr(self, req.method)(req)
except DiskFileCollision:
res = HTTPForbidden(request=req)
except HTTPException as error_response:
res = error_response
except (Exception, Timeout):
self.logger.exception(_(
'ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = time.time() - start_time
res.fix_conditional_response()
if self.log_requests:
log_line = get_log_line(req, res, trans_time, '')
if req.method in ('REPLICATE', 'SSYNC') or \
'X-Backend-Replication' in req.headers:
self.logger.debug(log_line)
else:
self.logger.info(log_line)
if req.method in ('PUT', 'DELETE'):
slow = self.slow - trans_time
if slow > 0:
sleep(slow)
# To be able to zero-copy send the object, we need a few things.
# First, we have to be responding successfully to a GET, or else we're
# not sending the object. Second, we have to be able to extract the
# socket file descriptor from the WSGI input object. Third, the
# diskfile has to support zero-copy send.
#
# There's a good chance that this could work for 206 responses too,
# but the common case is sending the whole object, so we'll start
# there.
if req.method == 'GET' and res.status_int == 200 and \
isinstance(env['wsgi.input'], wsgi.Input):
app_iter = getattr(res, 'app_iter', None)
checker = getattr(app_iter, 'can_zero_copy_send', None)
if checker and checker():
# For any kind of zero-copy thing like sendfile or splice, we
# need the file descriptor. Eventlet doesn't provide a clean
# way of getting that, so we resort to this.
wsock = env['wsgi.input'].get_socket()
wsockfd = wsock.fileno()
# Don't call zero_copy_send() until after we force the HTTP
# headers out of Eventlet and into the socket.
def zero_copy_iter():
# If possible, set TCP_CORK so that headers don't
# immediately go on the wire, but instead, wait for some
# response body to make the TCP frames as large as
# possible (and hence as few packets as possible).
#
# On non-Linux systems, we might consider TCP_NODELAY, but
# since the only known zero-copy-capable diskfile uses
# Linux-specific syscalls, we'll defer that work until
# someone needs it.
if hasattr(socket, 'TCP_CORK'):
wsock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_CORK, 1)
yield EventletPlungerString()
try:
app_iter.zero_copy_send(wsockfd)
except Exception:
self.logger.exception("zero_copy_send() blew up")
raise
yield ''
# Get headers ready to go out
res(env, start_response)
return zero_copy_iter()
else:
return res(env, start_response)
else:
return res(env, start_response)
def global_conf_callback(preloaded_app_conf, global_conf):
"""
Callback for swift.common.wsgi.run_wsgi during the global_conf
creation so that we can add our replication_semaphore, used to
    limit the number of concurrent SSYNC requests across all
workers.
:param preloaded_app_conf: The preloaded conf for the WSGI app.
This conf instance will go away, so
just read from it, don't write.
:param global_conf: The global conf that will eventually be
passed to the app_factory function later.
This conf is created before the worker
subprocesses are forked, so can be useful to
set up semaphores, shared memory, etc.
"""
replication_concurrency = int(
preloaded_app_conf.get('replication_concurrency') or 4)
if replication_concurrency:
# Have to put the value in a list so it can get past paste
global_conf['replication_semaphore'] = [
multiprocessing.BoundedSemaphore(replication_concurrency)]
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI object server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ObjectController(conf)
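# Illustrative only: paste.deploy ultimately calls app_factory with the merged
# configuration; a direct call would look like the sketch below (option names
# and values here are made up and not a complete object-server config).
#
#   app = app_factory({'devices': '/srv/node', 'bind_port': '6200'},
#                     replication_concurrency='4')
#   # 'app' is an ObjectController WSGI callable.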
|
the-stack_0_3135 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import os
import sys
from abc import ABCMeta
from importlib import import_module
from django.utils.module_loading import module_has_submodule
from six import with_metaclass
from kolibri.utils.build_config.default_plugins import DEFAULT_PLUGINS
from kolibri.utils.conf import KOLIBRI_HOME
logger = logging.getLogger(__name__)
conf_file = os.path.join(KOLIBRI_HOME, "plugins.json")
class ConfigDict(dict):
# These values are encoded on the config dict as sets
# so they need to be treated specially for serialization
# and deserialization to/from JSON
SET_KEYS = ("INSTALLED_PLUGINS", "DISABLED_PLUGINS", "UPDATED_PLUGINS")
def __init__(self):
# If the settings file does not exist or does not contain
# valid JSON then create it
self.set_defaults()
if os.path.isfile(conf_file):
try:
# Open up the config file and load settings
# use default OS encoding
with open(conf_file, "r") as kolibri_conf_file:
self.update(json.load(kolibri_conf_file))
return
except ValueError:
logger.warn(
"Attempted to load plugins.json but encountered a file that could not be decoded as valid JSON."
)
self.save()
logger.info("Initialized plugins.json")
def set_defaults(self):
self.update(
{
#: Everything in this list is added to django.conf.settings.INSTALLED_APPS
# except disabled ones below
"INSTALLED_PLUGINS": DEFAULT_PLUGINS,
#: Everything in this list is removed from the list above
"DISABLED_PLUGINS": [],
# Plugins that have been updated since we last initialized Kolibri
"UPDATED_PLUGINS": [],
# The current versions of plugins (both internal and external)
"PLUGIN_VERSIONS": {},
}
)
@property
def ACTIVE_PLUGINS(self):
return list(self["INSTALLED_PLUGINS"] - self["DISABLED_PLUGINS"])
def update(self, new_values):
"""
Updates current configuration with ``new_values``. Does not save to file.
"""
values_copy = new_values.copy()
for key in self.SET_KEYS:
if key in values_copy:
values_copy[key] = set(values_copy[key])
super(ConfigDict, self).update(values_copy)
def save(self):
# use default OS encoding
config_copy = self.copy()
for key in self.SET_KEYS:
if key in config_copy:
config_copy[key] = list(config_copy[key])
with open(conf_file, "w") as kolibri_conf_file:
json.dump(config_copy, kolibri_conf_file, indent=2, sort_keys=True)
def add_plugin(self, module_path):
if module_path in self.ACTIVE_PLUGINS:
logger.warning("{} already enabled".format(module_path))
return
self["INSTALLED_PLUGINS"].add(module_path)
self["UPDATED_PLUGINS"].add(module_path)
try:
self["DISABLED_PLUGINS"].remove(module_path)
except KeyError:
pass
self.save()
def remove_plugin(self, module_path):
if module_path not in self.ACTIVE_PLUGINS:
logger.warning("{} already disabled".format(module_path))
return
self["DISABLED_PLUGINS"].add(module_path)
try:
self["INSTALLED_PLUGINS"].remove(module_path)
except KeyError:
pass
try:
self["UPDATED_PLUGINS"].remove(module_path)
except KeyError:
pass
self.save()
def clear_plugin(self, module_path):
# Clean up references to plugins that either don't exist
        # or don't import properly.
try:
self["INSTALLED_PLUGINS"].remove(module_path)
except KeyError:
pass
try:
self["DISABLED_PLUGINS"].remove(module_path)
except KeyError:
pass
try:
self["UPDATED_PLUGINS"].remove(module_path)
except KeyError:
pass
self.save()
def update_plugin_version(self, module_path, new_version):
self["PLUGIN_VERSIONS"][module_path] = new_version
try:
self["UPDATED_PLUGINS"].remove(module_path)
except KeyError:
pass
self.save()
#: Set defaults before updating the dict
config = ConfigDict()
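# Minimal usage sketch (assumes a writable KOLIBRI_HOME; the module path below
# is hypothetical):
#
#   config.add_plugin("kolibri.plugins.example_plugin")
#   "kolibri.plugins.example_plugin" in config.ACTIVE_PLUGINS  # True
#   config.remove_plugin("kolibri.plugins.example_plugin")
#
# Each call persists the change to plugins.json via save().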
class SingletonMeta(ABCMeta):
_instances = {}
# Make all classes using this metaclass singletons
# Taken from here: https://stackoverflow.com/q/6760685
# Should be resistant to the __new__ method on the class object
# being overwritten.
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(SingletonMeta, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class KolibriPluginBase(with_metaclass(SingletonMeta)):
"""
This is the base class that all Kolibri plugins need to implement.
"""
#: Comment
# Name of a local module that contains url_patterns that define
# URLs for views that do not contain any
# translated content, and hence will not be prefixed
# with a language prefix
untranslated_view_urls = None
#: Comment
# Name of a local module that contains url_patterns that define
# URLs for views that contain
# translated content, and hence will be prefixed
    # with a language prefix
translated_view_urls = None
#: Comment
# Name of a local module that contains url_patterns that define
# URLs for views that should be attached to the domain root.
# Use with caution! The lack of namespacing is dangerous.
root_view_urls = None
#: Comment
# Name of a local module that contains additional settings to augment
# Django settings.
# For settings that take a tuple or list, these will be appended to the value from
# the base settings module set through conventional Django means.
django_settings = None
#: Comment
# Name of a local module, containing a config spec as the 'option_spec' value.
# These options should not override the core config spec. To override default values
# of other options see the attribute below
kolibri_options = None
#: Comment
# Name of a local module, containing a set of options defaults as the 'option_defaults' value.
# Should be of the form:
# option_defaults = {
# "<Section Name>": {
# "<Option Name>": "<New Default Value>",
# }
# }
kolibri_option_defaults = None
# : Suggested property, not yet in use
migrate_on_enable = False
# : Suggested property, not yet in use
collect_static_on_enable = False
    # : Suggested property, not yet in use
    collect_static_on_disable = False
def __init__(self):
self.INSTALLED_APPS = []
@classmethod
def class_module_path(self):
return ".".join(self.__module__.split(".")[:-1])
@property
def module_path(self):
return self.class_module_path()
def _installed_apps_add(self):
"""Call this from your enable() method to have the plugin automatically
added to Kolibri configuration"""
config.add_plugin(self.module_path)
def _installed_apps_remove(self):
"""Call this from your enable() method to have the plugin automatically
added to Kolibri configuration"""
config.remove_plugin(self.module_path)
def enable(self):
"""Modify the kolibri config dict to your plugin's needs"""
self._installed_apps_add()
def disable(self):
"""Modify the kolibri config dict to your plugin's needs"""
self._installed_apps_remove()
def _return_module(self, module_name):
if module_has_submodule(sys.modules[self.module_path], module_name):
models_module_name = "%s.%s" % (self.module_path, module_name)
try:
return import_module(models_module_name)
except Exception as e:
logging.warn(
"Tried to import module {module_name} from {plugin} but an error was raised".format(
plugin=self.module_path, module_name=module_name
)
)
logging.exception(e)
return None
@property
def url_module(self):
"""
Return a url module, containing ``urlpatterns = [...]``, a conventional
Django application url module.
URLs are by default accessed through Django's reverse lookups like
this::
reverse('kolibri:mypluginclass:url_name')
To customize "mypluginclass" (which is automatically derived from the
plugin's class name), override ``url_namespace``.
By default this will be discovered based on the translated_view_urls
property.
"""
if self.translated_view_urls:
module = self._return_module(self.translated_view_urls)
if module is None:
logging.warn(
"{plugin} defined {urls} translated view urls but the module was not found".format(
plugin=self.module_path, urls=self.translated_view_urls
)
)
return module
@property
def api_url_module(self):
"""
Return a url module, containing ``urlpatterns = [...]``, a conventional
Django application url module.
Do this separately for API endpoints so that they do not need
to be prefixed by the language code.
URLs are by default accessed through Django's reverse lookups like
this::
reverse('kolibri:mypluginclass:url_name')
To customize "mypluginclass" (which is automatically derived from the
plugin's class name), override ``url_namespace``.
By default this will be discovered based on the untranslated_view_urls
property.
"""
if self.untranslated_view_urls:
module = self._return_module(self.untranslated_view_urls)
if module is None:
logging.warn(
"{plugin} defined {urls} untranslated view urls but the module was not found".format(
plugin=self.module_path, urls=self.untranslated_view_urls
)
)
return module
@property
def root_url_module(self):
"""
Return a url module, containing ``urlpatterns = [...]``, a conventional
Django application url module.
Do this separately for endpoints that need to be attached at the root.
URLs are by default accessed through Django's reverse lookups like
this::
reverse('kolibri:url_name')
By default this will be discovered based on the root_view_urls
property.
"""
if self.root_view_urls:
module = self._return_module(self.root_view_urls)
if module is None:
logging.warn(
"{plugin} defined {urls} root view urls but the module was not found".format(
plugin=self.module_path, urls=self.root_view_urls
)
)
return module
@property
def settings_module(self):
"""
Return a settings module, containing Django settings that this
module wants to apply.
For settings that take a tuple or list, these will be appended to the value from
the base settings module set through conventional Django means.
By default this will be discovered based on the django_settings
property.
"""
if self.django_settings:
module = self._return_module(self.django_settings)
if module is None:
logging.warn(
"{plugin} defined {module} django settings but the module was not found".format(
plugin=self.module_path, module=self.django_settings
)
)
return module
@property
def options_module(self):
"""
Return an options module, containing a config spec as the 'option_spec' value.
These options should not override the core config spec.
By default this will be discovered based on the kolibri_options
property.
"""
if self.kolibri_options:
module = self._return_module(self.kolibri_options)
if module is None:
logging.warn(
"{plugin} defined {module} kolibri options but the module was not found".format(
plugin=self.module_path, module=self.kolibri_options
)
)
return module
@property
def option_defaults_module(self):
"""
        Return an option defaults module, containing default overrides as the 'option_defaults' value.
By default this will be discovered based on the kolibri_options
property.
"""
if self.kolibri_option_defaults:
module = self._return_module(self.kolibri_option_defaults)
if module is None:
logging.warn(
"{plugin} defined {module} kolibri option defaults but the module was not found".format(
plugin=self.module_path, module=self.kolibri_option_defaults
)
)
return module
@property
def url_slug(self):
"""
        Where should urls be included? By default, this is a lower-case version
        of the final component of the plugin's module path.
Example::
return r"my-plugin/"
.. warning:: Avoid the empty string, as you might get conflicts.
"""
return self.module_path.split(".")[-1].lower() + "/"
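# Illustrative sketch only: a plugin normally subclasses KolibriPluginBase from
# its own package; the class and submodule names below are hypothetical.
#
#   class ExamplePlugin(KolibriPluginBase):
#       translated_view_urls = "urls"          # <plugin package>/urls.py
#       untranslated_view_urls = "api_urls"    # <plugin package>/api_urls.py
#
#   ExamplePlugin().enable()   # records the plugin in plugins.json via config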
|
the-stack_0_3137 | import learner
import recognizer
import dbconn
print("Sign up enter 1")
print("Sign in enter 2\n")
print("Select action from above two.")
print("Press 'q' for exit from camera view.\n")
action = raw_input('Select action : ')
email = raw_input('Enter email : ')
try:
if int(action) == 1:
name = raw_input('Enter name : ')
res = dbconn.create_user(email, name)
if res == True:
id, name = dbconn.get_user(email)
res_train = learner.learn_user(id)
if res_train == True:
print("\nUser sign up successful.")
else:
# delete user if training unsuccessful
dbconn.del_user(id)
print("\nUser sign up unsuccessful.")
else:
print('\nEmail address already exist.')
elif int(action) == 2:
res = dbconn.get_user(email)
if res != None:
id, name = res
recognizer.recognize_face(id, name)
else:
print('\nPlease sign up.')
except Exception as e:
print("\nInvalid action.")
|
the-stack_0_3138 | """
Virtual environment (venv) package for Python. Based on PEP 405.
Copyright (C) 2011-2014 Vinay Sajip.
Licensed to the PSF under a contributor agreement.
"""
import logging
import os
import shutil
import subprocess
import sys
import sysconfig
import types
logger = logging.getLogger(__name__)
class EnvBuilder:
"""
This class exists to allow virtual environment creation to be
customized. The constructor parameters determine the builder's
behaviour when called upon to create a virtual environment.
By default, the builder makes the system (global) site-packages dir
*un*available to the created environment.
If invoked using the Python -m option, the default is to use copying
on Windows platforms but symlinks elsewhere. If instantiated some
other way, the default is to *not* use symlinks.
:param system_site_packages: If True, the system (global) site-packages
dir is available to created environments.
:param clear: If True, delete the contents of the environment directory if
it already exists, before environment creation.
:param symlinks: If True, attempt to symlink rather than copy files into
virtual environment.
:param upgrade: If True, upgrade an existing virtual environment.
:param with_pip: If True, ensure pip is installed in the virtual
environment
:param prompt: Alternative terminal prefix for the environment.
"""
def __init__(self, system_site_packages=False, clear=False,
symlinks=False, upgrade=False, with_pip=False, prompt=None):
self.system_site_packages = system_site_packages
self.clear = clear
self.symlinks = symlinks
self.upgrade = upgrade
self.with_pip = with_pip
self.prompt = prompt
def create(self, env_dir):
"""
Create a virtual environment in a directory.
:param env_dir: The target directory to create an environment in.
"""
env_dir = os.path.abspath(env_dir)
context = self.ensure_directories(env_dir)
# See issue 24875. We need system_site_packages to be False
# until after pip is installed.
true_system_site_packages = self.system_site_packages
self.system_site_packages = False
self.create_configuration(context)
self.setup_python(context)
if self.with_pip:
self._setup_pip(context)
if not self.upgrade:
self.setup_scripts(context)
self.post_setup(context)
if true_system_site_packages:
# We had set it to False before, now
# restore it and rewrite the configuration
self.system_site_packages = True
self.create_configuration(context)
def clear_directory(self, path):
for fn in os.listdir(path):
fn = os.path.join(path, fn)
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
def ensure_directories(self, env_dir):
"""
Create the directories for the environment.
Returns a context object which holds paths in the environment,
for use by subsequent logic.
"""
def create_if_needed(d):
if not os.path.exists(d):
os.makedirs(d)
elif os.path.islink(d) or os.path.isfile(d):
raise ValueError('Unable to create directory %r' % d)
if os.path.exists(env_dir) and self.clear:
self.clear_directory(env_dir)
context = types.SimpleNamespace()
context.env_dir = env_dir
context.env_name = os.path.split(env_dir)[1]
prompt = self.prompt if self.prompt is not None else context.env_name
context.prompt = '(%s) ' % prompt
create_if_needed(env_dir)
executable = sys._base_executable
dirname, exename = os.path.split(os.path.abspath(executable))
context.executable = executable
context.python_dir = dirname
context.python_exe = exename
if sys.platform == 'win32':
binname = 'Scripts'
incpath = 'Include'
libpath = os.path.join(env_dir, 'Lib', 'site-packages')
else:
binname = 'bin'
incpath = 'include'
libpath = os.path.join(env_dir, 'lib',
'python%d.%d' % sys.version_info[:2],
'site-packages')
context.inc_path = path = os.path.join(env_dir, incpath)
create_if_needed(path)
create_if_needed(libpath)
# Issue 21197: create lib64 as a symlink to lib on 64-bit non-OS X POSIX
if ((sys.maxsize > 2**32) and (os.name == 'posix') and
(sys.platform != 'darwin')):
link_path = os.path.join(env_dir, 'lib64')
if not os.path.exists(link_path): # Issue #21643
os.symlink('lib', link_path)
context.bin_path = binpath = os.path.join(env_dir, binname)
context.bin_name = binname
context.env_exe = os.path.join(binpath, exename)
create_if_needed(binpath)
return context
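        # Resulting layout on POSIX (illustrative; Windows uses Scripts/,
        # Include/ and Lib/site-packages instead):
        #   <env_dir>/
        #       bin/                                   <- context.bin_path
        #       include/                               <- context.inc_path
        #       lib/pythonX.Y/site-packages/
        #       lib64 -> lib   (64-bit non-macOS POSIX only)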
def create_configuration(self, context):
"""
Create a configuration file indicating where the environment's Python
was copied from, and whether the system site-packages should be made
available in the environment.
:param context: The information for the environment creation request
being processed.
"""
context.cfg_path = path = os.path.join(context.env_dir, 'pyvenv.cfg')
with open(path, 'w', encoding='utf-8') as f:
f.write('home = %s\n' % context.python_dir)
if self.system_site_packages:
incl = 'true'
else:
incl = 'false'
f.write('include-system-site-packages = %s\n' % incl)
f.write('version = %d.%d.%d\n' % sys.version_info[:3])
if self.prompt is not None:
f.write(f'prompt = {self.prompt!r}\n')
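        # Example of the pyvenv.cfg this writes (paths and version are
        # illustrative; the prompt line only appears when a prompt was given):
        #   home = /usr/local/bin
        #   include-system-site-packages = false
        #   version = 3.8.2
        #   prompt = 'demo-env'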
if os.name != 'nt':
def symlink_or_copy(self, src, dst, relative_symlinks_ok=False):
"""
Try symlinking a file, and if that fails, fall back to copying.
"""
force_copy = not self.symlinks
if not force_copy:
try:
if not os.path.islink(dst): # can't link to itself!
if relative_symlinks_ok:
assert os.path.dirname(src) == os.path.dirname(dst)
os.symlink(os.path.basename(src), dst)
else:
os.symlink(src, dst)
except Exception: # may need to use a more specific exception
logger.warning('Unable to symlink %r to %r', src, dst)
force_copy = True
if force_copy:
shutil.copyfile(src, dst)
else:
def symlink_or_copy(self, src, dst, relative_symlinks_ok=False):
"""
Try symlinking a file, and if that fails, fall back to copying.
"""
bad_src = os.path.lexists(src) and not os.path.exists(src)
if self.symlinks and not bad_src and not os.path.islink(dst):
try:
if relative_symlinks_ok:
assert os.path.dirname(src) == os.path.dirname(dst)
os.symlink(os.path.basename(src), dst)
else:
os.symlink(src, dst)
return
except Exception: # may need to use a more specific exception
logger.warning('Unable to symlink %r to %r', src, dst)
# On Windows, we rewrite symlinks to our base python.exe into
# copies of venvlauncher.exe
basename, ext = os.path.splitext(os.path.basename(src))
srcfn = os.path.join(os.path.dirname(__file__),
"scripts",
"nt",
basename + ext)
# Builds or venv's from builds need to remap source file
# locations, as we do not put them into Lib/venv/scripts
if sysconfig.is_python_build(True) or not os.path.isfile(srcfn):
if basename.endswith('_d'):
ext = '_d' + ext
basename = basename[:-2]
if basename == 'python':
basename = 'venvlauncher'
elif basename == 'pythonw':
basename = 'venvwlauncher'
src = os.path.join(os.path.dirname(src), basename + ext)
else:
if basename.startswith('python'):
scripts = sys.prefix
else:
scripts = os.path.join(os.path.dirname(__file__), "scripts", "nt")
src = os.path.join(scripts, basename + ext)
if not os.path.exists(src):
if not bad_src:
logger.warning('Unable to copy %r', src)
return
shutil.copyfile(src, dst)
def setup_python(self, context):
"""
Set up a Python executable in the environment.
:param context: The information for the environment creation request
being processed.
"""
binpath = context.bin_path
path = context.env_exe
copier = self.symlink_or_copy
copier(context.executable, path)
dirname = context.python_dir
if os.name != 'nt':
if not os.path.islink(path):
os.chmod(path, 0o755)
for suffix in ('python', 'python3'):
path = os.path.join(binpath, suffix)
if not os.path.exists(path):
# Issue 18807: make copies if
# symlinks are not wanted
copier(context.env_exe, path, relative_symlinks_ok=True)
if not os.path.islink(path):
os.chmod(path, 0o755)
else:
if self.symlinks:
# For symlinking, we need a complete copy of the root directory
# If symlinks fail, you'll get unnecessary copies of files, but
# we assume that if you've opted into symlinks on Windows then
# you know what you're doing.
suffixes = [
f for f in os.listdir(dirname) if
os.path.normcase(os.path.splitext(f)[1]) in ('.exe', '.dll')
]
if sysconfig.is_python_build(True):
suffixes = [
f for f in suffixes if
os.path.normcase(f).startswith(('python', 'vcruntime'))
]
else:
suffixes = ['python.exe', 'python_d.exe', 'pythonw.exe',
'pythonw_d.exe']
for suffix in suffixes:
src = os.path.join(dirname, suffix)
if os.path.lexists(src):
copier(src, os.path.join(binpath, suffix))
if sysconfig.is_python_build(True):
# copy init.tcl
for root, dirs, files in os.walk(context.python_dir):
if 'init.tcl' in files:
tcldir = os.path.basename(root)
tcldir = os.path.join(context.env_dir, 'Lib', tcldir)
if not os.path.exists(tcldir):
os.makedirs(tcldir)
src = os.path.join(root, 'init.tcl')
dst = os.path.join(tcldir, 'init.tcl')
shutil.copyfile(src, dst)
break
def _setup_pip(self, context):
"""Installs or upgrades pip in a virtual environment"""
# We run ensurepip in isolated mode to avoid side effects from
# environment vars, the current directory and anything else
# intended for the global Python environment
cmd = [context.env_exe, '-Im', 'ensurepip', '--upgrade',
'--default-pip']
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def setup_scripts(self, context):
"""
Set up scripts into the created environment from a directory.
This method installs the default scripts into the environment
being created. You can prevent the default installation by overriding
this method if you really need to, or if you need to specify
a different location for the scripts to install. By default, the
'scripts' directory in the venv package is used as the source of
scripts to install.
"""
path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(path, 'scripts')
self.install_scripts(context, path)
def post_setup(self, context):
"""
Hook for post-setup modification of the venv. Subclasses may install
additional packages or scripts here, add activation shell scripts, etc.
:param context: The information for the environment creation request
being processed.
"""
pass
def replace_variables(self, text, context):
"""
Replace variable placeholders in script text with context-specific
variables.
        Return the text passed in, but with variables replaced.
:param text: The text in which to replace placeholder variables.
:param context: The information for the environment creation request
being processed.
"""
text = text.replace('__VENV_DIR__', context.env_dir)
text = text.replace('__VENV_NAME__', context.env_name)
text = text.replace('__VENV_PROMPT__', context.prompt)
text = text.replace('__VENV_BIN_NAME__', context.bin_name)
text = text.replace('__VENV_PYTHON__', context.env_exe)
return text
def install_scripts(self, context, path):
"""
Install scripts into the created environment from a directory.
:param context: The information for the environment creation request
being processed.
        :param path: Absolute pathname of a directory containing scripts.
Scripts in the 'common' subdirectory of this directory,
and those in the directory named for the platform
being run on, are installed in the created environment.
Placeholder variables are replaced with environment-
specific values.
"""
binpath = context.bin_path
plen = len(path)
for root, dirs, files in os.walk(path):
if root == path: # at top-level, remove irrelevant dirs
for d in dirs[:]:
if d not in ('common', os.name):
dirs.remove(d)
continue # ignore files in top level
for f in files:
if (os.name == 'nt' and f.startswith('python')
and f.endswith(('.exe', '.pdb'))):
continue
srcfile = os.path.join(root, f)
suffix = root[plen:].split(os.sep)[2:]
if not suffix:
dstdir = binpath
else:
dstdir = os.path.join(binpath, *suffix)
if not os.path.exists(dstdir):
os.makedirs(dstdir)
dstfile = os.path.join(dstdir, f)
with open(srcfile, 'rb') as f:
data = f.read()
if not srcfile.endswith(('.exe', '.pdb')):
try:
data = data.decode('utf-8')
data = self.replace_variables(data, context)
data = data.encode('utf-8')
except UnicodeError as e:
data = None
logger.warning('unable to copy script %r, '
'may be binary: %s', srcfile, e)
if data is not None:
with open(dstfile, 'wb') as f:
f.write(data)
shutil.copymode(srcfile, dstfile)
def create(env_dir, system_site_packages=False, clear=False,
symlinks=False, with_pip=False, prompt=None):
"""Create a virtual environment in a directory."""
builder = EnvBuilder(system_site_packages=system_site_packages,
clear=clear, symlinks=symlinks, with_pip=with_pip,
prompt=prompt)
builder.create(env_dir)
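# Illustrative sketch (not part of the stdlib module): creating an environment
# programmatically rather than from the command line. The target path below is
# a placeholder; with_pip=True bootstraps pip via ensurepip, as in _setup_pip.
def _example_create(env_dir='/tmp/example-venv'):
    create(env_dir, with_pip=True, prompt='example')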
def main(args=None):
compatible = True
if sys.version_info < (3, 3):
compatible = False
elif not hasattr(sys, 'base_prefix'):
compatible = False
if not compatible:
raise ValueError('This script is only for use with Python >= 3.3')
else:
import argparse
parser = argparse.ArgumentParser(prog=__name__,
description='Creates virtual Python '
'environments in one or '
'more target '
'directories.',
epilog='Once an environment has been '
'created, you may wish to '
'activate it, e.g. by '
'sourcing an activate script '
'in its bin directory.')
parser.add_argument('dirs', metavar='ENV_DIR', nargs='+',
help='A directory to create the environment in.')
parser.add_argument('--system-site-packages', default=False,
action='store_true', dest='system_site',
help='Give the virtual environment access to the '
'system site-packages dir.')
if os.name == 'nt':
use_symlinks = False
else:
use_symlinks = True
group = parser.add_mutually_exclusive_group()
group.add_argument('--symlinks', default=use_symlinks,
action='store_true', dest='symlinks',
help='Try to use symlinks rather than copies, '
'when symlinks are not the default for '
'the platform.')
group.add_argument('--copies', default=not use_symlinks,
action='store_false', dest='symlinks',
help='Try to use copies rather than symlinks, '
'even when symlinks are the default for '
'the platform.')
parser.add_argument('--clear', default=False, action='store_true',
dest='clear', help='Delete the contents of the '
'environment directory if it '
'already exists, before '
'environment creation.')
parser.add_argument('--upgrade', default=False, action='store_true',
dest='upgrade', help='Upgrade the environment '
'directory to use this version '
'of Python, assuming Python '
'has been upgraded in-place.')
parser.add_argument('--without-pip', dest='with_pip',
default=True, action='store_false',
help='Skips installing or upgrading pip in the '
'virtual environment (pip is bootstrapped '
'by default)')
parser.add_argument('--prompt',
help='Provides an alternative prompt prefix for '
'this environment.')
options = parser.parse_args(args)
if options.upgrade and options.clear:
raise ValueError('you cannot supply --upgrade and --clear together.')
builder = EnvBuilder(system_site_packages=options.system_site,
clear=options.clear,
symlinks=options.symlinks,
upgrade=options.upgrade,
with_pip=options.with_pip,
prompt=options.prompt)
for d in options.dirs:
builder.create(d)
if __name__ == '__main__':
rc = 1
try:
main()
rc = 0
except Exception as e:
print('Error: %s' % e, file=sys.stderr)
sys.exit(rc)
|
the-stack_0_3140 | import pandas as pd
def to_reise(data):
"""Format data for REISE.
:param pandas.DataFrame data: data frame as returned by
:func:`prereise.gather.solardata.nsrdb.naive.retrieve_data`,
:func:`prereise.gather.solardata.nsrdb.sam.retrieve_data` or
:func:`prereise.gather.solardata.ga_wind.ga_wind.retrieve_data`
:return: (*pandas.DataFrame*) -- data frame formatted for REISE.
:raises TypeError: if *'data'* is not a data frame.
:raises ValueError: if *'Pout'*, *'plant_id'*, *'ts'* and *'ts_id'* are not among
the columns.
"""
if not isinstance(data, pd.DataFrame):
raise TypeError("data must be a pandas.DataFrame")
if not {"Pout", "plant_id", "ts", "ts_id"}.issubset(data.columns):
raise ValueError(
"data frame must have Pout, plant_id, ts and ts_id among columns"
)
ts = data["ts"].unique()
plant_id = data[data.ts_id == 1].plant_id.values
profile = None
for i in range(1, max(data.ts_id) + 1):
data_tmp = pd.DataFrame(
{"Pout": data[data.ts_id == i].Pout.values}, index=plant_id
)
if i == 1:
profile = data_tmp.T
else:
profile = profile.append(data_tmp.T, sort=False, ignore_index=True)
profile.set_index(ts, inplace=True)
profile.index.name = "UTC"
return profile
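# Expected input layout for to_reise (illustrative only, the values are made up):
#
#       plant_id   Pout   ts                    ts_id
#       101        0.10   2016-01-01 00:00:00   1
#       102        0.30   2016-01-01 00:00:00   1
#       101        0.20   2016-01-01 01:00:00   2
#       102        0.40   2016-01-01 01:00:00   2
#
# to_reise pivots this into one column per plant_id, indexed by the unique
# timestamps (index renamed to "UTC").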
def get_plant_id_unique_location(plant):
"""Identify unique location among plants.
:param pandas.DataFrame plant: plant data frame.
:return: (*dict*) -- keys are coordinates. Values is a list of *'plant_id'*.
:raises TypeError: if *'plant'* is not a data frame.
:raises ValueError: if *'plant_id'* is not the index and/or *'lat'* and *'lon'* are
not among the columns.
"""
if not isinstance(plant, pd.DataFrame):
raise TypeError("plant must be a pandas.DataFrame")
if not (plant.index.name == "plant_id" and {"lat", "lon"}.issubset(plant.columns)):
raise ValueError(
"data frame must have plant_id as index and lat and lon among columns"
)
return plant.groupby(["lon", "lat"]).groups
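# Illustrative usage sketch (not part of the module): grouping plants that share
# coordinates with get_plant_id_unique_location. The sample values are made up.
if __name__ == "__main__":
    plant = pd.DataFrame(
        {"lat": [45.0, 45.0, 47.5], "lon": [-122.0, -122.0, -120.3]},
        index=pd.Index([101, 102, 103], name="plant_id"),
    )
    groups = get_plant_id_unique_location(plant)
    for coord, ids in groups.items():
        # keys are (lon, lat) tuples, values are the plant_id's at that location
        print(coord, list(ids))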
|
the-stack_0_3141 | """
Support for Alexa skill service end point.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/alexa/
"""
import asyncio
import copy
import enum
import logging
import uuid
from datetime import datetime
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import HTTP_BAD_REQUEST
from homeassistant.helpers import template, script, config_validation as cv
from homeassistant.components.http import HomeAssistantView
_LOGGER = logging.getLogger(__name__)
INTENTS_API_ENDPOINT = '/api/alexa'
FLASH_BRIEFINGS_API_ENDPOINT = '/api/alexa/flash_briefings/{briefing_id}'
CONF_ACTION = 'action'
CONF_CARD = 'card'
CONF_INTENTS = 'intents'
CONF_SPEECH = 'speech'
CONF_TYPE = 'type'
CONF_TITLE = 'title'
CONF_CONTENT = 'content'
CONF_TEXT = 'text'
CONF_FLASH_BRIEFINGS = 'flash_briefings'
CONF_UID = 'uid'
CONF_AUDIO = 'audio'
CONF_DISPLAY_URL = 'display_url'
ATTR_UID = 'uid'
ATTR_UPDATE_DATE = 'updateDate'
ATTR_TITLE_TEXT = 'titleText'
ATTR_STREAM_URL = 'streamUrl'
ATTR_MAIN_TEXT = 'mainText'
ATTR_REDIRECTION_URL = 'redirectionURL'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.0Z'
DOMAIN = 'alexa'
DEPENDENCIES = ['http']
class SpeechType(enum.Enum):
"""The Alexa speech types."""
plaintext = "PlainText"
ssml = "SSML"
class CardType(enum.Enum):
"""The Alexa card types."""
simple = "Simple"
link_account = "LinkAccount"
CONFIG_SCHEMA = vol.Schema({
DOMAIN: {
CONF_INTENTS: {
cv.string: {
vol.Optional(CONF_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_CARD): {
vol.Required(CONF_TYPE): cv.enum(CardType),
vol.Required(CONF_TITLE): cv.template,
vol.Required(CONF_CONTENT): cv.template,
},
vol.Optional(CONF_SPEECH): {
vol.Required(CONF_TYPE): cv.enum(SpeechType),
vol.Required(CONF_TEXT): cv.template,
}
}
},
CONF_FLASH_BRIEFINGS: {
cv.string: vol.All(cv.ensure_list, [{
vol.Required(CONF_UID, default=str(uuid.uuid4())): cv.string,
vol.Required(CONF_TITLE): cv.template,
vol.Optional(CONF_AUDIO): cv.template,
vol.Required(CONF_TEXT, default=""): cv.template,
vol.Optional(CONF_DISPLAY_URL): cv.template,
}]),
}
}
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Activate Alexa component."""
intents = config[DOMAIN].get(CONF_INTENTS, {})
flash_briefings = config[DOMAIN].get(CONF_FLASH_BRIEFINGS, {})
hass.http.register_view(AlexaIntentsView(hass, intents))
hass.http.register_view(AlexaFlashBriefingView(hass, flash_briefings))
return True
class AlexaIntentsView(HomeAssistantView):
"""Handle Alexa requests."""
url = INTENTS_API_ENDPOINT
name = 'api:alexa'
def __init__(self, hass, intents):
"""Initialize Alexa view."""
super().__init__()
intents = copy.deepcopy(intents)
template.attach(hass, intents)
for name, intent in intents.items():
if CONF_ACTION in intent:
intent[CONF_ACTION] = script.Script(
hass, intent[CONF_ACTION], "Alexa intent {}".format(name))
self.intents = intents
@asyncio.coroutine
def post(self, request):
"""Handle Alexa."""
data = yield from request.json()
_LOGGER.debug('Received Alexa request: %s', data)
req = data.get('request')
if req is None:
_LOGGER.error('Received invalid data from Alexa: %s', data)
return self.json_message('Expected request value not received',
HTTP_BAD_REQUEST)
req_type = req['type']
if req_type == 'SessionEndedRequest':
return None
intent = req.get('intent')
response = AlexaResponse(request.app['hass'], intent)
if req_type == 'LaunchRequest':
response.add_speech(
SpeechType.plaintext,
"Hello, and welcome to the future. How may I help?")
return self.json(response)
if req_type != 'IntentRequest':
_LOGGER.warning('Received unsupported request: %s', req_type)
return self.json_message(
'Received unsupported request: {}'.format(req_type),
HTTP_BAD_REQUEST)
intent_name = intent['name']
config = self.intents.get(intent_name)
if config is None:
_LOGGER.warning('Received unknown intent %s', intent_name)
response.add_speech(
SpeechType.plaintext,
"This intent is not yet configured within Home Assistant.")
return self.json(response)
speech = config.get(CONF_SPEECH)
card = config.get(CONF_CARD)
action = config.get(CONF_ACTION)
if action is not None:
yield from action.async_run(response.variables)
# pylint: disable=unsubscriptable-object
if speech is not None:
response.add_speech(speech[CONF_TYPE], speech[CONF_TEXT])
if card is not None:
response.add_card(card[CONF_TYPE], card[CONF_TITLE],
card[CONF_CONTENT])
return self.json(response)
class AlexaResponse(object):
"""Help generating the response for Alexa."""
def __init__(self, hass, intent=None):
"""Initialize the response."""
self.hass = hass
self.speech = None
self.card = None
self.reprompt = None
self.session_attributes = {}
self.should_end_session = True
self.variables = {}
if intent is not None and 'slots' in intent:
for key, value in intent['slots'].items():
if 'value' in value:
underscored_key = key.replace('.', '_')
self.variables[underscored_key] = value['value']
def add_card(self, card_type, title, content):
"""Add a card to the response."""
assert self.card is None
card = {
"type": card_type.value
}
if card_type == CardType.link_account:
self.card = card
return
card["title"] = title.async_render(self.variables)
card["content"] = content.async_render(self.variables)
self.card = card
def add_speech(self, speech_type, text):
"""Add speech to the response."""
assert self.speech is None
key = 'ssml' if speech_type == SpeechType.ssml else 'text'
if isinstance(text, template.Template):
text = text.async_render(self.variables)
self.speech = {
'type': speech_type.value,
key: text
}
def add_reprompt(self, speech_type, text):
"""Add reprompt if user does not answer."""
assert self.reprompt is None
key = 'ssml' if speech_type == SpeechType.ssml else 'text'
self.reprompt = {
'type': speech_type.value,
key: text.async_render(self.variables)
}
def as_dict(self):
"""Return response in an Alexa valid dict."""
response = {
'shouldEndSession': self.should_end_session
}
if self.card is not None:
response['card'] = self.card
if self.speech is not None:
response['outputSpeech'] = self.speech
if self.reprompt is not None:
response['reprompt'] = {
'outputSpeech': self.reprompt
}
return {
'version': '1.0',
'sessionAttributes': self.session_attributes,
'response': response,
}
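def _example_response():
    """Illustrative sketch (not part of the component): build a minimal response
    by hand. Plain-text speech does not touch hass, so None is passed here
    purely for demonstration."""
    response = AlexaResponse(None)
    response.add_speech(SpeechType.plaintext, 'The lights are now on.')
    return response.as_dict()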
class AlexaFlashBriefingView(HomeAssistantView):
"""Handle Alexa Flash Briefing skill requests."""
url = FLASH_BRIEFINGS_API_ENDPOINT
name = 'api:alexa:flash_briefings'
def __init__(self, hass, flash_briefings):
"""Initialize Alexa view."""
super().__init__()
self.flash_briefings = copy.deepcopy(flash_briefings)
template.attach(hass, self.flash_briefings)
@callback
def get(self, request, briefing_id):
"""Handle Alexa Flash Briefing request."""
_LOGGER.debug('Received Alexa flash briefing request for: %s',
briefing_id)
if self.flash_briefings.get(briefing_id) is None:
err = 'No configured Alexa flash briefing was found for: %s'
_LOGGER.error(err, briefing_id)
return b'', 404
briefing = []
for item in self.flash_briefings.get(briefing_id, []):
output = {}
if item.get(CONF_TITLE) is not None:
if isinstance(item.get(CONF_TITLE), template.Template):
output[ATTR_TITLE_TEXT] = item[CONF_TITLE].async_render()
else:
output[ATTR_TITLE_TEXT] = item.get(CONF_TITLE)
if item.get(CONF_TEXT) is not None:
if isinstance(item.get(CONF_TEXT), template.Template):
output[ATTR_MAIN_TEXT] = item[CONF_TEXT].async_render()
else:
output[ATTR_MAIN_TEXT] = item.get(CONF_TEXT)
if item.get(CONF_UID) is not None:
output[ATTR_UID] = item.get(CONF_UID)
if item.get(CONF_AUDIO) is not None:
if isinstance(item.get(CONF_AUDIO), template.Template):
output[ATTR_STREAM_URL] = item[CONF_AUDIO].async_render()
else:
output[ATTR_STREAM_URL] = item.get(CONF_AUDIO)
if item.get(CONF_DISPLAY_URL) is not None:
if isinstance(item.get(CONF_DISPLAY_URL),
template.Template):
output[ATTR_REDIRECTION_URL] = \
item[CONF_DISPLAY_URL].async_render()
else:
output[ATTR_REDIRECTION_URL] = item.get(CONF_DISPLAY_URL)
output[ATTR_UPDATE_DATE] = datetime.now().strftime(DATE_FORMAT)
briefing.append(output)
return self.json(briefing)
|
the-stack_0_3147 | from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import time
# Variance
def var(f_model):
n = np.size(f_model)
f_model_mean = np.sum(f_model)/n
#f_model_mean = np.mean(f_model)
return np.sum((f_model-f_model_mean)**2)/n
#================================================================================================================
# Bias
def bias(f_true,f_model):
n = np.size(f_model)
#f_model_mean = np.sum(f_model)/n
f_model_mean = np.mean(f_model)
return np.sum((f_true-f_model_mean)**2)/n
#================================================================================================================
# MSE
def MSE(f_true,f_model):
n = np.size(f_model)
return np.sum((f_true-f_model)**2)/n
#================================================================================================================
# Extra term
def extra_term(f_true,f_model):
n = np.size(f_model)
f_model_mean = np.mean(f_model)
return 2.0/n*np.sum((f_model_mean-f_true)*(f_model-f_model_mean))
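#================================================================================================================
# Quick numerical sanity check (illustrative, not part of the original code): with
# the definitions above MSE = bias + var + extra_term holds exactly, because
# (f - f_model)^2 = (f - mean)^2 + (f_model - mean)^2 + 2(mean - f)(f_model - mean)
# summed over all samples. The random data below is made up.
def check_error_decomposition(n=50, seed=0):
    rng = np.random.RandomState(seed)
    f_true = rng.rand(n)
    f_model = f_true + 0.1*rng.randn(n)
    lhs = MSE(f_true, f_model)
    rhs = bias(f_true, f_model) + var(f_model) + extra_term(f_true, f_model)
    return np.isclose(lhs, rhs)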
#================================================================================================================
# SVD invert
def SVDinv(A):
''' Takes as input a numpy matrix A and returns inv(A) based on singular value decomposition (SVD).
SVD is numerically more stable (at least in our case) than the inversion algorithms provided by
numpy and scipy.linalg at the cost of being slower.
'''
U, s, VT = linalg.svd(A)
D = np.zeros((len(U),len(VT)))
for i in range(0,len(VT)):
D[i,i]=s[i]
UT = np.transpose(U); V = np.transpose(VT); invD = np.linalg.inv(D)
return np.matmul(V,np.matmul(invD,UT))
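#================================================================================================================
# Illustrative sanity check (not part of the original code): SVDinv should agree
# with the ordinary inverse on a well-conditioned matrix. The test matrix is made up.
def check_SVDinv(n=5, seed=0):
    rng = np.random.RandomState(seed)
    A = rng.rand(n, n) + n*np.eye(n)   # diagonally dominant, hence well conditioned
    return np.allclose(np.matmul(SVDinv(A), A), np.eye(n))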
#================================================================================================================
# R2 score
def R2(x_true,x_predict):
n = np.size(x_true)
x_avg = np.sum(x_true)/n
    numerator = np.sum((x_true-x_predict)**2)
    denominator = np.sum((x_true-x_avg)**2)
    return 1.0 - numerator/denominator
#================================================================================================================
## Mean
#def mean(x):
# n = np.size(x)
# x_avg = np.sum(x)/n
# return x_avg
#================================================================================================================
# get sub-entries of matrix A
def get_subset(A,indices):
    '''Given an indexing set "indices", return the vector consisting of the
    entries A[j,i] for each pair (i,j) in indices (note the reversed order:
    i indexes the x/column direction and j the y/row direction).'''
N = len(indices)
B = np.zeros(N)
for k in range(0,N):
i = indices[k][0]
j = indices[k][1]
B[k] = A[j,i]
return B
#============================================================================================================================
class k_cross_validation:
    '''A k-cross validation object is initialized by passing to it data of the type regdata,
    and a partition of the data. The class function R2 calculates the mean R2 scores
    of test and training data for the given model. The function MSE calculates the mean MSE, bias,
    variance and error terms of the test data for the given model. These quantities are stored
    as self variables.'''
def __init__(self, data, partition,*args):
self.data = data; self.partition = partition; self.args = args;
#f = data.f; X = data.X; z = data.z; correspondence = data.correspondence;
self.k = len(partition)
self.test_R2, self.test_var, self.test_bias, self.test_MSE, self.test_extra_terms = 0, 0, 0, 0, 0
self.train_R2 = 0
#self.train_var, self.train_bias, self.train_MSE, self.train_extra_terms = 0, 0, 0, 0
def R2(self):
data = self.data
f = data.f; X = data.X; z = data.z; correspondence = data.correspondence; partition = self.partition
k = self.k
args = self.args
test_R2, train_R2 = 0, 0
for i, test_data in enumerate(partition):
train_data = [x for j,x in enumerate(partition) if j!=i]
train_data = sum(train_data, [])
beta = data.get_beta(X[train_data],z[train_data],*args)
freg = data.model(beta)
test_data = [correspondence[j] for j in test_data]
train_data = [correspondence[j] for j in train_data]
# test errors:
ftest = get_subset(f,test_data); fregtest = get_subset(freg,test_data)
test_R2 += R2(ftest,fregtest)
#training errors:
ftrain = get_subset(f,train_data); fregtrain = get_subset(freg,train_data)
train_R2 += R2(ftrain,fregtrain)
# self variables
self.test_R2 = test_R2/k
self.train_R2 = train_R2/k
def MSE(self):
data = self.data
f = data.f; X = data.X; z = data.z; correspondence = data.correspondence; partition = self.partition
k = self.k
args = self.args
test_var, test_bias, test_MSE, test_extra_terms = 0, 0, 0, 0
#train_var, train_bias, train_MSE, train_extra_terms = 0, 0, 0, 0
for i, test_data in enumerate(partition):
train_data = [x for j,x in enumerate(partition) if j!=i]
train_data = sum(train_data, [])
beta = data.get_beta(X[train_data],z[train_data],*args)
freg = data.model(beta)
test_data = [correspondence[j] for j in test_data]
# train_data = [correspondence[j] for j in train_data]
# test errors:
ftest = get_subset(f,test_data); fregtest = get_subset(freg,test_data)
test_var += var(fregtest)
test_bias += bias(ftest,fregtest)
test_MSE += MSE(ftest,fregtest)
test_extra_terms += extra_term(ftest,fregtest)
##training errors:
#ftrain = get_subset(f,train_data); fregtrain = get_subset(freg,train_data)
#train_var += var(fregtrain)
#train_bias += bias(ftrain,fregtrain)
#train_MSE += MSE(ftrain,fregtrain)
#train_extra_terms += extra_term(ftrain,fregtrain)
# self variables
self.test_var = test_var/k
self.test_bias = test_bias/k
self.test_MSE = test_MSE/k
self.test_extra_terms = test_extra_terms/k
#self.train_var = train_var/k
#self.train_bias = train_bias/k
#self.train_MSE = train_MSE/k
#self.train_extra_terms = train_extra_terms/k
#================================================================================================================
class regdata:
def __init__(self, f, degree):
# initializing variables
m = len(f[0,:]); n = len(f); mn = m*n;
x = np.linspace(0, 1, m); y = np.linspace(0, 1, n); z = np.zeros(mn); xy = np.zeros((mn,2));
# initializing some self variables
self.f = f; self.degree = degree; self.xm, self.ym = np.meshgrid(x,y); self.n=n;self.m=m; self.mn = mn; self.correspondence = []
# Making a sequence xy containing the pairs (x_i,y_j) for i,j=0,...,n, and a sequence z with matching pairs z_ij = f(x_i, y_j)
counter = 0
for i in range(0,m):
for j in range(0,n):
                z[counter] = f[j,i]  # note: row index j corresponds to y, column index i to x
xy[counter,:] = [x[i],y[j]]
self.correspondence.append([i,j]) #Saves the 1-1 correspondence: {counter} <-> {(i,j)} for later
counter+=1
self.z = z
# Make X
number_basis_elts=int((degree+2)*(degree+1)/2) #(degree+1)th triangular number (number of basis elements for R[x,y] of degree <= degree)
X = np.zeros((mn,number_basis_elts))
powers = []
for i in range(0,mn):
counter = 0
for j in range(0,degree+1):
k = 0
while j+k <= degree:
xi = xy[i,0]
yi = xy[i,1]
X[i,counter]= (xi**j)*(yi**k)
powers.append([j , k])
k+=1
counter+=1
self.X = X
self.powers = powers
self.number_basis_elts = number_basis_elts
self.invXTX = linalg.inv(np.matmul(np.transpose(X),X))
# Regression
def get_reg(self, *args):
'''Returns the polynomial fit as a numpy array. If *args is empty the fit is based on an ordinary least square.
If *args contains a number LAMBDA, then the fit is found using Ridge for the given bias LAMBDA. If *args contains
two numbers LAMBDA and epsilon, then the fit is found using lasso. See the function " __get_beta" for more details.'''
X=self.X; z=self.z #relabeling self variables
beta = self.get_beta(X,z,*args) #obtaining beta
reg = self.model(beta) #obtaining model from coefficients beta
return reg
# Get beta (given X and z)
def get_beta(self, X, z,*args):
'''Returns coefficients for a given beta as a numpy array, found using either ordinary least square,
Ridge or Lasso regression depending on the arguments. If *args is empty, then beta is found using
ordinary least square. If *args contains a number it will be treated as a bias LAMBDA for a Ridge regression.
If *args contains two numbers, then the first will count as a LAMBDA and the second as a tolerance epsilon.
In this case beta is found using a shooting algorithm that runs until it converges up to the set tolerance.
'''
XT = np.transpose(X)
beta = np.matmul(XT,X)
if len(args) >= 1: #Ridge parameter LAMBDA
LAMBDA = args[0]
beta[np.diag_indices_from(beta)]+=LAMBDA
beta = SVDinv(beta)
beta = np.matmul(beta,XT)
beta = np.matmul(beta,z)
#Shooting algorithm for Lasso
if len(args)>=2:
epsilon = args[1]
D = self.number_basis_elts
ints = np.arange(0,D,1)
beta_old = 0.0
while np.linalg.norm(beta-beta_old)>=epsilon:
beta_old = np.copy(beta)
for j in range(0,D):
aj = 2*np.sum(X[:,j]**2)
no_j = ints[np.arange(D)!=j]
cj = 2*np.sum(np.multiply(X[:,j],(z-np.matmul(X[:,no_j],beta[no_j]))))
if cj<-LAMBDA:
beta[j]=(cj+LAMBDA)/aj
elif cj > LAMBDA:
beta[j]=(cj-LAMBDA)/aj
else:
beta[j]=0.0
return beta
# Get model given beta
def model(self,beta):
        '''Returns height values based on the coefficients beta as a matrix
that matches the grid xm, ym. The degree of the polynomial equals self.degree.
'''
xm = self.xm; ym = self.ym; degree = self.degree #relabeling self variables
s=0
counter = 0
# loop that adds terms of the form beta*x^j*y^k such that j+k<=5
for j in range(0,degree + 1):
k = 0
while j+k <= degree:
s+= beta[counter]*(xm**j)*(ym**k)
counter +=1
k+=1
return s
def get_data_partition(self,k):
        ''' Creates a random partition of the index set {0,1,...,mn-1} into k
        (almost) equally sized parts. This can be used to make training/testing data.
        '''
mn = self.mn; correspondence = self.correspondence
indices = np.arange(mn)
indices_shuffle = np.arange(mn)
np.random.shuffle(indices_shuffle)
partition = []
for step in range(0,k):
part = list(indices_shuffle[step:mn:k])
#part = [correspondence[i] for i in part]
partition.append(part)
return partition
def bootstrap_step(self, samplesize, *args):
        '''Finds and returns the coefficients that determine a model (OLS, Ridge or Lasso),
        depending on *args, fitted to a bootstrap sample of the data.
        '''
mn = self.mn; X = self.X; z = self.z; #relabeling self variables
        integers = np.random.randint(low=0, high=mn, size=samplesize)  # high is exclusive, so this samples indices 0..mn-1 with replacement
znew = z[integers]
Xnew = X[integers,:]
betanew = self.get_beta(Xnew,znew,*args)
return betanew
# Variance/ covariance matrix
def var_covar_matrix(self,reg):
''' Returns the variance/covariance matrix for beta based on the given data.
This matrix is derived from a statistical viewpoint, where one assumes beta to
have a normal distribution.
'''
p = self.number_basis_elts; invXTX = self.invXTX; N = self.mn; f = self.f # Relabeling self variables
sigma2=1.0/(N-p-1)*np.sum((f-reg)*(f-reg))
return sigma2*invXTX # OBS! Based on matrix inversion. Inaccurate for N,p>>0.
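#================================================================================================================
# Illustrative usage sketch (not part of the original code): fit a degree-5
# polynomial to a small synthetic surface and compare OLS and Ridge R2 scores.
# The surface and the Ridge parameter are made up for demonstration.
def demo_regdata_fit(degree=5, LAMBDA=1e-3):
    x = np.linspace(0, 1, 20); y = np.linspace(0, 1, 20)
    xm, ym = np.meshgrid(x, y)
    f_demo = np.sin(2*np.pi*xm)*np.cos(2*np.pi*ym)
    demo = regdata(f_demo, degree)
    return R2(f_demo, demo.get_reg()), R2(f_demo, demo.get_reg(LAMBDA))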
#================================================================================================================
def plot_3D(f,plottitle):
''' Simple function to create 3d plot of the given data f,
with plotitle.
'''
m = len(f[0,:]); n = len(f);
x = np.linspace(0, 1, m)
y = np.linspace(0, 1, n);
xm, ym = np.meshgrid(x,y)
# Plot f
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(xm, ym, f, cmap=cm.coolwarm, linewidth=0, antialiased=False)
# Customize the z axis.
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter("%.02f"))
ax.text2D(0.05, 0.95, plottitle, transform=ax.transAxes)
ax.view_init(30, 60)
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show(block=False)
#================================================================================================================
def numerical_error(data,LAMBDA):
'''Rough numerical analysis of matrix inversions for this problem. Comparison of error and time usage
of SVD (singular values decomposition) for matrix inversion against scipy.linalg inversion algorithm.
Printing results to terminal.
'''
return_items = []
degree = data.degree; m = data.m; n = data.n
# Study numerical error and time for SVD
print("Polynomial fit of FrankeFunction in x, y of degree ", degree," with grid size ", (m,n)," analysis:")
print("")
X = data.X; XT = np.transpose(X); XTX = np.matmul(XT,X) #Obtaining XTX
start_time = time.time() # start meassuring time
    inv_XTX = linalg.inv(XTX) # inversion using scipy.linalg
end_time = time.time()
print("Inverting XTX without SVD", "--- %s seconds ---" % (end_time - start_time)); return_items.append(end_time - start_time)
inv_XTX_ = np.copy(inv_XTX) # storing inversion of XTX for later
start_time = time.time()
inv_XTX = SVDinv(XTX)
end_time = time.time()
print("Inverting XTX with SVD", "--- %s seconds ---" % (end_time - start_time)); return_items.append(end_time - start_time)
print(' ')
I_approx_ = np.matmul(inv_XTX_,XTX); # approximate I (no SVD)
I = np.identity(len(I_approx_)); # obtaining analytical I
output = np.linalg.norm(I_approx_-I)
print("|(X^TX)^-1(X^TX)-I| = ",output, " (no SVD)"); return_items.append(output)
I_approx = np.matmul(inv_XTX,XTX) # approximate I (SVD)
output = np.linalg.norm(I_approx-I)
print("|(X^TX)^-1(X^TX)-I| = ",np.linalg.norm(I_approx-I), " (SVD)"); return_items.append(output)
XTX[np.diag_indices_from(XTX)]+=LAMBDA
inv_XTX = linalg.inv(XTX)
I_approx_ = np.matmul(inv_XTX,XTX) # approximate I (no SVD)
output = np.linalg.norm(I_approx_-I)
print("|(X^TX + I LAMBDA)^-1(X^TX + I LAMBDA)-I| = ",output , ", LAMBDA = ", LAMBDA, " (no SVD)"); return_items.append(output)
inv_XTX = SVDinv(XTX)
I_approx = np.matmul(inv_XTX,XTX)
output = np.linalg.norm(I_approx-I)
print("|(X^TX + I LAMBDA)^-1(X^TX + I LAMBDA)-I| = ",output, ", LAMBDA = ", LAMBDA, " (SVD)"); return_items.append(output)
print(' ')
return return_items
#================================================================================================================
def plot_R2_scores(data,Nstart,Nstop,name, epsilon = 0.001):
''' This function makes a plot of the R2 scores vs Lambda of the different regression methods,
for a given dataset.'''
degree = data.degree; f = data.f # obtaining class data
N = Nstop-Nstart # number of lambdas
lambdas = np.zeros(N)
R2_ols = np.zeros(N)
R2_Ridge = np.zeros(N)
R2_Lasso = np.zeros(N)
for i in range(0,N):
LAMBDA = 10**(Nstart+i)
lambdas[i]=LAMBDA
R2_ols[i]=R2(f, data.get_reg())
R2_Ridge[i]=R2(f, data.get_reg(LAMBDA))
R2_Lasso[i]=R2(f, data.get_reg(LAMBDA,epsilon))
print("Completed lambda: ", LAMBDA, " Completion: {:.1%}".format(float(i)/(N-1)))
plotitle = '$R^2$ score of degree {} polynomial fit on {}'.format(degree,name)
plt.figure()
plt.plot(np.log10(lambdas),R2_ols)
plt.plot(np.log10(lambdas),R2_Ridge)
plt.plot(np.log10(lambdas),R2_Lasso,'--')
plt.axis([Nstart, N+Nstart-1, 0, 1])
plt.xlabel('log $\lambda$')
plt.ylabel('$R^2$ score')
plt.legend(('Ordinary least square','Ridge','Lasso'))
plt.title(plotitle)
plt.grid(True)
plt.show(block=False)
#================================================================================================================
def plot_R2_scores_k_cross_validation(data,Nstart,Nstop,k,name, epsilon = 0.001):
    ''' This function plots the mean R2 scores (test and training) vs LAMBDA obtained from a
    k-fold cross validation on the given data, for OLS, Ridge and Lasso regression. The same
    partition of the data set is used for each lambda.
    See "k_cross_validation" for more details.'''
degree = data.degree; f = data.f # obtaining class data
N = Nstop-Nstart # number of lambdas
# Comparing R2 scores, regression with fixed degree, variable LAMBDA
lambdas = np.zeros(N)
partition = data.get_data_partition(k)
kval = k_cross_validation(data,partition)
kval.R2()
R2_Lasso_test_data = np.zeros(N)
R2_Lasso_training_data = np.zeros(N)
R2_Ridge_test_data = np.zeros(N)
R2_Ridge_training_data = np.zeros(N)
# OLS R2 score
R2score_ols_test, R2score_ols_train = kval.test_R2, kval.train_R2
R2_ols_test_data = np.ones(N)*R2score_ols_test
R2_ols_training_data = np.ones(N)*R2score_ols_train
for i in range(0,N):
LAMBDA = 10**(Nstart+i)
lambdas[i]=LAMBDA
kval = k_cross_validation(data,partition,LAMBDA)
kval.R2()
# Ridge R2 score
R2score_ridge_test, R2score_ridge_train = kval.test_R2, kval.train_R2
R2_Ridge_test_data[i] = R2score_ridge_test
R2_Ridge_training_data[i] = R2score_ridge_train
kval = k_cross_validation(data,partition,LAMBDA,epsilon)
kval.R2()
# Lasso R2 score
R2score_lasso_test, R2score_lasso_train = kval.test_R2, kval.train_R2
R2_Lasso_test_data[i] = R2score_lasso_test
R2_Lasso_training_data[i] = R2score_lasso_train
print("Completed lambda: ", LAMBDA, " Completion: {:.1%}".format(float(i)/(N-1)))
plotitle = '$R^2$ scores of degree {} polynomial fit on {}, $k=${}'.format(degree,name,k)
plt.figure()
plt.plot(np.log10(lambdas),R2_ols_test_data)
plt.plot(np.log10(lambdas),R2_ols_training_data,'--')
plt.plot(np.log10(lambdas),R2_Ridge_test_data)
plt.plot(np.log10(lambdas),R2_Ridge_training_data,'--')
plt.plot(np.log10(lambdas),R2_Lasso_test_data)
plt.plot(np.log10(lambdas),R2_Lasso_training_data,'--')
plt.axis([Nstart, Nstart+N-2, 0, 1])
plt.xlabel('log $\lambda$')
plt.ylabel('$R^2$ score')
if (np.amax(R2_ols_test_data)> 0 and np.amax(R2_ols_training_data)> 0):
plt.legend(('OLS: test data', 'OLS: training data','Ridge: test data', 'Ridge: training data','Lasso: test data', 'Lasso: training data'))
elif (np.amax(R2_ols_test_data)<= 0 and np.amax(R2_ols_training_data)> 0):
plt.legend(('OLS: test data (negative)', 'OLS: training data','Ridge: test data', 'Ridge: training data','Lasso: test data', 'Lasso: training data'))
elif (np.amax(R2_ols_test_data)> 0 and np.amax(R2_ols_training_data)<= 0):
plt.legend(('OLS: test data', 'OLS: training data (negative)','Ridge: test data', 'Ridge: training data','Lasso: test data', 'Lasso: training data'))
elif (np.amax(R2_ols_test_data)<= 0 and np.amax(R2_ols_training_data)<= 0):
plt.legend(('OLS: test data (negative)', 'OLS: training data (negative)','Ridge: test data', 'Ridge: training data','Lasso: test data', 'Lasso: training data'))
plt.title(plotitle)
plt.grid(True)
plt.show(block=False)
#return ols_best, ridge_best, lasso_best
#================================================================================================================
def plot_R2_complexity(degstart,degend,degstep,f,name, LAMBDA = 0.00001, epsilon = 0.001):
    ''' Comparing R2 scores of OLS, Ridge and Lasso regression with fixed LAMBDA and
    variable polynomial degree, and plotting the result.
    '''
degrees = np.arange(degstart,degend+1,degstep)
N = len(degrees)
R2_ols, R2_Ridge, R2_Lasso = np.zeros(N), np.zeros(N), np.zeros(N)
for i, degree in enumerate(degrees):
data_f = regdata(f,degree)
R2_ols[i]=R2(f, data_f.get_reg())
R2_Ridge[i]=R2(f, data_f.get_reg(LAMBDA))
R2_Lasso[i]=R2(f, data_f.get_reg(LAMBDA,epsilon))
print("Completed degree: ", degree, " Completion: {:.1%}".format(float(i)/(N-1)))
plotitle = '$R^2$ score of polynomial fit on {} with $\lambda=${}'.format(name,LAMBDA)
plt.figure()
plt.plot(degrees,R2_ols)
plt.plot(degrees,R2_Ridge)
plt.plot(degrees,R2_Lasso,'--')
plt.xlabel('degree of fitting polynomial')
plt.ylabel('$R^2$ score')
plt.axis([degstart,degend, 0, 1])
plt.legend(('Ordinary least square','Ridge','Lasso'))
plt.title(plotitle)
plt.grid(True)
plt.show(block=False)
#================================================================================================================
def plot_MSE_variance(degstart, degend, degstep, f, LAMBDA = 0.01, epsilon = 0.001, k=10):
# Comparing MSE, bias, variance and additional terms as function of complexity.
degrees = np.arange(degstart,degend+1,degstep)
N = len(degrees)
data = regdata(f,5)
fvar = np.zeros(N); fbias = np.zeros(N); fMSE = np.zeros(N); fextra_terms = np.zeros(N)
# function for plotting
def makeplot(methodname, *args, partition = None):
print(methodname)
for i, degree in enumerate(degrees):
data = regdata(f,degree)
            if partition is None:
freg = data.get_reg(*args)
fvar[i], fbias[i], fMSE[i], fextra_terms[i] = var(freg), bias(f,freg), MSE(f,freg), extra_term(f,freg)
else:
kval = k_cross_validation(data, partition, *args)
kval.MSE()
fvar[i] = kval.test_var
fbias[i] = kval.test_bias
fMSE[i] = kval.test_MSE
fextra_terms[i] =kval.test_extra_terms
#fvar[i], fbias[i], fMSE[i], fextra_terms[i], train_var, train_bias, train_MSE, train_extra_terms
print("Completed degree: ", degree, " Completion: {:.1%}".format(float(degree-degstart)/(degend-degstart)))
plt.figure()
plt.plot(degrees, fvar)
plt.plot(degrees, fbias)
plt.plot(degrees, fMSE,'--')
plt.plot(degrees, fextra_terms)
plt.xlabel('degree')
plt.ylabel('Variance, bias, and MSE')
plt.legend(('Variance','Bias','MSE','Additional term'))
plt.grid(True)
plt.show(block=False)
#It is a good idea to comment out the plots that you dont need
## Ordinary least square plot
#makeplot("Ordinary least squares")
#plt.title("Error of ordinary least squares")
## Ridge plot
#makeplot("Ridge regression",LAMBDA)
#plt.title("Error of Ridge regression, $\lambda=${}".format(LAMBDA))
## Lasso plot
#makeplot("Lasso regression",LAMBDA,epsilon)
#plt.title("Error of lasso regression, $\lambda=${}".format(LAMBDA))
# k-cross validation
partition_ = data.get_data_partition(k)
# Ordinary least square plot
# makeplot("Ordinary least squares {}-fold cross validation".format(k), partition = partition_)
# plt.title("Error OLS using {}-fold cross validation".format(k))
## Ridge plot
#makeplot("Ridge regression {}-fold cross validation".format(k), LAMBDA, partition=partition_)
#plt.title("Error Ridge using {}-fold cross validation, $\lambda=${}".format(k,LAMBDA))
# Lasso plot
makeplot("Lasso regression {}-fold cross validation".format(k), LAMBDA, epsilon, partition_)
plt.title("Error Lasso using {}-fold cross validation, $\lambda=${}".format(k,LAMBDA))
|
the-stack_0_3148 | import spacy
from spacy.tokenizer import Tokenizer
from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
from spacy.language import Language
from spacy.pipe_analysis import Doc
from spacy.util import compile_infix_regex
from gensim.corpora.dictionary import Dictionary
from itertools import tee
from enum import Enum
from os import cpu_count
from typing import Iterable
class LangEnum(Enum):
"""
Enum to represent supported language codes
"""
EN = 0
RU = 1
class Preprocessor:
"""
Use this class to encapsulate Spacy models, Gensim stuff and everything
else needed for text preprocessing.
"""
    def __init__(self, language: LangEnum = LangEnum.EN,
stop_words: Iterable[str] = None,
tokenize_ents: bool = True,
workers: int = cpu_count()):
# Preload ready to use spacy language model (tokenizer, lemmatizer, etc)
if language == LangEnum.EN:
self.nlp: Language = spacy.load('en_core_web_sm')
elif language == LangEnum.RU:
self.nlp: Language = spacy.load('ru_core_news_md')
else:
raise NotImplementedError('Only Russian and English '
'languages are supported at the moment')
        # Whether or not to tokenize detected named entities
self.tokenize_ents = tokenize_ents
self.workers = workers
# Modify tokenizer infix patterns
infixes = (
LIST_ELLIPSES
+ LIST_ICONS
+ [
r"(?<=[0-9])[+\-\*^](?=[0-9-])",
r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
),
r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
# r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
]
)
infix_re = compile_infix_regex(infixes)
self.nlp.tokenizer.infix_finditer = infix_re.finditer
# Update the built-in stopwords list
if stop_words is not None:
self.update_stopwords(stop_words)
@spacy.Language.component(name='custom_preproc')
def lemmatize(doc: Doc):
tokens = [token for token in doc
if not (token.is_stop or
token.is_punct or
token.like_email or
token.like_url or
token.is_space or
token.is_currency or
token.like_num or
token.lemma_.lower() in
self.nlp.Defaults.stop_words)]
res_tokens = []
if not self.tokenize_ents and len(doc.ents) > 0:
merged_tokens = ""
for token in tokens:
                    if token.ent_iob == 3:  # Beginning of the entity
# token = "-".join(token.lemma_.lower().split('-'))
merged_tokens = token.lemma_.lower().strip() + "_"
elif token.ent_iob == 1: # Inside the entity
merged_tokens += token.lemma_.lower().strip() + "_"
elif merged_tokens == "":
res_tokens.append(token.lemma_.lower().strip())
else:
res_tokens.append(merged_tokens[:-1])
merged_tokens = ""
else:
res_tokens = [t.lemma_.lower().strip() for t in tokens]
new_doc = Doc(vocab=doc.vocab,
words=res_tokens)
return new_doc
# Add stop words removing to spacy pipeline
self.nlp.add_pipe(
'custom_preproc',
last=True
)
def update_stopwords(self, stop_words: Iterable[str]) -> None:
"""
Update built-in spacy language model stopwords list
:param stop_words: Iterable of strings - target stopwords
:return: None
"""
self.nlp.Defaults.stop_words.update(stop_words)
for word in self.nlp.Defaults.stop_words:
lexeme = self.nlp.vocab[word]
lexeme.is_stop = True
def preprocess_texts(self,
data: Iterable[str]) -> (Iterable[Doc], Dictionary):
"""
Get preprocessed texts
:param data: iterable of strings
(each string is considered to be a single document)
:return: preprocessed documents and
a gensim Dictionary of the given docs
"""
docs = self.__get_preprocessed_docs__(data)
docs, docs_iter_copy = tee(docs)
return docs, Dictionary(map(lambda x: [y.text for y in x], docs_iter_copy))
def __get_preprocessed_docs__(self,
data: Iterable[str]):
"""
Helper function to generate new docs using spacy Language.pipe()
:param data: iterable of strings (1 string = 1 doc)
:return: spacy Document generator
"""
docs = self.nlp.pipe(data, n_process=self.workers)
for doc in docs:
yield doc
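# Illustrative usage sketch (not part of the module). It assumes the spacy model
# 'en_core_web_sm' is installed; workers=1 keeps the pipeline single-process here.
if __name__ == '__main__':
    preproc = Preprocessor(language=LangEnum.EN, workers=1)
    docs, dictionary = preproc.preprocess_texts([
        'The cats were sitting on the mat.',
        'A dog chased the cats around the garden.',
    ])
    for doc in docs:
        print([token.text for token in doc])
    print(dictionary.token2id)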
|
the-stack_0_3150 | #
# General Electricity sector Decarbonization Model (GEDM)
# Copyright (C) 2020 Cheng-Ta Chu.
# Licensed under the MIT License (see LICENSE file).
#
# Module note:
# Define commodity, time slice and transmission classes
#
#------------ commodity -------------
class Commodity:
""" Commodity class """
def __init__(self, **kwargs):
self.sCommodityName = str( kwargs["CommodityName"] )
self.sCategory = str( kwargs["Category"] )
self.fHeatRate = float(kwargs["HeatRate"])
        self.fEmissionFactor_CO2 = float(kwargs["EmissionFactor_CO2"])     # M.Tonne/PJ = Tonne/GJ
self.fFuelPrice_YS = list() # USD/GJ
return
#------------ time slice -------------
class TimeSlice:
""" time slice class """
def __init__(self, **kwargs):
self.sTSIndex = str(kwargs["TSIndex"])
self.sMonth = str(kwargs["Month"])
self.sDay = str(kwargs["Day"])
self.sHour = str(kwargs["Hour"])
self.iDayIndex = int(kwargs["DayIndex"])
self.iRepDayInYear = int(kwargs["RepDayInYear"])
self.iRepHoursInDay = int(kwargs["RepHoursInDay"])
self.iRepHoursInYear = int(kwargs["RepHoursInYear"])
return
class DayTimeSlice:
""" TS day class """
def __init__(self, **kwargs):
self.MonthDay = str(kwargs["MonthDay"])
self.iDayIndex = int(kwargs["iDayIndex"])
self.lsDiurnalTS = list() # list of DiurnalTimeSlice objects
return
class DiurnalTimeSlice:
""" diurnal time slice class """
def __init__(self, **kwargs):
self.sTSIndex = kwargs["sTSIndex"]
self.iTimeSliceIndex = kwargs["iTimeSliceIndex"]
self.iRepHoursInYear = kwargs["iRepHoursInYear"]
self.iRepHoursInDay = kwargs["iRepHoursInDay"]
self.fValue = 0
return
#------------ transmission -------------
class Transmission:
""" transmission class, links between zones """
def __init__(self, **kwargs):
self.sTransID = str( kwargs["From"] ) + "/" + str( kwargs["To"] )
self.sFrom = str( kwargs["From"] ) # source zone
self.sTo = str( kwargs["To"] ) # destination zone
self.fDistance = float( kwargs["Dist"] ) # KM, distance of the link
self.b2015Conn = int( kwargs["Conn2015"] ) # connection status in base year
self.fBaseCap = float( kwargs["BaseCap"] ) # base year capacity
self.dicTransNewBuild_YS = {} # MW, new capacity by period
self.dicTransAccCap_YS = {} # MW, total capacity by period
return
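# Illustrative sketch (not part of the module): constructing objects from keyword
# arguments, e.g. from rows of an input table. All values below are made up.
if __name__ == "__main__":
    coal = Commodity(CommodityName="coal", Category="fossil",
                     HeatRate=2.5, EmissionFactor_CO2=0.0946)
    link = Transmission(From="ZoneA", To="ZoneB", Dist=120,
                        Conn2015=1, BaseCap=500)
    print(coal.sCommodityName, coal.fEmissionFactor_CO2)
    print(link.sTransID, link.fDistance, link.fBaseCap)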
|
the-stack_0_3151 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to configure behavior, define $CQL_TEST_HOST to the destination address
# for Thrift connections, and $CQL_TEST_PORT to the associated port.
from __future__ import with_statement
import re
from .basecase import BaseTestCase, cqlsh
from .cassconnect import testrun_cqlsh
import unittest
import sys
BEL = '\x07' # the terminal-bell character
CTRL_C = '\x03'
TAB = '\t'
# completions not printed out in this many seconds may not be acceptable.
# tune if needed for a slow system, etc, but be aware that the test will
# need to wait this long for each completion test, to make sure more info
# isn't coming
COMPLETION_RESPONSE_TIME = 0.5
completion_separation_re = re.compile(r'\s+')
@unittest.skipIf(sys.platform == "win32", 'Tab completion tests not supported on Windows')
class CqlshCompletionCase(BaseTestCase):
def setUp(self):
self.cqlsh_runner = testrun_cqlsh(cqlver=cqlsh.DEFAULT_CQLVER, env={'COLUMNS': '100000'})
self.cqlsh = self.cqlsh_runner.__enter__()
def tearDown(self):
self.cqlsh_runner.__exit__(None, None, None)
def _get_completions(self, inputstring, split_completed_lines=True):
"""
Get results of tab completion in cqlsh. Returns a bare string if a
string completes immediately. Otherwise, returns a set of all
whitespace-separated tokens in the offered completions by default, or a
list of the lines in the offered completions if split_completed_lines is
False.
"""
self.cqlsh.send(inputstring)
self.cqlsh.send(TAB)
immediate = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
immediate = immediate.replace(' \b', '')
self.assertEqual(immediate[:len(inputstring)], inputstring)
immediate = immediate[len(inputstring):]
immediate = immediate.replace(BEL, '')
if immediate:
return immediate
self.cqlsh.send(TAB)
choice_output = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
if choice_output == BEL:
choice_output = ''
self.cqlsh.send(CTRL_C) # cancel any current line
self.cqlsh.read_to_next_prompt()
choice_lines = choice_output.splitlines()
if choice_lines:
# ensure the last line of the completion is the prompt
prompt_regex = self.cqlsh.prompt.lstrip() + re.escape(inputstring)
msg = ('Double-tab completion '
'does not print prompt for input "{}"'.format(inputstring))
self.assertRegexpMatches(choice_lines[-1], prompt_regex, msg=msg)
choice_lines = [line.strip() for line in choice_lines[:-1]]
choice_lines = [line for line in choice_lines if line]
if split_completed_lines:
                completed_lines = [set(completion_separation_re.split(line.strip()))
                                   for line in choice_lines]
                if not completed_lines:
                    return set()
                completed_tokens = set.union(*completed_lines)
                return completed_tokens - {''}
else:
return choice_lines
assert False
def _trycompletions_inner(self, inputstring, immediate='', choices=(),
other_choices_ok=False,
split_completed_lines=True):
"""
Test tab completion in cqlsh. Enters in the text in inputstring, then
simulates a tab keypress to see what is immediately completed (this
should only happen when there is only one completion possible). If
there is an immediate completion, the new text is expected to match
'immediate'. If there is no immediate completion, another tab keypress
is simulated in order to get a list of choices, which are expected to
match the items in 'choices' (order is not important, but case is).
"""
completed = self._get_completions(inputstring,
split_completed_lines=split_completed_lines)
if immediate:
msg = 'cqlsh completed %r, but we expected %r' % (completed, immediate)
self.assertEqual(completed, immediate, msg=msg)
return
if other_choices_ok:
self.assertEqual(set(choices), completed.intersection(choices))
else:
self.assertEqual(set(choices), set(completed))
def trycompletions(self, inputstring, immediate='', choices=(),
other_choices_ok=False, split_completed_lines=True):
try:
self._trycompletions_inner(inputstring, immediate, choices,
other_choices_ok=other_choices_ok,
split_completed_lines=split_completed_lines)
finally:
self.cqlsh.send(CTRL_C) # cancel any current line
self.cqlsh.read_to_next_prompt()
def strategies(self):
return self.module.CqlRuleSet.replication_strategies
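# For example, in an interactive cqlsh session the two behaviours exercised by
# trycompletions() look like this (taken from the tests below):
#
#   cqlsh> alt<TAB>            -> the line immediately becomes "ALTER "
#   cqlsh> DROP <TAB><TAB>     -> a list of choices (AGGREGATE, INDEX, KEYSPACE, ...) is printed
#
# 'immediate' asserts the first case, 'choices' the second.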
class TestCqlshCompletion(CqlshCompletionCase):
cqlver = '3.1.6'
module = cqlsh.cql3handling
def test_complete_on_empty_string(self):
self.trycompletions('', choices=('?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY',
'COPY', 'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE',
'DROP', 'GRANT', 'HELP', 'INSERT', 'LIST', 'LOGIN', 'PAGING', 'REVOKE',
'SELECT', 'SHOW', 'SOURCE', 'TRACING', 'EXPAND', 'SERIAL', 'TRUNCATE',
'UPDATE', 'USE', 'exit', 'quit', 'CLEAR', 'CLS'))
def test_complete_command_words(self):
self.trycompletions('alt', '\b\b\bALTER ')
self.trycompletions('I', 'NSERT INTO ')
self.trycompletions('exit', ' ')
def test_complete_in_uuid(self):
pass
def test_complete_in_select(self):
pass
def test_complete_in_insert(self):
self.trycompletions('INSERT INTO ',
choices=('twenty_rows_table',
'ascii_with_special_chars',
'users',
'has_all_types',
'system.',
'empty_composite_table',
'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.',
'songs'),
other_choices_ok=True)
self.trycompletions('INSERT INTO twenty_rows_composite_table',
immediate=' ')
self.trycompletions('INSERT INTO twenty_rows_composite_table ',
choices=['(', 'JSON'])
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b ',
choices=(')', ','))
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, ',
immediate='c ')
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c ',
choices=(',', ')'))
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b)',
immediate=' VALUES ( ')
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c) VAL',
immediate='UES ( ')
self.trycompletions(
'INSERT INTO twenty_rows_composite_table (a, b, c) VALUES (',
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('",
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ( 'eggs",
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('eggs'",
immediate=', ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs',"),
['<value for b (text)>'],
split_completed_lines=False)
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam')"),
immediate=' ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') "),
choices=[';', 'USING', 'IF'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam');"),
choices=['?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY', 'COPY',
'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE', 'DROP',
'EXPAND', 'GRANT', 'HELP', 'INSERT', 'LIST', 'LOGIN', 'PAGING',
'REVOKE', 'SELECT', 'SHOW', 'SOURCE', 'SERIAL', 'TRACING',
'TRUNCATE', 'UPDATE', 'USE', 'exit', 'quit',
'CLEAR', 'CLS'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') US"),
immediate='ING T')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING"),
immediate=' T')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING T"),
choices=['TTL', 'TIMESTAMP'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TT"),
immediate='L ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TI"),
immediate='MESTAMP ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 A"),
immediate='ND TTL ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 A"),
immediate='ND TIMESTAMP ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 AND "),
choices=[])
def test_complete_in_update(self):
self.trycompletions("UPD", immediate="ATE ")
self.trycompletions("UPDATE ",
choices=['twenty_rows_table',
'users', 'has_all_types', 'system.',
'ascii_with_special_chars',
'empty_composite_table', 'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs'],
other_choices_ok=True)
self.trycompletions("UPDATE empty_table ", choices=['USING', 'SET'])
self.trycompletions("UPDATE empty_table S",
immediate='ET lonelycol = ')
self.trycompletions("UPDATE empty_table SET lon",
immediate='elycol = ')
self.trycompletions("UPDATE empty_table SET lonelycol",
immediate=' = ')
self.trycompletions("UPDATE empty_table U", immediate='SING T')
self.trycompletions("UPDATE empty_table USING T",
choices=["TTL", "TIMESTAMP"])
self.trycompletions("UPDATE empty_table SET lonelycol = ",
choices=['<term (text)>'],
split_completed_lines=False)
self.trycompletions("UPDATE empty_table SET lonelycol = 'eg",
choices=['<term (text)>'],
split_completed_lines=False)
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs'",
choices=[',', 'WHERE'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonel",
immediate='ykey ')
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey ",
choices=['=', '<=', '>=', '>', '<', 'CONTAINS', 'IN', '['])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 ",
choices=['AND', 'IF', ';'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 AND ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey ",
choices=[',', ')'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) ",
choices=['=', '<=', '>=', '<', '>'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) ",
choices=[';', 'AND', 'IF'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF ",
choices=['EXISTS', '<quotedName>', '<identifier>'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF EXISTS ",
choices=['>=', '!=', '<=', 'IN', '[', ';', '=', '<', '>'])
def test_complete_in_delete(self):
self.trycompletions('DELETE F', choices=['FROM', '<identifier>', '<quotedName>'])
self.trycompletions('DELETE a ', choices=['FROM', '[', ','])
self.trycompletions('DELETE a [',
choices=['<wholenumber>', 'false', '-', '<uuid>',
'<pgStringLiteral>', '<float>', 'TOKEN',
'<identifier>', '<quotedStringLiteral>',
'{', '[', 'NULL', 'true', '<blobLiteral>'])
self.trycompletions('DELETE a, ',
choices=['<identifier>', '<quotedName>'])
self.trycompletions('DELETE a FROM ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DELETE FROM ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'system_auth.', 'system_distributed.',
'system_traces.',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DELETE FROM twenty_rows_composite_table ',
choices=['USING', 'WHERE'])
self.trycompletions('DELETE FROM twenty_rows_composite_table U',
immediate='SING TIMESTAMP ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP ',
choices=['<wholenumber>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0',
choices=['<wholenumber>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 ',
immediate='WHERE ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ',
choices=['a', 'b', 'TOKEN('])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE a ',
choices=['<=', '>=', 'CONTAINS', 'IN', '[', '=', '<', '>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(',
immediate='a ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a',
immediate=' ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a ',
choices=[')', ','])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) ',
choices=['>=', '<=', '=', '<', '>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) >= ',
choices=['false', 'true', '<pgStringLiteral>',
'token(', '-', '<float>', 'TOKEN',
'<identifier>', '<uuid>', '{', '[', 'NULL',
'<quotedStringLiteral>', '<blobLiteral>',
'<wholenumber>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) '),
choices=['AND', 'IF', ';'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF '),
choices=['EXISTS', '<identifier>', '<quotedName>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b '),
choices=['>=', '!=', '<=', 'IN', '[', '=', '<', '>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b < 0 '),
choices=['AND', ';'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b < 0 AND '),
choices=['<identifier>', '<quotedName>'])
self.trycompletions(("DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE "
"b = 'eggs'"),
choices=['AND', 'IF', ';'])
def test_complete_in_batch(self):
pass
def test_complete_in_create_keyspace(self):
self.trycompletions('create keyspace ', '', choices=('<identifier>', '<quotedName>', 'IF'))
self.trycompletions('create keyspace moo ',
"WITH replication = {'class': '")
self.trycompletions('create keyspace "12SomeName" with ',
"replication = {'class': '")
self.trycompletions("create keyspace fjdkljf with foo=bar ", "",
choices=('AND', ';'))
self.trycompletions("create keyspace fjdkljf with foo=bar AND ",
"replication = {'class': '")
self.trycompletions("create keyspace moo with replication", " = {'class': '")
self.trycompletions("create keyspace moo with replication=", " {'class': '")
self.trycompletions("create keyspace moo with replication={", "'class':'")
self.trycompletions("create keyspace moo with replication={'class'", ":'")
self.trycompletions("create keyspace moo with replication={'class': ", "'")
self.trycompletions("create keyspace moo with replication={'class': '", "",
choices=self.strategies())
        # ttl is an "unreserved keyword", so completion should still work
self.trycompletions("create keySPACE ttl with replication ="
"{ 'class' : 'SimpleStrategy'", ", 'replication_factor': ")
self.trycompletions("create keyspace ttl with replication ="
"{'class':'SimpleStrategy',", " 'replication_factor': ")
self.trycompletions("create keyspace \"ttl\" with replication ="
"{'class': 'SimpleStrategy', ", "'replication_factor': ")
self.trycompletions("create keyspace \"ttl\" with replication ="
"{'class': 'SimpleStrategy', 'repl", "ication_factor'")
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': ", '',
choices=('<term>',))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1", '',
choices=('<term>',))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1 ", '}')
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1, ",
'', choices=())
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1} ",
'', choices=('AND', ';'))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'NetworkTopologyStrategy', ", '',
choices=('<dc_name>',))
self.trycompletions("create keyspace \"PB and J\" with replication={"
"'class': 'NetworkTopologyStrategy'", ', ')
self.trycompletions("create keyspace PBJ with replication={"
"'class': 'NetworkTopologyStrategy'} and ",
"durable_writes = '")
def test_complete_in_string_literals(self):
# would be great if we could get a space after this sort of completion,
# but readline really wants to make things difficult for us
self.trycompletions("create keyspace blah with replication = {'class': 'Sim",
"pleStrategy'")
def test_complete_in_drop(self):
self.trycompletions('DR', immediate='OP ')
self.trycompletions('DROP ',
choices=['AGGREGATE', 'COLUMNFAMILY', 'FUNCTION',
'INDEX', 'KEYSPACE', 'ROLE', 'TABLE',
'TRIGGER', 'TYPE', 'USER'])
def test_complete_in_drop_keyspace(self):
self.trycompletions('DROP K', immediate='EYSPACE ')
quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions('DROP KEYSPACE ',
choices=['IF', quoted_keyspace])
self.trycompletions('DROP KEYSPACE ' + quoted_keyspace,
choices=[';'])
self.trycompletions('DROP KEYSPACE I',
immediate='F EXISTS ' + quoted_keyspace + ';')
def create_columnfamily_table_template(self, name):
"""Parameterized test for CREATE COLUMNFAMILY and CREATE TABLE. Since
they're synonyms, they should have the same completion behavior, so this
test avoids duplication between tests for the two statements."""
prefix = 'CREATE ' + name + ' '
quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions(prefix + '',
choices=['IF', quoted_keyspace, '<new_table_name>'])
self.trycompletions(prefix + 'IF ',
immediate='NOT EXISTS ')
self.trycompletions(prefix + 'IF NOT EXISTS ',
choices=['<new_table_name>', quoted_keyspace])
self.trycompletions(prefix + 'IF NOT EXISTS new_table ',
immediate='( ')
self.trycompletions(prefix + quoted_keyspace, choices=['.', '('])
self.trycompletions(prefix + quoted_keyspace + '( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + quoted_keyspace + '.',
choices=['<new_table_name>'])
self.trycompletions(prefix + quoted_keyspace + '.new_table ',
immediate='( ')
self.trycompletions(prefix + quoted_keyspace + '.new_table ( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + ' new_table ( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + ' new_table (col_a ine',
immediate='t ')
self.trycompletions(prefix + ' new_table (col_a int ',
choices=[',', 'PRIMARY'])
self.trycompletions(prefix + ' new_table (col_a int P',
immediate='RIMARY KEY ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY ',
choices=[')', ','])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY,',
choices=['<identifier>', '<quotedName>'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY)',
immediate=' ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) ',
choices=[';', 'WITH'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) W',
immediate='ITH ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH ',
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'dclocal_read_repair_chance',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'read_repair_chance', 'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance ',
immediate='= ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance = ',
choices=['<float_between_0_and_1>'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH compaction ',
immediate="= {'class': '")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': '",
choices=['SizeTieredCompactionStrategy',
'LeveledCompactionStrategy',
'DateTieredCompactionStrategy',
'TimeWindowCompactionStrategy'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'S",
immediate="izeTieredCompactionStrategy'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy",
immediate="'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'",
choices=['}', ','])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy', ",
immediate="'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy', '",
choices=['bucket_high', 'bucket_low', 'class',
'enabled', 'max_threshold',
'min_sstable_size', 'min_threshold',
'tombstone_compaction_interval',
'tombstone_threshold',
'unchecked_tombstone_compaction'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'}",
choices=[';', 'AND'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'} AND ",
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'dclocal_read_repair_chance',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'read_repair_chance', 'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'DateTieredCompactionStrategy', '",
choices=['base_time_seconds', 'max_sstable_age_days',
'timestamp_resolution', 'min_threshold', 'class', 'max_threshold',
'tombstone_compaction_interval', 'tombstone_threshold',
'enabled', 'unchecked_tombstone_compaction',
'max_window_size_seconds'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'TimeWindowCompactionStrategy', '",
choices=['compaction_window_unit', 'compaction_window_size',
'timestamp_resolution', 'min_threshold', 'class', 'max_threshold',
'tombstone_compaction_interval', 'tombstone_threshold',
'enabled', 'unchecked_tombstone_compaction',
'only_purge_repaired_tombstones'])
def test_complete_in_create_columnfamily(self):
self.trycompletions('CREATE C', choices=['COLUMNFAMILY', 'CUSTOM'])
self.trycompletions('CREATE CO', immediate='LUMNFAMILY ')
self.create_columnfamily_table_template('COLUMNFAMILY')
def test_complete_in_create_table(self):
self.trycompletions('CREATE T', choices=['TRIGGER', 'TABLE', 'TYPE'])
self.trycompletions('CREATE TA', immediate='BLE ')
self.create_columnfamily_table_template('TABLE')
def test_complete_in_describe(self):
"""
Tests for Cassandra-10733
"""
self.trycompletions('DES', immediate='C')
# quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions('DESCR', immediate='IBE ')
self.trycompletions('DESC TABLE ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'system_distributed.',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DESC TYPE ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'address',
'phone_number',
'band_info_type',
'tags'],
other_choices_ok=True)
self.trycompletions('DESC FUNCTION ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'fbestband',
'fbestsong',
'fmax',
'fmin',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DESC AGGREGATE ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'aggmin',
'aggmax',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
        # Unfortunately, the commented-out tests below will not work, because the keyspace name
        # contains quotes and cqlsh auto-completes a DESC differently when the keyspace is quoted.
        # They are left here in case this script is ever changed to use keyspace names without quotes.
# self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '"', immediate='.')
self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '".',
choices=['twenty_rows_table',
'ascii_with_special_chars',
'users',
'has_all_types',
'empty_composite_table',
'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'songs'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '"', immediate='.')
self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '".',
choices=['address',
'phone_number',
'band_info_type',
'tags'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '"', immediate='.f')
self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".', immediate='f')
self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".f',
choices=['fbestband',
'fbestsong',
'fmax',
'fmin'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '"', immediate='.aggm')
self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '".', immediate='aggm')
self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '".aggm',
choices=['aggmin',
'aggmax'],
other_choices_ok=True)
def test_complete_in_drop_columnfamily(self):
pass
def test_complete_in_truncate(self):
pass
def test_complete_in_alter_columnfamily(self):
pass
def test_complete_in_use(self):
pass
def test_complete_in_create_index(self):
pass
def test_complete_in_drop_index(self):
pass
|
the-stack_0_3153 | '''
Examples of using binary_threshold mask
Modification History:
'''
## import packages
import numpy as np
import matplotlib.pyplot as plt
import gzip
import pickle
from pyechoplot.plotting import plot_Sv, plot_mask, save_png_plot
## import pyechomask modules
from pyechomask.masks import binary_pulse, binary_threshold
from pyechomask.manipulate import merge_binary
## read raw multi-frequency EK60 data
def getSv(filepath):
f = gzip.open(filepath,'rb')
obj = pickle.load(f,encoding = 'bytes')
f.close()
return obj
## read Sv
Sv18 = getSv('./data/PS_Sv18.pklz')
Sv38 = getSv('./data/PS_Sv38.pklz')
## plot 18 kHz echogram
plot_Sv(Sv18)
plt.title("Sv18")
plt.show()
## create masks
pulse_mask_18 = binary_pulse(Sv18)
threshold_mask_18 = binary_threshold(Sv18,-75)
threshold_mask_38 = binary_threshold(Sv38,-85)
## plot 18 kHz echogram with pulse mask
plot_Sv(Sv18,mask = pulse_mask_18)
plt.title("18 kHz echogram with pulse mask")
plt.show()
#### create composite masks
## presence absence mask
pa_mask = threshold_mask_18 + threshold_mask_38
pa_mask[pa_mask > 0] = 1
plot_Sv(Sv18,mask = pa_mask)
plt.title("Presence or absense mask")
plt.show()
## merge masks
merged_mask = merge_binary([threshold_mask_18,threshold_mask_38])
## this time, plot just the mask
plot_mask(merged_mask)
plt.title("Merged mask")
plt.show()
# save
save_png_plot('./','merged masks')
#In this example, the merged_mask has 4 values (0,1,2,3).
#Their binary representations are:
for i in np.unique(merged_mask):
    print(i,bin(i)[2:].zfill(2))
#For example, cells with the value of 3 (11) have values of 1 for the
#first two binary masks.
#In this case, the Sv value is larger than -75 dB at 18 kHz and larger
#than -85 dB at 38 kHz.
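#hedged sketch (assumption: merge_binary packs the i-th input mask into bit i,
#so bit 0 holds threshold_mask_18 and bit 1 holds threshold_mask_38; check
#pyechomask.manipulate.merge_binary if the ordering differs)
merged_int = merged_mask.astype(int)
recovered_18 = (merged_int >> 0) & 1
recovered_38 = (merged_int >> 1) & 1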
|
the-stack_0_3154 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import environ
from _internal_utils import exec_bash, pf, lines
from fabric.api import cd, settings, sudo
if not environ.get("PY2"):
environ["PY2"] = "2.7.15"
if not environ.get("PY3"):
environ["PY3"] = "3.6.8"
@exec_bash
def depend_redhat():
"""
yum install -y gcc make
yum install -y zlib-devel bzip2-devel openssl-devel ncurses-devel sqlite-devel readline-devel gdbm-devel xz-devel libffi-devel
"""
@exec_bash
def depend_debian():
"""
apt-get update
apt-get install -y gcc make
apt-get install -y libreadline-gplv2-dev libncursesw5-dev libssl-dev libsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev
"""
def download_py2():
"""
curl -sL https://www.python.org/ftp/python/{var}/Python-{var}.tgz | tar -xz
"""
with cd("/usr/src"), settings(warn_only=True):
for line in lines(download_py2):
sudo(line.format(var=environ["PY2"]))
def download_py3():
    """
    curl -sL https://www.python.org/ftp/python/{var}/Python-{var}.tgz | tar -xz
    """
    with cd("/usr/src"), settings(warn_only=True):
        for line in lines(download_py3):
            sudo(line.format(var=environ["PY3"]))
def depend():
depend_map = [
("debian", depend_debian),
("redhat", depend_redhat),
]
dict(depend_map)[pf()]()
def setup_pip():
"""
curl -o get-pip.py https://bootstrap.pypa.io/get-pip.py
python3 get-pip.py
python get-pip.py
"""
with cd("/usr/src"), settings(warn_only=True):
for line in lines(setup_pip):
sudo(line)
def install_py2():
"""
./configure --enable-optimizations --enable-shared
make -s -j2
make install
ln -sf /usr/local/bin/python /usr/bin/python
echo "/usr/local/lib/" > /etc/ld.so.conf.d/python3.conf
ldconfig
"""
depend()
download_py2()
with cd("/usr/src/Python-{var}".format(var=environ["PY2"])), settings(warn_only=True):
for line in lines(install_py2):
sudo(line)
def install_py3():
"""
./configure --enable-optimizations --enable-shared
make -s -j2
make install
ln -sf /usr/local/bin/python3 /usr/bin/python3
echo "/usr/local/lib/" > /etc/ld.so.conf.d/python3.conf
ldconfig
"""
depend()
download_py3()
with cd("/usr/src/Python-{var}".format(var=environ["PY3"])), settings(warn_only=True):
for line in lines(install_py3):
sudo(line)
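# Hedged usage sketch (assumes this module is loaded as a Fabric 1.x fabfile and
# the target host is reachable over SSH; the host string below is illustrative):
#   fab -f this_fabfile.py -H root@203.0.113.10 install_py3
#   fab -f this_fabfile.py -H root@203.0.113.10 setup_pip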
|
the-stack_0_3155 | import noise
import numpy as np
from PIL import Image
import math
import io
import json
from scipy.misc import toimage
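# NOTE (environment assumption): scipy.misc.toimage was deprecated in SciPy 1.0
# and removed in SciPy 1.2, so this script needs scipy < 1.2. A rough PIL-based
# substitute would be Image.fromarray(arr.astype(np.uint8)) for 0-255 data.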
shape = (1024, 1024)
scale = 150
octaves = 4
persistence = 0.5
lacunarity = 2.0
threshold = 0.05
seed = np.random.randint(0, 500)
black = [0, 0, 0]
blue = [65,105,225]
green = [34,139,34]
beach = [238, 214, 175]
snow = [255, 250, 250]
mountain = [139, 137, 137]
lightblue = [0,191,255]
darkgreen = [0,100,0]
sandy = [210,180,140]
def add_color2(world):
color_world = np.zeros(world.shape+(3,))
for i in range(shape[0]):
for j in range(shape[1]):
if world[i][j] < threshold + 0.05:
color_world[i][j] = blue
elif world[i][j] < threshold + 0.055:
color_world[i][j] = sandy
elif world[i][j] < threshold + 0.1:
color_world[i][j] = beach
elif world[i][j] < threshold + 0.25:
color_world[i][j] = green
elif world[i][j] < threshold + 0.6:
color_world[i][j] = darkgreen
elif world[i][j] < threshold + 0.7:
color_world[i][j] = mountain
elif world[i][j] < threshold + 1.0:
color_world[i][j] = snow
return color_world
world = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
world[i][j] = noise.pnoise2(i / scale,
j / scale,
octaves=octaves,
persistence=persistence,
lacunarity=lacunarity,
repeatx=2048,
repeaty=2048,
base=seed)
center_x, center_y = shape[1] // 2, shape[0] // 2
circle_grad = np.zeros_like(world)
for y in range(world.shape[0]):
for x in range(world.shape[1]):
distx = abs(x - center_x)
disty = abs(y - center_y)
dist = math.sqrt(distx*distx + disty*disty)
circle_grad[y][x] = dist
# get it between -1 and 1
max_grad = np.max(circle_grad)
circle_grad = circle_grad / max_grad
circle_grad -= 0.5
circle_grad *= 2.0
circle_grad = -circle_grad
# shrink gradient
for y in range(world.shape[0]):
for x in range(world.shape[1]):
if circle_grad[y][x] > 0:
circle_grad[y][x] *= 20
# get it between 0 and 1
max_grad = np.max(circle_grad)
circle_grad = circle_grad / max_grad
with io.open("grad.json", "w") as file:
file.write(json.dumps({ "grad": circle_grad.tolist()}))
toimage(circle_grad).show()
world_noise = np.zeros_like(world)
for i in range(shape[0]):
for j in range(shape[1]):
world_noise[i][j] = (world[i][j] * circle_grad[i][j])
if world_noise[i][j] > 0:
world_noise[i][j] *= 20
# get it between 0 and 1
max_grad = np.max(world_noise)
world_noise = world_noise / max_grad
island_world_grad = add_color2(world_noise)
toimage(island_world_grad).show() |
the-stack_0_3156 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
try:
import astropy_helpers
except ImportError:
# Building from inside the docs/ directory?
if os.path.basename(os.getcwd()) == 'docs':
a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
if os.path.isdir(a_h_path):
sys.path.insert(1, a_h_path)
# Load all of the global Astropy configuration
from astropy_helpers.sphinx.conf import *
# Get configuration information from setup.cfg
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
del intersphinx_mapping['scipy']
del intersphinx_mapping['h5py']
intersphinx_mapping['healpy'] = ('http://healpy.readthedocs.io/en/latest/', None)
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
# html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
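# For example (hypothetical override; when left commented out, the default
# 'bootstrap-astropy' theme shipped with astropy-helpers is used):
# html_theme_path = ['_themes']
# html_theme = 'alabaster'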
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
## -- Options for the edit_on_github extension ----------------------------------------
if eval(setup_cfg.get('edit_on_github')):
extensions += ['astropy_helpers.sphinx.ext.edit_on_github']
versionmod = __import__(setup_cfg['package_name'] + '.version')
edit_on_github_project = setup_cfg['github_project']
if versionmod.version.release:
edit_on_github_branch = "v" + versionmod.version.version
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
|
the-stack_0_3158 | """This program unit tests the command-line processing capabilities of the
drake_cc_googletest bazel macro, by running
`bazel-bin/drake/common/drake_cc_googletest_main_test`
with a variety of command-line flags.
"""
import re
import subprocess
import os
import sys
import unittest
class TestGtestMain(unittest.TestCase):
def setUp(self):
self._main_exe, = sys.argv[1:]
self.assertTrue(
os.path.exists(self._main_exe),
"Could not find " + self._main_exe)
def _check_call(self, args, expected_returncode=0):
"""Run _main_exe with the given args; return output.
"""
try:
output = subprocess.check_output(
[self._main_exe] + args,
stderr=subprocess.STDOUT)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.output
returncode = e.returncode
self.assertEqual(
returncode, expected_returncode,
"Expected returncode %r from %r but got %r with output %r" % (
expected_returncode, args, returncode, output))
return output.decode('utf8')
def test_pass(self):
# The device under test should pass when -magic_number=1.0 is present.
self._check_call(["-magic_number=1.0"], expected_returncode=0)
def test_no_arguments(self):
# The device under test should fail when -magic_number=1.0 is missing.
output = self._check_call([], expected_returncode=1)
self.assertTrue("Expected equality of these values:\n"
" FLAGS_magic_number" in output)
def test_help(self):
# The help string should mention all options. Just spot-check for one
# option from each expected contributor.
output = self._check_call([
"--help",
], expected_returncode=1)
self.assertGreater(len(output), 1000)
self.assertTrue("Using drake_cc_googletest_main" in output)
self.assertTrue("-gtest_list_tests" in output)
self.assertTrue("-spdlog_level" in output)
self.assertTrue("-magic_number" in output)
def test_logging(self):
# The spdlog flags should be able to enable debug logging.
# By default, there is no debug log.
log_message = "[debug] Cross your fingers for the magic_number 1"
args = ["-magic_number=1.0"]
output = self._check_call(args, expected_returncode=0)
self.assertFalse(log_message in output, output)
# Once enabled, we see a debug log.
args.append("-spdlog_level=debug")
output = self._check_call(args, expected_returncode=0)
self.assertTrue(log_message in output, output)
|
the-stack_0_3160 | # coding: utf-8
from __future__ import unicode_literals
from six import PY2
class BoxException(Exception):
"""
Base class exception for all errors raised from the SDK.
"""
def __str__(self):
# pylint:disable=no-member
# <https://github.com/box/box-python-sdk/issues/117>
return self.__unicode__().encode('utf-8') if PY2 else self.__unicode__()
class BoxNetworkException(BoxException):
"""
Exception raised from the network layer.
"""
pass
class BoxAPIException(BoxException):
"""
Exception raised from the box session layer.
"""
def __init__(self, status, code=None, message=None, request_id=None, headers=None, url=None, method=None, context_info=None):
"""
:param status:
HTTP status code of the failed response
:type status:
`int`
:param code:
The 'code' field of the failed response
:type code:
`unicode`
:param message:
A message to associate with the exception, e.g. 'message' field of the json in the failed response
:type message:
`unicode`
:param request_id:
The 'request_id' field of the json in the failed response
:type request_id:
`unicode`
:param headers:
The HTTP headers in the failed response
:type headers:
`dict`
:param url:
The url which raised the exception
:type url:
`unicode`
:param method:
The HTTP verb used to make the request.
:type method:
`unicode`
:param context_info:
The context_info returned in the failed response.
:type context_info:
`dict`
"""
super(BoxAPIException, self).__init__()
self._status = status
self._code = code
self._message = message
self._request_id = request_id
self._headers = headers
self._url = url
self._method = method
self._context_info = context_info
def __unicode__(self):
return '\nMessage: {0}\nStatus: {1}\nCode: {2}\nRequest id: {3}\nHeaders: {4}\nURL: {5}\nMethod: {6}\nContext info: {7}'.format(
self._message,
self._status,
self._code,
self._request_id,
self._headers,
self._url,
self._method,
self._context_info,
)
@property
def status(self):
"""
The status code of the network response that is responsible for the exception.
:rtype: `int`
"""
return self._status
@property
def code(self):
"""
        The 'code' field of the failed response, i.e. the explanation of its status code.
        :rtype: `unicode`
"""
return self._code
@property
    def message(self):
        """The 'message' field of the failed response."""
        return self._message
@property
def request_id(self):
"""
The id the network request that is responsible for the exception.
:rtype: `unicode`
"""
return self._request_id
@property
def url(self):
"""
The URL of the network request that is responsible for the exception.
:rtype: `unicode`
"""
return self._url
@property
def method(self):
"""
The HTTP verb of the request that is responsible for the exception.
:rtype: `unicode`
"""
return self._method
@property
def context_info(self):
"""
The context_info returned in the failed response.
:rtype: `dict`
"""
return self._context_info
class BoxOAuthException(BoxException):
"""
Exception raised during auth.
"""
def __init__(self, status, message=None, url=None, method=None):
"""
:param status:
HTTP status code of the auth response
:type status:
`int`
:param message:
A message to associate with the exception, e.g. HTTP content of the auth response
:type message:
`unicode`
:param url:
The url which raised the exception
:type url:
`unicode`
:param method:
The HTTP verb used to make the request.
:type method:
`unicode`
"""
super(BoxOAuthException, self).__init__()
self._status = status
self._message = message
self._url = url
self._method = method
def __unicode__(self):
return '\nMessage: {0}\nStatus: {1}\nURL: {2}\nMethod: {3}'.format(
self._message,
self._status,
self._url,
self._method,
)
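# Hedged usage sketch (assumes a configured boxsdk `Client` instance named
# `client`; shown for illustration only, not part of this module):
#     try:
#         client.folder('0').get()
#     except BoxAPIException as exc:
#         print(exc.status, exc.code, exc.request_id)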
|
the-stack_0_3161 | import os
import time
import cv2
import imageio
from tensorboardX import SummaryWriter
from NeRF import *
from load_llff import load_llff_data
from run_nerf_helpers import *
from metrics import compute_img_metric
# np.random.seed(0)
DEBUG = False
def config_parser():
import configargparse
parser = configargparse.ArgumentParser()
parser.add_argument('--config', is_config_file=True,
help='config file path')
parser.add_argument("--expname", type=str,
help='experiment name')
parser.add_argument("--basedir", type=str, default='./logs/', required=True,
help='where to store ckpts and logs')
parser.add_argument("--datadir", type=str, required=True,
help='input data directory')
parser.add_argument("--datadownsample", type=float, default=-1,
help='if downsample > 0, means downsample the image to scale=datadownsample')
parser.add_argument("--tbdir", type=str, required=True,
help="tensorboard log directory")
parser.add_argument("--num_gpu", type=int, default=1,
help=">1 will use DataParallel")
parser.add_argument("--torch_hub_dir", type=str, default='',
help=">1 will use DataParallel")
# training options
parser.add_argument("--netdepth", type=int, default=8,
help='layers in network')
parser.add_argument("--netwidth", type=int, default=256,
help='channels per layer')
parser.add_argument("--netdepth_fine", type=int, default=8,
help='layers in fine network')
parser.add_argument("--netwidth_fine", type=int, default=256,
help='channels per layer in fine network')
parser.add_argument("--N_rand", type=int, default=32 * 32 * 4,
help='batch size (number of random rays per gradient step)')
parser.add_argument("--lrate", type=float, default=5e-4,
help='learning rate')
parser.add_argument("--lrate_decay", type=int, default=250,
help='exponential learning rate decay (in 1000 steps)')
# generate N_rand # of rays, divide into chunk # of batch
# then generate chunk * N_samples # of points, divide into netchunk # of batch
parser.add_argument("--chunk", type=int, default=1024 * 32,
help='number of rays processed in parallel, decrease if running out of memory')
parser.add_argument("--netchunk", type=int, default=1024 * 64,
help='number of pts sent through network in parallel, decrease if running out of memory')
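    # e.g. with the defaults N_rand=4096, chunk=32768, N_samples=64 and netchunk=65536,
    # all 4096 rays fit into a single chunk and the 4096 * 64 = 262144 sampled points
    # are evaluated in four sub-batches of 65536 network queries.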
parser.add_argument("--no_reload", action='store_true',
help='do not reload weights from saved ckpt')
parser.add_argument("--ft_path", type=str, default=None,
help='specific weights npy file to reload for coarse network')
# rendering options
parser.add_argument("--N_iters", type=int, default=50000,
help='number of iteration')
parser.add_argument("--N_samples", type=int, default=64,
help='number of coarse samples per ray')
parser.add_argument("--N_importance", type=int, default=0,
help='number of additional fine samples per ray')
parser.add_argument("--perturb", type=float, default=1.,
help='set to 0. for no jitter, 1. for jitter')
parser.add_argument("--use_viewdirs", action='store_true',
help='use full 5D input instead of 3D')
parser.add_argument("--i_embed", type=int, default=0,
help='set 0 for default positional encoding, -1 for none')
parser.add_argument("--multires", type=int, default=10,
help='log2 of max freq for positional encoding (3D location)')
parser.add_argument("--multires_views", type=int, default=4,
help='log2 of max freq for positional encoding (2D direction)')
parser.add_argument("--raw_noise_std", type=float, default=0.,
help='std dev of noise added to regularize sigma_a output, 1e0 recommended')
parser.add_argument("--rgb_activate", type=str, default='sigmoid',
help='activate function for rgb output, choose among "none", "sigmoid"')
parser.add_argument("--sigma_activate", type=str, default='relu',
help='activate function for sigma output, choose among "relu", "softplue"')
# ===============================
# Kernel optimizing
# ===============================
parser.add_argument("--kernel_type", type=str, default='kernel',
help='choose among <none>, <itsampling>, <sparsekernel>')
parser.add_argument("--kernel_isglobal", action='store_true',
help='if specified, the canonical kernel position is global')
parser.add_argument("--kernel_start_iter", type=int, default=0,
help='start training kernel after # iteration')
parser.add_argument("--kernel_ptnum", type=int, default=5,
help='the number of sparse locations in the kernels '
                             'that are involved in computing the final color of a ray')
parser.add_argument("--kernel_random_hwindow", type=float, default=0.25,
help='randomly displace the predicted ray position')
parser.add_argument("--kernel_img_embed", type=int, default=32,
help='the dim of image laten code')
parser.add_argument("--kernel_rand_dim", type=int, default=2,
help='dimensions of input random number which uniformly sample from (0, 1)')
parser.add_argument("--kernel_rand_embed", type=int, default=3,
help='embed frequency of input kernel coordinate')
parser.add_argument("--kernel_rand_mode", type=str, default='float',
help='<float>, <<int#, such as<int5>>>, <fix>')
parser.add_argument("--kernel_random_mode", type=str, default='input',
help='<input>, <output>')
parser.add_argument("--kernel_spatial_embed", type=int, default=0,
help='the dim of spatial coordinate embedding')
parser.add_argument("--kernel_depth_embed", type=int, default=0,
help='the dim of depth coordinate embedding')
parser.add_argument("--kernel_hwindow", type=int, default=10,
help='the max window of the kernel (sparse location will lie inside the window')
parser.add_argument("--kernel_pattern_init_radius", type=float, default=0.1,
help='the initialize radius of init pattern')
parser.add_argument("--kernel_num_hidden", type=int, default=3,
help='the number of hidden layer')
parser.add_argument("--kernel_num_wide", type=int, default=64,
help='the wide of hidden layer')
parser.add_argument("--kernel_shortcut", action='store_true',
help='if yes, add a short cut to the network')
parser.add_argument("--align_start_iter", type=int, default=0,
help='start iteration of the align loss')
parser.add_argument("--align_end_iter", type=int, default=1e10,
help='end iteration of the align loss')
parser.add_argument("--kernel_align_weight", type=float, default=0,
help='align term weight')
parser.add_argument("--prior_start_iter", type=int, default=0,
help='start iteration of the prior loss')
parser.add_argument("--prior_end_iter", type=int, default=1e10,
help='end iteration of the prior loss')
parser.add_argument("--kernel_prior_weight", type=float, default=0,
help='weight of prior loss (regularization)')
parser.add_argument("--sparsity_start_iter", type=int, default=0,
help='start iteration of the sparsity loss')
parser.add_argument("--sparsity_end_iter", type=int, default=1e10,
help='end iteration of the sparsity loss')
parser.add_argument("--kernel_sparsity_type", type=str, default='tv',
help='type of sparse gradient loss', choices=['tv', 'normalize', 'robust'])
parser.add_argument("--kernel_sparsity_weight", type=float, default=0,
help='weight of sparsity loss')
parser.add_argument("--kernel_spatialvariant_trans", action='store_true',
help='if true, optimize spatial variant 3D translation of each sampling point')
parser.add_argument("--kernel_global_trans", action='store_true',
help='if true, optimize global 3D translation of each sampling point')
parser.add_argument("--tone_mapping_type", type=str, default='none',
help='the tone mapping of linear to LDR color space, <none>, <gamma>, <learn>')
####### render option, will not effect training ########
parser.add_argument("--render_only", action='store_true',
help='do not optimize, reload weights and render out render_poses path')
parser.add_argument("--render_test", action='store_true',
help='render the test set instead of render_poses path')
parser.add_argument("--render_multipoints", action='store_true',
help='render sub image that reconstruct the blur image')
parser.add_argument("--render_rmnearplane", type=int, default=0,
help='when render, set the density of nearest plane to 0')
parser.add_argument("--render_focuspoint_scale", type=float, default=1.,
help='scale the focal point when render')
parser.add_argument("--render_radius_scale", type=float, default=1.,
help='scale the radius of the camera path')
parser.add_argument("--render_factor", type=int, default=0,
help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')
parser.add_argument("--render_epi", action='store_true',
help='render the video with epi path')
## llff flags
parser.add_argument("--factor", type=int, default=None,
help='downsample factor for LLFF images')
parser.add_argument("--no_ndc", action='store_true',
help='do not use normalized device coordinates (set for non-forward facing scenes)')
parser.add_argument("--lindisp", action='store_true',
help='sampling linearly in disparity rather than depth')
parser.add_argument("--spherify", action='store_true',
help='set for spherical 360 scenes')
parser.add_argument("--llffhold", type=int, default=8,
help='will take every 1/N images as LLFF test set, paper uses 8')
# ######### Unused params from the original ###########
parser.add_argument("--precrop_iters", type=int, default=0,
help='number of steps to train on central crops')
parser.add_argument("--precrop_frac", type=float,
default=.5, help='fraction of img taken for central crops')
# dataset options
parser.add_argument("--dataset_type", type=str, default='llff',
help='options: llff / blender / deepvoxels')
parser.add_argument("--testskip", type=int, default=8,
help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
## deepvoxels flags
parser.add_argument("--shape", type=str, default='greek',
help='options : armchair / cube / greek / vase')
## blender flags
parser.add_argument("--white_bkgd", action='store_true',
help='set to render synthetic data on a white bkgd (always use for dvoxels)')
parser.add_argument("--half_res", action='store_true',
help='load blender synthetic data at 400x400 instead of 800x800')
################# logging/saving options ##################
parser.add_argument("--i_print", type=int, default=200,
                        help='frequency of console printout and metric logging')
parser.add_argument("--i_tensorboard", type=int, default=200,
help='frequency of tensorboard image logging')
parser.add_argument("--i_weights", type=int, default=20000,
help='frequency of weight ckpt saving')
parser.add_argument("--i_testset", type=int, default=20000,
help='frequency of testset saving')
parser.add_argument("--i_video", type=int, default=20000,
help='frequency of render_poses video saving')
return parser
def train():
parser = config_parser()
args = parser.parse_args()
if len(args.torch_hub_dir) > 0:
print(f"Change torch hub cache to {args.torch_hub_dir}")
torch.hub.set_dir(args.torch_hub_dir)
# Load data
K = None
if args.dataset_type == 'llff':
images, poses, bds, render_poses, i_test = load_llff_data(args, args.datadir, args.factor,
recenter=True, bd_factor=.75,
spherify=args.spherify,
path_epi=args.render_epi)
hwf = poses[0, :3, -1]
poses = poses[:, :3, :4]
print('Loaded llff', images.shape, render_poses.shape, hwf, args.datadir)
if not isinstance(i_test, list):
i_test = [i_test]
print('LLFF holdout,', args.llffhold)
i_test = np.arange(images.shape[0])[::args.llffhold]
i_val = i_test
i_train = np.array([i for i in np.arange(int(images.shape[0])) if
(i not in i_test and i not in i_val)])
print('DEFINING BOUNDS')
if args.no_ndc:
near = np.min(bds) * 0.9
far = np.max(bds) * 1.0
else:
near = 0.
far = 1.
print('NEAR FAR', near, far)
else:
print('Unknown dataset type', args.dataset_type, 'exiting')
return
imagesf = images
images = (images * 255).astype(np.uint8)
images_idx = np.arange(0, len(images))
# Cast intrinsics to right types
H, W, focal = hwf
H, W = int(H), int(W)
hwf = [H, W, focal]
if K is None:
K = np.array([
[focal, 0, 0.5 * W],
[0, focal, 0.5 * H],
[0, 0, 1]
])
if args.render_test:
render_poses = np.array(poses)
# Create log dir and copy the config file
basedir = args.basedir
tensorboardbase = args.tbdir
expname = args.expname
test_metric_file = os.path.join(basedir, expname, 'test_metrics.txt')
os.makedirs(os.path.join(basedir, expname), exist_ok=True)
os.makedirs(os.path.join(tensorboardbase, expname), exist_ok=True)
tensorboard = SummaryWriter(os.path.join(tensorboardbase, expname))
f = os.path.join(basedir, expname, 'args.txt')
with open(f, 'w') as file:
for arg in sorted(vars(args)):
attr = getattr(args, arg)
file.write('{} = {}\n'.format(arg, attr))
if args.config is not None and not args.render_only:
f = os.path.join(basedir, expname, 'config.txt')
with open(f, 'w') as file:
file.write(open(args.config, 'r').read())
with open(test_metric_file, 'a') as file:
file.write(open(args.config, 'r').read())
file.write("\n============================\n"
"||\n"
"\\/\n")
# The DSK module
if args.kernel_type == 'deformablesparsekernel':
kernelnet = DSKnet(len(images), torch.tensor(poses[:, :3, :4]),
args.kernel_ptnum, args.kernel_hwindow,
random_hwindow=args.kernel_random_hwindow, in_embed=args.kernel_rand_embed,
random_mode=args.kernel_random_mode,
img_embed=args.kernel_img_embed,
spatial_embed=args.kernel_spatial_embed,
depth_embed=args.kernel_depth_embed,
num_hidden=args.kernel_num_hidden,
num_wide=args.kernel_num_wide,
short_cut=args.kernel_shortcut,
pattern_init_radius=args.kernel_pattern_init_radius,
isglobal=args.kernel_isglobal,
optim_trans=args.kernel_global_trans,
optim_spatialvariant_trans=args.kernel_spatialvariant_trans)
elif args.kernel_type == 'none':
kernelnet = None
else:
raise RuntimeError(f"kernel_type {args.kernel_type} not recognized")
# Create nerf model
nerf = NeRFAll(args, kernelnet)
nerf = nn.DataParallel(nerf, list(range(args.num_gpu)))
optim_params = nerf.parameters()
optimizer = torch.optim.Adam(params=optim_params,
lr=args.lrate,
betas=(0.9, 0.999))
start = 0
# Load Checkpoints
if args.ft_path is not None and args.ft_path != 'None':
ckpts = [args.ft_path]
else:
ckpts = [os.path.join(basedir, expname, f) for f in sorted(os.listdir(os.path.join(basedir, expname))) if
'.tar' in f]
print('Found ckpts', ckpts)
if len(ckpts) > 0 and not args.no_reload:
ckpt_path = ckpts[-1]
print('Reloading from', ckpt_path)
ckpt = torch.load(ckpt_path)
start = ckpt['global_step']
optimizer.load_state_dict(ckpt['optimizer_state_dict'])
# Load model
smart_load_state_dict(nerf, ckpt)
# figuring out the train/test configuration
render_kwargs_train = {
'perturb': args.perturb,
'N_importance': args.N_importance,
'N_samples': args.N_samples,
'use_viewdirs': args.use_viewdirs,
'white_bkgd': args.white_bkgd,
'raw_noise_std': args.raw_noise_std,
}
# NDC only good for LLFF-style forward facing data
if args.no_ndc: # args.dataset_type != 'llff' or
print('Not ndc!')
render_kwargs_train['ndc'] = False
render_kwargs_train['lindisp'] = args.lindisp
render_kwargs_test = {k: render_kwargs_train[k] for k in render_kwargs_train}
render_kwargs_test['perturb'] = False
render_kwargs_test['raw_noise_std'] = 0.
# visualize_motionposes(H, W, K, nerf, 2)
# visualize_kernel(H, W, K, nerf, 5)
# visualize_itsample(H, W, K, nerf)
# visualize_kmap(H, W, K, nerf, img_idx=1)
bds_dict = {
'near': near,
'far': far,
}
render_kwargs_train.update(bds_dict)
render_kwargs_test.update(bds_dict)
global_step = start
# Move testing data to GPU
render_poses = torch.tensor(render_poses[:, :3, :4]).cuda()
nerf = nerf.cuda()
# Short circuit if only rendering out from trained model
if args.render_only:
print('RENDER ONLY')
with torch.no_grad():
testsavedir = os.path.join(basedir, expname,
f"renderonly"
f"_{'test' if args.render_test else 'path'}"
f"_{start:06d}")
os.makedirs(testsavedir, exist_ok=True)
print('test poses shape', render_poses.shape)
dummy_num = ((len(poses) - 1) // args.num_gpu + 1) * args.num_gpu - len(poses)
dummy_poses = torch.eye(3, 4).unsqueeze(0).expand(dummy_num, 3, 4).type_as(render_poses)
print(f"Append {dummy_num} # of poses to fill all the GPUs")
nerf.eval()
rgbshdr, disps = nerf(
hwf[0], hwf[1], K, args.chunk,
poses=torch.cat([render_poses, dummy_poses], dim=0),
render_kwargs=render_kwargs_test,
render_factor=args.render_factor,
)
rgbshdr = rgbshdr[:len(rgbshdr) - dummy_num]
disps = (1. - disps)
disps = disps[:len(disps) - dummy_num].cpu().numpy()
rgbs = rgbshdr
rgbs = to8b(rgbs.cpu().numpy())
disps = to8b(disps / disps.max())
if args.render_test:
for rgb_idx, rgb8 in enumerate(rgbs):
imageio.imwrite(os.path.join(testsavedir, f'{rgb_idx:03d}.png'), rgb8)
imageio.imwrite(os.path.join(testsavedir, f'{rgb_idx:03d}_disp.png'), disps[rgb_idx])
else:
prefix = 'epi_' if args.render_epi else ''
imageio.mimwrite(os.path.join(testsavedir, f'{prefix}video.mp4'), rgbs, fps=30, quality=9)
imageio.mimwrite(os.path.join(testsavedir, f'{prefix}video_disp.mp4'), disps, fps=30, quality=9)
if args.render_test and args.render_multipoints:
for pti in range(args.kernel_ptnum):
nerf.eval()
poses_num = len(poses) + dummy_num
imgidx = torch.arange(poses_num, dtype=torch.long).to(render_poses.device).reshape(poses_num, 1)
rgbs, weights = nerf(
hwf[0], hwf[1], K, args.chunk,
poses=torch.cat([render_poses, dummy_poses], dim=0),
render_kwargs=render_kwargs_test,
render_factor=args.render_factor,
render_point=pti,
images_indices=imgidx
)
rgbs = rgbs[:len(rgbs) - dummy_num]
weights = weights[:len(weights) - dummy_num]
rgbs = to8b(rgbs.cpu().numpy())
weights = to8b(weights.cpu().numpy())
for rgb_idx, rgb8 in enumerate(rgbs):
imageio.imwrite(os.path.join(testsavedir, f'{rgb_idx:03d}_pt{pti}.png'), rgb8)
imageio.imwrite(os.path.join(testsavedir, f'w_{rgb_idx:03d}_pt{pti}.png'), weights[rgb_idx])
return
# ============================================
# Prepare ray dataset if batching random rays
# ============================================
N_rand = args.N_rand
train_datas = {}
# if downsample, downsample the images
if args.datadownsample > 0:
images_train = np.stack([cv2.resize(img_, None, None,
1 / args.datadownsample, 1 / args.datadownsample,
cv2.INTER_AREA) for img_ in imagesf], axis=0)
else:
images_train = imagesf
num_img, hei, wid, _ = images_train.shape
print(f"train on image sequence of len = {num_img}, {wid}x{hei}")
k_train = np.array([K[0, 0] * wid / W, 0, K[0, 2] * wid / W,
0, K[1, 1] * hei / H, K[1, 2] * hei / H,
0, 0, 1]).reshape(3, 3).astype(K.dtype)
# For random ray batching
print('get rays')
rays = np.stack([get_rays_np(hei, wid, k_train, p) for p in poses[:, :3, :4]], 0) # [N, ro+rd, H, W, 3]
rays = np.transpose(rays, [0, 2, 3, 1, 4])
train_datas['rays'] = rays[i_train].reshape(-1, 2, 3)
xs, ys = np.meshgrid(np.arange(wid, dtype=np.float32), np.arange(hei, dtype=np.float32), indexing='xy')
xs = np.tile((xs[None, ...] + HALF_PIX) * W / wid, [num_img, 1, 1])
ys = np.tile((ys[None, ...] + HALF_PIX) * H / hei, [num_img, 1, 1])
train_datas['rays_x'], train_datas['rays_y'] = xs[i_train].reshape(-1, 1), ys[i_train].reshape(-1, 1)
train_datas['rgbsf'] = images_train[i_train].reshape(-1, 3)
images_idx_tile = images_idx.reshape((num_img, 1, 1))
images_idx_tile = np.tile(images_idx_tile, [1, hei, wid])
train_datas['images_idx'] = images_idx_tile[i_train].reshape(-1, 1).astype(np.int64)
print('shuffle rays')
shuffle_idx = np.random.permutation(len(train_datas['rays']))
train_datas = {k: v[shuffle_idx] for k, v in train_datas.items()}
print('done')
i_batch = 0
# Move training data to GPU
images = torch.tensor(images).cuda()
imagesf = torch.tensor(imagesf).cuda()
poses = torch.tensor(poses).cuda()
train_datas = {k: torch.tensor(v).cuda() for k, v in train_datas.items()}
N_iters = args.N_iters + 1
print('Begin')
print('TRAIN views are', i_train)
print('TEST views are', i_test)
print('VAL views are', i_val)
# Summary writers
# writer = SummaryWriter(os.path.join(basedir, 'summaries', expname))
start = start + 1
for i in range(start, N_iters):
time0 = time.time()
# Sample random ray batch
iter_data = {k: v[i_batch:i_batch + N_rand] for k, v in train_datas.items()}
batch_rays = iter_data.pop('rays').permute(0, 2, 1)
i_batch += N_rand
if i_batch >= len(train_datas['rays']):
print("Shuffle data after an epoch!")
shuffle_idx = np.random.permutation(len(train_datas['rays']))
train_datas = {k: v[shuffle_idx] for k, v in train_datas.items()}
i_batch = 0
##### Core optimization loop #####
nerf.train()
if i == args.kernel_start_iter:
torch.cuda.empty_cache()
rgb, rgb0, extra_loss = nerf(H, W, K, chunk=args.chunk,
rays=batch_rays, rays_info=iter_data,
retraw=True, force_naive=i < args.kernel_start_iter,
**render_kwargs_train)
# Compute Losses
# =====================
target_rgb = iter_data['rgbsf'].squeeze(-2)
img_loss = img2mse(rgb, target_rgb)
loss = img_loss
psnr = mse2psnr(img_loss)
img_loss0 = img2mse(rgb0, target_rgb)
loss = loss + img_loss0
extra_loss = {k: torch.mean(v) for k, v in extra_loss.items()}
if len(extra_loss) > 0:
for k, v in extra_loss.items():
if f"kernel_{k}_weight" in vars(args).keys():
if vars(args)[f"{k}_start_iter"] <= i <= vars(args)[f"{k}_end_iter"]:
loss = loss + v * vars(args)[f"kernel_{k}_weight"]
optimizer.zero_grad()
loss.backward()
optimizer.step()
# NOTE: IMPORTANT!
### update learning rate ###
decay_rate = 0.1
decay_steps = args.lrate_decay * 1000
new_lrate = args.lrate * (decay_rate ** (global_step / decay_steps))
for param_group in optimizer.param_groups:
param_group['lr'] = new_lrate
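        # e.g. with the defaults lrate=5e-4 and lrate_decay=250: decay_steps = 250000,
        # so the learning rate halves roughly every 75k steps and reaches 5e-5 at step 250000.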
################################
# dt = time.time() - time0
# print(f"Step: {global_step}, Loss: {loss}, Time: {dt}")
##### end #####
# Rest is logging
if i % args.i_weights == 0:
path = os.path.join(basedir, expname, '{:06d}.tar'.format(i))
torch.save({
'global_step': global_step,
'network_state_dict': nerf.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, path)
print('Saved checkpoints at', path)
if i % args.i_video == 0 and i > 0:
# Turn on testing mode
with torch.no_grad():
nerf.eval()
rgbs, disps = nerf(H, W, K, args.chunk, poses=render_poses, render_kwargs=render_kwargs_test)
print('Done, saving', rgbs.shape, disps.shape)
moviebase = os.path.join(basedir, expname, '{}_spiral_{:06d}_'.format(expname, i))
rgbs = (rgbs - rgbs.min()) / (rgbs.max() - rgbs.min())
rgbs = rgbs.cpu().numpy()
disps = disps.cpu().numpy()
# disps_max_idx = int(disps.size * 0.9)
# disps_max = disps.reshape(-1)[np.argpartition(disps.reshape(-1), disps_max_idx)[disps_max_idx]]
imageio.mimwrite(moviebase + 'rgb.mp4', to8b(rgbs), fps=30, quality=8)
imageio.mimwrite(moviebase + 'disp.mp4', to8b(disps / disps.max()), fps=30, quality=8)
# if args.use_viewdirs:
# render_kwargs_test['c2w_staticcam'] = render_poses[0][:3,:4]
# with torch.no_grad():
# rgbs_still, _ = render_path(render_poses, hwf, args.chunk, render_kwargs_test)
# render_kwargs_test['c2w_staticcam'] = None
# imageio.mimwrite(moviebase + 'rgb_still.mp4', to8b(rgbs_still), fps=30, quality=8)
if i % args.i_testset == 0 and i > 0:
testsavedir = os.path.join(basedir, expname, 'testset_{:06d}'.format(i))
os.makedirs(testsavedir, exist_ok=True)
print('test poses shape', poses.shape)
dummy_num = ((len(poses) - 1) // args.num_gpu + 1) * args.num_gpu - len(poses)
dummy_poses = torch.eye(3, 4).unsqueeze(0).expand(dummy_num, 3, 4).type_as(render_poses)
print(f"Append {dummy_num} # of poses to fill all the GPUs")
with torch.no_grad():
nerf.eval()
rgbs, _ = nerf(H, W, K, args.chunk, poses=torch.cat([poses, dummy_poses], dim=0).cuda(),
render_kwargs=render_kwargs_test)
rgbs = rgbs[:len(rgbs) - dummy_num]
rgbs_save = rgbs # (rgbs - rgbs.min()) / (rgbs.max() - rgbs.min())
# saving
for rgb_idx, rgb in enumerate(rgbs_save):
rgb8 = to8b(rgb.cpu().numpy())
filename = os.path.join(testsavedir, f'{rgb_idx:03d}.png')
imageio.imwrite(filename, rgb8)
# evaluation
rgbs = rgbs[i_test]
target_rgb_ldr = imagesf[i_test]
test_mse = compute_img_metric(rgbs, target_rgb_ldr, 'mse')
test_psnr = compute_img_metric(rgbs, target_rgb_ldr, 'psnr')
test_ssim = compute_img_metric(rgbs, target_rgb_ldr, 'ssim')
test_lpips = compute_img_metric(rgbs, target_rgb_ldr, 'lpips')
if isinstance(test_lpips, torch.Tensor):
test_lpips = test_lpips.item()
tensorboard.add_scalar("Test MSE", test_mse, global_step)
tensorboard.add_scalar("Test PSNR", test_psnr, global_step)
tensorboard.add_scalar("Test SSIM", test_ssim, global_step)
tensorboard.add_scalar("Test LPIPS", test_lpips, global_step)
with open(test_metric_file, 'a') as outfile:
outfile.write(f"iter{i}/globalstep{global_step}: MSE:{test_mse:.8f} PSNR:{test_psnr:.8f}"
f" SSIM:{test_ssim:.8f} LPIPS:{test_lpips:.8f}\n")
print('Saved test set')
if i % args.i_tensorboard == 0:
tensorboard.add_scalar("Loss", loss.item(), global_step)
tensorboard.add_scalar("PSNR", psnr.item(), global_step)
for k, v in extra_loss.items():
tensorboard.add_scalar(k, v.item(), global_step)
if i % args.i_print == 0:
print(f"[TRAIN] Iter: {i} Loss: {loss.item()} PSNR: {psnr.item()}")
global_step += 1
if __name__ == '__main__':
torch.set_default_tensor_type('torch.cuda.FloatTensor')
train()
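# Hedged usage sketch (script and config names are assumptions; --basedir,
# --datadir and --tbdir are required arguments, typically set in the config file):
#   python run_nerf.py --config configs/demo_blurball.txt
#   python run_nerf.py --config configs/demo_blurball.txt --render_only --render_test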
|