code | docs
---|---
def parse_duration(duration):
duration = str(duration).upper().strip()
elements = ELEMENTS.copy()
for pattern in (SIMPLE_DURATION, COMBINED_DURATION):
if pattern.match(duration):
found = pattern.match(duration).groupdict()
del found['time']
elements.update(dict((k, int(v or 0))
for k, v
in found.items()))
return datetime.timedelta(days=(elements['days'] +
_months_to_days(elements['months']) +
_years_to_days(elements['years'])),
hours=elements['hours'],
minutes=elements['minutes'],
seconds=elements['seconds'])
raise ParseError() | Attempts to parse an ISO 8601 formatted ``duration``.
Returns a ``datetime.timedelta`` object. |
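For illustration, here is a minimal, self-contained sketch of the regex-driven parsing the function above relies on. The pattern and the fixed 30/365-day month/year factors are assumptions standing in for the module's real ELEMENTS, SIMPLE_DURATION, COMBINED_DURATION and helper functions.

import re
import datetime

# Assumed stand-in for the module's COMBINED_DURATION pattern.
COMBINED_DURATION = re.compile(
    r"P(?:(?P<years>\d+)Y)?(?:(?P<months>\d+)M)?(?:(?P<days>\d+)D)?"
    r"(?P<time>T(?:(?P<hours>\d+)H)?(?:(?P<minutes>\d+)M)?(?:(?P<seconds>\d+)S)?)?$"
)

found = COMBINED_DURATION.match("P1DT2H30M").groupdict()
found.pop("time", None)  # mirrors `del found['time']` above
parts = {k: int(v or 0) for k, v in found.items()}
delta = datetime.timedelta(
    days=parts["days"] + 30 * parts["months"] + 365 * parts["years"],
    hours=parts["hours"], minutes=parts["minutes"], seconds=parts["seconds"])
print(delta)  # 1 day, 2:30:00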
def skin_details(skin_id, lang="en"):
params = {"skin_id": skin_id, "lang": lang}
cache_name = "skin_details.%(skin_id)s.%(lang)s.json" % params
return get_cached("skin_details.json", cache_name, params=params) | This resource returns details about a single skin.
:param skin_id: The skin to query for.
:param lang: The language to display the texts in.
The response is an object with at least the following properties. Note that
the availability of some properties depends on the type of item the skin
applies to.
skin_id (number):
The skin id.
name (string):
The name of the skin.
type (string):
The type of item the skin applies to. One of ``Armor``, ``Back`` or
``Weapon``.
flags (list):
Skin flags. Currently known skin flags are ``ShowInWardrobe``,
``HideIfLocked`` and ``NoCost``.
restrictions (list):
Race restrictions: ``Asura``, ``Charr``, ``Human``, ``Norn`` and
``Sylvari``.
icon_file_id (string):
The icon file id to be used with the render service.
icon_file_signature (string):
The icon file signature to be used with the render service. |
def bubble_to_dot(bblfile:str, dotfile:str=None, render:bool=False,
oriented:bool=False):
tree = BubbleTree.from_bubble_file(bblfile, oriented=bool(oriented))
return tree_to_dot(tree, dotfile, render=render) | Write in dotfile a graph equivalent to the one depicted in the bubble file |
def bubble_to_gexf(bblfile:str, gexffile:str=None, oriented:bool=False):
tree = BubbleTree.from_bubble_file(bblfile, oriented=bool(oriented))
gexf_converter.tree_to_file(tree, gexffile)
return gexffile | Write in gexffile a graph equivalent to the one depicted in the bubble file |
def bubble_to_js(bblfile:str, jsdir:str=None, oriented:bool=False, **style):
js_converter.bubble_to_dir(bblfile, jsdir, oriented=bool(oriented), **style)
return jsdir | Write in jsdir a graph equivalent to the one depicted in the bubble file |
def tree_to_dot(tree:BubbleTree, dotfile:str=None, render:bool=False):
graph = tree_to_graph(tree)
path = None
if dotfile: # first save the dot file.
path = graph.save(dotfile)
if render: # secondly, show it.
# The Graph object already knows the dot file location,
# so the rendered output is placed next to it.
graph.view()
return path | Write in dotfile a graph equivalent to the one depicted in the bubble file
See http://graphviz.readthedocs.io/en/latest/examples.html#cluster-py
for the graphviz API |
def fill(self, term_dict, terms):
# type: (Dict[int, Set[Type[Rule]]], Any) -> None
for i in range(len(terms)):
t = terms[i]
self._field[0][i] += term_dict[hash(t)] | Fill the first row of the structure with nonterminals directly rewritable to a terminal.
:param term_dict: Dictionary of rules directly rewritable to a terminal.
Key is the hash of a terminal, value is the set of rules with that terminal on the right side.
:param terms: Input sequence of terminals. |
def rules(self, x, y):
# type: (int, int) -> List[Type[Rule]]
return [r for r in self._field[y][x]] | Get rules at specific position in the structure.
:param x: X coordinate
:param y: Y coordinate
:return: List of rules |
def positions(self, x, y):
# type: (int, int) -> List[Tuple[Point, Point]]
return [(Point(x, v), Point(x + 1 + v, y - 1 - v)) for v in range(y)] | Get all pairs of positions that can be combined to parse the word at the specified position.
:param x: X coordinate.
:param y: Y coordinate.
:return: List of tuples with two Point instances. |
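To see what this decomposition yields, here is a tiny standalone sketch; the namedtuple stands in for the structure's Point type, which is an assumption.

from collections import namedtuple

Point = namedtuple("Point", "x y")

def positions(x, y):
    # every way to split the span at (x, y) into a left part and a right part
    return [(Point(x, v), Point(x + 1 + v, y - 1 - v)) for v in range(y)]

print(positions(0, 2))
# [(Point(x=0, y=0), Point(x=1, y=1)), (Point(x=0, y=1), Point(x=2, y=0))]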
def put(self, x, y, rules):
# type: (int, int, List[PlaceItem]) -> None
self._field[y][x] = rules | Set possible rules at specific position.
:param x: X coordinate.
:param y: Y coordinate.
:param rules: Value to set. |
def froze_it(cls):
cls._frozen = False
def frozensetattr(self, key, value):
if self._frozen and not hasattr(self, key):
raise AttributeError("Attribute '{}' of class '{}' does not exist!"
.format(key, cls.__name__))
else:
object.__setattr__(self, key, value)
def init_decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
func(self, *args, **kwargs)
self._frozen = True
return wrapper
cls.__setattr__ = frozensetattr
cls.__init__ = init_decorator(cls.__init__)
return cls | Decorator that prevents creating attributes on the object outside __init__().
This decorator must be applied to the final class (doesn't work if a
decorated class is inherited).
Yoann's answer at http://stackoverflow.com/questions/3603502 |
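A possible usage sketch for the decorator above (the class and attribute names are made up; froze_it is assumed importable from its module, which also needs `from functools import wraps`):

@froze_it
class Config:
    def __init__(self):
        self.host = "localhost"  # fine: created inside __init__
        self.port = 8080

c = Config()
c.port = 9090        # fine: the attribute already exists
try:
    c.hots = "oops"  # typo -> AttributeError instead of a silent new attribute
except AttributeError as err:
    print(err)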
def one_liner_str(self):
assert self.less_attrs is not None, "Forgot to set attrs class variable"
s_format = "{}={}"
s = "; ".join([s_format.format(x, self.__getattribute__(x)) for x in self.less_attrs])
return s | Returns a string (intended to be) shorter than str() and containing no newlines |
def to_dict(self):
ret = OrderedDict()
for attrname in self.attrs:
ret[attrname] = self.__getattribute__(attrname)
return ret | Returns OrderedDict whose keys are self.attrs |
def to_list(self):
ret = [self.__getattribute__(attrname) for attrname in self.attrs]
return ret | Returns list containing values of attributes listed in self.attrs |
def uniq(pipe):
''' this works like bash's uniq command where the generator only iterates
if the next value is not the previous '''
pipe = iter(pipe)
previous = next(pipe)
yield previous
for i in pipe:
if i is not previous:
previous = i
yield i | this works like bash's uniq command where the generator only iterates
if the next value is not the previous |
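A quick usage sketch for uniq above. Note that it compares by identity (`is not`), which happens to work for small interned ints in CPython; `!=` would be the safer general-purpose check.

print(list(uniq([1, 1, 2, 2, 2, 1])))
# [1, 2, 1]  (only consecutive repeats are collapsed)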
def chunks_generator(iterable, count_items_in_chunk):
iterator = iter(iterable)
for first in iterator: # stops when iterator is depleted
def chunk(): # construct generator for next chunk
yield first # yield element from for loop
for more in islice(iterator, count_items_in_chunk - 1):
yield more # yield more elements from the iterator
yield chunk() | Be very careful! Does not allow traversing the data twice.
:param iterable:
:param count_items_in_chunk:
:return: |
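A usage sketch for chunks_generator above. Because all chunks share the same underlying iterator, each chunk must be consumed before asking for the next one.

for chunk in chunks_generator(range(7), 3):
    print(list(chunk))
# [0, 1, 2]
# [3, 4, 5]
# [6]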
def chunks(list_, count_items_in_chunk):
for i in range(0, len(list_), count_items_in_chunk):
yield list_[i:i + count_items_in_chunk] | Split a list (l) into chunks of n elements
:param list_:
:param count_items_in_chunk:
:return: |
def pretty_json(obj):
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False) | Represent an object as a nicely formatted JSON string
:param obj:
:return: |
def decode_jwt(input_text, secure_key):
if input_text is None:
return None
encoded = (input_text.split(":")[1]).encode('utf-8')
decoded = jwt.decode(encoded, secure_key)
return decoded['sub'] | Decode a string using a key
:param input_text: the source string
:param secure_key: the secret key
:return: |
def send_request(url, method, data,
args, params, headers, cookies, timeout, is_json, verify_cert):
## Parse url args
for p in args:
url = url.replace(':' + p, str(args[p]))
try:
if data:
if is_json:
headers['Content-Type'] = 'application/json'
data = json.dumps(data)
request = requests.Request(
method.upper(), url, data=data, params=params,
headers=headers, cookies=cookies
)
else:
request = requests.Request(
method.upper(), url, params=params, headers=headers,
cookies=cookies
)
## Prepare and send HTTP request.
session = requests.Session()
session.verify = verify_cert
r = session.send(request.prepare(), timeout=timeout)
session.close()
except requests.exceptions.Timeout:
return {
'data': {},
'cookies': CookieJar(),
'content_type': '',
'status': 0,
'is_json': False,
'timeout': True
}
try:
content_type = r.headers.get('Content-Type', 'application/json')
response = r.json()
isjson = True
except json.decoder.JSONDecodeError:
content_type = r.headers.get('Content-Type', 'text/html')
response = r.text
isjson = False
return {
'data': response,
'cookies': r.cookies,
'content_type': content_type,
'status': r.status_code,
'is_json': isjson,
'timeout': False
} | Forge and send HTTP request. |
def neighbors(self) -> List['Node']:
self._load_neighbors()
return [edge.source if edge.source != self else edge.target
for edge in self._neighbors.values()] | The list of neighbors of the node. |
def add_neighbor(self, edge: "Edge") -> None:
if edge is None or (edge.source != self and edge.target != self):
return
if edge.source == self:
other: Node = edge.target
elif edge.target == self:
other: Node = edge.source
else:
raise ValueError("Tried to add a neighbor with an invalid edge.")
edge_key: Tuple[int, int] = edge.key
# The graph is considered undirected, check neighbor existence accordingly.
if self._neighbors.get(edge_key) or self._neighbors.get((edge_key[1], edge_key[0])):
return # The neighbor is already added.
self._neighbors[edge_key] = edge
self.dispatch_event(NeighborAddedEvent(other)) | Adds a new neighbor to the node.
Arguments:
edge (Edge): The edge that would connect this node with its neighbor. |
def _load_neighbors(self) -> None:
if not self.are_neighbors_cached:
self._load_neighbors_from_external_source()
db: GraphDatabaseInterface = self._graph.database
db_node: DBNode = db.Node.find_by_name(self.name)
db_node.are_neighbors_cached = True
db.session.commit()
self.are_neighbors_cached = True
if not self._are_neighbors_loaded:
self._load_neighbors_from_database() | Loads all neighbors of the node from the local database and
from the external data source if needed. |
def _load_neighbors_from_database(self) -> None:
self._are_neighbors_loaded = True
graph: Graph = self._graph
neighbors: List[DBNode] = graph.database.Node.find_by_name(self.name).neighbors
nodes: NodeList = graph.nodes
for db_node in neighbors:
graph.add_node(db_node.name, db_node.external_id)
neighbor: Node = nodes.get_node_by_name(db_node.name)
graph.add_edge(self, neighbor, 1, False) | Loads the neighbors of the node from the local database. |
def key(self) -> Tuple[int, int]:
return self._source.index, self._target.index | The unique identifier of the edge consisting of the indexes of its
source and target nodes. |
def add_node_by_name(self, node_name: str, external_id: Optional[str] = None) -> None:
if node_name is None:
return
node_name = node_name.strip()
if len(node_name) == 0:
return
node: Node = self.get_node_by_name(node_name, external_id=external_id)
if node is None:
self._internal_add_node(node_name=node_name,
external_id=external_id,
are_neighbors_cached=False,
add_to_cache=True) | Adds a new node to the graph if it doesn't exist.
Arguments:
node_name (str): The name of the node to add.
external_id (Optional[str]): The external ID of the node. |
def get_node(self, index: int) -> Optional[Node]:
return self._nodes.get(index) | Returns the node with the given index if such a node currently exists in the node list.
Arguments:
index (int): The index of the queried node.
Returns:
The node with the given index if such a node currently exists in the node list,
`None` otherwise. |
def _internal_add_node(self,
node_name: str,
external_id: Optional[str] = None,
are_neighbors_cached: bool = False,
add_to_cache: bool = False) -> None:
index: int = len(self)
node: Node = self._create_node(index, node_name, external_id)
node.are_neighbors_cached = are_neighbors_cached
self._nodes[index] = node
self._node_name_map[node_name] = node
if add_to_cache:
db: GraphDatabaseInterface = self._graph.database
db_node: DBNode = db.Node.find_by_name(node.name)
if db_node is None:
db_node = db.Node(node.name, node.external_id)
db_node.are_neighbors_cached = False
db.session.add(db_node)
db.session.commit() | Adds a node with the given name to the graph without checking whether it already exists or not.
Arguments:
node_name (str): The name of the node to add.
external_id (Optional[str]): The external ID of the node.
are_neighbors_cached (bool): Whether the neighbors of the node have already been cached.
add_to_cache (bool): Whether the node should also be created in the local cache. |
def edge_list(self) -> List[Edge]:
return [edge for edge in sorted(self._edges.values(), key=attrgetter("key"))] | The ordered list of edges in the container. |
def add_edge(self,
source: Node,
target: Node,
weight: float = 1,
save_to_cache: bool = True) -> None:
if not isinstance(source, Node):
raise TypeError("Invalid source: expected Node instance, got {}.".format(source))
if not isinstance(target, Node):
raise TypeError("Invalid target: expected Node instance, got {}.".format(target))
if source.index == target.index or\
self.get_edge_by_index(source.index, target.index) is not None:
return
self._edges[(source.index, target.index)] = Edge(source, target, weight)
if save_to_cache:
should_commit: bool = False
database: GraphDatabaseInterface = self._graph.database
db_edge: DBEdge = database.Edge.find_by_name(source.name, target.name)
if db_edge is None:
database.session.add(database.Edge(source.name, target.name, weight))
should_commit = True
elif db_edge.weight != weight:
db_edge.weight = weight
should_commit = True
if should_commit:
database.session.commit() | Adds an edge to the edge list that will connect the specified nodes.
Arguments:
source (Node): The source node of the edge.
target (Node): The target node of the edge.
weight (float): The weight of the created edge.
save_to_cache (bool): Whether the edge should be saved to the local database. |
def get_edge(self, source: Node, target: Node) -> Optional[Edge]:
return self.get_edge_by_index(source.index, target.index) | Returns the edge connecting the given nodes if such an edge exists.
Arguments:
source (Node): One of the endpoints of the queried edge.
target (Node): The other endpoint of the queried edge.
Returns:
Returns the edge connecting the given nodes
or `None` if no such edge exists. |
def get_edge_by_index(self, source_index: int, target_index: int) -> Optional[Edge]:
edge = self._edges.get((source_index, target_index))
if edge is not None:
return edge
return self._edges.get((target_index, source_index)) | Returns the edge connecting the nodes with the specified indices if such an edge exists.
Arguments:
source_index (int): The index of one of the endpoints of the queried edge.
target_index (int): The index of the other endpoint of the queried edge.
Returns:
The edge connecting the nodes with the specified indices
or `None` if no such edge exists. |
def get_edge_by_name(self, source_name: str, target_name: str) -> Optional[Edge]:
nodes: NodeList = self._graph.nodes
source: Optional[Node] = nodes.get_node_by_name(source_name)
if source is None:
return None
target: Optional[Node] = nodes.get_node_by_name(target_name)
if target is None:
return None
return self.get_edge_by_index(source.index, target.index) | Returns the edge connecting the nodes with the specified names if such an edge exists.
Arguments:
source_name (str): The name of one of the endpoints of the queried edge.
target_name (str): The name of the other endpoint of the queried edge.
Returns:
The edge connecting the nodes with the specified names
or `None` if no such edge exists. |
def add_edge(self, source: Node,
target: Node,
weight: float = 1,
save_to_cache: bool = True) -> None:
if self._edges.get_edge(source, target) is not None:
return
self._edges.add_edge(
source=source,
target=target,
weight=weight,
save_to_cache=save_to_cache
) | Adds an edge between the specified nodes of the graph.
Arguments:
source (Node): The source node of the edge to add.
target (Node): The target node of the edge to add.
weight (float): The weight of the edge.
save_to_cache (bool): Whether the edge should be saved to the local database. This
argument is necessary (and `False`) when we load edges from
the local cache. |
def add_edge_by_index(self, source_index: int, target_index: int,
weight: float, save_to_cache: bool = True) -> None:
source: Node = self._nodes.get_node(source_index)
target: Node = self._nodes.get_node(target_index)
if source is None or target is None:
return
self.add_edge(
source=source,
target=target,
weight=weight,
save_to_cache=save_to_cache
) | Adds an edge between the nodes with the specified indices to the graph.
Arguments:
source_index (int): The index of the source node of the edge to add.
target_index (int): The index of the target node of the edge to add.
weight (float): The weight of the edge.
save_to_cache (bool): Whether the edge should be saved to the local database. This
argument is necessary (and `False`) when we load edges from
the local cache. |
def add_node(self, node_name: str, external_id: Optional[str] = None) -> None:
self._nodes.add_node_by_name(node_name, external_id) | Adds the node with the given name to the graph.
Arguments:
node_name (str): The name of the node to add to the graph.
external_id (Optional[str]): The external ID of the node. |
def get_authentic_node_name(self, node_name: str) -> Optional[str]:
node: Node = self._nodes.get_node_by_name(node_name)
return node.name if node is not None else None | Returns the exact, authentic node name for the given node name if a node corresponding to
the given name exists in the graph (maybe not locally yet) or `None` otherwise.
By default, this method checks whether a node with the given name exists locally in the
graph and returns `node_name` if it does, or `None` otherwise.
In `Graph` extensions that are used by applications where the user can enter potentially
incorrect node names, this method should be overridden to improve usability.
Arguments:
node_name (str): The node name to return the authentic node name for.
Returns:
The authentic name of the node corresponding to the given node name or
`None` if no such node exists. |
def beforeSummaryReport(self, event):
'''Output profiling results'''
self.prof.disable()
stats = pstats.Stats(self.prof, stream=event.stream).sort_stats(
self.sort)
event.stream.writeln(nose2.util.ln('Profiling results'))
stats.print_stats()
if self.pfile:
stats.dump_stats(self.pfile)
if self.cachegrind:
visualize(self.prof.getstats()) | Output profiling results |
def separate(text):
'''Takes text and separates it into a list of words'''
alphabet = 'abcdefghijklmnopqrstuvwxyz'
words = text.split()
standardwords = []
for word in words:
newstr = ''
for char in word:
if char in alphabet or char in alphabet.upper():
newstr += char
if newstr != '':
standardwords.append(newstr)
return map(lambda x: x.lower(), standardwords) | Takes text and separates it into a list of words |
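A usage sketch for separate above (under Python 3, map() is lazy, so the result is wrapped in list() to materialize it):

print(list(separate("Hello, world - 123 times!")))
# ['hello', 'world', 'times']  (digits and punctuation stripped, empty tokens dropped)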
def eliminate_repeats(text):
'''Returns a list of words that occur in the text. Eliminates stopwords.'''
bannedwords = read_file('stopwords.txt')
alphabet = 'abcdefghijklmnopqrstuvwxyz'
words = text.split()
standardwords = []
for word in words:
newstr = ''
for char in word:
if char in alphabet or char in alphabet.upper():
newstr += char
if newstr not in standardwords and newstr != '' and newstr not in bannedwords:
standardwords.append(newstr)
return map(lambda x: x.lower(), standardwords) | Returns a list of words that occur in the text. Eliminates stopwords. |
def wordcount(text):
'''Returns the count of the words in a file.'''
bannedwords = read_file('stopwords.txt')
wordcount = {}
separated = separate(text)
for word in separated:
if word not in bannedwords:
if not wordcount.has_key(word):
wordcount[word] = 1
else:
wordcount[word] += 1
return wordcount | Returns the count of the words in a file. |
def tuplecount(text):
'''Changes a dictionary into a list of tuples.'''
worddict = wordcount(text)
countlist = []
for key in worddict.keys():
countlist.append((key,worddict[key]))
countlist = list(reversed(sorted(countlist,key = lambda x: x[1])))
return countlist | Changes a dictionary into a list of tuples. |
def add_log_error(self, x, flag_also_show=False, E=None):
self.parent_form.add_log_error(x, flag_also_show, E) | Delegates to parent form |
def add_log(self, x, flag_also_show=False):
self.parent_form.add_log(x, flag_also_show) | Delegates to parent form |
def get_file_md5(filename):
if os.path.exists(filename):
blocksize = 65536
try:
hasher = hashlib.md5()
except BaseException:
hasher = hashlib.new('md5', usedForSecurity=False)
with open(filename, 'rb') as afile:
buf = afile.read(blocksize)
while len(buf) > 0: # pylint: disable=len-as-condition
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.hexdigest()
return '' | Get a file's MD5 |
def get_md5(string):
try:
hasher = hashlib.md5()
except BaseException:
hasher = hashlib.new('md5', usedForSecurity=False)
hasher.update(string)
return hasher.hexdigest() | Get a string's MD5 |
def deploy_signature(source, dest, user=None, group=None):
move(source, dest)
os.chmod(dest, 0644)
if user and group:
try:
uid = pwd.getpwnam(user).pw_uid
gid = grp.getgrnam(group).gr_gid
os.chown(dest, uid, gid)
except (KeyError, OSError):
pass | Deploy a signature file |
def get_local_version(sigdir, sig):
version = None
filename = os.path.join(sigdir, '%s.cvd' % sig)
if os.path.exists(filename):
cmd = ['sigtool', '-i', filename]
sigtool = Popen(cmd, stdout=PIPE, stderr=PIPE)
while True:
line = sigtool.stdout.readline()
if line and line.startswith('Version:'):
version = line.split()[1]
break
if not line:
break
sigtool.wait()
return version | Get the local version of a signature |
def verify_sigfile(sigdir, sig):
cmd = ['sigtool', '-i', '%s/%s.cvd' % (sigdir, sig)]
sigtool = Popen(cmd, stdout=PIPE, stderr=PIPE)
ret_val = sigtool.wait()
return ret_val == 0 | Verify a signature file |
def check_download(obj, *args, **kwargs):
version = args[0]
workdir = args[1]
signame = args[2]
if version:
local_version = get_local_version(workdir, signame)
if not verify_sigfile(workdir, signame) or version != local_version:
error("[-] \033[91mFailed to verify signature: %s from: %s\033[0m"
% (signame, obj.url))
raise ValueError('Failed to verify signature: %s' % signame) | Verify a download |
def download_sig(opts, sig, version=None):
code = None
downloaded = False
useagent = 'ClamAV/0.101.1 (OS: linux-gnu, ARCH: x86_64, CPU: x86_64)'
manager = PoolManager(
headers=make_headers(user_agent=useagent),
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where(),
timeout=Timeout(connect=10.0, read=60.0)
)
if version:
path = '/%s.cvd' % sig
filename = os.path.join(opts.workdir, '%s.cvd' % sig)
else:
path = '/%s.cdiff' % sig
filename = os.path.join(opts.workdir, '%s.cdiff' % sig)
try:
req = manager.request('GET', 'http://%s%s' % (opts.hostname, path))
except BaseException as msg:
error("Request error: %s" % msg)
data = req.data
code = req.status
if req.status == 200:
with open(filename, 'w') as handle:
handle.write(data)
downloaded = os.path.exists(filename)
return downloaded, code | Download signature from hostname |
def get_record(opts):
count = 1
for passno in range(1, 5):
count = passno
info("[+] \033[92mQuerying TXT record:\033[0m %s pass: %s" %
(opts.txtrecord, passno))
record = get_txt_record(opts.txtrecord)
if record:
info("=> Query returned: %s" % record)
break
else:
info("=> Txt record query failed, sleeping 5 secs")
time.sleep(5)
if not record:
error("=> Txt record query failed after %d tries" % count)
sys.exit(3)
return record | Get record |
def copy_sig(sig, opts, isdiff):
info("[+] \033[92mDeploying signature:\033[0m %s" % sig)
if isdiff:
sourcefile = os.path.join(opts.workdir, '%s.cdiff' % sig)
destfile = os.path.join(opts.mirrordir, '%s.cdiff' % sig)
else:
sourcefile = os.path.join(opts.workdir, '%s.cvd' % sig)
destfile = os.path.join(opts.mirrordir, '%s.cvd' % sig)
deploy_signature(sourcefile, destfile, opts.user, opts.group)
info("=> Deployed signature: %s" % sig) | Deploy a sig |
def update_sig(queue):
while True:
options, sign, vers = queue.get()
info("[+] \033[92mChecking signature version:\033[0m %s" % sign)
localver = get_local_version(options.mirrordir, sign)
remotever = vers[sign]
if localver is None or (localver and int(localver) < int(remotever)):
info("=> Update required local: %s => remote: %s" %
(localver, remotever))
info("=> Downloading signature: %s" % sign)
status, code = download_sig(options, sign, remotever)
if status:
info("=> Downloaded signature: %s" % sign)
copy_sig(sign, options, 0)
else:
if code == 404:
error("=> \033[91mSignature:\033[0m %s not found" % sign)
error("=> \033[91mDownload failed:\033[0m %s code: %d"
% (sign, code))
else:
info(
"=> No update required L: %s => R: %s" % (localver, remotever))
queue.task_done() | update signature |
def update_diff(opts, sig):
for _ in range(1, 6):
info("[+] \033[92mDownloading cdiff:\033[0m %s" % sig)
status, code = download_sig(opts, sig)
if status:
info("=> Downloaded cdiff: %s" % sig)
copy_sig(sig, opts, 1)
else:
if code == 404:
error("=> \033[91mSignature:\033[0m %s not found" % sig)
error("=> \033[91mDownload failed:\033[0m %s code: %d"
% (sig, code)) | Update diff |
def create_dns_file(opts, record):
info("[+] \033[92mUpdating dns.txt file\033[0m")
filename = os.path.join(opts.mirrordir, 'dns.txt')
localmd5 = get_file_md5(filename)
remotemd5 = get_md5(record)
if localmd5 != remotemd5:
create_file(filename, record)
info("=> dns.txt file updated")
else:
info("=> No update required L: %s => R: %s" % (localmd5, remotemd5)) | Create the DNS record file |
def download_diffs(queue):
while True:
options, signature_type, localver, remotever = queue.get()
for num in range(int(localver), int(remotever) + 1):
sig_diff = '%s-%d' % (signature_type, num)
filename = os.path.join(options.mirrordir, '%s.cdiff' % sig_diff)
if not os.path.exists(filename):
update_diff(options, sig_diff)
queue.task_done() | Download the cdiff files |
def work(options):
# pylint: disable=too-many-locals
record = get_record(options)
_, mainv, dailyv, _, _, _, safebrowsingv, bytecodev = record.split(':')
versions = {'main': mainv, 'daily': dailyv,
'safebrowsing': safebrowsingv,
'bytecode': bytecodev}
dqueue = Queue(maxsize=0)
dqueue_workers = 3
info("[+] \033[92mStarting workers\033[0m")
for index in range(dqueue_workers):
info("=> Starting diff download worker: %d" % (index + 1))
worker = Thread(target=download_diffs, args=(dqueue,))
worker.setDaemon(True)
worker.start()
mqueue = Queue(maxsize=0)
mqueue_workers = 4
for index in range(mqueue_workers):
info("=> Starting signature download worker: %d" % (index + 1))
worker = Thread(target=update_sig, args=(mqueue,))
worker.setDaemon(True)
worker.start()
for signature_type in ['main', 'daily', 'bytecode', 'safebrowsing']:
if signature_type in ['daily', 'bytecode', 'safebrowsing']:
# cdiff downloads
localver = get_local_version(options.mirrordir, signature_type)
remotever = versions[signature_type]
if localver is not None:
dqueue.put(
(
options,
signature_type,
localver,
remotever
)
)
mqueue.put((options, signature_type, versions))
info("=> Waiting on workers to complete tasks")
dqueue.join()
mqueue.join()
info("=> Workers done processing queues")
create_dns_file(options, record)
sys.exit(0) | The work functions |
def main():
parser = OptionParser()
parser.add_option('-a', '--hostname',
help='ClamAV source server hostname',
dest='hostname',
type='str',
default='db.de.clamav.net')
parser.add_option('-r', '--text-record',
help='ClamAV Updates TXT record',
dest='txtrecord',
type='str',
default='current.cvd.clamav.net')
parser.add_option('-w', '--work-directory',
help='Working directory',
dest='workdir',
type='str',
default='/var/spool/clamav-mirror')
parser.add_option('-d', '--mirror-directory',
help='The mirror directory',
dest='mirrordir',
type='str',
default='/srv/www/clamav')
parser.add_option('-u', '--user',
help='Change file owner to this user',
dest='user',
type='str',
default='nginx')
parser.add_option('-g', '--group',
help='Change file group to this group',
dest='group',
type='str',
default='nginx')
parser.add_option('-l', '--locks-directory',
help='Lock files directory',
dest='lockdir',
type='str',
default='/var/lock/subsys')
parser.add_option('-v', '--verbose',
help='Display verbose output',
dest='verbose',
action='store_true',
default=False)
options, _ = parser.parse_args()
try:
lockfile = os.path.join(options.lockdir, 'clamavmirror')
with open(lockfile, 'w+') as lock:
fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
work(options)
except IOError:
info("=> Another instance is already running")
sys.exit(254) | Main entry point |
def copy_resource(src, dest):
package_name = "yass"
dest = (dest + "/" + os.path.basename(src)).rstrip("/")
if pkg_resources.resource_isdir(package_name, src):
if not os.path.isdir(dest):
os.makedirs(dest)
for res in pkg_resources.resource_listdir(__name__, src):
copy_resource(src + "/" + res, dest)
else:
if not os.path.isfile(dest) \
and os.path.splitext(src)[1] not in [".pyc"]:
with open(dest, "wb") as f:
f.write(pkg_resources.resource_string(__name__, src))
else:
print("File exists: %s " % dest) | To copy package data to destination |
def publish(endpoint, purge_files, rebuild_manifest, skip_upload):
print("Publishing site to %s ..." % endpoint.upper())
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError("%s endpoint is missing in the config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
if not p.website_exists:
print(">>>")
print("Setting S3 site...")
if p.create_website() is True:
# Need to give it enough time to create it
# Should be a one time thing
time.sleep(10)
p.create_www_website()
print("New bucket created: %s" % p.sitename)
if rebuild_manifest:
print(">>>")
print("Rebuilding site's manifest...")
p.create_manifest_from_s3_files()
if purge_files is True or endpoint.get("purge_files") is True:
print(">>>")
print("Purging files...")
exclude_files = endpoint.get("purge_exclude_files", [])
p.purge_files(exclude_files=exclude_files)
if not skip_upload:
print(">>>")
print("Uploading your site...")
p.upload(yass.build_dir)
else:
print(">>>")
print("WARNING: files upload was skipped because of the use of --skip-upload")
print("")
print("Yass! Your site has been successfully published to: ")
print(p.website_endpoint_url)
footer() | Publish the site |
def setup_dns(endpoint):
print("Setting up DNS...")
yass = Yass(CWD)
target = endpoint.lower()
sitename = yass.sitename
if not sitename:
raise ValueError("Missing site name")
endpoint = yass.config.get("hosting.%s" % target)
if not endpoint:
raise ValueError(
"%s endpoint is missing in the hosting config" % target.upper())
if target == "s3":
p = publisher.S3Website(sitename=sitename,
aws_access_key_id=endpoint.get("aws_access_key_id"),
aws_secret_access_key=endpoint.get("aws_secret_access_key"),
region=endpoint.get("aws_region"))
print("Setting AWS Route53 for: %s ..." % p.sitename)
p.setup_dns()
print("")
print("Yass! Route53 setup successfully!")
print("You can now visit the site at :")
print(p.sitename_endpoint)
footer() | Setup site domain to route to static site |
def create_site(sitename):
sitepath = os.path.join(CWD, sitename)
if os.path.isdir(sitepath):
print("Site directory '%s' exists already!" % sitename)
else:
print("Creating site: %s..." % sitename)
os.makedirs(sitepath)
copy_resource("skel/", sitepath)
stamp_yass_current_version(sitepath)
print("Site created successfully!")
print("CD into '%s' and run 'yass serve' to view the site" % sitename)
footer() | Create a new site directory and init Yass |
def init():
yass_conf = os.path.join(CWD, "yass.yml")
if os.path.isfile(yass_conf):
print("::ALERT::")
print("It seems like Yass is already initialized here.")
print("If it's a mistake, delete 'yass.yml' in this directory")
else:
print("Init Yass in %s ..." % CWD)
copy_resource("skel/", CWD)
stamp_yass_current_version(CWD)
print("Yass init successfully!")
print("Run 'yass serve' to view the site")
footer() | Initialize Yass in the current directory |
def create_page(pagename):
page = pagename.lstrip("/").rstrip("/")
_, _ext = os.path.splitext(pagename)
# If the file doesn't have an extension, we'll just create one
if not _ext or _ext == "":
page += ".jade"
if not page.endswith(PAGE_FORMAT):
error("Can't create '%s'" % page)
print("Invalid filename format")
print("Filename must be in: '%s'" % " | ".join(PAGE_FORMAT))
else:
engine = Yass(CWD)
markup = "jade"
if page.endswith(".md"):
markup = "md"
if page.endswith(".html"):
markup = "html"
dest_file = os.path.join(engine.pages_dir, page)
dest_dir = os.path.dirname(dest_file)
content = TPL_HEADER
content += TPL_BODY[markup]
if os.path.isfile(dest_file):
error("File exists already")
print("Location: %s" % dest_file)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
with open(dest_file, "w") as f:
f.write(content)
print("New page created: '%s'" % page)
print("Location: %s" % dest_file)
footer() | Create a new page. Omit the extension and it will be created as a .jade file |
def serve(port, no_livereload, open_url):
engine = Yass(CWD)
if not port:
port = engine.config.get("local_server.port", 8000)
if no_livereload is None:
no_livereload = True if engine.config.get("local_server.livereload") is False else False
if open_url is None:
open_url = False if engine.config.get("local_server.open_url") is False else True
print("Serving at %s" % port)
print("Livereload is %s" % ("OFF" if no_livereload else "ON"))
def build_static():
engine.build_static()
def build_pages():
engine.build_pages()
engine.build()
server = Server()
if no_livereload is False:
server.watch(engine.static_dir + "/", build_static)
server.watch(engine.pages_dir + "/", build_pages)
server.watch(engine.templates_dir + "/", build_pages)
server.watch(engine.data_dir + "/", build_pages)
server.serve(open_url_delay=open_url, port=port, root=engine.build_dir) | Serve the site |
def get_map_location(self):
map_data = self.get_map()
(bounds_e, bounds_n), (bounds_w, bounds_s) = map_data["continent_rect"]
(map_e, map_n), (map_w, map_s) = map_data["map_rect"]
assert bounds_w < bounds_e
assert bounds_n < bounds_s
assert map_w < map_e
assert map_n < map_s
meters_to_inches = 39.3701
x, y, z = self.fAvatarPosition
map_x = bounds_w + ((x * meters_to_inches - map_w) /
(map_e - map_w) * (bounds_e - bounds_w))
map_y = bounds_n + ((-z * meters_to_inches - map_n) /
(map_s - map_n) * (bounds_s - bounds_n))
map_z = y * meters_to_inches
return map_x, map_y, map_z | Get the location of the player, converted to world coordinates.
:return: a tuple (x, y, z). |
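A worked sketch of the linear interpolation used above, with made-up rectangle bounds (the real values come from the map data):

meters_to_inches = 39.3701
bounds_w, bounds_e = 0.0, 1000.0  # continent_rect x-range (assumed numbers)
map_w, map_e = -500.0, 500.0      # map_rect x-range in inches (assumed numbers)
x = 2.54                          # avatar x position in meters
map_x = bounds_w + ((x * meters_to_inches - map_w) /
                    (map_e - map_w) * (bounds_e - bounds_w))
print(round(map_x, 1))            # 600.0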
def CreateVertices(self, points):
gr = digraph()
for z, x, Q in points:
node = (z, x, Q)
gr.add_nodes([node])
return gr | Returns a directed graph whose nodes are tuples
representing the points. |
def CreateDirectedEdges(self, points, gr, layer_width):
for z0, x0, Q0 in points:
for z1, x1, Q1 in points:
dz = z1 - z0 # no fabs because we check arrow direction
if dz > 0.0: # make sure arrow in right direction
if dz - layer_width < distance_threshold: # only adjacents
dx = math.fabs(x1 - x0)
if dx > 5 * bar_width:
continue
# Weights are negative in order to use shortest path
# algorithms on the graph.
weight = -1 * math.hypot(dz, dx)
edge = ((z0, x0, Q0), (z1, x1, Q1))
gr.add_edge(edge, wt=weight)
# Ensure that it is already transitively reduced
assert len(critical.transitive_edges(gr)) == 0
return gr | Take each key (i.e. point) in the graph and for that point
create an edge to every point downstream of it where the weight
of the edge is the tuple (distance, angle) |
def GetFarthestNode(self, gr, node):
# Remember: weights are negative
distance = minmax.shortest_path_bellman_ford(gr, node)[1]
# Find the farthest node, which is end of track
min_key = None
for key, value in distance.iteritems():
if min_key is None or value < distance[min_key]:
min_key = key
return min_key | node is start node |
def on_success(self, fn, *args, **kwargs):
self._callbacks.append((fn, args, kwargs))
result = self._resulted_in
if result is not _NOTHING_YET:
self._succeed(result=result) | Call the given callback if or when the connected deferred succeeds. |
def _succeed(self, result):
for fn, args, kwargs in self._callbacks:
fn(result, *args, **kwargs)
self._resulted_in = result | Fire the success chain. |
def random_name(num_surnames=2):
a = []
# Prefix
if random.random() < _PROB_PREF:
a.append(_prefixes[random.randint(0, len(_prefixes) - 1)])
# Forename
a.append(_forenames[random.randint(0, len(_forenames) - 1)])
# Surnames
for i in range(num_surnames):
a.append(_surnames[random.randint(0, len(_surnames) - 1)])
# Suffix
if random.random() < _PROB_SUFF:
a.append(_suffixes[random.randint(0, len(_suffixes) - 1)])
return " ".join(a) | Returns a random person name
Arguments:
num_surnames -- number of surnames |
def create_free_shipping_coupon(cls, free_shipping_coupon, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_free_shipping_coupon_with_http_info(free_shipping_coupon, **kwargs)
else:
(data) = cls._create_free_shipping_coupon_with_http_info(free_shipping_coupon, **kwargs)
return data | Create FreeShippingCoupon
Create a new FreeShippingCoupon
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_free_shipping_coupon(free_shipping_coupon, async=True)
>>> result = thread.get()
:param async bool
:param FreeShippingCoupon free_shipping_coupon: Attributes of freeShippingCoupon to create (required)
:return: FreeShippingCoupon
If the method is called asynchronously,
returns the request thread. |
def delete_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs)
else:
(data) = cls._delete_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs)
return data | Delete FreeShippingCoupon
Delete an instance of FreeShippingCoupon by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_coupon_id: ID of freeShippingCoupon to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
def get_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs)
else:
(data) = cls._get_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, **kwargs)
return data | Find FreeShippingCoupon
Return single instance of FreeShippingCoupon by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_free_shipping_coupon_by_id(free_shipping_coupon_id, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_coupon_id: ID of freeShippingCoupon to return (required)
:return: FreeShippingCoupon
If the method is called asynchronously,
returns the request thread. |
def list_all_free_shipping_coupons(cls, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_free_shipping_coupons_with_http_info(**kwargs)
else:
(data) = cls._list_all_free_shipping_coupons_with_http_info(**kwargs)
return data | List FreeShippingCoupons
Return a list of FreeShippingCoupons
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_free_shipping_coupons(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[FreeShippingCoupon]
If the method is called asynchronously,
returns the request thread. |
def replace_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, free_shipping_coupon, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, free_shipping_coupon, **kwargs)
else:
(data) = cls._replace_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, free_shipping_coupon, **kwargs)
return data | Replace FreeShippingCoupon
Replace all attributes of FreeShippingCoupon
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_free_shipping_coupon_by_id(free_shipping_coupon_id, free_shipping_coupon, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_coupon_id: ID of freeShippingCoupon to replace (required)
:param FreeShippingCoupon free_shipping_coupon: Attributes of freeShippingCoupon to replace (required)
:return: FreeShippingCoupon
If the method is called asynchronously,
returns the request thread. |
def update_free_shipping_coupon_by_id(cls, free_shipping_coupon_id, free_shipping_coupon, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, free_shipping_coupon, **kwargs)
else:
(data) = cls._update_free_shipping_coupon_by_id_with_http_info(free_shipping_coupon_id, free_shipping_coupon, **kwargs)
return data | Update FreeShippingCoupon
Update attributes of FreeShippingCoupon
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_free_shipping_coupon_by_id(free_shipping_coupon_id, free_shipping_coupon, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_coupon_id: ID of freeShippingCoupon to update. (required)
:param FreeShippingCoupon free_shipping_coupon: Attributes of freeShippingCoupon to update. (required)
:return: FreeShippingCoupon
If the method is called asynchronously,
returns the request thread. |
def fetch_config(filename):
# This trick gets the directory of *this* file Configuration.py thus
# allowing to find the schema files relative to this file.
dir_name = get_source_dir()
# Append json
filename = os.path.join('json', filename)
fileobj = open(os.path.join(dir_name, filename), 'r')
my_dict = json.loads(fileobj.read())
return my_dict | Fetch the Configuration schema information
Finds the schema file, loads the file and reads the JSON, then converts to a dictionary that is returned |
def populate_args_level(schema, parser):
for key, value in schema['properties'].iteritems():
if key == 'name':
continue
arg = '--%s' % key
desc = value['description']
if 'type' in value:
if value['type'] == 'string':
if 'enum' in value:
parser.add_argument(arg, help=desc, type=str,
choices=value['enum'])
else:
parser.add_argument(arg, help=desc, type=str)
elif value['type'] == 'number':
parser.add_argument(arg, help=desc, type=float)
elif value['type'] == 'integer':
parser.add_argument(arg, help=desc, type=int)
elif str(value['type']) == 'array':
assert value['minItems'] == value['maxItems']
if value['items']['type'] != 'number':
raise NotImplementedError("Only float arrays work")
parser.add_argument(arg, help=desc, type=float,
nargs=value['maxItems'], metavar='N')
elif value['type'] == 'object':
#group = parser.add_argument_group(key, value['description'])
#populate_args_level(value, group)
pass | Use a schema to populate a command line argument parser |
def set_json(self, config_json):
if self.configuration_dict is not None:
raise RuntimeError("Can only set configuration once", self.configuration_dict)
schema = fetch_config('ConfigurationSchema.json')
validictory.validate(config_json, schema)
config_json['name'] = self.name
config_json['run_number'] = self.run
config_json['src_dir'] = get_source_dir()
config_json['data_dir'] = get_data_dir()
config_json['log_dir'] = get_log_dir()
self.configuration_dict = config_json | Permanently set the JSON configuration
Cannot be called twice. |
def bulk_send(self, topic, kmsgs, timeout=60):
try:
for kmsg in kmsgs:
self.client.send(
topic, self._onmessage(kmsg).dumps().encode("UTF-8")
)
self.client.flush(timeout=timeout)
return Result(stdout="{} message(s) sent".format(len(kmsgs)))
except Exception as exc:
return Result.from_exception(exc) | Send a batch of messages
:param str topic: a kafka topic
:param ksr.transport.Message kmsgs: Messages to serialize
:param int timeout: Timeout in seconds
:return: Execution result
:rtype: kser.result.Result |
def send(self, topic, kmsg, timeout=60):
result = Result(uuid=kmsg.uuid)
try:
self.client.produce(
topic, self._onmessage(kmsg).dumps().encode("UTF-8")
)
result.stdout = "Message {}[{}] sent".format(
kmsg.entrypoint, kmsg.uuid
)
self.client.flush()
except Exception as exc:
result = Result.from_exception(exc, kmsg.uuid)
finally:
if result.retcode < 300:
return self._onsuccess(kmsg=kmsg, result=result)
else:
return self._onerror(kmsg=kmsg, result=result) | Send the message into the given topic
:param str topic: a kafka topic
:param ksr.transport.Message kmsg: Message to serialize
:param int timeout: Timeout in seconds (not used in proto producer)
:return: Execution result
:rtype: kser.result.Result |
def guess_extension(amimetype, normalize=False):
ext = _mimes.guess_extension(amimetype)
if ext and normalize:
# Normalize some common magic mis-interpreation
ext = {'.asc': '.txt', '.obj': '.bin'}.get(ext, ext)
from invenio.legacy.bibdocfile.api_normalizer import normalize_format
return normalize_format(ext)
return ext | Tries to guess extension for a mimetype.
@param amimetype: name of a mimetype
@type amimetype: string
@return: the extension
@rtype: string |
def get_magic_guesses(fullpath):
if CFG_HAS_MAGIC == 1:
magic_cookies = _get_magic_cookies()
magic_result = []
for key in magic_cookies.keys():
magic_result.append(magic_cookies[key].file(fullpath))
return tuple(magic_result)
elif CFG_HAS_MAGIC == 2:
magic_result = []
for key in ({'mime': False, 'mime_encoding': False},
{'mime': True, 'mime_encoding': False},
{'mime': False, 'mime_encoding': True}):
magic_result.append(_magic_wrapper(fullpath, **key))
return tuple(magic_result) | Return all the possible guesses from the magic library about
the content of the file.
@param fullpath: location of the file
@type fullpath: string
@return: guesses about content of the file
@rtype: tuple |
def mimes(self):
_mimes = MimeTypes(strict=False)
_mimes.suffix_map.update({'.tbz2': '.tar.bz2'})
_mimes.encodings_map.update({'.bz2': 'bzip2'})
if cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES']:
for key, value in iteritems(
cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES']):
_mimes.add_type(key, value)
del key, value
return _mimes | Returns extended MimeTypes. |
def extensions(self):
_tmp_extensions = self.mimes.encodings_map.keys() + \
self.mimes.suffix_map.keys() + \
self.mimes.types_map[1].keys() + \
cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS']
extensions = []
for ext in _tmp_extensions:
if ext.startswith('.'):
extensions.append(ext)
else:
extensions.append('.' + ext)
extensions.sort()
extensions.reverse()
extensions = set([ext.lower() for ext in extensions])
extensions = '\\' + '$|\\'.join(extensions) + '$'
extensions = extensions.replace('+', '\\+')
return re.compile(extensions, re.I) | Generate the regular expression to match all the known extensions.
@return: the regular expression.
@rtype: regular expression object |
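A standalone sketch of how that extension regex is assembled; the extension list here is a made-up stand-in for the one derived from the MimeTypes tables.

import re

_tmp_extensions = ['.tar.gz', 'pdf', '.c++']  # assumed sample input
exts = sorted({e.lower() if e.startswith('.') else '.' + e.lower()
               for e in _tmp_extensions}, reverse=True)
pattern = '\\' + '$|\\'.join(exts) + '$'
pattern = pattern.replace('+', '\\+')
rx = re.compile(pattern, re.I)
print(bool(rx.search('paper.PDF')), bool(rx.search('archive.tar.gz')))  # True True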
def __deserialize(self, data, klass):
if data is None:
return None
if type(klass) == str:
from tradenity.resources.paging import Page
if klass.startswith('page['):
sub_kls = re.match('page\[(.*)\]', klass).group(1)
return Page([self.__deserialize(sub_data, sub_kls)
for sub_data in data["items"]], self.__deserialize_page_info(data["__meta"]))
if klass.startswith('list['):
sub_kls = re.match('list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match('dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in six.iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(tradenity.resources, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datatime(data)
else:
return self.__deserialize_model(data, klass) | Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object. |
def update_params_for_auth(self, headers, querys, auth_settings):
if self.auth_token_holder.token is not None:
headers[Configuration.AUTH_TOKEN_HEADER_NAME] = self.auth_token_holder.token
else:
headers['Authorization'] = self.configuration.get_basic_auth_token() | Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list. |
def start(self, service):
try:
map(self.start_class, service.depends)
if service.is_running():
return
if service in self.failed:
log.warning("%s previously failed to start", service)
return
service.start()
except Exception:
log.exception("Unable to start service %s", service)
self.failed.add(service) | Start the service, catching and logging exceptions |
def start_class(self, class_):
matches = filter(lambda svc: isinstance(svc, class_), self)
if not matches:
svc = class_()
self.register(svc)
matches = [svc]
map(self.start, matches)
return matches | Start all services of a given class. If this manager doesn't already
have a service of that class, it constructs one and starts it. |
def stop_class(self, class_):
"Stop all services of a given class"
matches = filter(lambda svc: isinstance(svc, class_), self)
map(self.stop, matches) | Stop all services of a given class |
def log_root(self):
var_log = (
os.path.join(sys.prefix, 'var', 'log')
.replace('/usr/var', '/var')
)
if not os.path.isdir(var_log):
os.makedirs(var_log)
return var_log | Find a directory suitable for writing log files. It uses sys.prefix
to build a path relative to the installation root. If sys.prefix is /usr, it's the
system Python, so use /var/log. |
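A worked example of the path rewrite above for the system-Python case:

import os

prefix = "/usr"  # stand-in for sys.prefix
var_log = os.path.join(prefix, 'var', 'log').replace('/usr/var', '/var')
print(var_log)   # /var/log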
def _get_more_data(self, file, timeout):
timeout = datetime.timedelta(seconds=timeout)
timer = Stopwatch()
while timer.split() < timeout:
data = file.read()
if data:
return data
raise RuntimeError("Timeout") | Return data from the file, if available. If no data is received
by the timeout, then raise RuntimeError. |
def _run_env(self):
env = dict(os.environ)
env.update(
getattr(self, 'env', {}),
PYTHONUSERBASE=self.env_path,
PIP_USER="1",
)
self._disable_venv(env)
return env | Augment the current environment providing the PYTHONUSERBASE. |
def _disable_venv(self, env):
venv = env.pop('VIRTUAL_ENV', None)
if venv:
venv_path, sep, env['PATH'] = env['PATH'].partition(os.pathsep) | Disable virtualenv and venv in the environment. |
def create_env(self):
root = path.Path(os.environ.get('SERVICES_ROOT', 'services'))
self.env_path = (root / self.name).abspath()
cmd = [
self.python,
'-c', 'import site; print(site.getusersitepackages())',
]
out = subprocess.check_output(cmd, env=self._run_env)
site_packages = out.decode().strip()
path.Path(site_packages).makedirs_p() | Create a PEP-370 environment |
def create_states_geo_zone(cls, states_geo_zone, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_states_geo_zone_with_http_info(states_geo_zone, **kwargs)
else:
(data) = cls._create_states_geo_zone_with_http_info(states_geo_zone, **kwargs)
return data | Create StatesGeoZone
Create a new StatesGeoZone
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_states_geo_zone(states_geo_zone, async=True)
>>> result = thread.get()
:param async bool
:param StatesGeoZone states_geo_zone: Attributes of statesGeoZone to create (required)
:return: StatesGeoZone
If the method is called asynchronously,
returns the request thread. |
def delete_states_geo_zone_by_id(cls, states_geo_zone_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_states_geo_zone_by_id_with_http_info(states_geo_zone_id, **kwargs)
else:
(data) = cls._delete_states_geo_zone_by_id_with_http_info(states_geo_zone_id, **kwargs)
return data | Delete StatesGeoZone
Delete an instance of StatesGeoZone by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_states_geo_zone_by_id(states_geo_zone_id, async=True)
>>> result = thread.get()
:param async bool
:param str states_geo_zone_id: ID of statesGeoZone to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |